diff --git a/dynare++/Makefile.include b/dynare++/Makefile.include
new file mode 100644
index 0000000000000000000000000000000000000000..1f257d644bdc5f01b32c502235778b03fbfc79ea
--- /dev/null
+++ b/dynare++/Makefile.include
@@ -0,0 +1,27 @@
+# $Id: Makefile 843 2006-07-28 08:54:19Z tamas $
+# Copyright 2008, Ondra Kamenik
+
+CC = g++
+
+
+#LD_LIBS := -llapack -lcblas -lf77blas -latlas -lg2c
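+# The active LD_LIBS line below links against Intel MKL; adjust the -L paths
+# to your MKL installation, or comment it out and use the ATLAS line above.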
+LD_LIBS := -L /opt/intel/Compiler/11.0/074/mkl/lib/em64t -lmkl_intel_thread -lmkl_lapack -lmkl -lmkl_em64t -L /opt/intel/Compiler/11.0/074/lib/intel64 -lguide -lstdc++
+
+CC_FLAGS := -Wall 
+
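+# Pass DEBUG=yes on the make command line (e.g. "make DEBUG=yes") to build with
+# debugging symbols and TL_DEBUG checks; the default is an optimized build.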
+ifeq ($(DEBUG),yes)
+	CC_FLAGS := $(CC_FLAGS) -g -DPOSIX_THREADS -DTL_DEBUG=2
+else
+	CC_FLAGS := $(CC_FLAGS) -O2 -DPOSIX_THREADS
+endif
+
+ifeq ($(OS),Windows_NT)
+	CC_FLAGS := -mno-cygwin -mthreads $(CC_FLAGS)
+	LD_LIBS := -mno-cygwin -mthreads $(LD_LIBS)  -lpthreadGC1
+else
+	CC_FLAGS := -fPIC $(CC_FLAGS)
+	LD_LIBS := $(LD_LIBS) -lpthread
+endif
+
+
+
diff --git a/dynare++/change_log.html b/dynare++/change_log.html
new file mode 100644
index 0000000000000000000000000000000000000000..3a1e53aa5464f99ff982639d5a3701957527d2ca
--- /dev/null
+++ b/dynare++/change_log.html
@@ -0,0 +1,280 @@
+<HTML>
+<TITLE>
+Dynare++ Change Log
+</TITLE>
+<!-- $Header$ -->
+<BODY>
+<TABLE CELLSPACING=2 ALIGN="CENTER" BORDER=1>
+<TR>
+<TD BGCOLOR="#d0d0d0" WIDTH="85"> <b>Revision</b>    </TD>
+<TD BGCOLOR="#d0d0d0" WIDTH="85"> <b>Version</b></TD>
+<TD BGCOLOR="#d0d0d0" WIDTH="80"> <b>Date</b>   </TD>
+<TD BGCOLOR="#d0d0d0" WIDTH="600"> <b>Description of changes</b></TD>
+</TR>
+
+<TR>
+<TD>
+<TD>1.3.7
+<TD>2008/01/15
+<TD>
+
+<TR><TD><TD><TD> <TD> Corrected a serious bug in centralizing a
+decision rule. This bug meant that all results based on simulations
+of the decision rule were wrong; however, results based on stochastic
+fix points were correct. Thanks to Wouter J. den Haan and Joris de Wind!
+
+<TR><TD><TD><TD> <TD> Added options --centralize and --no-centralize.
+
+<TR><TD><TD><TD> <TD> Corrected an error causing a wrong
+variance-covariance matrix in real-time simulations (thanks to Pawel
+Zabzcyk).
+
+<TR><TD><TD><TD> <TD> Corrected an integer overflow bug in the refined
+Faa Di Bruno formula when one of the refinements is empty. The bug
+appeared when solving models without forward looking variables.
+
+<TR><TD><TD><TD> <TD> Corrected a bug in the Sylvester equation
+solver, which formerly worked only for models with forward looking variables.
+
+<TR><TD><TD><TD> <TD> Corrected a bug in global check printout.
+
+<TR><TD><TD><TD> <TD> Added generating a dump file.
+
+<TR><TD><TD><TD> <TD> Fixed a bug that caused repeated assignments
+(for example in parameter settings and initval) to be forgotten.
+
+<TR><TD><TD><TD> <TD> Added a diff operator to the parser.
+
+<TR>
+<TD>1539
+<TD>1.3.6
+<TD>2008/01/03
+<TD>
+
+<TR><TD><TD><TD> <TD> Corrected a bug of segmentation faults for long
+names and path names. 
+
+<TR><TD><TD><TD> <TD> Changed the way random numbers are
+generated. Dynare++ now uses a separate instance of the Mersenne twister for
+each simulation; this corrects a flaw of additional randomness caused
+by the operating system scheduler. It also corrects a strange behaviour
+of the random generator on Windows, where each simulation was getting the
+same sequence of random numbers.
+
+<TR><TD><TD><TD> <TD> Added calculation of conditional distributions
+controlled by --condper and --condsim.
+
+<TR><TD><TD><TD> <TD> Dropped creating an unfolded version of the decision
+rule at the end, which could consume a lot of memory. However,
+simulations might be slower for some models.
+
+<TR>
+<TD>1368
+<TD>1.3.5
+<TD>2007/07/11
+<TD>
+
+<TR><TD><TD><TD> <TD> Corrected a bug of uselessly storing all derivative
+indices in the parser. This consumed a lot of memory for large models.
+
+<TR><TD><TD><TD> <TD> Added an option <tt>--ss-tol</tt> controlling a
+tolerance used for convergence of a non-linear solver.
+
+<TR><TD><TD><TD> <TD> Corrected a buggy interaction of optimal policy
+and forward looking variables with leads of more than one period.
+
+<TR><TD><TD><TD> <TD> Variance matrices can now be positive
+semidefinite. This corrects a bug of throwing an error when estimating
+approximation errors on an ellipse of the state space with a
+deterministic variable.
+
+<TR><TD><TD><TD> <TD> Implemented simulations with statistics
+calculated in real-time. Options <tt>--rtsim</tt> and <tt>--rtper</tt>.
+
+<TR>
+<TD>1282
+<TD>1.3.4
+<TD>2007/05/15
+<TD>
+
+<TR><TD><TD><TD> <TD>Corrected a bug of wrong representation of NaN in generated M-files.
+
+<TR><TD><TD><TD> <TD>Corrected a bug of occasionally wrong evaluation of higher order derivatives of integer powers.
+
+<TR><TD><TD><TD> <TD>Implemented automatic handling of terms involving multiple leads.
+
+<TR><TD><TD><TD> <TD>Corrected a bug in the numerical integration, i.e. checking of the precision of the solution.
+
+<TR>
+<TD>1090
+<TD>1.3.3
+<TD>2006/11/20
+<TD>
+
+<TR><TD><TD><TD> <TD>Corrected a bug of non-registering an auxiliary variable in initval assignments.
+
+<TR>
+<TD>988
+<TD>1.3.2
+<TD>2006/10/11
+<TD>
+
+<TR><TD><TD><TD> <TD>Corrected a few non-serious bugs: a segfault on
+some exceptions, an error in parsing large files, an error in parsing
+matrices with comments, and a bug in dynare_simul.m
+
+<TR><TD><TD><TD> <TD>Added the possibility to specify a list of shocks for
+which IRFs are calculated
+
+<TR><TD><TD><TD> <TD>Added --order command line switch
+
+<TR><TD><TD><TD> <TD>Added writing two Matlab files for steady state
+calcs
+
+<TR><TD><TD><TD> <TD>Implemented optimal policy using keyword
+planner_objective and planner_discount
+
+<TR><TD><TD><TD> <TD>Implemented an R interface to Dynare++ algorithms
+(Tamas Papp)
+
+<TR><TD><TD><TD> <TD>Highlevel code reengineered to allow for
+different model inputs
+
+<TR>
+<TD>799
+<TD>1.3.1
+<TD>2006/06/13
+<TD>
+
+<TR><TD><TD><TD> <TD>Corrected a few bugs in the error functions and in the linear algebra module.
+
+<TR><TD><TD><TD> <TD>Updated dynare_simul.
+
+<TR><TD><TD><TD> <TD>Updated the tutorial.
+
+<TR><TD><TD><TD> <TD>Corrected an error in summing up tensors when
+setting up the decision rule derivatives. Thanks to Michel
+Juillard. The previous version was making deterministic effects of
+future volatility smaller than they should be.
+
+<TR>
+<TD>766
+<TD>1.3.0
+<TD>2006/05/22
+<TD>
+
+<TR><TD><TD><TD> <TD>The non-linear solver replaced with a new one.
+
+<TR><TD><TD><TD> <TD>The parser and derivator replaced with a new
+code. Now it is possible to put expressions in parameters and initval
+sections.
+
+<TR>
+<TD>752
+<TD>1.2.2
+<TD>2006/05/22
+<TD>
+
+<TR><TD><TD><TD> <TD>Added an option triggering/suppressing IRF calcs.
+
+<TR><TD><TD><TD> <TD>The Newton algorithm is now used for fix-point calculations.
+
+<TR><TD><TD><TD> <TD> Vertical narrowing of tensors in the Faa Di Bruno
+formula to avoid multiplication by zeros.
+
+<TR>
+<TD>436
+<TD>1.2.1
+<TD>2005/08/17
+<TD>
+
+<TR><TD><TD><TD> <TD>Faa Di Bruno for sparse matrices optimized. The
+implementation now accommodates vertical refinement of function stack
+in order to fit a corresponding slice to available memory. In
+addition, zero slices are identified. For some problems, this implies
+significant speedup.
+
+<TR><TD><TD><TD> <TD>Analytic derivator speedup.
+
+<TR><TD><TD><TD> <TD>Corrected a bug in the threading code. The bug
+stayed concealed in Linux 2.4.* kernels, and showed up in Linux 2.6.*,
+which has different scheduling. This correction also allows using
+detached threads on Windows.
+
+<TR>
+<TD>410
+<TD>1.2
+<TD>2005/07/29
+<TD>
+
+<TR><TD><TD><TD> <TD>Added Dynare++ tutorial.
+
+<TR><TD><TD><TD> <TD>Changed and enriched contents of MAT-4 output
+file.
+
+<TR><TD><TD><TD> <TD>Corrected a bug of wrong variable indexation
+resulting in an exception. The error occurred if a variable appeared
+at time t-1 or t+1 and not at t.
+
+<TR><TD><TD><TD> <TD>Added Matlab interface, which allows simulation
+of a decision rule in Matlab.
+
+<TR><TD><TD><TD> <TD>Got rid of Matrix Template Library.
+
+<TR><TD><TD><TD> <TD>Added checking of model residuals by the
+numerical integration. Three methods: checking along simulation path,
+checking along shocks, and on ellipse of states.
+
+<TR><TD><TD><TD> <TD>Corrected a bug in calculation of higher moments
+of Normal dist.
+
+<TR><TD><TD><TD> <TD>Corrected a bug of wrong drawing from Normal dist
+with non-zero covariances.
+
+<TR><TD><TD><TD>
+<TD>Added numerical integration module. Product and Smolyak
+quadratures over Gauss-Hermite and Gauss-Legendre, and quasi Monte
+Carlo.
+
+<TR>
+<TD>152
+<TD>1.1
+<TD>2005/04/22
+<TD>
+
+<TR><TD><TD><TD>
+<TD>Added a calculation of approximation at a stochastic steady state
+(still experimental).
+
+<TR><TD><TD><TD>
+<TD>Corrected a bug in Cholesky decomposition of variance-covariance
+matrix with off-diagonal elements.
+
+<TR>
+<TD>89
+<TD>1.01
+<TD>2005/02/23
+<TD>
+
+<TR><TD><TD><TD>
+<TD>Added version printout.
+
+<TR><TD><TD><TD>
+<TD>Corrected the bug of multithreading support for P4 HT processors running on Win32.
+
+<TR><TD><TD><TD>
+<TD>Enhanced Kronecker product code resulting in approx. 20% speedup.
+
+<TR><TD><TD><TD>
+<TD>Implemented vertical stack container refinement, and another
+method for sparse folded Faa Di Bruno (both not used yet).
+
+<TR>
+<TD>5
+<TD>1.0
+<TD>2005/02/23
+<TD>The first released version.
+
+</TABLE>
+</BODY>
+</HTML>
diff --git a/dynare++/doc/compiling-notes.txt b/dynare++/doc/compiling-notes.txt
new file mode 100644
index 0000000000000000000000000000000000000000..275b76d100525dd3ae51205da20a0a3d2441950a
--- /dev/null
+++ b/dynare++/doc/compiling-notes.txt
@@ -0,0 +1,23 @@
+It is suggested that you compile Dynare++ with gcc version 3.4.  If you have
+other versions of gcc installed on your system, you need to select version
+3.4, for example,
+
+$ make "CC=gcc-3.4"
+
+For linking, you need to compile the required linear algebra libraries
+(blas, atlas, etc).  Alternatively, you can install precompiled versions of
+these -- make sure that you select the version that matches your CPU.
+
+For example, if you have an SSE2 capable CPU, then on Debian GNU/Linux
+(etch) you need to install the following packages to get the precompiled
+linear algebra libraries:
+
+lapack3-dev
+atlas3-sse2-dev
+
+Then set the environment variable LD_LIBRARY_PATH, e.g.
+
+$ export LD_LIBRARY_PATH=/usr/lib/sse2
+
+before calling make.  This will include the shared libraries in the search
+path for ld.
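+
+Putting these steps together, a typical build on such a Debian system might
+look as follows (a sketch only; adjust package names and paths to your
+distribution and CPU):
+
+$ apt-get install lapack3-dev atlas3-sse2-dev
+$ export LD_LIBRARY_PATH=/usr/lib/sse2
+$ make "CC=gcc-3.4"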
diff --git a/dynare++/doc/dynare++-ramsey.tex b/dynare++/doc/dynare++-ramsey.tex
new file mode 100755
index 0000000000000000000000000000000000000000..0044cb2d99eb282ee06adb449675cd87f44a8a29
--- /dev/null
+++ b/dynare++/doc/dynare++-ramsey.tex
@@ -0,0 +1,157 @@
+\documentclass[10pt]{article} 
+\usepackage{array,natbib,times}
+\usepackage{amsmath, amsthm, amssymb}
+
+%\usepackage[pdftex,colorlinks]{hyperref}
+
+\begin{document}
+
+\title{Implementation of Ramsey Optimal Policy in Dynare++, Timeless Perspective}
+
+\author{Ondra Kamen\'\i k}
+
+\date{June 2006}
+\maketitle
+
+\textbf{Abstract:} This document provides a derivation of Ramsey
+optimal policy from timeless perspective and describes its
+implementation in Dynare++.
+
+\section{Derivation of the First Order Conditions}
+
+Let us start with an economy populated by agents who take a number of
+variables exogenously, or given. These may include taxes or interest
+rates for example. These variables can be understood as decision (or control)
+variables of the timeless Ramsey policy (or social planner). The agent's
+information set at time $t$ includes mass-point distributions of these
+variables for all times after $t$. If $i_t$ denotes an interest rate
+for example, then the information set $I_t$ includes
+$i_{t|t},i_{t+1|t},\ldots,i_{t+k|t},\ldots$ as numbers. In addition
+the information set includes all realizations of past exogenous
+innovations $u_\tau$ for $\tau=t,t-1,\ldots$ and distributions
+$u_\tau\sim N(0,\Sigma)$ for $\tau=t+1,\ldots$. These information sets will be denoted $I_t$.
+
+An information set including only the information on past realizations
+of $u_\tau$ and future distributions of $u_\tau\sim N(0,\Sigma)$ will
+be denoted $J_t$. We will use the following notation for expectations
+through these sets:
+\begin{eqnarray*}
+E^I_t[X] &=& E(X|I_t)\\
+E^J_t[X] &=& E(X|J_t)
+\end{eqnarray*}
+
+The agents optimize taking the decision variables of the social
+planner at $t$ and future as given. This means that all expectations
+they form are conditioned on the set $I_t$. Let $y_t$ denote a vector
+of all endogenous variables including the planner's decision
+variables. Let the number of endogenous variables be $n$. The economy
+can be described by $m$ equations including the first order conditions
+and transition equations:
+\begin{equation}\label{constr}
+E_t^I\left[f(y_{t-1},y_t,y_{t+1},u_t)\right] = 0.
+\end{equation}
+This leaves $n-m$ variables to be determined; these are
+the planner's control variables. The solution of this problem is a
+decision rule of the form:
+\begin{equation}\label{agent_dr}
+y_t=g(y_{t-1},u_t,c_{t|t},c_{t+1|t},\ldots,c_{t+k|t},\ldots),
+\end{equation}
+where $c$ is a vector of planner's control variables.
+
+Each period the social planner chooses the vector $c_t$ to maximize
+his objective such that \eqref{agent_dr} holds for all times following
+$t$. This would lead to $n-m$ first order conditions with respect to
+$c_t$. These first order conditions would contain unknown derivatives
+of endogenous variables with respect to $c$, which would have to be
+retrieved from the implicit constraints \eqref{constr} since the
+explicit form \eqref{agent_dr} is not known.
+
+The other way to proceed is to assume that the planner is so dumb that
+he is not sure what his control variables are. So he optimizes with
+respect to all $y_t$ given the constraints \eqref{constr}. If the
+planner's objective is $b(y_{t-1},y_t,y_{t+1},u_t)$ with a discount factor
+$\beta$, then the optimization problem looks as follows:
+\begin{align}
+\max_{\left\{y_\tau\right\}^\infty_t}&E_t^J
+\left[\sum_{\tau=t}^\infty\beta^{\tau-t}b(y_{\tau-1},y_\tau,y_{\tau+1},u_\tau)\right]\notag\\
+&\rm{s.t.}\label{planner_optim}\\
+&\hskip1cm E^I_\tau\left[f(y_{\tau-1},y_\tau,y_{\tau+1},u_\tau)\right]=0\quad\rm{for\ }
+\tau=\ldots,t-1,t,t+1,\ldots\notag
+\end{align}
+Note two things: First, each constraint \eqref{constr} in
+\eqref{planner_optim} is conditioned on $I_\tau$ not $I_t$. This is
+very important, since the behaviour of agents at period $\tau=t+k$ is
+governed by the constraint using expectations conditioned on $t+k$,
+not $t$. The social planner knows that at $t+k$ the agents will use
+all information available at $t+k$. Second, the constraints for the
+planner's decision made at $t$ include also constraints for agent's
+behaviour prior to $t$. This is because the agent's decision rules are
+given in the implicit form \eqref{constr} and not in the explicit form
+\eqref{agent_dr}.
+
+Using Lagrange multipliers, this can be rewritten as
+\begin{align}
+\max_{y_t}E_t^J&\left[\sum_{\tau=t}^\infty\beta^{\tau-t}b(y_{\tau-1},y_\tau,y_{\tau+1},u_\tau)\right.\notag\\
+&\left.+\sum_{\tau=-\infty}^{\infty}\beta^{\tau-t}\lambda^T_\tau E_\tau^I\left[f(y_{\tau-1},y_\tau,y_{\tau+1},u_\tau)\right]\right],
+\label{planner_optim_l}
+\end{align}
+where $\lambda_t$ is a vector of Lagrange multipliers corresponding to
+constraints \eqref{constr}. Note that the multipliers are multiplied
+by powers of $\beta$ in order to make them stationary. Taking the
+derivative with respect to $y_t$ and setting it to zero yields the first order
+conditions of the planner's problem:
+\begin{align}
+E^J_t\left[\vphantom{\frac{\int^(_)}{\int^(\_)}}\right.&\frac{\partial}{\partial y_t}b(y_{t-1},y_t,y_{t+1},u_t)+
+\beta L^{+1}\frac{\partial}{\partial y_{t-1}}b(y_{t-1},y_t,y_{t+1},u_t)\notag\\
+&+\beta^{-1}\lambda_{t-1}^TE^I_{t-1}\left[L^{-1}\frac{\partial}{\partial y_{t+1}}f(y_{t-1},y_t,y_{t+1},u_t)\right]\notag\\
+&+\lambda_t^TE^I_t\left[\frac{\partial}{\partial y_{t}}f(y_{t-1},y_t,y_{t+1},u_t)\right]\notag\\
+&+\beta\lambda_{t+1}^TE^I_{t+1}\left[L^{+1}\frac{\partial}{\partial y_{t-1}}f(y_{t-1},y_t,y_{t+1},u_t)\right]
+\left.\vphantom{\frac{\int^(_)}{\int^(\_)}}\right]
+ = 0,\label{planner_optim_foc}
+\end{align}
+where $L^{+1}$ and $L^{-1}$ are one period lead and lag operators respectively.
+
+Now we have to make a few assertions concerning expectations
+conditioned on the different information sets to simplify
+\eqref{planner_optim_foc}. Recall the formula for integrating through
+information on which another expectation is conditioned:
+$$E\left[E\left[u|v\right]\right] = E[u],$$
+where the outer expectation integrates through $v$. Since $J_t\subset
+I_t$, a straightforward application of the above formula yields
+\begin{eqnarray}
+E^J_t\left[E^I_t\left[X\right]\right] &=& E^J_t\left[X\right]\quad\rm{and}\notag\\
+E^J_t\left[E^I_{t-1}\left[X\right]\right] &=& E^J_t\left[X\right]\label{e_iden}\\
+E^J_t\left[E^I_{t+1}\left[X\right]\right] &=& E^J_{t+1}\left[X\right]\notag
+\end{eqnarray}
+Now, the last term of \eqref{planner_optim_foc} needs special
+attention. It is equal to
+$E^J_t\left[\beta\lambda^T_{t+1}E^I_{t+1}[X]\right]$. If we assume
+that the problem \eqref{planner_optim} has a solution, then there is a
+deterministic function from $J_{t+1}$ to $\lambda_{t+1}$ and so
+$\lambda_{t+1}\in J_{t+1}\subset I_{t+1}$. And the last term is equal
+to $E^J_{t}\left[E^I_{t+1}[\beta\lambda^T_{t+1}X]\right]$, which is
+$E^J_{t+1}\left[\beta\lambda^T_{t+1}X\right]$. This term can be
+equivalently written as
+$E^J_{t}\left[\beta\lambda^T_{t+1}E^J_{t+1}[X]\right]$. The reason why
+we write the term in this way will be clear later. All in all, we have
+\begin{align}
+E^J_t\left[\vphantom{\frac{\int^(_)}{\int^(\_)}}\right.&\frac{\partial}{\partial y_t}b(y_{t-1},y_t,y_{t+1},u_t)+
+\beta L^{+1}\frac{\partial}{\partial y_{t-1}}b(y_{t-1},y_t,y_{t+1},u_t)\notag\\
+&+\beta^{-1}\lambda_{t-1}^TL^{-1}\frac{\partial}{\partial y_{t+1}}f(y_{t-1},y_t,y_{t+1},u_t)\notag\\
+&+\lambda_t^T\frac{\partial}{\partial y_{t}}f(y_{t-1},y_t,y_{t+1},u_t)\notag\\
+&+\beta\lambda_{t+1}^TE^J_{t+1}\left[L^{+1}\frac{\partial}{\partial y_{t-1}}f(y_{t-1},y_t,y_{t+1},u_t)\right]
+\left.\vphantom{\frac{\int^(_)}{\int^(\_)}}\right]
+ = 0.\label{planner_optim_foc2}
+\end{align}
+Note that we have not proved that \eqref{planner_optim_foc} and
+\eqref{planner_optim_foc2} are equivalent. We proved only that if
+\eqref{planner_optim_foc} has a solution, then
+\eqref{planner_optim_foc2} is equivalent (and has the same solution).
+
+\section{Implementation}
+
+The user inputs $b(y_{t-1},y_t,y_{t+1},u_t)$, $\beta$, and the agents'
+first order conditions \eqref{constr}. The algorithm has to produce
+\eqref{planner_optim_foc2}.
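+
+For illustration, the corresponding Dynare++ model file input takes
+roughly the following shape (a sketch only; the objective shown is just
+an example utility function, and the {\tt model} section holds the
+constraints \eqref{constr}; see the Dynare++ tutorial for a complete
+worked example):
+{\small
+\begin{verbatim}
+planner_objective C^(1-eta)/(1-eta);
+
+planner_discount beta;
+
+model;
+... the m equations of the constraints, i.e. the agents' first
+    order conditions and the transition equations ...
+end;
+\end{verbatim}
+}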
+
+\end{document}
diff --git a/dynare++/doc/dynare++-tutorial.tex b/dynare++/doc/dynare++-tutorial.tex
new file mode 100644
index 0000000000000000000000000000000000000000..979ab50e1467f763effdc2ee6aa7ed75fd121b7a
--- /dev/null
+++ b/dynare++/doc/dynare++-tutorial.tex
@@ -0,0 +1,1512 @@
+\documentclass[10pt]{article} 
+\usepackage{array,natbib}
+\usepackage{amsmath, amsthm, amssymb}
+
+\usepackage[pdftex,colorlinks]{hyperref}
+
+\begin{document}
+
+\title{DSGE Models with Dynare++. A Tutorial.}
+
+\author{Ondra Kamen\'\i k}
+
+\date{Jan 2009 (describes v. 1.3.7)}
+\maketitle
+
+\tableofcontents
+
+\section{Installation}
+
+Dynare++ installation procedure is pretty straightforward. Take the
+following steps:
+\begin{enumerate}
+\item Grab Dynare++ package from Dynare site either for Windows or for
+  Linux according to your operating system.\footnote{If unsure,
+  download the one for Windows.}
+\item Unzip or untar the package to a directory of your choice.
+\item Set operating system path to point to {\tt
+  dynare++-*.*}\footnote{Stars stand for a version number.}
+  subdirectory of the directory you have chosen. In Windows, this step
+  ensures that libraries distributed along Dynare++ are on the
+  path. This step is not really necessary in Linux.
+\item If you have Matlab and want to run custom simulations (see
+  \ref{custom}), then set the Matlab path to the path from the
+  previous step.
+\end{enumerate}
+
+The Dynare++ uninstall procedure is even simpler. Just remove the directory {\tt
+  dynare++-*.*}.
+
+If you want (or need) to compile Dynare++ from sources, grab Dynare++
+source package and do your best.\footnote{Feel free to contact me if
+setting up the sources takes more time than solving your model by
+hand.}
+
+\section{Sample Session}
+
+As an example, let us take a simple DSGE model whose dynamic
+equilibrium is described by the following first order conditions:
+
+\begin{align*}
+&c_t\theta h_t^{1+\psi} = (1-\alpha)y_t\cr
+&\beta E_t\left[\frac{\exp(b_t)c_t}{\exp(b_{t+1})c_{t+1}}
+\left(\exp(b_{t+1})\alpha\frac{y_{t+1}}{k_{t+1}}+1-\delta\right)\right]=1\cr
+&y_t=\exp(a_t)k_t^\alpha h_t^{1-\alpha}\cr
+&k_{t}=\exp(b_{t-1})(y_{t-1}-c_{t-1})+(1-\delta)k_{t-1}\cr
+&a_t=\rho a_{t-1}+\tau b_{t-1}+\epsilon_t\cr
+&b_t=\tau a_{t-1}+\rho b_{t-1}+\nu_t
+\end{align*}
+
+\label{timing}
+The timing of this model is as follows: the exogenous shocks $\epsilon_t$
+and $\nu_t$ are observed by agents at the beginning of period $t$, and
+all endogenous variables with index $t$ are decided before the end of
+period $t$. The expectation operator $E_t$ works over the information
+accumulated just before the end of the period $t$ (this includes
+$\epsilon_t$, $\nu_t$ and all endogenous variables with index $t$).
+
+The exogenous shocks $\epsilon_t$ and $\nu_t$ are supposed to be
+serially uncorrelated with zero means and time-invariant
+variance-covariance matrix. In Dynare++, these variables are called
+exogenous; all other variables are endogenous. Now we are prepared to
+start writing a model file for Dynare++, which is an ordinary text
+file and could be created with any text editor.
+
+The model file starts with a preamble declaring endogenous and
+exogenous variables, parameters, and setting values of the
+parameters. Note that one can put expressions on the right hand sides. The
+preamble follows:
+
+{\small
+\begin{verbatim}
+var Y, C, K, A, H, B;
+varexo EPS, NU;
+
+parameters beta, rho, alpha, delta, theta, psi, tau;
+alpha = 0.36;
+rho   = 0.95;
+tau   = 0.025;
+beta  = 1/(1.03^0.25);
+delta = 0.025;
+psi   = 0;
+theta = 2.95;
+\end{verbatim}
+}
+
+The section setting values of the parameters is terminated by a
+beginning of the {\tt model} section, which states all the dynamic
+equations. A timing convention of a Dynare++ model is the same as the
+timing of our example model, so we may proceed with writing the model
+equations. The time indexes of $c_{t-1}$, $c_t$, and $c_{t+1}$ are
+written as {\tt C(-1)}, {\tt C}, and {\tt C(1)} resp. The {\tt model}
+section looks as follows:
+
+{\small
+\begin{verbatim}
+model;
+C*theta*H^(1+psi) = (1-alpha)*Y;
+beta*exp(B)*C/exp(B(1))/C(1)*
+  (exp(B(1))*alpha*Y(1)/K(1)+1-delta) = 1;
+Y = exp(A)*K^alpha*H^(1-alpha);
+K = exp(B(-1))*(Y(-1)-C(-1)) + (1-delta)*K(-1);
+A = rho*A(-1) + tau*B(-1) + EPS;
+B = tau*A(-1) + rho*B(-1) + NU;
+end;
+\end{verbatim}
+}
+
+At this point, almost all information that Dynare++ needs has been
+provided. Only three things remain to be specified: initial values of
+endogenous variables for non-linear solver, variance-covariance matrix
+of the exogenous shocks and order of the Taylor approximation. Since
+the model is very simple, there is a closed form solution for the
+deterministic steady state. We use it as initial values for the
+non-linear solver. Note that the expressions on the right hand sides in
+{\tt initval} section can reference values previously calculated. The
+remaining portion of the model file looks as follows:
+
+{\small
+\begin{verbatim}
+initval;
+A = 0;
+B = 0;
+H = ((1-alpha)/(theta*(1-(delta*alpha)
+     /(1/beta-1+delta))))^(1/(1+psi));
+Y = (alpha/(1/beta-1+delta))^(alpha/(1-alpha))*H;
+K = alpha/(1/beta-1+delta)*Y;
+C = Y - delta*K;
+end;
+
+vcov = [
+  0.0002  0.00005;
+  0.00005 0.0001
+];
+
+order = 7;
+\end{verbatim}
+}
+
+Note that the order of rows/columns of the variance-covariance matrix
+corresponds to the ordering of exogenous variables in the {\tt varexo}
+declaration. Since the {\tt EPS} was declared first, its variance is
+$0.0002$, and the variance of {\tt NU} is $0.0001$.
+
+Let the model file be saved as {\tt example1.mod}. Now we are prepared
+to solve the model. At the operating system command
+prompt\footnote{Under Windows it is a {\tt cmd} program, under Unix it
+is any shell} we issue a command:
+
+{\small
+\begin{verbatim}
+dynare++ example1.mod
+\end{verbatim}
+}
+
+When the program is finished, it produces two output files: a journal
+file {\tt example1.jnl} and a Matlab MAT-4 file {\tt example1.mat}. The
+journal file contains information about the time, memory and processor
+resources needed for all steps of the solution. The output file is more
+interesting. It contains various simulation results. It can be loaded
+into Matlab or Scilab and examined.%
+\footnote{For Matlab {\tt load example1.mat}, for Scilab {\tt
+mtlb\_load example1.mat}} The following examples are done in Matlab,
+everything would be very similar in Scilab.
+
+Let us first examine the contents of the MAT file:
+{\small
+\begin{verbatim}
+>> load example1.mat
+>> who
+
+Your variables are:
+
+dyn_g_1            dyn_i_Y            dyn_npred          
+dyn_g_2            dyn_irfm_EPS_mean  dyn_nstat          
+dyn_g_3            dyn_irfm_EPS_var   dyn_shocks         
+dyn_g_4            dyn_irfm_NU_mean   dyn_ss             
+dyn_g_5            dyn_irfm_NU_var    dyn_state_vars     
+dyn_i_A            dyn_irfp_EPS_mean  dyn_steady_states  
+dyn_i_B            dyn_irfp_EPS_var   dyn_vars           
+dyn_i_C            dyn_irfp_NU_mean   dyn_vcov           
+dyn_i_EPS          dyn_irfp_NU_var    dyn_vcov_exo       
+dyn_i_H            dyn_mean           
+dyn_i_K            dyn_nboth          
+dyn_i_NU           dyn_nforw          
+\end{verbatim}
+}
+
+All the variables coming from one MAT file have a common prefix. In
+this case it is {\tt dyn}, which is the Dynare++ default. The prefix can
+be changed, so that multiple results can be loaded into one Matlab
+session.
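+
+For instance, a different prefix can be requested on the command line
+(the prefix name below is just an example):
+
+{\small
+\begin{verbatim}
+dynare++ --prefix ex1 example1.mod
+\end{verbatim}
+}
+
+\noindent after which the variables in the MAT file are named {\tt
+ex1\_mean}, {\tt ex1\_vcov}, and so on.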
+
+In the default setup, Dynare++ solves the Taylor approximation to the
+decision rule and calculates unconditional mean and covariance of the
+endogenous variables, and generates impulse response functions. The
+mean and covariance are stored in {\tt dyn\_mean} and {\tt
+dyn\_vcov}. The ordering of the endogenous variables is given by {\tt
+dyn\_vars}.
+
+In our example, the ordering is
+
+{\small
+\begin{verbatim}
+>> dyn_vars
+dyn_vars =
+H
+A
+Y
+C
+K
+B
+\end{verbatim}
+}
+
+and unconditional mean and covariance are
+
+{\small
+\begin{verbatim}
+>> dyn_mean
+dyn_mean =
+    0.2924
+    0.0019
+    1.0930
+    0.8095
+   11.2549
+    0.0011
+>> dyn_vcov
+dyn_vcov =
+    0.0003    0.0006    0.0016    0.0004    0.0060    0.0004
+    0.0006    0.0024    0.0059    0.0026    0.0504    0.0012
+    0.0016    0.0059    0.0155    0.0069    0.1438    0.0037
+    0.0004    0.0026    0.0069    0.0040    0.0896    0.0016
+    0.0060    0.0504    0.1438    0.0896    2.1209    0.0405
+    0.0004    0.0012    0.0037    0.0016    0.0405    0.0014
+\end{verbatim}
+}
+
+The ordering of the variables is also given by indexes starting with
+{\tt dyn\_i\_}. Thus the mean of capital can be retrieved as
+
+{\small
+\begin{verbatim}
+>> dyn_mean(dyn_i_K)
+ans =
+   11.2549
+\end{verbatim}
+}
+
+\noindent and covariance of labor and capital by
+
+{\small
+\begin{verbatim}
+>> dyn_vcov(dyn_i_K,dyn_i_H)
+ans =
+    0.0060
+\end{verbatim}
+}
+
+The impulse response functions are stored in matrices as follows
+\begin{center}
+\begin{tabular}{|l|l|}
+\hline
+matrix& response to\\
+\hline
+{\tt dyn\_irfp\_EPS\_mean}& positive impulse to {\tt EPS}\\
+{\tt dyn\_irfm\_EPS\_mean}& negative impulse to {\tt EPS}\\
+{\tt dyn\_irfp\_NU\_mean}& positive impulse to {\tt NU}\\
+{\tt dyn\_irfm\_NU\_mean}& negative impulse to {\tt NU}\\
+\hline
+\end{tabular}
+\end{center}
+All shock sizes are one standard error. Rows of the matrices
+correspond to endogenous variables, columns correspond to
+periods. Thus capital response to a positive shock to {\tt EPS} can be
+plotted as
+
+{\small
+\begin{verbatim}
+plot(dyn_irfp_EPS_mean(dyn_i_K,:));
+\end{verbatim}
+}
+
+The data is in units of the respective variables, so in order to plot
+the capital response in percentage changes from the decision rule's
+fix point (which is a vector {\tt dyn\_ss}), one has to issue the
+commands:
+
+{\small
+\begin{verbatim}
+Kss=dyn_ss(dyn_i_K);
+plot(100*dyn_irfp_EPS_mean(dyn_i_K,:)/Kss);
+\end{verbatim}
+}
+
+The plotted impulse response shows that the model is pretty persistent
+and that the Dynare++ default for the number of simulated periods is not
+sufficient. In addition, the model's persistence also puts the default
+number of simulations in doubt. The Dynare++ defaults can be changed
+when calling Dynare++; in the operating system's command prompt, we
+issue the command:
+
+{\small
+\begin{verbatim}
+dynare++ --per 300 --sim 150 example1.mod
+\end{verbatim}
+}
+
+\noindent This sets the number of simulations to $150$ and the number
+of periods to $300$ for each simulation giving $45000$ total simulated
+periods.
+
+\section{Sample Optimal Policy Session}
+\label{optim_tut}
+
+Suppose that one wants to solve the following optimal policy problem
+from a timeless perspective.\footnote{See \ref{ramsey} on how to solve
+the Ramsey optimality problem within this framework.} The problem is how
+to choose capital taxes, which finance a public good, so as to maximize
+the agent's utility from the consumption good and the public
+good. The problem takes the form:
+\begin{align*}
+\max_{\{\tau_t\}_{t_0}^\infty} 
+E_{t_0}\sum_{t=t_0}^\infty &\beta^{t-t_0}\left(u(c_t)+av(g_t)\right)\\
+\hbox{subject\ to}&\\
+u'(c_t) &=
+\beta E_t\left[u'(c_{t+1})\left(1-\delta+f'(k_{t+1})(1-\alpha\tau_{t+1})\right)\right]\\
+K_t &= (1-\delta)K_{t-1} + (f(K_{t-1}) - c_{t-1} - g_{t-1})\\
+g_t &= \tau_t\alpha f(K_t),\\
+\hbox{where\ } t & = \ldots,t_0-1,t_0,t_0+1,\ldots
+\end{align*}
+$u(c_t)$ is utility from consuming the consumption good, $v(g_t)$ is
+utility from consuming the public good, and $f(K_t)$ is a production
+function $f(K_t) = Z_tK_t^\alpha$. $Z_t$ is a technology shock modeled
+as an AR(1) process. The three constraints come from the first order
+conditions of a representative agent, who pursues a
+different objective, namely lifetime utility involving only
+consumption $c_t$. The representative agent chooses between
+consumption and investment. He rents the capital to firms and supplies
+a constant amount of labour. All output is paid back to the consumer in the form
+of wages and capital rent. Only the latter is taxed. We suppose that
+the optimal choice has been taking place from the infinite past and will
+be taking place forever. Further, we suppose the same about the
+constraints.
+
+Let us choose the following functional forms:
+\begin{eqnarray*}
+u(c_t) &=& \frac{c_t^{1-\eta}}{1-\eta}\\
+v(g_t) &=& \frac{g_t^{1-\phi}}{1-\phi}\\
+f(K_t) &=& K_t^\alpha
+\end{eqnarray*}
+
+Then the problem can be coded into Dynare++ as follows. We start with
+a preamble which states all the variables, shocks and parameters:
+{\small
+\begin{verbatim}
+var C G K TAU Z;
+
+varexo EPS;
+
+parameters eta beta alpha delta phi a rho; 
+
+eta = 2;
+beta = 0.99;
+alpha = 0.3;
+delta = 0.10;
+phi = 2.5;
+a = 0.1;
+rho = 0.7;
+\end{verbatim}
+}
+
+Then we specify the planner's objective and the discount factor in the
+objective. The objective is an expression (possibly including also
+variable leads and lags), and the discount factor must be one single
+declared parameter:
+{\small
+\begin{verbatim}
+planner_objective C^(1-eta)/(1-eta) + a*G^(1-phi)/(1-phi);
+
+planner_discount beta;
+\end{verbatim}
+}
+
+The model section will contain only the constraints of the social
+planner. These are the capital accumulation, the identity for the public
+good, the AR(1) process for $Z_t$, and the first order condition of the
+representative agent (who has a different objective).
+{\small
+\begin{verbatim}
+model;
+K = (1-delta)*K(-1) + (exp(Z(-1))*K(-1)^alpha - C(-1) - G(-1));
+G = TAU*alpha*K^alpha;
+Z = rho*Z(-1) + EPS;
+C^(-eta) = beta*C(+1)^(-eta)*(1-delta +
+           exp(Z(+1))*alpha*K(+1)^(alpha-1)*(1-alpha*TAU(+1)));
+end;
+\end{verbatim}
+}
+
+Now we have to provide a good guess for the non-linear solver calculating
+the deterministic steady state. The model's steady state has a closed
+form solution if the taxes are known. So we provide a guess for
+taxation {\tt TAU} and then use the closed form solution for capital,
+public good and consumption:\footnote{Initial guess for Lagrange
+multipliers and some auxiliary variables is calculated automatically. See
+\ref{opt_init} for more details.}
+{\small
+\begin{verbatim}
+initval;
+TAU = 0.70;
+K = ((delta+1/beta-1)/(alpha*(1-alpha*TAU)))^(1/(alpha-1));
+G = TAU*alpha*K^alpha;
+C =  K^alpha - delta*K - G;
+Z = 0;
+\end{verbatim}
+}
+
+Finally, we have to provide the order of approximation, and the
+variance-covariance matrix of the shocks (in our case we have only one
+shock):
+{\small
+\begin{verbatim}
+order = 4;
+
+vcov = [
+	0.01
+];
+\end{verbatim}
+}
+
+After this model file has been run, we can load the resulting MAT-file
+into Matlab (or Scilab) and examine its contents:
+{\small
+\begin{verbatim}
+>> load kp1980_2.mat
+>> who
+
+Your variables are:
+
+dyn_g_1            dyn_i_MULT1        dyn_nforw          
+dyn_g_2            dyn_i_MULT2        dyn_npred          
+dyn_g_3            dyn_i_MULT3        dyn_nstat          
+dyn_g_4            dyn_i_TAU          dyn_shocks         
+dyn_i_AUX_3_0_1    dyn_i_Z            dyn_ss             
+dyn_i_AUX_4_0_1    dyn_irfm_EPS_mean  dyn_state_vars     
+dyn_i_C            dyn_irfm_EPS_var   dyn_steady_states  
+dyn_i_EPS          dyn_irfp_EPS_mean  dyn_vars           
+dyn_i_G            dyn_irfp_EPS_var   dyn_vcov           
+dyn_i_K            dyn_mean           dyn_vcov_exo       
+dyn_i_MULT0        dyn_nboth          
+\end{verbatim}
+}
+
+The data dumped into the MAT-file have the same structure as in the
+previous example of this tutorial. The only difference is that
+Dynare++ added a few more variables. Indeed:
+{\small
+\begin{verbatim}
+>> dyn_vars
+dyn_vars =
+MULT1    
+G        
+MULT3    
+C        
+K        
+Z        
+TAU      
+AUX_3_0_1
+AUX_4_0_1
+MULT0    
+MULT2    
+\end{verbatim}
+}
+Besides the five variables declared in the model ({\tt C}, {\tt G},
+{\tt K}, {\tt TAU}, and {\tt Z}), Dy\-na\-re++ added 6 more: four Lagrange
+multipliers for the four constraints and two auxiliary variables for
+shifting in time. See \ref{aux_var} for more details.
+
+The structure and the logic of the MAT-file are the same as if these 6 new
+variables had been declared in the model file, and the file is examined in
+the same way.
+
+For instance, let us examine the Lagrange multiplier of the optimal
+policy associated with the consumption first order condition. Recall
+that the consumers' objective is different from the policy
+objective. Therefore, the constraint will be binding and the
+multiplier will be non-zero. Indeed, its deterministic steady state,
+fix point and mean are as follows:
+{\small
+\begin{verbatim}
+>> dyn_steady_states(dyn_i_MULT3,1)
+ans =
+   -1.3400
+>> dyn_ss(dyn_i_MULT3)
+ans =
+   -1.3035
+>> dyn_mean(dyn_i_MULT3)
+ans =
+   -1.3422
+\end{verbatim}
+}
+
+\section{What Dynare++ Calculates}
+\label{dynpp_calc}
+
+Dynare++ solves first order conditions of a DSGE model in the recursive form:
+\begin{equation}\label{focs}
+E_t[f(y^{**}_{t+1},y_t,y^*_{t-1},u_t)]=0,
+\end{equation}
+where $y$ is a vector of endogenous variables, and $u$ a vector of
+exogenous variables. Some of the elements of $y$ can occur at time $t+1$;
+these are denoted $y^{**}$. Elements of $y$ occurring at time $t-1$ are denoted
+$y^*$. The exogenous shocks are supposed to be serially independent
+and normally distributed $u_t\sim N(0,\Sigma)$.
+
+The solution of this dynamic system is a decision rule
+\[
+y_t=g(y^*_{t-1},u_t)
+\]
+Dynare++ calculates a Taylor approximation of this decision rule of a
+given order. The approximation takes into account deterministic
+effects of future volatility, so the point about which the Taylor
+approximation is taken will be different from the fix point $y$ of the rule,
+which satisfies $y=g(y^*,0)$.
+
+The fix point of a rule corresponding to a model with $\Sigma=0$ is
+called the {\it deterministic steady state}, denoted $\bar y$. In
+contrast to the deterministic steady state, there is no consensus in the
+literature on what to call the fix point of the rule corresponding to a
+model with non-zero $\Sigma$. I am tempted to call it the {\it stochastic
+  steady state}; however, it might be confused with the unconditional mean
+or with the steady distribution. So I will use the term {\it fix point} to
+avoid confusion.
+
+By default, Dynare++ solves the Taylor approximation about the
+deterministic steady state. Alternatively, Dynare++ can split the
+uncertainty into a few steps and take smaller steps when calculating the
+fix points. This is controlled by an option {\tt --steps}. For the
+brief description of the second method, see \ref{multistep_alg}.
+
+\subsection{Decision Rule Form}
+\label{dr_form}
+
+In case of default solution algorithm (approximation about the
+deterministic steady state $\bar y$), Dynare++ calculates the higher
+order derivatives of the equilibrium rule to get a decision rule of
+the following form. In Einstein notation, it is:
+\[
+y_t-\bar y = \sum_{i=0}^k\frac{1}{i!}\left[g_{(y^*u)^i}\right]
+_{\alpha_1\ldots\alpha_i}
+\prod_{j=1}^i\left[\begin{array}{c} y^*_{t-1}-\bar y^*\\ u_t \end{array}\right]
+^{\alpha_j}
+\]
+
+Note that the ergodic mean will be different from the deterministic
+steady state $\bar y$ and thus deviations $y^*_{t-1}-\bar y^*$ will
+not be zero on average. This implies that on average we will commit
+larger round-off errors than if we used the decision rule expressed in
+deviations from a point closer to the ergodic mean. Therefore, by
+default, Dynare++ recalculates this rule and expresses it in
+deviations from the stochastic fix point $y$.
+\[
+y_t-y = \sum_{i=1}^k\frac{1}{i!}\left[\tilde g_{(y^*u)^i}\right]
+_{\alpha_1\ldots\alpha_i}
+\prod_{j=1}^i\left[\begin{array}{c} y^*_{t-1}-y^*\\ u_t \end{array}\right]
+^{\alpha_j}
+\]
+Note that since the rule is centralized around its fix point, the
+first term (for $i=0$) drops out.
+
+Also note that this rule is mathematically equivalent to the rule
+expressed in deviations from the deterministic steady state, and it is
+still an approximation about the deterministic steady state. The fact
+that it is expressed in deviations from a different point should not
+be confused with the algorithm in \ref{multistep_alg}.
+
+This centralization can be avoided by invoking the {\tt --no-centralize}
+command line option.
+
+\subsection{Taking Steps in Volatility Dimension}
+\label{multistep_alg}
+
+For models where the volatility of the exogenous shocks plays a big
+role, the approximation about the deterministic steady state can be poor,
+since the equilibrium dynamics can be very different from the dynamics
+in the vicinity of perfect foresight (the deterministic steady state).
+
+Therefore, Dynare++ has an option {\tt --steps} triggering a multistep
+algorithm. The algorithm splits the volatility into a given number of
+steps, and Dynare++ attempts to calculate approximations about the fix points
+corresponding to these levels of volatility. The problem is that if we
+want to calculate higher order approximations about fix points
+corresponding to non-zero volatilities (in contrast to the case of the
+deterministic steady state), then the derivatives of lower orders
+depend on derivatives of higher orders with respect to forward looking
+variables. The multistep algorithm in each step approximates the
+missing higher order derivatives with extrapolations based on the
+previous step.
+
+In this way, the approximation of the stochastic fix point and the
+derivatives about this fix point are obtained. It is difficult to decide
+a priori whether this algorithm yields a better decision
+rule. Nothing is guaranteed, and the resulting decision rule should be
+checked with a numerical integration. See \ref{checks}.
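+
+For example, an approximation computed in four volatility steps could be
+requested as follows (the number of steps is purely illustrative):
+
+{\small
+\begin{verbatim}
+dynare++ --steps 4 example1.mod
+\end{verbatim}
+}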
+
+\subsection{Simulating the Decision Rule}
+
+After some form of a decision rule is calculated, it is simulated to
+obtain draws from the ergodic (unconditional) distribution of the endogenous
+variables. The mean and the covariance are reported. There are two
+ways to calculate the mean and the covariance. The first is to
+store all simulated samples and calculate the sample mean and
+covariance. The second is to calculate the mean and the covariance in
+real time without storing the simulated samples. The latter case is
+described below (see \ref{rt_simul}).
+
+The stored simulated samples are then used for impulse response
+function calculations. For each shock, the realized shocks in these
+simulated samples (control simulations) are taken, an impulse is
+added, and the new realization of shocks is simulated. Then the control
+simulation is subtracted from the simulation with the impulse. This is
+done for all control simulations and the results are averaged. As a
+result, we get an expectation of the difference between paths with the impulse
+and without the impulse. In addition, the sample variances are
+reported. They might be useful for confidence interval calculations.
+
+For each shock, Dynare++ calculates IRFs for two impulses, positive and
+negative. The size of an impulse is one standard error of the respective
+shock.
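+
+As an illustration only, the following Matlab sketch mimics this
+averaging for the capital response to a positive {\tt EPS} impulse in
+{\tt example1.mod}, using the {\tt dynare\_simul} interface (see
+\ref{ramsey}). Dynare++ performs these calculations internally with its
+own shock draws; the sketch assumes that {\tt dynare\_simul} returns a
+matrix whose rows are ordered as in {\tt dyn\_vars} and whose columns
+are periods, and the numbers of samples and periods are arbitrary:
+
+{\small
+\begin{verbatim}
+load example1.mat;
+nsim = 10; nper = 100;
+irf = zeros(1,nper);
+for i = 1:nsim
+  % control shocks drawn with the declared covariance (EPS first)
+  shocks = chol(dyn_vcov_exo)'*randn(2,nper);
+  control = dynare_simul('example1.mat',shocks,dyn_ss);
+  % add a one-standard-error impulse to EPS in the first period
+  shocks(1,1) = shocks(1,1) + sqrt(dyn_vcov_exo(1,1));
+  impulse = dynare_simul('example1.mat',shocks,dyn_ss);
+  irf = irf + (impulse(dyn_i_K,:) - control(dyn_i_K,:))/nsim;
+end
+plot(irf);
+\end{verbatim}
+}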
+
+The rest of this subsection is divided into three parts describing
+real-time simulations, conditional simulations, and the way random
+numbers are generated, respectively.
+
+\subsubsection{Simulations With Real-Time Statistics}
+\label{rt_simul}
+
+When one needs to simulate large samples to get a good estimate of the
+unconditional mean, simulating the decision rule with statistics
+calculated in real time comes in handy. The main reason is that
+storing all simulated samples may not fit into the available
+memory.
+
+The real-time statistics proceed as follows: we model the ergodic
+distribution as a normal distribution $y\sim N(\mu,\Sigma)$. Further,
+the parameters $\mu$ and $\Sigma$ are modelled as:
+\begin{eqnarray*}
+  \Sigma &\sim& {\rm InvWishart}_\nu(\Lambda)\\
+  \mu|\Sigma &\sim& N(\bar\mu,\Sigma/\kappa) \\ 
+\end{eqnarray*}
+This model of $p(\mu,\Sigma)$ has the advantage of conjugacy, i.e. the
+prior distribution has the same form as the posterior. This property is
+used in the calculation of real-time estimates of $\mu$ and $\Sigma$,
+since it suffices to maintain only the parameters of $p(\mu,\Sigma)$
+conditional on the draws observed so far. The parameters are $\nu$,
+$\Lambda$, $\kappa$, and $\bar\mu$.
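+
+For reference, the standard conjugate updating formulas for this prior
+after observing $n$ draws $y_1,\ldots,y_n$ with sample mean $\hat y$
+are as follows (the exact recursion implemented in Dynare++ may differ
+in details):
+\begin{eqnarray*}
+\kappa_n &=& \kappa_0+n,\qquad \nu_n\;=\;\nu_0+n\\
+\bar\mu_n &=& \frac{\kappa_0\bar\mu_0+n\hat y}{\kappa_0+n}\\
+\Lambda_n &=& \Lambda_0+\sum_{i=1}^n(y_i-\hat y)(y_i-\hat y)^T
+  +\frac{\kappa_0 n}{\kappa_0+n}(\hat y-\bar\mu_0)(\hat y-\bar\mu_0)^T
+\end{eqnarray*}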
+
+The mean of $\mu,\Sigma|Y$, where $Y$ denotes all the draws (simulated
+periods), is reported.
+
+\subsubsection{Conditional Distributions}
+\label{cond_dist}
+
+Starting with version 1.3.6, Dynare++ calculates variable
+distributions $y_t$ conditional on $y_0=\bar y$, where $\bar y$ is the
+deterministic steady state. If triggered, Dynare++ simulates a given
+number of samples with a given number of periods all starting at
+the deterministic steady state. Then for each time $t$, mean
+$E[y_t|y_0=\bar y]$ and variances $E[(y_t-E[y_t|y_0=\bar
+y])(y_t-E[y_t|y_0=\bar y])^T|y_0=\bar y]$ are reported.
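+
+For example, conditional distributions based on 50 samples of 100
+periods each could be requested as follows (the numbers are purely
+illustrative):
+
+{\small
+\begin{verbatim}
+dynare++ --condsim 50 --condper 100 example1.mod
+\end{verbatim}
+}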
+
+\subsubsection{Random Numbers}
+\label{random_numbers}
+
+For generating the pseudo random numbers, Dynare++ uses the Mersenne
+twister by Makoto Matsumoto and Takuji Nishimura. Because of the
+parallel nature of Dynare++ simulations, each simulated sample gets
+its own instance of the twister. Each such instance is seeded before
+the simulations are started. This is to prevent additional randomness
+implied by the operating system's thread scheduler from interfering with
+the pseudo random numbers.
+
+For seeding the individual instances of the Mersenne twister assigned
+to each simulated sample, the system (C library) random generator is
+used. These random generators usually do not have very good
+properties, but we use them only to seed the Mersenne twister
+instances. The user can set the initial seed of the system random
+generator and in this way deterministically choose the seeds of all
+instances of the Mersenne twister.
+
+In this way, it is guaranteed that two runs of Dynare++
+with the same seed will yield the same results regardless of the
+operating system's scheduler. The only difference may be caused by
+different round-off errors committed when the same set of samples is
+summed in a different order (due to the operating system's scheduler).
+
+\subsection{Numerical Approximation Checks}
+\label{checks}
+
+Optionally, Dynare++ can run three kinds of checks for Taylor
+approximation errors. All three methods numerically calculate
+the residual of the DSGE equations
+\[
+E[f(g^{**}(g^*(y^*,u),u'),g(y^*,u),y^*,u)|y^*,u]
+\]
+which ideally must be zero for all $y^*$ and $u$. This integral is
+evaluated by either a product or a Smolyak rule applied to a one-dimensional
+Gauss--Hermite quadrature. The user does not need to care about the
+choice; an algorithm yielding a higher quadrature level with a number
+of evaluations below a user-given maximum is selected.
+
+The three methods differ only by a set of $y^*$ and $u$ where the
+residuals are evaluated. These are:
+\begin{itemize}
+\item The first method calculates the residuals along the shocks for
+fixed $y^*$ equal to the fix point. We fix all elements of $u$ at $0$
+except one element, which varies from $-\mu\sigma$ to
+$\mu\sigma$, where $\sigma$ is the standard error of that element and
+$\mu$ is a user-given multiplier. In this way we can see how the
+approximation error grows if the fix point is disturbed by a shock of
+varying size.
+\item The second method calculates the residuals along a simulation
+path. A random simulation is run, and at each point the residuals are
+reported.
+\item The third method calculates the errors on an ellipse of the
+state variables $y^*$. The shocks $u$ are always zero. The ellipse is
+defined as
+\[\{Ax|\; \Vert x\Vert_2=\mu\},\]
+where $\mu$ is a user given multiplier, and $AA^T=V$ for $V$ being a
+covariance of endogenous variables based on the first order
+approximation. The method calculates the residuals at a low-discrepancy
+sequence of points on the ellipse. Both the residuals and the points
+are reported.
+\end{itemize}
+
+\section{Optimal Policy with Dynare++}
+\label{optim}
+
+Starting with version 1.3.2, Dynare++ is able to automatically
+generate and then solve the first order conditions for a given
+objective and (possibly) forward looking constraints. Since the
+constraints can be forward looking, the use of this feature will
+mainly be in optimal policy or control.
+
+The only extra thing which needs to be added to the model file is a
+specification of the policy's objective. This is done by two keywords,
+which must not be placed before the parameter settings. If the objective is to maximize
+$$E_{t_0}\sum_{t=t_0}^\infty\beta^{t-t_0}\left[\frac{c_t^{1-\eta}}{1-\eta}+
+a\frac{g_t^{1-\phi}}{1-\phi}\right],$$
+then the keywords will be:
+{\small
+\begin{verbatim}
+planner_objective C^(1-eta)/(1-eta) + a*G^(1-phi)/(1-phi);
+
+planner_discount beta;
+\end{verbatim}
+}
+
+Dynare++ parses the file and, if the two keywords are present,
+automatically derives the first order conditions for the problem. The
+first order conditions are put into the form \eqref{focs} and solved. In
+this case, the equations in the {\tt model} section are understood as
+the constraints (they might come as the first order conditions from
+optimizations of other agents) and their number must be less than the
+number of endogenous variables.
+
+This section further describes what the optimal policy first order
+conditions look like, then discusses some issues with the initial
+guess for the deterministic steady state, and finally describes how to
+simulate Ramsey policy within this framework.
+
+\subsection{First Order Conditions}
+
+Mathematically, the optimization problem looks as follows:
+\begin{align}
+\max_{\left\{y_\tau\right\}^\infty_t}&E_t
+\left[\sum_{\tau=t}^\infty\beta^{\tau-t}b(y_{\tau-1},y_\tau,y_{\tau+1},u_\tau)\right]\notag\\
+&\rm{s.t.}\label{planner_optim}\\
+&\hskip1cm E^I_\tau\left[f(y_{\tau-1},y_\tau,y_{\tau+1},u_\tau)\right]=0\quad\rm{for\ }
+\tau=\ldots,t-1,t,t+1,\ldots\notag
+\end{align}
+where $E^I$ is an expectation operator over an information set including,
+besides all the past, all future realizations of policy's control
+variables and distributions of future shocks $u_t\sim
+N(0,\Sigma)$. The expectation operator $E$ integrates over an
+information including only distributions of $u_t$ (besides the past).
+
+Note that the constraints $f$ take place at all times, and they are
+conditioned on the running $\tau$ since the policy maker knows that the
+agents at time $\tau$ will use all the information available at
+$\tau$.
+
+The maximization problem can be rewritten using Lagrange multipliers as:
+\begin{align}
+\max_{y_t}E_t&\left[\sum_{\tau=t}^\infty\beta^{\tau-t}b(y_{\tau-1},y_\tau,y_{\tau+1},u_\tau)\right.\notag\\
+&\left.+\sum_{\tau=-\infty}^{\infty}\beta^{\tau-t}\lambda^T_\tau E_\tau^I\left[f(y_{\tau-1},y_\tau,y_{\tau+1},u_\tau)\right]\right],
+\label{planner_optim_l}
+\end{align}
+where $\lambda_t$ is a column vector of Lagrange multipliers.
+
+After some manipulations with compounded expectations over different
+information sets, one gets the following first order conditions:
+\begin{align}
+E_t\left[\vphantom{\frac{\int^(_)}{\int^(\_)}}\right.&\frac{\partial}{\partial y_t}b(y_{t-1},y_t,y_{t+1},u_t)+
+\beta L^{+1}\frac{\partial}{\partial y_{t-1}}b(y_{t-1},y_t,y_{t+1},u_t)\notag\\
+&+\beta^{-1}\lambda_{t-1}^TL^{-1}\frac{\partial}{\partial y_{t+1}}f(y_{t-1},y_t,y_{t+1},u_t)\notag\\
+&+\lambda_t^T\frac{\partial}{\partial y_{t}}f(y_{t-1},y_t,y_{t+1},u_t)\notag\\
+&+\beta\lambda_{t+1}^TE_{t+1}\left[L^{+1}\frac{\partial}{\partial y_{t-1}}f(y_{t-1},y_t,y_{t+1},u_t)\right]
+\left.\vphantom{\frac{\int^(_)}{\int^(\_)}}\right]
+ = 0,\label{planner_optim_foc2}
+\end{align}
+where $L^{+1}$ is one period lead operator, and $L^{-1}$ is one period lag operator.
+
+Dynare++ takes input corresponding to \eqref{planner_optim},
+introduces the Lagrange multipliers according to
+\eqref{planner_optim_l}, and using its symbolic derivator it compiles
+\eqref{planner_optim_foc2}. The system \eqref{planner_optim_foc2} with
+the constraints from \eqref{planner_optim_l} is then solved in the
+same way as the normal input \eqref{focs}.
+
+\subsection{Initial Guess for Deterministic Steady State}
+\label{opt_init}
+
+Solving for the deterministic steady state of non-linear dynamic systems is
+not trivial, and the first order conditions for optimal policy add
+significant complexity. The {\tt initval} section allows the user to input the
+initial guess for the non-linear solver. It requires that all user
+declared endogenous variables be initialized. However, in most cases,
+we have no idea what good initial guesses for the Lagrange
+multipliers are.
+
+For this reason, Dynare++ calculates an initial guess of the Lagrange
+multipliers using the user-provided initial guesses of all other
+endogenous variables. It uses the linearity of
+\eqref{planner_optim_foc2} in the Lagrange multipliers. In its static form,
+\eqref{planner_optim_foc2} looks as follows:
+\begin{align}
+&\frac{\partial}{\partial y_t}b(y,y,y,0)+
+\beta\frac{\partial}{\partial y_{t-1}}b(y,y,y,0)\notag\\
+&+\lambda^T\left[\beta^{-1}\frac{\partial}{\partial y_{t+1}}f(y,y,y,0)
+ +\frac{\partial}{\partial y_{t}}f(y,y,y,0)
+ +\beta\frac{\partial}{\partial y_{t-1}}f(y,y,y,0)\right]
+ = 0\label{planner_optim_static}
+\end{align}
+
+The user is required to provide an initial guess of all declared
+variables (all $y$). Then \eqref{planner_optim_static} becomes an
+overdetermined linear system in $\lambda$, which is solved by means of
+least squares. The closer the initial guess of $y$ is to the exact
+solution, the closer the Lagrange multipliers $\lambda$ are to their exact values.
+
+The Lagrange multipliers calculated by least squares are not used
+if they are set in the {\tt initval} section. In other words, if a
+multiplier has been given a value in the {\tt initval} section, then
+that value is used; otherwise the calculated value is taken.
+
+For even more difficult problems, Dynare++ generates two Matlab files
+calculating a residual of the static system and its derivative. These
+can be used in Matlab's {\tt fsolve} or another algorithm to get an
+exact solution for the deterministic steady state. See
+\ref{output_matlab_scripts} for more details.
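+
+As a sketch only, if the generated residual function were called {\tt
+example1\_f} (a hypothetical name; the actual file names are described
+in \ref{output_matlab_scripts}) and took the vector of endogenous
+variables as its argument, it could be passed to Matlab's {\tt fsolve}
+as follows:
+
+{\small
+\begin{verbatim}
+y0 = dyn_ss;      % e.g. start from the fix point of a previous run
+ybar = fsolve(@(y) example1_f(y), y0);
+\end{verbatim}
+}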
+
+Finally, Dynare++ might generate a few auxiliary variables. These are
+simple transformations of other variables. They are initialized
+automatically and the user usually does not need to care about them.
+
+\subsection{Optimal Ramsey Policy}
+\label{ramsey}
+
+Dynare++ solves the optimal policy problem from a timeless
+perspective. This means that it assumes that the constraints in
+\eqref{planner_optim} are valid from the infinite past to the infinite
+future. The Dynare++ calculation of the ergodic distribution then assumes that
+the policy has been taking place since the infinite past.
+
+If some constraints in \eqref{planner_optim} are forward looking, this
+will result in some backward looking Lagrange multipliers. Such
+multipliers imply possibly time inconsistent policy in the states of
+the ``original'' economy, since these backward looking multipliers add
+new states to the ``optimized'' economy. In this respect, the timeless
+perspective means that there is no fixed initial distribution of such
+multipliers; instead, their ergodic distribution is taken.
+
+In contrast, Ramsey optimal policy is started at $t=0$. This means
+that the first order conditions at $t=0$ are different from the first
+order conditions at $t\geq 1$, which are
+\eqref{planner_optim_foc2}. However, it is not difficult to show
+that the first order conditions at $t=0$ are in the form of
+\eqref{planner_optim_foc2} if all the backward looking Lagrange
+multipliers are set to zero at period $-1$, i.e. $\lambda_{-1}=0$.
+
+All in all, the solution of \eqref{planner_optim_foc2} calculated by
+Dynare++ can be used as a Ramsey optimal policy solution provided that
+all the backward looking Lagrange multipliers were set to zero prior
+to the first simulation period. This can be done by setting the
+initial state of a simulation path in {\tt dynare\_simul.m}. If this
+is applied to the example from \ref{optim_tut}, then we may do the
+following at the Matlab command prompt:
+{\small
+\begin{verbatim}
+>> load kp1980_2.mat
+>> shocks = zeros(1,100);
+>> ystart = dyn_ss;
+>> ystart(dyn_i_MULT3) = 0;
+>> r=dynare_simul('kp1980_2.mat',shocks,ystart);
+\end{verbatim}
+}
+This will simulate the economy as if the policy had been introduced at the
+beginning and no shocks happened.
+
+More information on custom simulations can be obtained by typing:
+{\small
+\begin{verbatim}
+help dynare_simul
+\end{verbatim}
+}
+
+
+\section{Running Dynare++}
+
+This section deals with Dynare++ input. The first subsection
+\ref{dynpp_opts} provides a list of command line options, the next
+subsection \ref{dynpp_mod} deals with the format of the Dynare++ model file,
+and the last subsection discusses incompatibilities between Dynare
+Matlab and Dynare++.
+
+\subsection{Command Line Options}
+\label{dynpp_opts}
+
+The calling syntax of Dynare++ is
+
+{\small
+\begin{verbatim}
+dynare++ [--help] [--version] [options] <model file>
+\end{verbatim}
+}
+
+\noindent where the model file must be given as the last token and
+must include its extension. The model file may include a path; in this
+case, the path is taken relative to the current directory. Note that
+the current directory can be different from the location of {\tt
+dynare++} binary.
+
+The options are as follows:
+
+\def\desc#1{\rlap{#1}\kern4cm}
+
+\begin{description}
+\item[\desc{\tt --help}] This prints a help message and exits.
+
+\item[\desc{\tt --version}] This prints version information and
+exits.
+
+\item[\desc{\tt --per \it num}] This sets the number of simulated
+periods to {\it num}. This number is used when calculating
+unconditional mean and covariance and for IRFs. Default is 100.
+
+\item[\desc{\tt --sim \it num}] This sets the number of stochastic
+simulations. This number is used when calculating unconditional mean
+and covariance and for IRFs. The total sample size for unconditional
+mean and covariance is the number of periods times the number of
+successful simulations. Note that if a simulation results in {\tt NaN}
+or {\tt +-Inf}, then it is thrown away and is not considered for the
+mean nor the variance. The same is valid for IRF. Default is 80.
+
+\item[\desc{\tt --rtsim \it num}] This sets the number of stochastic
+simulations whose statistics are calculated in real time. See
+\ref{rt_simul} for more details. Default is 0, no simulations.
+
+\item[\desc{\tt --rtper \it num}] This sets the number of simulated
+periods per real-time simulation to {\it num}. See
+\ref{rt_simul} for more details. Default is 0, no simulations.
+
+\item[\desc{\tt --condsim \it num}] This sets the number of stochastic
+conditional simulations. See \ref{cond_dist} for more details. Default
+is 0, no simulations.
+
+\item[\desc{\tt --condper \it num}] This sets the number of simulated
+periods per conditional simulation. See \ref{cond_dist} for more
+details. Default is 0, no simulations.
+
+\item[\desc{\tt --steps \it num}] If the number {\it num} is greater
+than 0, this option invokes a multi-step algorithm (see section
+\ref{dynpp_calc}), which in the given number of steps calculates fix
+points and approximations of the decision rule for increasing
+uncertainty. Default is 0, which invokes the standard algorithm for
+approximation around the deterministic steady state. For more details,
+see \ref{multistep_alg}.
+
+\item[\desc{\tt --centralize}] This option causes the resulting
+decision rule to be centralized about (in other words, expressed in
+deviations from) the stochastic fix point. The centralized decision
+rule is mathematically equivalent but has the advantage of yielding
+smaller numerical errors on average than a non-centralized decision
+rule. By default, the rule is centralized. For more details, see
+\ref{dr_form}.
+
+\item[\desc{\tt --no-centralize}] This option causes the
+resulting decision rule not to be centralized about (in other words,
+not expressed in deviations from) the stochastic fix point. By
+default, the rule is centralized. For more details, see
+\ref{dr_form}.
+
+This option has no effect if the number of steps given by {\tt
+--steps} is greater than 0. In this case, the rule is always
+centralized.
+
+\item[\desc{\tt --prefix \it string}] This sets a common prefix of
+variables in the output MAT file. Default is {\tt dyn}.
+
+\item[\desc{\tt --seed \it num}] This sets the initial seed for the
+random number generator that provides seeds to the generators for each
+sample. See \ref{random_numbers} for more details. Default is 934098.
+
+\item[\desc{\tt --order \it num}] This sets the order of approximation
+and overrides the {\tt order} statement in the model file. There is no
+default.
+
+\item[\desc{\tt --threads \it num}] This sets the number of parallel
+threads. Complex evaluations of Faa Di Bruno formulas, simulations and
+numerical integration can be parallelized, and Dynare++ exploits this
+advantage. You need hardware support for this, otherwise
+there is no gain from the parallelization. As a rule of thumb, set the
+number of threads to the number of processors. An exception is a
+machine with a Pentium 4 with Hyper-Threading (abbreviated HT): this
+processor can run two threads concurrently. The same applies to
+dual-core processors. Since these processors are present in most new
+PC desktops/laptops, the default is 2.
+
+\item[\desc{\tt --ss-tol \it float}] This sets the tolerance of the
+non-linear solver for the deterministic steady state to {\it float}. It
+is in the $\Vert\cdot\Vert_\infty$ norm, i.e. the algorithm is considered
+converged when the maximum absolute residual is less than the
+tolerance. Default is $10^{-13}$.
+
+\item[\desc{\tt --check \it pPeEsS}] This selects types of residual
+checking to be performed. See section \ref{checks} for details. The
+string consisting of the letters ``pPeEsS'' governs the selection. The
+upper-case letters switch a check on, the lower-case letters
+off. ``P'' stands for checking along a simulation path, ``E'' stands
+for checking on ellipse, and finally ``S'' stands for checking along
+the shocks. It is possible to choose more than one type of check. The
+default behavior is that no checking is performed.
+
+\item[\desc{\tt --check-evals \it num}] This sets the maximum number of
+evaluations per re\-sidual. The actual value depends on the selected
+algorithm for the integral evaluation. The algorithm can be either
+product or Smolyak quadrature and is chosen so that the actual number
+of evaluations is minimal for the maximal level of
+quadrature. Default is 1000.
+
+\item[\desc{\tt --check-num \it num}] This sets the number of checked
+points in a residual check. The single input value {\it num} is used for
+all three types of checks in the following way:
+\begin{itemize}
+\item For checks along the simulation, the number of simulated periods
+is $10\cdot num$.
+\item For checks on the ellipse, the number of points on the ellipse is
+$10\cdot num$.
+\item For checks along the shocks, the number of checked points
+corresponding to shocks from $0$ to $\mu\sigma$ (see \ref{checks}) is
+$num$.
+\end{itemize}
+Default is 10.
+
+\item[\desc{\tt --check-scale \it float}] This sets the scaling factor
+$\mu$ for checking on ellipse to $0.5\cdot float$ and scaling factor
+$\mu$ for checking along shocks to $float$. See section
+\ref{checks}. Default is 2.0.
+
+\item[\desc{\tt --no-irfs}] This suppresses IRF calculations. Default
+is to calculate IRFs for all shocks.
+
+\item[\desc{\tt --irfs}] This triggers IRF calculations. If there are
+no shock names following the {\tt --irfs} option, then IRFs for all
+shocks are calculated, otherwise see below. Default is to calculate
+IRFs for all shocks.
+
+\item[\desc{\tt --irfs \it shocklist}] This triggers IRF calculations
+only for the listed shocks. The {\it shocklist} is a space separated
+list of exogenous variables for which the IRFs will be
+calculated. Default is to calculate IRFs for all shocks.
+\end{description}
+
+The following are a few examples:
+{\small
+\begin{verbatim}
+dynare++ --sim 300 --per 50 blah.mod
+dynare++ --check PE --check-num 15 --check-evals 500 blah.dyn
+dynare++ --steps 5 --check S --check-scale 3 blahblah.mod
+\end{verbatim}
+}
+The first one sets the number of periods for IRFs to 50, and sets the
+sample size for unconditional mean and covariance calculations to
+15000 (300 simulations of 50 periods each). The second one checks the
+decision rule along a simulation path having 150 periods and on an
+ellipse at 150 points, performing at most 500 evaluations per
+residual. The third one solves the model in five steps and checks the
+rule along all the shocks from $-3\sigma$ to $3\sigma$ in $2\cdot 10+1$
+points (10 negative, 10 positive and 1 at zero).
+
+\subsection{Dynare++ Model File}
+\label{dynpp_mod}
+
+In its strictest form, Dynare++ solves the following mathematical problem:
+\begin{equation}\label{basic_form}
+E_t[f(y^{**}_{t+1},y_t,y^*_{t-1},u_t)]=0
+\end{equation}
+This problem is input either directly, or it is an output of Dynare++
+routines calculating the first order conditions of the optimal policy
+problem. In either case, Dynare++ performs the necessary and
+mathematically correct substitutions to put the user specified problem
+into the form \eqref{basic_form}, which is passed to the Dynare++
+solver. The following discusses a few timing issues:
+\begin{itemize}
+\item Endogenous variables can occur, starting from version 1.3.4, at
+times after $t+1$. If so, an equation containing such an occurrence is
+broken into non-linear parts, and new equations and new auxiliary
+variables are automatically generated only for the non-linear terms
+containing the occurrence. Note that shifting such terms to time $t+1$
+may add occurrences of some other variables (involved in the terms) at
+times before $t-1$, implying the addition of auxiliary variables to bring
+those variables to $t-1$.
+\item Variables declared as shocks may also occur at arbitrary
+times. If before $t$, additional endogenous variables are used to
+bring them to time $t$. If after $t$, then a similar method is used as
+for endogenous variables occurring after $t+1$.
+\item There is no constraint on variables occurring at both times
+$t+1$ (or later) and $t-1$ (or earlier). Virtually all variables can
+occur at arbitrary times.
+\item Endogenous variables can occur at times before $t-1$. If so,
+additional endogenous variables are added for all lags between the
+variable and $t-1$.
+\item Dynare++ applies the operator $E_t$ to all occurrences at time
+$t+1$. The realization of $u_t$ is included in the information set of
+$E_t$. See an explanation of Dynare++ timing on page \pageref{timing}.
+\end{itemize}
+
+The model equations are formulated in the same way as in Matlab
+Dynare. Time indices different from $t$ are put in round
+parentheses in this way: {\tt C(-1)}, {\tt C}, {\tt C(+1)}.
+
+The mathematical expressions can use the following functions and operators:
+\begin{itemize}
+\item binary {\tt + - * / \verb|^|}
+\item unary plus and minus, as in {\tt a = -3;} and {\tt a = +3;} resp.
+\item unary mathematical functions: {\tt log exp sin cos tan
+sqrt}, whe\-re the logarithm has a natural base
+\item symbolic differentiation operator {\tt diff(expr,symbol)}, where
+{\tt expr} is a mathematical expression and {\tt symbol} is a unary
+symbol (a variable or a parameter); for example {\tt
+  diff(A*K(-1)\verb|^|alpha*L\verb|^|(1-alpha),K(-1))} is internally expanded as
+{\tt A*alpha*K(-1)\verb|^|(alpha-1)*L\verb|^|(1-alpha)}
+\item unary error function and complementary error function: {\tt erf}
+and {\tt erfc} defined as
+\begin{eqnarray*}
+{\rm erf}(x) &=& \frac{2}{\sqrt{\pi}}\int_0^x e^{-t^2}{\rm d}t\\
+{\rm erfc}(x) &=& \frac{2}{\sqrt{\pi}}\int_x^\infty e^{-t^2}{\rm d}t
+\end{eqnarray*}
+\end{itemize}
+
+The model file can contain user comments. Their usage can be
+understood from the following piece of the model file:
+
+{\small
+\begin{verbatim}
+P*C^(-gamma) = // line continues until semicolon
+  beta*C(+1)^(-gamma)*(P(+1)+Y(+1)); // asset price
+// choose dividend process: (un)comment what you want
+Y/Y_SS = (Y(-1)/Y_SS)^rho*exp(EPS);
+/*
+Y-Y_SS = rho*(Y(-1)-Y_SS)+EPS;
+*/
+\end{verbatim}
+}
+
+\subsection{Incompatibilities with Matlab Dynare}
+
+This section provides a list of incompatibilities between a model file
+for Dy\-na\-re++ and Matlab Dynare. These must be considered when a model
+file for Matlab Dynare is being migrated to Dynare++. The list is the
+following:
+\begin{itemize}
+\item There is no {\tt periods} keyword.
+\item Parameters cannot be lagged or leaded. I think Matlab
+Dynare allows it, but the semantics is the same (a parameter is a
+constant).
+\item There are no commands like {\tt steady}, {\tt check}, {\tt
+simul}, {\tt stoch\_simul}, etc.
+\item There are no sections like {\tt estimated\_params}, {\tt
+var\_obs}, etc.
+\item The variance-covariance matrix of exogenous shocks is given by
+the {\tt vcov} matrix in Dynare++. An example follows. Starting from
+version 1.3.5, it is possible for {\tt vcov} to be a positive
+semi-definite matrix.
+{\small
+\begin{verbatim}
+vcov = [
+0.05 0 0 0;
+0 0.025 0 0;
+0 0 0.05 0;
+0 0 0 0.025
+];
+\end{verbatim}
+}
+
+\end{itemize}
+
+\section{Dynare++ Output}
+
+There are three output files: a data file in MAT-4 format containing
+the output data (\ref{matfile}), a journal text file containing
+information about the Dynare++ run (\ref{journalfile}), and a dump
+file (\ref{dumpfile}). Further, Dynare++ generates two Matlab script
+files, which calculate a residual and the first derivative of the
+residual of the static system (\ref{output_matlab_scripts}). These are
+useful when calculating the deterministic steady state outside
+Dynare++.
+
+Note that all output files are created in the current directory of
+the Dynare++ process. This can be different from the directory where
+the Dynare++ binary is located and different from the directory where
+the model file is located.
+
+Before going further, we need to understand which variables are
+automatically generated by Dynare++.
+
+\subsection{Auxiliary Variables}
+\label{aux_var}
+
+Besides the endogenous variables declared in the {\tt var} section,
+Dynare++ might automatically add the following endogenous variables:
+
+\halign{\vrule width0pt height14pt{\tt #}\hfil & \kern 3mm%
+\vtop{\rightskip=0pt plus 5mm\noindent\hsize=9.5cm #}\cr
+MULT{\it n}& A Lagrange multiplier of the optimal policy problem
+associated with constraint number {\it n}, starting from zero.\cr
+AUX\_{\it n1}\_{\it n2}\_{\it n3}& An auxiliary variable associated
+with the last term in equation \eqref{planner_optim_foc2}. Since the
+term is under $E_{t+k}$, we need the auxiliary variable to be put back
+in time. {\it n1} is the variable number, starting from 0 in the declared
+order, with respect to which the term was differentiated, {\it n2} is the
+constraint number starting from 0, and finally {\it n3} is $k$
+(the time shift of the term).\cr
+{\it endovar}\_p{\it K}& An auxiliary variable for bringing an
+endogenous variable {\it endovar} back in time by $K$ periods. The
+semantics of this variable is {\tt {\it endovar}\_p{\it K} = {\it
+endovar}(+{\it K})}.\cr
+{\it endovar}\_m{\it K}& An auxiliary variable for bringing an
+endogenous variable {\it endovar} forward in time by $K$ periods. The
+semantics of this variable is {\tt {\it endovar}\_m{\it K} = {\it
+endovar}(-{\it K})}.\cr
+{\it exovar}\_e& An auxiliary endogenous variable made equal to the
+exogenous variable to allow for a semantical occurrence of the
+exogenous variable at a time other than $t$. The semantics of this
+variable is {\tt {\it exovar}\_e = {\it exovar}}.\cr
+AUXLD\_{\it n1}\_{\it n2}\_{\it n3}& An auxiliary variable for
+bringing a non-linear term containing an occurrence of a variable
+after $t+1$ to time $t+1$. {\it n1} is an equation number starting
+from 0, {\it n2} is the non-linear sub-term number in the equation
+starting from 0. {\it n3} is a time shift. For example, if the first
+equation is the following:
+\begin{verbatim}
+X - Y*W(+1) + W(+2)*Z(+4) = 0;
+\end{verbatim}
+then it will be expanded as:
+\begin{verbatim}
+X - Y*W(+1) + AUXLD_0_2_3(+1) = 0;
+AUXLD_0_2_1 = W(-1)*Z(+1);
+AUXLD_0_2_2 = AUXLD_0_2_1(+1);
+AUXLD_0_2_3 = AUXLD_0_2_2(+1);
+\end{verbatim}
+\cr
+}
+
+\subsection{MAT File}
+\label{matfile}
+
+The contents of the data file are described below. We
+assume that the prefix is {\tt dyn}.
+
+\halign{\vrule width0pt height14pt{\tt #}\hfil & \kern 3mm%
+\vtop{\rightskip=0pt plus 5mm\noindent\hsize=7.5cm #}\cr
+dyn\_nstat& Scalar. The number of static variables
+(those occurring only at time $t$).\cr
+dyn\_npred & Scalar. The number of variables occurring
+at time $t-1$ and not at $t+1$.\cr
+dyn\_nboth & Scalar. The number of variables occurring
+at $t+1$ and $t-1$.\cr
+dyn\_nforw & Scalar. The number of variables occurring
+at $t+1$ and not at $t-1$.\cr
+dyn\_vars & Column vector of endogenous variable
+names in Dy\-na\-re++ internal ordering.\cr
+dyn\_i\_{\it endovar} & Scalar. Index of a variable
+named {\it endovar} in the {\tt dyn\_vars}.\cr
+dyn\_shocks & Column vector of exogenous variable
+names.\cr
+dyn\_i\_{\it exovar} & Scalar. Index of a shock
+named {\it exovar} in the {\tt dyn\_shocks}.\cr
+dyn\_state\_vars & Column vector of state variables;
+these are the stacked variables counted by {\tt dyn\_\-npred}, {\tt
+dyn\_\-nboth} and the shocks.\cr
+dyn\_vcov\_exo & Matrix $nexo\times nexo$. The
+variance-covariance matrix of exogenous shocks as input in the model
+file. The ordering is given by {\tt dyn\_shocks}.\cr
+dyn\_mean & Column vector $nendo\times 1$. The
+unconditional mean of endogenous variables. The ordering is given by
+{\tt dyn\_vars}.\cr
+dyn\_vcov & Matrix $nendo\times nendo$. The
+unconditional covariance of endogenous variables. The ordering is given
+by {\tt dyn\_vars}.\cr
+dyn\_rt\_mean & Column vector $nendo\times 1$. The unconditional mean
+of endogenous variables estimated in real-time. See
+\ref{rt_simul}. The ordering is given by {\tt dyn\_vars}.\cr
+dyn\_rt\_vcov & Matrix $nendo\times nendo$. The unconditional
+covariance of endogenous variables estimated in real-time. See \ref{rt_simul}. The
+ordering is given by {\tt dyn\_vars}.\cr
+dyn\_cond\_mean & Matrix $nendo\times nper$. The rows correspond to
+endogenous variables in the ordering of {\tt dyn\_vars}, the columns
+to periods. If $t$ is a period (starting with 1), then $t$-th column
+is $E[y_t|y_0=\bar y]$. See \ref{cond_dist}.\cr
+dyn\_cond\_variance & Matrix $nendo\times nper$. The rows correspond
+to endogenous variables in the ordering of {\tt dyn\_vars}, the
+columns to periods. If $t$ is a period (starting with 1), then $t$-th
+column are the variances of $y_t|y_0=\bar y$. See \ref{cond_dist}.\cr
+dyn\_ss & Column vector $nendo\times 1$. The fix
+point of the resulting approximation of the decision rule.\cr
+dyn\_g\_{\it order} & Matrix $nendo\times ?$. The
+derivative of the decision rule of order {\it order} multiplied by
+$1/order!$. The rows correspond to endogenous variables in the
+ordering of {\tt dyn\_vars}. The columns correspond to a
+multidimensional index going through {\tt dyn\_state\_vars}. The data
+is folded (all symmetrical derivatives are stored only once).\cr
+dyn\_steady\_states & Matrix $nendo\times
+nsteps+1$. A list of fix points at which the multi-step algorithm
+calculated approximations. The rows correspond to endogenous variables
+and are ordered by {\tt dyn\_vars}, the columns correspond to the
+steps. The first column is always the deterministic steady state.\cr
+dyn\_irfp\_{\it exovar}\_mean & Matrix
+$nendo\times nper$. Positive impulse response to a shock named {\it
+exovar}. The row ordering is given by {\tt dyn\_vars}. The columns
+correspond to periods.\cr
+dyn\_irfp\_{\it exovar}\_var & Matrix
+$nendo\times nper$. The variances of positive impulse response
+functions.\cr
+dyn\_irfm\_{\it exovar}\_mean & Same as {\tt
+dyn\_irfp\_}{\it exovar}{\tt \_mean} but for negative impulse.\cr
+dyn\_irfm\_{\it exovar}\_var & Same as {\tt
+dyn\_irfp\_}{\it exovar}{\tt \_var} but for negative impulse.\cr
+dyn\_simul\_points & A simulation path along which the check was
+done. Rows correspond to endogenous variables, columns to
+periods. Appears only if {\tt --check P}.\cr
+dyn\_simul\_errors & Errors along {\tt
+dyn\_simul\_points}. The rows correspond to equations as stated in the
+model file, the columns to the periods. Appears only if {\tt --check
+P}.\cr
+dyn\_ellipse\_points & A set of points on the ellipse at which the
+approximation was checked. Rows correspond to state endogenous
+variables (the upper part of {\tt dyn\_state\_vars}, this means
+without shocks), and columns correspond to periods. Appears only if
+{\tt --check E}.\cr
+dyn\_ellipse\_errors & Errors on the ellipse points {\tt
+dyn\_ellipse\_points}. The rows correspond to the equations as stated
+in the model file, columns to periods. Appears only if {\tt --check
+E}.\cr
+dyn\_shock\_{\it exovar}\_errors& Errors along a shock named {\it
+exovar}. The rows correspond to the equations as stated in the model
+file. There are $2m+1$ columns, the middle column is the error at zero
+shock. The columns to the left correspond to negative values, columns
+to the right to positive. Appears only if {\tt --check S}.\cr
+}
+
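+As a minimal sketch of how these variables can be used from Matlab,
+the following commands load the data file of a hypothetical model
+{\tt blah.mod} and inspect some of its contents (the model name, the
+variable name {\tt C} and the default {\tt dyn} prefix are
+assumptions made only for this illustration):
+{\small
+\begin{verbatim}
+>> load blah.mat
+>> dyn_vars              % names of endogenous variables
+>> dyn_mean(dyn_i_C)     % unconditional mean of variable C
+>> size(dyn_g_1)         % first derivative of the decision rule
+\end{verbatim}
+}
+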
+\subsection{Journal File}
+\label{journalfile}
+
+The journal file provides information on resource usage during the
+run and gives some informative messages. The journal file is a text
+file organized in single-line records. The format of the records is
+documented in the header of the journal file.
+
+The journal file should be consulted in the following circumstances:
+\begin{itemize}
+\item Something goes wrong. For example, if a model is not
+Blanchard--Kahn stable, then the eigenvalues are dumped to the journal
+file.
+
+If the unconditional covariance matrix {\tt dyn\_vcov} is NaN, then
+from the journal file you will know that all the simulations had to be
+thrown away due to occurrence of NaN or Inf. This is caused by
+non-stationarity of the resulting decision rule.
+
+If Dynare++ crashes, the journal file can be helpful for guessing a
+point where it crashed.
+
+\item You are impatient. You might be looking at the journal file
+during the run in order to have a better estimate about the time when
+the calculations are finished. In Unix, I use a command {\tt tail -f
+blah.jnl}.\footnote{This helps to develop one of the three
+programmer's virtues: {\it impatience}. The other two are {\it
+laziness} and {\it hubris}; according to Larry Wall.}
+
+\item Heavy swapping. If the physical memory is not
+sufficient, the operating system starts swapping memory pages to
+disk. If this is the case, the journal file can be consulted for
+information on memory consumption and swapping activity.
+
+\item Not sure what Dynare++ is doing. If so, read the journal file,
+which contains a detailed record on what was calculated, simulated
+etc.
+\end{itemize}
+
+\subsection{Dump File}
+\label{dumpfile}
+
+The dump file is always created with the suffix {\tt .dump}. It is a
+text file which takes the form of a model file. It sets the parameter
+values which were used, it has an initval section setting the values
+which were finally used, and, most importantly, it has a model section
+containing all equations with all substitutions made and with the first
+order conditions of the planner formed.
+
+The dump file serves debugging purposes, since it contains the
+mathematical problem which is being solved by Dynare++.
+
+\subsection{Matlab Scripts for Steady State Calculations}
+\label{output_matlab_scripts}
+
+This section describes two Matlab scripts, which are useful when
+calculating the deterministic steady state outside Dynare++. The
+scripts are created by Dynare++ as soon as an input file is parsed,
+that is, before any calculations are performed.
+
+The first Matlab script, named {\tt {\it modname}\_f.m}, calculates,
+for given parameter values and a vector of all endogenous variables
+$y$, the residual of the static system. Supposing the model is in
+the form of \eqref{focs}, the script calculates the vector:
+\[
+f(y,y,y,0)
+\]
+
+The second script, named {\tt {\it modname}\_ff.m}, calculates the matrix:
+\[
+\frac{\partial}{\partial y}f(y,y,y,0)
+\]
+
+Both scripts take two arguments. The first is a vector of parameter
+values in the same ordering as declared in the model file. The
+second is a vector of all endogenous variables at which the evaluation
+is performed. These endogenous variables also include the auxiliary
+variables automatically added by Dynare++ and the Lagrange multipliers if
+an optimal policy problem is solved. If no endogenous variables have
+been added by Dynare++, then the ordering is the same as the ordering
+of the declaration in the model file. If some endogenous variables have
+been added, then the ordering can be read from the comments close to the
+top of either of the two files.
+
+For example, if we want to calculate the deterministic steady state of
+the {\tt kp1980.dyn} model, we need to do the following:
+\begin{enumerate}
+\item Run Dynare++ with {\tt kp1980.dyn}. It does not matter whether
+the calculation finishes; the important output is the two Matlab scripts
+created right at the beginning.
+\item Consult the file {\tt kp1980\_f.m}\ to get the ordering of parameters
+and all endogenous variables.
+\item Create a vector {\tt p} with the parameter values in that ordering.
+\item Create a vector {\tt init\_y} with the initial guess for the
+Matlab solver {\tt fsolve}.
+\item Create a simple Matlab function called {\tt kp1980\_fsolve.m}\ 
+returning the residual and Jacobian:
+{\small
+\begin{verbatim}
+function [r, J] = kp1980_fsolve(p, y)
+  r = kp1980_f(p, y);
+  J = kp1980_ff(p, y);
+\end{verbatim}
+}
+\item In the Matlab prompt, run the following:
+{\small
+\begin{verbatim}
+opt=optimset('Jacobian','on','Display','iter');
+y=fsolve(@(y) kp1980_fsolve(p,y), init_y, opt);
+\end{verbatim}
+}
+\end{enumerate}
+ 
+
+\subsection{Custom Simulations}
+\label{custom}
+
+When a Dynare++ run is finished, it dumps the derivatives of the
+calculated decision rule to the MAT file. The derivatives can be used
+to construct the decision rule and to run custom simulations.
+This is done by the {\tt dynare\_simul.m} M-file in Matlab. It reads
+the derivatives and simulates the decision rule with a provided
+realization of the shocks.
+
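+As a minimal sketch, reusing {\tt kp1980\_2.mat} from \ref{optim_tut}
+(the size and scaling of the shock realization below are arbitrary
+assumptions, not taken from the model), a custom simulation started
+from the fix point could look like this:
+{\small
+\begin{verbatim}
+>> load kp1980_2.mat
+>> shocks = 0.01*randn(1,100);   % one shock, 100 periods
+>> r = dynare_simul('kp1980_2.mat', shocks, dyn_ss);
+\end{verbatim}
+}
+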
+All the necessary documentation can be viewed by the command:
+{\small
+\begin{verbatim}
+help dynare_simul
+\end{verbatim}
+}
+
+\end{document}
diff --git a/dynare++/extern/R/Makefile b/dynare++/extern/R/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..4501ebdb56cf0cab337bb10f2b27fb482f8e16eb
--- /dev/null
+++ b/dynare++/extern/R/Makefile
@@ -0,0 +1,54 @@
+RINTERNALS=/usr/share/R/include/
+
+sylvcppsource := $(wildcard ../../sylv/cc/*.cpp)
+sylvhsource := $(wildcard ../../sylv/cc/*.h)
+sylvobjects := $(patsubst %.cpp, %.o, $(sylvcppsource))
+
+tlcwebsource := $(wildcard ../../tl/cc/*.cweb)
+tlcppsource := $(patsubst %.cweb,%.cpp,$(tlcwebsource))
+tlhwebsource := $(wildcard ../../tl/cc/*.hweb)
+tlhsource := $(patsubst %.hweb,%.h,$(tlhwebsource))
+tlobjects := $(patsubst %.cweb,%.o,$(tlcwebsource))
+
+kordcwebsource := $(wildcard ../../kord/*.cweb)
+kordcppsource := $(patsubst %.cweb,%.cpp,$(kordcwebsource))
+kordhwebsource := $(wildcard ../../kord/*.hweb)
+kordhsource := $(patsubst %.hweb,%.h,$(kordhwebsource))
+kordobjects := $(patsubst %.cweb,%.o,$(kordcwebsource))
+
+integcwebsource := $(wildcard ../../integ/cc/*.cweb)
+integcppsource := $(patsubst %.cweb,%.cpp,$(integcwebsource))
+integhwebsource := $(wildcard ../../integ/cc/*.hweb)
+integhsource := $(patsubst %.hweb,%.h,$(integhwebsource))
+integobjects := $(patsubst %.cweb,%.o,$(integcwebsource))
+
+parserhsource := $(wildcard ../../parser/cc/*.h)
+parsercppsource := $(wildcard ../../parser/cc/*.cpp)
+
+utilshsource := $(wildcard ../../utils/cc/*.h)
+utilscppsource := $(wildcard ../../utils/cc/*.cpp)
+
+srccpp := dynare3.cpp dynare_model.cpp planner_builder.cpp dynare_atoms.cpp dynare_params.cpp  nlsolve.cpp
+objects := $(patsubst %.cpp,../../src/%.o,$(srccpp)) \
+$(patsubst %.y,%_ll.o,$(wildcard ../../src/*.y)) \
+$(patsubst %.lex,%_tab.o,$(wildcard ../../src/*.lex))
+
+PKG_CPPFLAGS= -I../../tl/cc -I../../sylv/cc -I../../kord -I../../src -I../.. -I$(RINTERNALS)
+PKG_LIBS= ${LAPACK_LIBS} ${BLAS_LIBS} $(objects) $(kordobjects) $(integobjects) $(tlobjects) ../../parser/cc/parser.a ../../utils/cc/utils.a $(sylvobjects) -lpthread -llapack -lcblas -lf77blas -latlas -lg2c -lstdc++
+
+ifneq ($(LD_LIBRARY_PATH),)	# use LD_LIBRARY_PATH from environment
+	PKG_LIBS := -Wl,--library-path $(LD_LIBRARY_PATH) $(PKG_LIBS)
+endif
+
+dynareR.so: dynareR.o
+	g++ -shared  -o dynareR.so dynareR.o -L/usr/lib/R/lib -lR $(PKG_LIBS)
+
+dynareR.o: dynareR.cpp
+	g++ -I/usr/share/R/include -I/usr/share/R/include $(PKG_CPPFLAGS) \
+	-fpic  -g -O2 -c dynareR.cpp -o dynareR.o -DDEBUG
+
+test: test.cpp dynareR.cpp
+	g++ -O0 -g -o test test.cpp -DDEBUG $(PKG_LIBS) $(PKG_CPPFLAGS)
+
+test-debug:
+	valgrind --leak-check=yes ./test
diff --git a/dynare++/extern/R/README b/dynare++/extern/R/README
new file mode 100644
index 0000000000000000000000000000000000000000..c42c043b83199be36ebe0d808a4bea696755bd37
--- /dev/null
+++ b/dynare++/extern/R/README
@@ -0,0 +1,17 @@
+COMPILING
+
+The makefile for this interface is still preliminary, I will write a decent
+one when I have the time.  It needs all the compiled files from dynare++,
+but doesn't know how to make them.  So first you need to run make in the
+src/ directory, then run make in extern/R.
+
+You need Rinternals.h to build this interface.  If you are not using a
+prepackaged R on Unix/Linux, you need to modify the variable RINTERNALS in
+the Makefile accordingly.
+
+To compile dynare++, read doc/compiling-notes.txt.
+
+INSTALLATION
+
+Copy the dynareR.r and dynareR.so files to your working directory so that R
+can find them.
diff --git a/dynare++/extern/R/dynareR.cpp b/dynare++/extern/R/dynareR.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..727fe9967c2ad3672fab9c85d607478e7c86f11d
--- /dev/null
+++ b/dynare++/extern/R/dynareR.cpp
@@ -0,0 +1,249 @@
+// $Id: dynareR.cpp 862 2006-08-04 17:34:56Z tamas $
+
+// Copyright 2006, Tamas K Papp
+
+#include "dynare3.h"			// Dynare class
+#include "approximation.h"		// Approximation class
+
+// exceptions
+#include "dynare_exception.h"
+#include "parser/cc/parser_exception.h"
+#include "utils/cc/exception.h"
+#include "SylvException.h"
+#include "tl_exception.h"
+#include "kord_exception.h"
+
+#include <algorithm>
+
+#include <string.h>
+
+#ifdef DEBUG
+#include <stdio.h>
+#endif
+
+#include <R_ext/Memory.h>
+
+/** This file contains the C glue functions for an R interface to
+ * Dynare++.  Although written in standard C (except for the use of
+ * R_alloc), the indexing, calling and memory management conventions
+ * of the functions in this file were tailored for R.
+ *
+ * It is not recommended that you use this interface for anything else
+ * but R.
+ */ 
+
+/** Error codes: these error codes correspond to possible
+ * exceptions. */
+#define DYNARER_SYLVEXCEPTION 1
+#define DYNARER_DYNAREEXCEPTION 2
+#define DYNARER_OGUEXCEPTION 3
+#define DYNARER_TLEXCEPTION 4
+#define DYNARER_KORDEXCEPTION 5
+#define DYNARER_NAMESMATCHINGERROR 6
+
+/** Copies the message into a buffer.  The buffer is allocated and
+ * managed by R, ie it will be garbage collected after the .C call
+ * returns and the contents are duplicated.
+ */
+char *passmessage(const char *errormessage) {
+	long l = strlen(errormessage);
+	char *em = R_alloc(l + 1, 1);	// +1 for the terminating '\0' copied by strcpy
+	return strcpy(em, errormessage);
+}
+
+/** This function puts the mapping between the newtotal items after
+ * nl[offset] and the items in orig into the buffer perm, which has to
+ * be at least as long as newtotal.  The function uses R indexing,
+ * that is to say, the first index is 1.
+ */
+int matchnames(const char **orig, int origtotal, 
+			   const NameList &nl, int offset, int newtotal,
+			   int *perm) {
+#ifdef DEBUG
+	printf("matching names (R indexing):\n");
+#endif
+	for (int i=0; i < newtotal; i++) {
+		int j;
+		for (j=0; j < origtotal; j++)
+			if (strcmp(nl.getName(offset+i), *(orig+j))==0) {
+				*(perm+i) = j+1;
+#ifdef DEBUG
+				printf("%d -> %d\n",i+1,j+1);
+#endif
+				break;
+			}
+		if (j==origtotal)
+			return 1;
+	}
+	return 0;
+}
+
+/** dynareR is the interface function.  The user provides:
+ * - a list of endogenous and exogenous variables, a list of
+ *   parameters (and the length of each list)
+ * - the model equations (modeleq, pointer to a 0-terminated string)
+ * - the order of expansion (ord)
+ * - journal file name (jnlfile, can be "/dev/null" for no journal)
+ * - values for the parameters (parval)
+ * - variance-covariance matrix (vcov, stacked by columns, R does
+ *   this)
+ * - initial values for finding the steady state (initval)
+ * - and the number of steps for the approximation algorithm
+ *   (numsteps)
+ *
+ * If successful, the interface will write the results to these
+ * buffers:
+ * - tensorbuffer for the steady state and the flattened tensors
+ * - num_state for the number of endogenous variables that ended up in
+ *   the state
+ * - mappings to variable names (ordering_state, ordering_endo,
+ *   ordering_exo), indices start from 1
+ * - the deterministic steady state (newinitval)
+ *
+ * If dynare throws an exception, the interface tries to catch it and
+ * return an error code (error), and error message (errormessage), and
+ * if applicable, information on the stability of the model
+ * (kordcode).  errormessage is allocated into R's memory, and will be
+ * collected after duplication.
+ */
+extern "C" {
+	void dynareR(const char** endo, const int* num_endo,
+				 const char** exo, const int* num_exo,
+				 const char** par, const int* num_par,
+				 const char** equations, const int* ord, const char* jnlfile,
+				 const double *parval, const double *vcov, 
+				 const double *initval,
+				 const int *num_steps,
+				 double* tensorbuffer,
+				 int *num_state, int *ordering_state,
+				 int *ordering_endo, int *ordering_exo,
+				 double *newinitval,
+				 int* error, char **errormessage, int *kordcode) {
+		// construct the model here
+		try {	
+#ifdef DEBUG					// will print only first var names etc.
+			printf("eq: %s\nendo: %d %s\nexo: %d %s\npar: %d %s\nord: %d\n",
+				   *equations,*num_endo,*endo,*num_exo,*exo,*num_par,*par,*ord);
+#endif
+			// create journal
+			Journal journal(jnlfile);
+			// create Dynare object
+			Dynare dynare(endo, *num_endo, exo, *num_exo,
+						  par, *num_par, *equations, strlen(*equations),
+						  *ord, journal);
+			// set Vcov and parameter values
+			copy(parval,parval+(*num_par),dynare.getParams().base());
+#ifdef DEBUG
+			printf("parameter values (%d):\n",dynare.getParams().length());
+			dynare.getParams().print();
+#endif
+			copy(vcov,vcov+(*num_exo)*(*num_exo),dynare.getVcov().base());
+#ifdef DEBUG
+			printf("vcov matrix:\n");
+			dynare.getVcov().print();
+#endif
+			// set initial values
+			Vector iv(initval,*num_endo);
+#ifdef DEBUG
+			printf("initial values:\n");
+			iv.print();
+#endif
+			dynare.setInitOuter(iv);
+			// construct approximation
+			tls.init(dynare.order(),
+					 dynare.nstat()+2*dynare.npred()+3*dynare.nboth()+
+					 2*dynare.nforw()+dynare.nexog());
+			Approximation approximation(dynare,journal,*num_steps);
+			approximation.walkStochSteady();
+			// write the steady state into the buffer
+			int ny = dynare.ny();
+			const Vector ss(dynare.getSteady());
+//			ss = ConstVector(approximation.getSS(), 0); // FIXME allow
+//														// for nonzero
+			int s = dynare.getStateNames().getNum();
+			int sm = s;
+			tensorbuffer = copy(ss.base(),ss.base()+ny,tensorbuffer);
+			// write the tensors into buffer
+			const UnfoldDecisionRule& udr = 
+				approximation.getUnfoldDecisionRule();
+			for (int i=1; i <= *ord; i++) {
+				const UFSTensor* t = udr.get(Symmetry(i));
+#ifdef DEBUG
+				printf("tensor %d:\n", i);
+				t->print();
+#endif
+				tensorbuffer = copy(t->base(), t->base()+ny*sm, tensorbuffer);
+				sm *= s;
+				}
+			// save number of endogenous states
+			*num_state = s-(*num_exo);
+			// ordering
+#ifdef DEBUG
+			printf("all endo names:\n");
+			dynare.getAllEndoNames().print();
+			printf("all state names:\n");
+			dynare.getStateNames().print();
+#endif
+			if (matchnames(endo, *num_endo, dynare.getAllEndoNames(),
+						   0, *num_endo, ordering_endo) ||
+				matchnames(endo, *num_endo, dynare.getStateNames(),
+						   0, *num_state, ordering_state) ||
+				matchnames(exo, *num_exo, dynare.getStateNames(),
+						   *num_state, *num_exo, ordering_exo)) {
+				*error = DYNARER_NAMESMATCHINGERROR;
+				*errormessage = "There was a problem when matching names.  This is weird and should not happen.";
+				return;
+			}
+			// return new init values (first column of SS matrix)
+			ConstVector newinit((const GeneralMatrix&) approximation.getSS(), 0);
+#ifdef DEBUG
+			printf("new initial values:\n");
+			newinit.print();
+#endif
+			copy(newinit.base(),newinit.base()+(*num_endo),newinitval);
+		} catch (const SylvException &e) {
+			*error = DYNARER_SYLVEXCEPTION;
+			char errorbuffer[501];
+			e.printMessage(errorbuffer, 500);
+			*errormessage = passmessage(errorbuffer);
+#ifdef DEBUG
+			printf("Caught Sylv exception: ");
+			e.printMessage();
+#endif
+			return;
+		} catch (const DynareException &e) {
+			*error = DYNARER_DYNAREEXCEPTION;
+			*errormessage = passmessage(e.message());
+#ifdef DEBUG
+			printf("Caught Dynare exception: %s\n", e.message());
+#endif
+			return;
+		}  catch (const ogu::Exception &e) {
+			*error = DYNARER_OGUEXCEPTION;
+			*errormessage = passmessage(e.message());
+#ifdef DEBUG
+			printf("Caught ogu::Exception: ");
+			e.print();
+#endif
+			return;
+		} catch (const TLException &e) {
+			*error = DYNARER_TLEXCEPTION;
+			*errormessage = passmessage(e.getmessage());
+#ifdef DEBUG
+			printf("Caught TL exception: ");
+			e.print();
+#endif
+			return;
+		} catch (const KordException &e) {
+			*error = DYNARER_KORDEXCEPTION;
+			*errormessage = passmessage(e.getmessage());
+			*kordcode = e.code(); // Kord error code
+#ifdef DEBUG
+			printf("Caught Kord exception: ");
+			e.print();
+#endif
+			return;
+		}
+		*error = 0;
+		return;}
+}
diff --git a/dynare++/extern/R/dynareR.r b/dynare++/extern/R/dynareR.r
new file mode 100644
index 0000000000000000000000000000000000000000..b7fb75cdf06d562bbffbf71e04a7c74a7dd9c236
--- /dev/null
+++ b/dynare++/extern/R/dynareR.r
@@ -0,0 +1,103 @@
+## $Id: dynareR.r 862 2006-08-04 17:34:56Z tamas $
+
+## Copyright 2006, Tamas K Papp
+
+dyn.load("dynareR.so")                  # FIXME: make it platform-independent
+
+## FIXME hide auxiliary functions in a namespace
+
+## Offset of the order-`ord' tensor in the flat result buffer, i.e. the
+## total number of elements in the tensors of orders 0,...,ord-1:
+## nume*(1 + nums + ... + nums^(ord-1)).
+dynareR.indextensor <- function(ord, nume, nums) {
+  nume*((nums^ord-1)/(nums-1))
+}
+
+## Extract the order-`ord' tensor from the flat result buffer and reshape
+## it into an (ord+1)-dimensional array.
+dynareR.extracttensor <- function(tensor, ord, nume, nums) {
+  aperm(array(tensor[dynareR.indextensor(ord,nume,nums)+(1:(nume*nums^ord))],
+              c(nume,rep(nums,ord))),(ord+1):1)
+}
+
+dynareR.errormessages <- c("Sylvester exception",
+                           "Dynare exception",
+                           "OGU exception",
+                           "Tensor library exception",
+                           "K-order expansion library exception",
+                           "Error matching names")
+
+calldynare <- function(modeleq, endo, exo, parameters, expandorder,
+                       parval, vcovmatrix, initval=rep(1,length(endo)),
+                       numsteps=0, jnlfile="/dev/null") {
+  ## check type of parameters
+  local({
+    is.charvector <- function(cv) { is.character(cv) && is.vector(cv) }
+    stopifnot(is.charvector(modeleq) && is.charvector(endo) &&
+              is.charvector(exo) && is.charvector(parameters) &&
+              is.charvector(jnlfile))
+  })
+  stopifnot(is.numeric(expandorder) && is.vector(expandorder) &&
+            (length(expandorder) == 1) && (expandorder >= 0))
+  stopifnot(length(jnlfile) == 1)
+  local({                               # variable names
+    checkvarname <- function(v) {
+      stopifnot(length(grep("[^a-zA-Z].*",v)) == 0) # look for strange chars 
+    }
+    checkvarname(endo)
+    checkvarname(exo)
+    checkvarname(parameters)
+  })
+  stopifnot(is.vector(parval) && is.numeric(parval))
+  stopifnot(is.vector(initval) && is.numeric(initval))
+  stopifnot(is.matrix(vcovmatrix) && is.numeric(vcovmatrix))
+  stopifnot(is.numeric(numsteps) && is.vector(numsteps) &&
+            (length(numsteps)==1))
+  ## append semicolons to model equations if necessary
+  modeleq <- sapply(modeleq, function(s) {
+    if (length(grep("^.*; *$",s))==1)
+      s
+    else
+      sprintf("%s;",s)
+  })
+  ## then concatenate into a single string
+  modeleq <- paste(modeleq, collapse=" ")
+  ## call dynareR
+  nume <- length(endo)
+  maxs <- length(endo)+length(exo)
+  dr <- .C("dynareR",
+           endo,as.integer(nume),
+           exo,as.integer(length(exo)),
+           parameters,as.integer(length(parameters)),
+           modeleq,as.integer(expandorder),jnlfile,
+           as.double(parval),as.double(vcovmatrix),
+           as.double(initval),
+           as.integer(numsteps),
+           tensorbuffer=double(dynareR.indextensor(expandorder+1,nume,maxs)),
+           numstate=integer(1), orderstate=integer(maxs),
+           orderendo=integer(nume),
+           orderexo=integer(length(exo)),
+           newinitval=double(nume),
+           error=integer(1),
+           errormessage=character(1),
+           kordcode=integer(1))
+  ## check for errors
+  if (dr$error == 5) {                  # KordException: report only kordcode
+    list(kordcode=dr$kordcode - 251)    # magic dynare++ constant
+  } else if (dr$error == 0) {
+      ## return result
+      with(dr, {
+        nums <- numstate+length(exo)
+        list(ss=dynareR.extracttensor(dr$tensorbuffer,0,nume,nums), # ss
+             rule=lapply(1:expandorder,function (o) { # decision rule, list of arrays
+               dynareR.extracttensor(dr$tensorbuffer,o,nume,nums)
+             }),
+             orderstate=orderstate[1:numstate], # state ordering
+             orderendo=orderendo,           # endog. ordering
+             orderexo=orderexo,             # exog. ordering
+             newinitval=newinitval,         # new init values
+             kordcode=0)
+      })
+  } else {
+    stop(sprintf("%s (\"%s\")",dynareR.errormessages[dr$error],
+                 dr$errormessage))
+  }
+}
diff --git a/dynare++/extern/R/dynareR.tex b/dynare++/extern/R/dynareR.tex
new file mode 100644
index 0000000000000000000000000000000000000000..b688f1a62c887004eae3e8e48c8e50a0301bf806
--- /dev/null
+++ b/dynare++/extern/R/dynareR.tex
@@ -0,0 +1,201 @@
+%% $Id: dynareR.tex 863 2006-08-04 17:35:21Z tamas $
+%% Copyright Tamas K Papp, 2006
+%% should compile with any reasonable TeX distribution, I am using tetex
+\documentclass[12pt,a4paper]{article}
+
+\usepackage{amsmath}
+\usepackage{amsfonts}
+%\usepackage[letterpaper,vmargin=1.7in]{geometry}
+%\usepackage[letterpaper,left=2cm,right=8cm,bottom=3cm,top=3cm,marginparwidth=4cm]{geometry}
+%\usepackage{natbib}
+\usepackage{graphicx}
+\usepackage{url}
+\usepackage{natbib}
+\usepackage{color}
+\usepackage{paralist}           % compactitem
+\DeclareMathOperator{\Var}{Var}
+\DeclareMathOperator{\Cov}{Cov}
+\DeclareMathOperator{\argmin}{argmin}
+\DeclareMathOperator{\argmax}{argmax}
+\DeclareMathSymbol{\ueps}{\mathord}{letters}{"0F} % ugly epsilon
+\renewcommand{\epsilon}{\varepsilon}
+\newcommand{\aseq}{\overset{as}=} % almost surely equals
+
+\usepackage{fancyhdr}
+\pagestyle{fancy}
+\lhead{Tam\'as K Papp} \chead{} \rhead{DynareR}
+\cfoot{\thepage}
+
+\renewcommand\floatpagefraction{.9}
+\renewcommand\topfraction{.9}
+\renewcommand\bottomfraction{.9}
+\renewcommand\textfraction{.1}
+
+\usepackage{listings}
+\lstset{
+  language=R,
+  extendedchars=true,
+  basicstyle=\footnotesize,
+  stringstyle=\ttfamily,
+  commentstyle=\slshape,
+%  numbers=left,
+%  stepnumber=5,
+%  numbersep=6pt,
+%  numberstyle=\footnotesize,
+  breaklines=true,
+  frame=single,
+  columns=fullflexible,
+}
+
+\begin{document}
+
+\title{DynareR}
+\author{Tam\'as K Papp (\url{tpapp@princeton.edu})}
+\date{\today}
+\maketitle
+
+DynareR is an R interface for Ondra Kamen\'ik's Dynare++ program.  The
+interface is still under development, and the functions might change.
+However, I thought that some documentation would help to get users
+started.
+
+The purpose of DynareR is to return the transition rule (the
+steady state and a list of tensors) for a given model.  DynareR
+does not simulate, and currently does no checking of the
+approximation.  Primarily, the interface is intended to be used
+in Bayesian estimation of DSGE models (via MCMC).
+
+Before you read on, make sure that
+\begin{compactitem}
+  \item you understand what Dynare++ is and how it works,
+  \item you have compiled Dynare++ and DynareR (see \verb!README! in
+    \verb!extern/R!), and placed \verb!dynareR.so! and
+    \verb!dynareR.r! in your load path for R.
+\end{compactitem}
+
+The function that performs all the work is called
+\lstinline{calldynare}.  It is defined like this:
+\begin{lstlisting}
+  calldynare <- function(modeleq, endo, exo, parameters, expandorder,
+                       parval, vcovmatrix, initval=rep(1,length(endo)),
+                       numsteps=0, jnlfile="/dev/null") {
+                         ...
+                       }
+\end{lstlisting}
+\lstinline{modeleq} is a character vector for the model equations, and
+it may have a length longer than one.  First, \lstinline{calldynare}
+checks whether each string in the vector has a terminating semicolon
+(possibly followed by whitespace); if it doesn't, one is appended.  Then
+it concatenates all equations into a single string.  Thus, the
+following versions of \lstinline{modeleq} give equivalent results:
+\begin{lstlisting}
+  modeleq1 <- c("(c/c(1))^gamma*beta*(alpha*exp(a(1))*k^(alpha-1)+1-delta)=1",
+              "a=rho*a(-1)+eps",
+              "k+c=exp(a)*k(-1)^alpha+(1-delta)*k(-1)")
+  modeleq2 <- c("(c/c(1))^gamma*beta*(alpha*exp(a(1))*k^(alpha-1)+1-delta)=1;",
+              "a=rho*a(-1)+eps ;    ",
+              "k+c=exp(a)*k(-1)^alpha+(1-delta)*k(-1)  \t;\t  ")
+  modeleq3 <- paste(modeleq1, collapse=" ")
+\end{lstlisting}
+The next three arguments name the endo- and exogenous variables and
+the parameters.  The names should be character vectors, for example,
+\begin{lstlisting}
+  parameters <- c("beta","gamma","rho","alpha","delta")
+  varendo <- c("k","c","a")
+  varexo <- "eps"
+\end{lstlisting}
+\lstinline{calldynare} also needs the order of the approximation
+\lstinline{expandorder} (a nonnegative integer), the parameter values
+\lstinline{parval} (should be the same length as
+\lstinline{parameters}), a variance-covariance matrix \lstinline{vcovmatrix}
+(dimensions should match the length of \lstinline{exo}) and initial
+values for finding the deterministic steady state
+(\lstinline{initval}).  If you don't provide initial values,
+\lstinline{calldynare} will use a sequence of $1$s, on the assumption
+that most variables in economics are positive --- you should always
+try to provide a reasonable initial guess for the nonlinear solver if
+possible (if you are doing MCMC, chances are that you only have to do
+it once, see \lstinline{newinitval} below).
+
+You can also provide the number of steps for calculating the
+stochastic steady state (\lstinline{numsteps}, the default is zero,
+see the dynare++ tutorial for more information) and the name of the
+journal file \lstinline{jnlfile}.  If you don't provide a journal
+file, the default is \verb!/dev/null!.
+
+Below, you see an example of using dynareR.
+\lstinputlisting{test.r}
+
+\lstinline{calldynare} returns the results in a list, variables below
+refer to elements of this list.  First, you should always check
+\lstinline{kordcode}, which tells whether dynare++ could calculate an
+approximation.  It can have the following values:
+\begin{description}
+\item[0] the calculation was successful
+\item[1] the system is not stable (Blanchard-Kahn)
+\item[2] failed to calculate fixed point (infinite values)
+\item[3] failed to calculate fixed point (NaN values)
+\end{description}
+If \lstinline{kordcode} is nonzero, then the list has only this
+element.
+
+If \lstinline{kordcode} equals zero, then the list has the following
+elements:
+\begin{description}
+\item[ss] the steady state (ordered by \lstinline{orderendo}), which
+  is a vector
+\item[rule] the transition rule (ordered by \lstinline{orderendo},
+  \lstinline{orderstate} and \lstinline{orderexo}), a list of arrays
+\item[newinitval] the deterministic steady state, you can use this to
+  initialize the nonlinear solver for a nearby point in the parameter
+  space (ordered by \lstinline{orderendo})
+\item[orderstate] the index of endogenous variables that ended up in
+  the state
+\item[orderendo] the ordering of endogenous variables
+\item[orderexo] the ordering of exogenous variables
+\item[kordcode] discussed above
+\end{description}
+
+An example will illustrate the ordering.  To continue the example above,
+\begin{lstlisting}
+> dd$orderstate
+[1] 1 3
+> dd$orderendo
+[1] 1 3 2
+> dd$orderexo
+[1] 1
+> dd$rule[[1]]
+          [,1] [,2]       [,3]
+[1,] 0.9669374  0.0 0.02071077
+[2,] 2.4230073  0.9 0.45309125
+[3,] 2.6922303  1.0 0.50343473
+\end{lstlisting}
+Recall that the original ordering of endogenous variables was
+\lstinline{k, c, a}.  The vectors and matrices of the result are
+ordered as \lstinline{varendo[dd$orderendo]}, that is, as
+\lstinline{k, a, c}.  This is the ordering for the steady state and
+the first dimension of the tensors in \lstinline{rule}.  The other
+dimensions are ordered as
+\lstinline{c(varendo[dd$orderstate],varexo[dd$orderexo])}, that is to
+say, as \lstinline{k, a, eps}.  Use these orderings when calculating
+with the tensors and the steady state.  Also, remember that the $i$th
+tensor is already divided by $i!$.
+
+\lstinline{calldynare} also handles exceptions from dynare.  All
+exceptions (except KordException, which sets \lstinline{kordcode})
+generate an error in the R interface.  Normally, when solving a
+well-formed model (no typos in the equations, etc), users should not
+encounter these exceptions.  Having a journal file is useful for
+debugging.  If you are making long calculations, it is reasonable to
+catch errors with \lstinline{try} so that they won't abort the
+calculation.
+
+% \bibliographystyle{apalike}
+% \bibliography{/home/tpapp/doc/general.bib}
+
+\end{document}
+
+%%% Local Variables: 
+%%% mode: latex
+%%% TeX-master: t
+%%% End: 
diff --git a/dynare++/extern/R/test.cpp b/dynare++/extern/R/test.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..6c843c609e941d2903db35260c840f4472f6fe86
--- /dev/null
+++ b/dynare++/extern/R/test.cpp
@@ -0,0 +1,32 @@
+#include "dynareR.cpp"
+
+int main(void) {
+	const char *parameters[] = {"beta","gamma","rho","alpha","delta"};
+	const char *varendo[] = {"k","c","a"};
+	const char *varexo[] = {"eps"};
+	const int numpar = 5;
+	const int numendo = 3;
+	const int numexo = 1;
+	const int ord = 2;
+	const int numsteps = 0;
+	const double parval[] = {.99,2,.9,.3,.025};
+	const double vcov[] = {0.001};
+	const double initval[] = {0.066, 0.43, 0.01};
+
+	int e;
+	double tensorbuffer[100];
+	int num_state;
+	int ordering_state[] = {0,0,0};
+	int ordering_endo[] = {0,0,0};
+	int ordering_exo[] = {0};
+	double newinitval[] = {0,0,0};
+	
+	const char *modeleq[] = {"(c/c(1))^gamma*beta*(alpha*exp(a(1))*k^(alpha-1)+1-delta)=1; a=rho*a(-1)+eps; k+c=exp(a)*k(-1)^alpha+(1-delta)*k(-1);"};
+
+	// the error message and kord code outputs required by dynareR's signature
+	char *errormessage = 0;
+	int kordcode = 0;
+
+	dynareR(varendo, &numendo, varexo, &numexo, parameters, &numpar, modeleq,
+			&ord, "journal", parval, vcov, initval,
+			&numsteps, tensorbuffer,
+			&num_state, ordering_state, ordering_endo, ordering_exo,
+			newinitval, &e, &errormessage, &kordcode);
+	printf("error code: %d\n", e);
+}
diff --git a/dynare++/extern/R/test.r b/dynare++/extern/R/test.r
new file mode 100644
index 0000000000000000000000000000000000000000..2896d13e2e7b5a50d24a67c24a40d725c5c5aac9
--- /dev/null
+++ b/dynare++/extern/R/test.r
@@ -0,0 +1,15 @@
+source("dynareR.r")
+
+parameters <- c("beta","gamma","rho","alpha","delta")
+varendo <- c("k","c","a")
+varexo <- "eps"
+parval <- c(.99,2,.9,.3,.025)
+vcovmatrix <- matrix(1,1,1)
+initval <- c(0.066, 0.43, 0.01)
+
+modeleq <- c("(c/c(1))^gamma*beta*(alpha*exp(a(1))*k^(alpha-1)+1-delta)=1",
+             "a=rho*a(-1)+eps",
+             "k+c=exp(a)*k(-1)^alpha+(1-delta)*k(-1)")
+
+
+dd <- calldynare(modeleq,varendo,varexo,parameters,2,parval,vcovmatrix,initval)
diff --git a/dynare++/extern/matlab/Makefile b/dynare++/extern/matlab/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..46c95a900e1d1f9be7cb7e595be7d7657b734e98
--- /dev/null
+++ b/dynare++/extern/matlab/Makefile
@@ -0,0 +1,134 @@
+include ../../Makefile.include
+#CC = gcc
+CC_FLAGS = -Wall -I../../sylv/cc -I../../tl/cc -I../../kord -I../../integ/cc
+LDFLAGS = -llapack -lblas -lg2c -lstdc++
+
+ifeq ($(DEBUG),yes)
+	CC_FLAGS := $(CC_FLAGS) -g -DTL_DEBUG=2
+else
+	CC_FLAGS := $(CC_FLAGS) -O3 -DPOSIX_THREADS
+endif
+
+ifeq ($(OS),Windows_NT)
+	CC_FLAGS := -mno-cygwin -mthreads $(CC_FLAGS)
+	LDFLAGS := -mno-cygwin -mthreads $(LDFLAGS)  -lpthreadGC2
+	ARCH := w32
+	MEX_SUFFIX = dll
+else
+	CC_FLAGS := -fPIC $(CC_FLAGS)
+	LDFLAGS := $(LDFLAGS) -lpthread
+	ARCH := linux
+#	MEX_SUFFIX = mexglx
+	MEX_SUFFIX = mexa64
+endif
+
+sylvcppsource := $(wildcard ../../sylv/cc/*.cpp)
+sylvhsource := $(wildcard ../../sylv/cc/*.h)
+sylvobjects := $(patsubst %.cpp, %.o, $(sylvcppsource))
+
+tlcwebsource := $(wildcard ../../tl/cc/*.cweb)
+tlcppsource := $(patsubst %.cweb,%.cpp,$(tlcwebsource))
+tlhwebsource := $(wildcard ../../tl/cc/*.hweb)
+tlhsource := $(patsubst %.hweb,%.h,$(tlhwebsource))
+tlobjects := $(patsubst %.cweb,%.o,$(tlcwebsource))
+
+kordcwebsource := $(wildcard ../../kord/*.cweb)
+kordcppsource := $(patsubst %.cweb,%.cpp,$(kordcwebsource))
+kordhwebsource := $(wildcard ../../kord/*.hweb)
+kordhsource := $(patsubst %.hweb,%.h,$(kordhwebsource))
+kordobjects := $(patsubst %.cweb,%.o,$(kordcwebsource))
+
+integcwebsource := $(wildcard ../../integ/cc/*.cweb)
+integcppsource := $(patsubst %.cweb,%.cpp,$(integcwebsource))
+integhwebsource := $(wildcard ../../integ/cc/*.hweb)
+integhsource := $(patsubst %.hweb,%.h,$(integhwebsource))
+integobjects := $(patsubst %.cweb,%.o,$(integcwebsource))
+
+cppsource := $(wildcard *.cpp)
+mexobjects := $(patsubst %.cpp,%_.$(MEX_SUFFIX),$(cppsource))
+
+all: $(mexobjects)
+
+../../tl/cc/dummy.ch:
+	make -C ../../tl/cc dummy.ch
+
+../../tl/cc/%.cpp: ../../tl/cc/%.cweb ../../tl/cc/dummy.ch
+	make -C ../../tl/cc $*.cpp
+
+../../tl/cc/%.h: ../../tl/cc/%.hweb ../../tl/cc/dummy.ch
+	make -C ../../tl/cc $*.h
+
+../../tl/cc/%.o: ../../tl/cc/%.cpp $(tlhsource)
+	make -C ../../tl/cc $*.o
+
+../../integ/cc/dummy.ch:
+	make -C ../../integ/cc dummy.ch
+
+../../integ/cc/%.cpp: ../../integ/cc/%.cweb ../../integ/cc/dummy.ch
+	make -C ../../integ/cc $*.cpp
+
+../../integ/cc/%.h: ../../integ/cc/%.hweb ../../integ/cc/dummy.ch
+	make -C ../../integ/cc $*.h
+
+../../integ/cc/%.o: ../../integ/cc/%.cpp $(integhsource) $(tlhsource)
+	make -C ../../integ/cc $*.o
+
+
+../../sylv/cc/%.o: ../../sylv/cc/%.cpp $(sylvhsource)
+	make -C ../../sylv/cc $*.o
+
+../../kord/dummy.ch:
+	make -C ../../kord dummy.ch
+
+../../kord/%.cpp: ../../kord/%.cweb ../../kord/dummy.ch
+	make -C ../../kord $*.cpp
+
+../../kord/%.h: ../../kord/%.hweb ../../kord/dummy.ch
+	make -C ../../kord $*.h
+
+../../kord/%.o: ../../kord/%.cpp $(tlhsource) $(kordhsource) $(integhsource)
+	make -C ../../kord $*.o
+
+
+dynarelib.a: $(tlhwebsource) $(tlcwebsource) $(tlhsource) $(tlcppsource) \
+         $(integhwebsource) $(integcwebsource) $(integhsource) $(integcppsource) \
+         $(kordhwebsource) $(kordcwebsource) $(kordhsource) $(kordcppsource) \
+         $(sylvhsource) $(sylvcppsource) \
+         $(kordobjects) $(tlobjects) $(integobjects) $(sylvobjects)
+	ar cr dynarelib.a $(kordobjects) $(tlobjects) $(integobjects) $(sylvobjects)
+	ranlib dynarelib.a
+
+
+# to compile mex objects for Windows do:
+# 1. install gnumex
+# 2. create mexopts.bat via gnumex in this directory, specify MinGW compilation, and dll output
+# 3. in created mexopts.bat add "-llapack -lblas -lg2c -lstdc++
+#    -lpthreadGC2" in the beginning of GM_ADD_LIBS
+# 4. in created mexopts.bat add "-fexceptions -DPOSIX_THREADS" to COMPFLAGS in the end
+# 5. in created mexopts.bat change suffix mexw32 to dll in NAME_OUTPUT
+# 6. pray it works
+# OR: just use the mexopt.bat from the repository and check MATLAB
+#     root directory and gnumex root directories
+%_.$(MEX_SUFFIX): %.cpp $(tlhwebsource) $(tlcwebsource) $(tlhsource) $(tlcppsource) \
+         $(integhwebsource) $(integcwebsource) $(integhsource) $(integcppsource) \
+         $(kordhwebsource) $(kordcwebsource) $(kordhsource) $(kordcppsource) \
+         $(sylvhsource) $(sylvcppsource) \
+         dynarelib.a
+ifeq ($(OS),Windows_NT)
+	mex.bat -I../../sylv/cc -I../../tl/cc -I../../kord -I../../integ/cc $*.cpp dynarelib.a
+else
+	mex -I../../sylv/cc/ -I../../tl/cc -I../../kord -I../../integ/cc $*.cpp CFLAGS='$$CFLAGS -fexceptions' dynarelib.a -lmwlapack -lmwblas
+endif
+	mv $*.$(MEX_SUFFIX) $*_.$(MEX_SUFFIX)
+
+clear:
+	rm -f dynarelib.a
+	rm -f *.mexglx
+	rm -f *.dll
+	make -C ../../tl/testing clear
+	make -C ../../tl/cc clear
+	make -C ../../integ/testing clear
+	make -C ../../integ/cc clear
+	make -C ../../sylv/testing clear
+	make -C ../../sylv/cc clear
+	make -C ../../kord clear
diff --git a/dynare++/extern/matlab/dynare_simul.cpp b/dynare++/extern/matlab/dynare_simul.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..5a9d9c8ca948978bd5bac6d06f43dee48ca3122f
--- /dev/null
+++ b/dynare++/extern/matlab/dynare_simul.cpp
@@ -0,0 +1,133 @@
+// $Id: dynare_simul.cpp 1488 2007-12-19 14:16:30Z kamenik $
+
+// Copyright 2005, Ondra Kamenik
+
+// This is the mexFunction providing interface to
+// DecisionRule<>::simulate(). It takes the following input
+// parameters:
+//      order    the order of approximation, needs order+1 derivatives
+//      nstat
+//      npred
+//      nboth
+//      nforw
+//      nexog
+//      ystart   starting value (full vector of endogenous)
+//      shocks   matrix of shocks (nexog x number of period)
+//      vcov     covariance matrix of shocks (nexog x nexog)
+//      seed     integer seed
+//      ysteady  full vector of decision rule's steady
+//      ...      order+1 matrices of derivatives
+
+// output:
+//      res      simulated results
+
+#include "mex.h"
+
+#include "decision_rule.h"
+#include "fs_tensor.h"
+#include "SylvException.h"
+
+extern "C" {
+	void mexFunction(int nlhs, mxArray* plhs[],
+					 int nrhs, const mxArray* prhs[])
+	{
+		if (nrhs < 12)
+			mexErrMsgTxt("Must have at least 12 input parameters.\n");
+		if (nlhs != 1)
+			mexErrMsgTxt("Must have exactly 1 output parameter.\n");
+
+		int order = (int)mxGetScalar(prhs[0]);
+		if (nrhs != 12 + order) {
+			mexErrMsgTxt("Must have exactly 12+order input parameters.\n");
+			return;
+		}
+
+		int nstat = (int)mxGetScalar(prhs[1]);
+		int npred = (int)mxGetScalar(prhs[2]);
+		int nboth = (int)mxGetScalar(prhs[3]);
+		int nforw = (int)mxGetScalar(prhs[4]);
+		int nexog = (int)mxGetScalar(prhs[5]);
+
+		const mxArray* const ystart = prhs[6];
+		const mxArray* const shocks = prhs[7];
+		const mxArray* const vcov = prhs[8];
+		int seed = (int)mxGetScalar(prhs[9]);
+		const mxArray* const ysteady = prhs[10];
+		const int* const ystart_dim = mxGetDimensions(ystart);
+		const int* const shocks_dim = mxGetDimensions(shocks);
+		const int* const vcov_dim = mxGetDimensions(vcov);
+		const int* const ysteady_dim = mxGetDimensions(ysteady);
+
+		int ny = nstat + npred + nboth + nforw;
+		if (ny != ystart_dim[0])
+			mexErrMsgTxt("ystart has wrong number of rows.\n");
+		if (1 != ystart_dim[1])
+			mexErrMsgTxt("ystart has wrong number of cols.\n");
+		int nper = shocks_dim[1];
+		if (nexog != shocks_dim[0])
+			mexErrMsgTxt("shocks has a wrong number of rows.\n");
+		if (nexog != vcov_dim[0])
+			mexErrMsgTxt("vcov has a wrong number of rows.\n");
+		if (nexog != vcov_dim[1])
+			mexErrMsgTxt("vcov has a wrong number of cols.\n");
+		if (ny != ysteady_dim[0])
+			mexErrMsgTxt("ysteady has wrong number of rows.\n");
+		if (1 != ysteady_dim[1])
+			mexErrMsgTxt("ysteady has wrong number of cols.\n");
+
+		mxArray* res = mxCreateDoubleMatrix(ny, nper, mxREAL);
+
+		try {
+			// initialize tensor library
+			tls.init(order, npred+nboth+nexog);
+
+			// form the polynomial
+			UTensorPolynomial pol(ny, npred+nboth+nexog);
+			for (int dim = 0; dim <= order; dim++) {
+				const mxArray* gk = prhs[11+dim];
+				const int* const gk_dim = mxGetDimensions(gk);
+				FFSTensor ft(ny, npred+nboth+nexog, dim);
+				if (ft.ncols() != gk_dim[1]) {
+					char buf[1000];
+					sprintf(buf, "Wrong number of columns for folded tensor: got %d but I want %d\n",
+							gk_dim[1], ft.ncols());
+					mexErrMsgTxt(buf);
+				}
+				if (ft.nrows() != gk_dim[0]) {
+					char buf[1000];
+					sprintf(buf, "Wrong number of rows for folded tensor: got %d but I want %d\n",
+							gk_dim[0], ft.nrows());
+					mexErrMsgTxt(buf);
+				}
+				ft.zeros();
+				ConstTwoDMatrix gk_mat(ft.nrows(), ft.ncols(), mxGetPr(gk));
+				ft.add(1.0, gk_mat);
+				UFSTensor* ut = new UFSTensor(ft);
+				pol.insert(ut);
+			}
+			// form the decision rule
+			UnfoldDecisionRule
+				dr(pol, PartitionY(nstat, npred, nboth, nforw),
+				   nexog, ConstVector(mxGetPr(ysteady), ny));
+			// form the shock realization
+			TwoDMatrix shocks_mat(nexog, nper, (const double*)mxGetPr(shocks));
+			TwoDMatrix vcov_mat(nexog, nexog, (const double*)mxGetPr(vcov));
+			GenShockRealization sr(vcov_mat, shocks_mat, seed);
+			// simulate and copy the results
+			Vector ystart_vec((const double*)mxGetPr(ystart), ny);
+			TwoDMatrix* res_mat =
+				dr.simulate(DecisionRule::horner, nper,
+							ystart_vec, sr);
+			TwoDMatrix res_tmp_mat(ny, nper, mxGetPr(res));
+			res_tmp_mat = (const TwoDMatrix&)(*res_mat);
+			delete res_mat;
+			plhs[0] = res;
+		} catch (const KordException& e) {
+			mexErrMsgTxt("Caught Kord exception.");
+		} catch (const TLException& e) {
+			mexErrMsgTxt("Caught TL exception.");
+		} catch (SylvException& e) {
+			mexErrMsgTxt("Caught Sylv exception.");
+		}
+	}
+};
diff --git a/dynare++/extern/matlab/dynare_simul.m b/dynare++/extern/matlab/dynare_simul.m
new file mode 100644
index 0000000000000000000000000000000000000000..d70b90c839afbeb1d1ede6b8c04ccf061dad1b04
--- /dev/null
+++ b/dynare++/extern/matlab/dynare_simul.m
@@ -0,0 +1,160 @@
+%
+% SYNOPSIS
+% 
+% r = dynare_simul(name, shocks)
+% r = dynare_simul(name, prefix, shocks)
+% r = dynare_simul(name, shocks, start)
+% r = dynare_simul(name, prefix, shocks, start)
+%
+%     name     name of MAT-file produced by dynare++
+%     prefix   prefix of variables in the MAT-file
+%     shocks   matrix of shocks
+%     start    zero period value
+%
+% SEMANTICS
+%
+% The command reads a decision rule from the MAT-file having the given
+% prefix. Then it simulates the decision rule, taking the given start as the
+% value in period zero and using the given shocks. If start is not given,
+% the state about which the decision rule is centralized is used instead
+% (called the fix point, or the stochastic steady state, take your pick).
+%
+%     prefix   Use the prefix with which you called dynare++; the default
+%              prefix in dynare++ is 'dyn'.
+%     shocks   The number of rows must equal the number of exogenous shocks,
+%              and the number of columns gives the number of simulated
+%              periods. NaNs and Infs in the matrix are substituted by
+%              draws from the normal distribution using the covariance
+%              matrix given in the model file.
+%     start    Vector of endogenous variables in the ordering given by
+%              <prefix>_vars.
+%
+% The seed for the random generator is derived from a call to rand(1,1), so
+% seeding can be controlled with rand('state') and rand('state',some_seed).
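+%
+% For instance, to make a stochastic simulation reproducible one might do
+% (a sketch; 'your_model.mat' and the shocks matrix are placeholders):
+%
+%       rand('state',12345);
+%       r = dynare_simul('your_model.mat',shocks);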
+%
+% EXAMPLES
+%
+% All examples suppose that the prefix is 'dyn' and that your_model.mat
+% has been loaded into Matlab.
+%
+% 1. response to permanent negative shock to the third exo var EPS3 for
+%    100 periods
+%
+%       shocks = zeros(4,100); % 4 exogenous variables in the model
+%       shocks(dyn_i_EPS3,:) = -0.1; % the permanent shock to EPS3
+%       r = dynare_simul('your_model.mat',shocks);
+%
+% 2. one stochastic simulation for 100 periods
+%
+%       shocks = zeros(4,100)./0; % put NaNs everywhere
+%       r = dynare_simul('your_model.mat',shocks);
+%
+% 3. one stochastic simulation starting at 75% undercapitalized economy
+%
+%       shocks = zeros(4,100)./0; % put NaNs everywhere
+%       ystart = dyn_ss; % get copy of DR fix point
+%       ystart(dyn_i_K) = 0.75*dyn_ss(dyn_i_K); % scale down the capital
+%       r = dynare_simul('your_model.mat',shocks,ystart);
+%
+% 
+% SEE ALSO
+%
+%   "DSGE Models with Dynare++. A Tutorial.", Ondra Kamenik, 2005
+
+function r = dynare_simul(varargin)
+
+% get the file name and load data
+fname = varargin{1};
+eval(['load ' fname]);
+
+% set prefix, shocks, ystart
+if ischar(varargin{2})
+  prefix = varargin{2};
+  if length(varargin) == 3
+    shocks = varargin{3};
+    ystart = NaN;
+  elseif length(varargin) == 4
+    shocks = varargin{3};
+    ystart = varargin{4};
+  else
+    error('Wrong number of parameters.');
+  end
+else
+  prefix = 'dyn';
+  if length(varargin) == 2
+    shocks = varargin{2};
+    ystart = NaN;
+  elseif length(varargin) == 3
+    shocks = varargin{2};
+    ystart = varargin{3};
+  else
+    error('Wrong number of parameters.');
+  end
+end
+
+% load all needed variables but prefix_g_*
+if (exist([prefix '_nstat']))
+  nstat = eval([prefix '_nstat']);
+else
+  error(['Could not find variable ' prefix '_nstat in workspace']);
+end
+if (exist([prefix '_npred']))
+  npred = eval([prefix '_npred']);
+else
+  error(['Could not find variable ' prefix '_npred in workspace']);
+end
+if (exist([prefix '_nboth']))
+  nboth = eval([prefix '_nboth']);
+else
+  error(['Could not find variable ' prefix '_nboth in workspace']);
+end
+if (exist([prefix '_nforw']))
+  nforw = eval([prefix '_nforw']);
+else
+  error(['Could not find variable ' prefix '_nforw in workspace']);
+end
+if (exist([prefix '_ss']))
+  ss = eval([prefix '_ss']);
+else
+  error(['Could not find variable ' prefix '_ss in workspace']);
+end
+if (exist([prefix '_vcov_exo']))
+  vcov_exo = eval([prefix '_vcov_exo']);
+else
+  error(['Could not find variable ' prefix '_vcov_exo in workspace']);
+end
+nexog = size(vcov_exo,1);
+
+if isnan(ystart)
+  ystart = ss;
+end
+
+% newer versions of dynare++ do not return prefix_g_0; we create it here if
+% it does not exist in the workspace
+g_zero = [prefix '_g_0'];
+if (~ exist(g_zero))
+  eval([ g_zero '= zeros(nstat+npred+nboth+nforw,1);']);
+end
+
+% make derstr a comma-separated list of the existing prefix_g_* variables
+derstr = [',' g_zero];
+order = 1;
+cont = 1;
+while cont == 1
+  g_ord = [prefix '_g_' num2str(order)];
+  if (exist(g_ord))
+    derstr = [derstr ',' g_ord];
+    order = order + 1;
+  else
+    cont = 0;
+  end
+end
+
+% set seed
+seed = ceil(10000*rand(1,1));
+
+% call dynare_simul_
+command = ['r=dynare_simul_(' num2str(order-1) ',nstat,npred,nboth,nforw,' ...
+           'nexog,ystart,shocks,vcov_exo,seed,ss' derstr ');'];
+eval(command);
diff --git a/dynare++/extern/matlab/mexopts.bat b/dynare++/extern/matlab/mexopts.bat
new file mode 100755
index 0000000000000000000000000000000000000000..5801b2336f63b21d9977326a6f2a546cf20a37db
--- /dev/null
+++ b/dynare++/extern/matlab/mexopts.bat
@@ -0,0 +1,65 @@
+@echo off
+rem C:\ondra\work\dpp\dynare++\extern\matlab\mexopts.bat
+rem Generated by gnumex.m script in c:\fs\gnumex
+rem gnumex version: 2.01
+rem Compile and link options used for building MEX etc files with
+rem the Mingw/Cygwin tools.  Options here are:
+rem Gnumex, version 2.01                       
+rem MinGW linking                              
+rem Mex (*.dll) creation                       
+rem Libraries regenerated now                  
+rem Language: C / C++                          
+rem Optimization level: -O3 (full optimization)
+rem Matlab version 7.7
+rem
+set MATLAB=C:\PROGRA~1\MATLAB\R2008b
+set GM_PERLPATH=C:\PROGRA~1\MATLAB\R2008b\sys\perl\win32\bin\perl.exe
+set GM_UTIL_PATH=c:\fs\gnumex
+set PATH=c:\fs\mingw\bin;%PATH%
+set PATH=%PATH%;C:\Cygwin\usr\local\gfortran\libexec\gcc\i686-pc-cygwin\4.3.0
+set LIBRARY_PATH=c:\fs\mingw\lib
+set G95_LIBRARY_PATH=c:\fs\mingw\lib
+rem
+rem precompiled library directory and library files
+set GM_QLIB_NAME=C:\\ondra\work\dpp\dynare++\extern\matlab\gnumex
+rem
+rem directory for .def-files
+set GM_DEF_PATH=C:\\ondra\work\dpp\dynare++\extern\matlab\gnumex
+rem
+rem Type of file to compile (mex or engine)
+set GM_MEXTYPE=mex
+rem
+rem Language for compilation
+set GM_MEXLANG=c
+rem
+rem File for exporting mexFunction symbol
+set GM_MEXDEF=C:\\ondra\work\dpp\dynare++\extern\matlab\gnumex\mex.def
+rem
+set GM_ADD_LIBS=-llapack -lblas -lg2c -lstdc++ -lpthreadGC2 -llibmx -llibmex -llibmat
+rem
+rem compiler options; add compiler flags to compflags as desired
+set NAME_OBJECT=-o
+set COMPILER=gcc
+set COMPFLAGS=-c -DMATLAB_MEX_FILE -fexceptions -DPOSIX_THREADS
+set OPTIMFLAGS=-O3
+set DEBUGFLAGS=-g
+set CPPCOMPFLAGS=%COMPFLAGS% -x c++ 
+set CPPOPTIMFLAGS=%OPTIMFLAGS%
+set CPPDEBUGFLAGS=%DEBUGFLAGS%
+rem
+rem NB Library creation commands occur in linker scripts
+rem
+rem Linker parameters
+set LINKER=%GM_PERLPATH% %GM_UTIL_PATH%\linkmex.pl
+set LINKFLAGS=
+set CPPLINKFLAGS=GM_ISCPP 
+set LINKOPTIMFLAGS=-s
+set LINKDEBUGFLAGS=-g  -Wl,--image-base,0x28000000\n
+set LINKFLAGS= -LC:\\ondra\work\dpp\dynare++\extern\matlab\gnumex
+set LINK_FILE=
+set LINK_LIB=
+set NAME_OUTPUT=-o %OUTDIR%%MEX_NAME%.dll
+rem
+rem Resource compiler parameters
+set RC_COMPILER=%GM_PERLPATH% %GM_UTIL_PATH%\rccompile.pl  -o %OUTDIR%mexversion.res
+set RC_LINKER=
diff --git a/dynare++/integ/cc/Makefile b/dynare++/integ/cc/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..716f13ab257b48e9646acc97ddd167ab32fd6289
--- /dev/null
+++ b/dynare++/integ/cc/Makefile
@@ -0,0 +1,67 @@
+# $Id: Makefile 2344 2009-02-09 20:36:08Z michel $
+# Copyright 2005, Ondra Kamenik
+
+include ../../Makefile.include
+
+CC_FLAGS := -I../../sylv/cc -I../../tl/cc
+
+ifeq ($(DEBUG),yes)
+	CC_FLAGS := $(CC_FLAGS) -g -DTL_DEBUG=2
+else
+	CC_FLAGS := $(CC_FLAGS) -O2 -DPOSIX_THREADS
+endif
+
+ifeq ($(OS),Windows_NT)
+	CC_FLAGS := -mno-cygwin -mthreads $(CC_FLAGS)
+	LD_LIBS := -mno-cygwin -mthreads $(LD_LIBS)  -lpthreadGC1
+else
+	LD_LIBS := $(LD_LIBS) -lpthread
+endif
+
+
+matrix_interface := GeneralMatrix Vector SylvException 
+matobjs := $(patsubst %, ../../sylv/cc/%.o, $(matrix_interface))
+cwebsource := $(wildcard *.cweb)
+cppsource := $(patsubst %.cweb,%.cpp,$(cwebsource)) 
+objects := $(patsubst %.cweb,%.o,$(cwebsource))
+hwebsource := $(wildcard *.hweb)
+hsource := $(patsubst %.hweb,%.h,$(hwebsource))
+
+tlcwebsource := $(wildcard ../../tl/cc/*.cweb)
+tlcppsource := $(patsubst %.cweb,%.cpp,$(tlcwebsource))
+tlhwebsource := $(wildcard ../../tl/cc/*.hweb)
+tlhsource := $(patsubst %.hweb,%.h,$(tlhwebsource))
+
+
+dummy.ch:
+	touch dummy.ch
+
+../../tl/cc/dummy.ch:
+	make -C ../../tl/cc dummy.ch
+
+../../tl/cc/%.h: ../../tl/cc/%.hweb ../../tl/cc/dummy.ch
+	make -C ../../tl/cc $*.h
+
+%.cpp: %.cweb dummy.ch
+	ctangle -bhp $*.cweb dummy.ch $*.cpp
+
+%.h: %.hweb dummy.ch
+	ctangle -bhp $*.hweb dummy.ch $*.h
+
+%.o : %.cpp $(hsource) $(tlhsource)
+	$(CC) $(CC_FLAGS) $(EXTERN_DEFS) -c $*.cpp
+
+doc: main.web $(hwebsource) $(cwebsource)
+	cweave -bhp main.web
+	pdftex main
+	mv main.pdf integ.pdf 
+
+all: $(objects) $(cppsource) $(hsource)
+
+
+clear:
+	rm -f $(cppsource)
+	rm -f $(hsource)
+	rm -f *.o
+	rm -f dummy.ch
+	rm -f *~
diff --git a/dynare++/integ/cc/main.web b/dynare++/integ/cc/main.web
new file mode 100644
index 0000000000000000000000000000000000000000..3159b78a37cd92ae54799a5d3adbe9a074ac9baa
--- /dev/null
+++ b/dynare++/integ/cc/main.web
@@ -0,0 +1,41 @@
+@q $Id: main.web 2333 2009-01-14 10:32:55Z kamenik $ @>
+@q Copyright 2005, Ondra Kamenik @>
+
+\let\ifpdf\relax
+\input eplain
+\def\title{{\mainfont Numerical Integration Module}}
+
+
+@i c++lib.w
+@s Vector int
+@s ConstVector int
+@s IntSequence int
+@s GeneralMatrix int
+@s THREAD int
+@s THREAD_GROUP int
+@s SYNCHRO int
+
+\titletrue
+\null\vfill
+\centerline{\titlefont Numerical Integration Module}
+\vfill\vfill
+Copyright \copyright\ 2005 by Ondra Kamenik
+
+\penalty-10000
+
+@i vector_function.hweb
+@i vector_function.cweb
+
+@i quadrature.hweb
+@i quadrature.cweb
+
+@i product.hweb
+@i product.cweb
+
+@i smolyak.hweb
+@i smolyak.cweb
+
+@i quasi_mcarlo.hweb
+@i quasi_mcarlo.cweb
+
+
diff --git a/dynare++/integ/cc/precalc_quadrature.dat b/dynare++/integ/cc/precalc_quadrature.dat
new file mode 100644
index 0000000000000000000000000000000000000000..59b9d90b0b7b278c8eeb4b8def8edc789fdc6e82
--- /dev/null
+++ b/dynare++/integ/cc/precalc_quadrature.dat
@@ -0,0 +1,1821 @@
+// $Id: precalc_quadrature.dat 431 2005-08-16 15:41:01Z kamenik $
+// Copyright 2005, Ondra Kamenik
+
+// The file contains one-dimensional quadrature points and weights for a
+// few quadratures. For each quadrature the data consist of the number of
+// levels, the number of points at each level, and the weights and points
+// of all levels concatenated in order. The class OneDPrecalcQuadrature
+// implements the interface OneDQuadrature using data in this format.
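+
+// For illustration only, a consumer of this data could locate the weights
+// and points of a given level by summing the point counts of all preceding
+// levels (a sketch, not the actual OneDPrecalcQuadrature implementation):
+//
+//	int offset = 0;
+//	for (int i = 0; i < level; i++)
+//		offset += gh_num_points[i];
+//	// weights of this level: gh_weights[offset] ... gh_weights[offset + gh_num_points[level] - 1]
+//	// points of this level:  gh_points[offset]  ... gh_points[offset + gh_num_points[level] - 1]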
+
+// Gauss-Hermite quadrature; prefix gh
+
+// number of levels
+static const int gh_num_levels = 26;
+
+// number of points in each level
+static const int gh_num_points[] = {
+	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+	30, 32, 40, 50, 60, 64
+};
+
+// weights, starting with the first level
+static const double gh_weights[] = {
+    // weights 1 = sqrt(pi)
+	1.77245385090551588191942755656782537698745727539062,
+	// weights 2
+	0.886226925452758013649083741671e+00,
+	0.886226925452758013649083741671e+00,
+	// weights 3
+	0.295408975150919337883027913890e+00,
+	0.118163590060367735153211165556e+01,
+	0.295408975150919337883027913890e+00,
+	// weights 4
+	0.813128354472451771430345571899e-01,
+	0.804914090005512836506049184481e+00,
+	0.804914090005512836506049184481e+00,
+	0.813128354472451771430345571899e-01,
+	// weights 5
+	0.199532420590459132077434585942e-01,
+	0.393619323152241159828495620852e+00,
+	0.945308720482941881225689324449e+00,
+	0.393619323152241159828495620852e+00,
+	0.199532420590459132077434585942e-01,
+	// weights 6
+	0.453000990550884564085747256463e-02,
+	0.157067320322856643916311563508e+00,
+	0.724629595224392524091914705598e+00,
+	0.724629595224392524091914705598e+00,
+	0.157067320322856643916311563508e+00,
+	0.453000990550884564085747256463e-02,
+	// weights 7
+	0.971781245099519154149424255939e-03,
+	0.545155828191270305921785688417e-01,
+	0.425607252610127800520317466666e+00,
+	0.810264617556807326764876563813e+00,
+	0.425607252610127800520317466666e+00,
+	0.545155828191270305921785688417e-01,
+	0.971781245099519154149424255939e-03,
+	// weights 8
+	0.199604072211367619206090452544e-03,
+	0.170779830074134754562030564364e-01,
+	0.207802325814891879543258620286e+00,
+	0.661147012558241291030415974496e+00,
+	0.661147012558241291030415974496e+00,
+	0.207802325814891879543258620286e+00,
+	0.170779830074134754562030564364e-01,
+	0.199604072211367619206090452544e-03,
+	// weights 9
+	0.396069772632643819045862946425e-04,
+	0.494362427553694721722456597763e-02,
+	0.884745273943765732879751147476e-01,
+	0.432651559002555750199812112956e+00,
+	0.720235215606050957124334723389e+00,
+	0.432651559002555750199812112956e+00,
+	0.884745273943765732879751147476e-01,
+	0.494362427553694721722456597763e-02,
+	0.396069772632643819045862946425e-04,
+	// weights 10
+	0.764043285523262062915936785960e-05,
+	0.134364574678123269220156558585e-02,
+	0.338743944554810631361647312776e-01,
+	0.240138611082314686416523295006e+00,
+	0.610862633735325798783564990433e+00,
+	0.610862633735325798783564990433e+00,
+	0.240138611082314686416523295006e+00,
+	0.338743944554810631361647312776e-01,
+	0.134364574678123269220156558585e-02,
+	0.764043285523262062915936785960e-05,
+	// weights 11
+	0.143956039371425822033088366032e-05,
+	0.346819466323345510643413772940e-03,
+	0.119113954449115324503874202916e-01,
+	0.117227875167708503381788649308e+00,
+	0.429359752356125028446073598601e+00,
+	0.654759286914591779203940657627e+00,
+	0.429359752356125028446073598601e+00,
+	0.117227875167708503381788649308e+00,
+	0.119113954449115324503874202916e-01,
+	0.346819466323345510643413772940e-03,
+	0.143956039371425822033088366032e-05,
+	// weights 12
+	0.265855168435630160602311400877e-06,
+	0.857368704358785865456906323153e-04,
+	0.390539058462906185999438432620e-02,
+	0.516079856158839299918734423606e-01,
+	0.260492310264161129233396139765e+00,
+	0.570135236262479578347113482275e+00,
+	0.570135236262479578347113482275e+00,
+	0.260492310264161129233396139765e+00,
+	0.516079856158839299918734423606e-01,
+	0.390539058462906185999438432620e-02,
+	0.857368704358785865456906323153e-04,
+	0.265855168435630160602311400877e-06,
+	// weights 13
+	0.482573185007313108834997332342e-07,
+	0.204303604027070731248669432937e-04,
+	0.120745999271938594730924899224e-02,
+	0.208627752961699392166033805050e-01,
+	0.140323320687023437762792268873e+00,
+	0.421616296898543221746893558568e+00,
+	0.604393187921161642342099068579e+00,
+	0.421616296898543221746893558568e+00,
+	0.140323320687023437762792268873e+00,
+	0.208627752961699392166033805050e-01,
+	0.120745999271938594730924899224e-02,
+	0.204303604027070731248669432937e-04,
+	0.482573185007313108834997332342e-07,
+	// weights 14
+	0.862859116812515794532041783429e-08,
+	0.471648435501891674887688950105e-05,
+	0.355092613551923610483661076691e-03,
+	0.785005472645794431048644334608e-02,
+	0.685055342234652055387163312367e-01,
+	0.273105609064246603352569187026e+00,
+	0.536405909712090149794921296776e+00,
+	0.536405909712090149794921296776e+00,
+	0.273105609064246603352569187026e+00,
+	0.685055342234652055387163312367e-01,
+	0.785005472645794431048644334608e-02,
+	0.355092613551923610483661076691e-03,
+	0.471648435501891674887688950105e-05,
+	0.862859116812515794532041783429e-08,
+	// weights 15
+	0.152247580425351702016062666965e-08,
+	0.105911554771106663577520791055e-05,
+	0.100004441232499868127296736177e-03,
+	0.277806884291277589607887049229e-02,
+	0.307800338725460822286814158758e-01,
+	0.158488915795935746883839384960e+00,
+	0.412028687498898627025891079568e+00,
+	0.564100308726417532852625797340e+00,
+	0.412028687498898627025891079568e+00,
+	0.158488915795935746883839384960e+00,
+	0.307800338725460822286814158758e-01,
+	0.277806884291277589607887049229e-02,
+	0.100004441232499868127296736177e-03,
+	0.105911554771106663577520791055e-05,
+	0.152247580425351702016062666965e-08,
+	// weights 16
+	0.265480747401118224470926366050e-09,
+	0.232098084486521065338749423185e-06,
+	0.271186009253788151201891432244e-04,
+	0.932284008624180529914277305537e-03,
+	0.128803115355099736834642999312e-01,
+	0.838100413989858294154207349001e-01,
+	0.280647458528533675369463335380e+00,
+	0.507929479016613741913517341791e+00,
+	0.507929479016613741913517341791e+00,
+	0.280647458528533675369463335380e+00,
+	0.838100413989858294154207349001e-01,
+	0.128803115355099736834642999312e-01,
+	0.932284008624180529914277305537e-03,
+	0.271186009253788151201891432244e-04,
+	0.232098084486521065338749423185e-06,
+	0.265480747401118224470926366050e-09,
+	// weights 17
+	0.458057893079863330580889281222e-10,
+	0.497707898163079405227863353715e-07,
+	0.711228914002130958353327376218e-05,
+	0.298643286697753041151336643059e-03,
+	0.506734995762753791170069495879e-02,
+	0.409200341495762798094994877854e-01,
+	0.172648297670097079217645196219e+00,
+	0.401826469470411956577635085257e+00,
+	0.530917937624863560331883103379e+00,
+	0.401826469470411956577635085257e+00,
+	0.172648297670097079217645196219e+00,
+	0.409200341495762798094994877854e-01,
+	0.506734995762753791170069495879e-02,
+	0.298643286697753041151336643059e-03,
+	0.711228914002130958353327376218e-05,
+	0.497707898163079405227863353715e-07,
+	0.458057893079863330580889281222e-10,
+	// weights 18
+	0.782819977211589102925147471012e-11,
+	0.104672057957920824443559608435e-07,
+	0.181065448109343040959702385911e-05,
+	0.918112686792940352914675407371e-04,
+	0.188852263026841789438175325426e-02,
+	0.186400423875446519219315221973e-01,
+	0.973017476413154293308537234155e-01,
+	0.284807285669979578595606820713e+00,
+	0.483495694725455552876410522141e+00,
+	0.483495694725455552876410522141e+00,
+	0.284807285669979578595606820713e+00,
+	0.973017476413154293308537234155e-01,
+	0.186400423875446519219315221973e-01,
+	0.188852263026841789438175325426e-02,
+	0.918112686792940352914675407371e-04,
+	0.181065448109343040959702385911e-05,
+	0.104672057957920824443559608435e-07,
+	0.782819977211589102925147471012e-11,
+	// weights 19
+	0.132629709449851575185289154385e-11,
+	0.216305100986355475019693077221e-08,
+	0.448824314722312295179447915594e-06,
+	0.272091977631616257711941025214e-04,
+	0.670877521407181106194696282100e-03,
+	0.798886677772299020922211491861e-02,
+	0.508103869090520673569908110358e-01,
+	0.183632701306997074156148485766e+00,
+	0.391608988613030244504042313621e+00,
+	0.502974888276186530840731361096e+00,
+	0.391608988613030244504042313621e+00,
+	0.183632701306997074156148485766e+00,
+	0.508103869090520673569908110358e-01,
+	0.798886677772299020922211491861e-02,
+	0.670877521407181106194696282100e-03,
+	0.272091977631616257711941025214e-04,
+	0.448824314722312295179447915594e-06,
+	0.216305100986355475019693077221e-08,
+	0.132629709449851575185289154385e-11,
+	// weights 20
+	0.222939364553415129252250061603e-12,
+	0.439934099227318055362885145547e-09,
+	0.108606937076928169399952456345e-06,
+	0.780255647853206369414599199965e-05,
+	0.228338636016353967257145917963e-03,
+	0.324377334223786183218324713235e-02,
+	0.248105208874636108821649525589e-01,
+	0.109017206020023320013755033535e+00,
+	0.286675505362834129719659706228e+00,
+	0.462243669600610089650328639861e+00,
+	0.462243669600610089650328639861e+00,
+	0.286675505362834129719659706228e+00,
+	0.109017206020023320013755033535e+00,
+	0.248105208874636108821649525589e-01,
+	0.324377334223786183218324713235e-02,
+	0.228338636016353967257145917963e-03,
+	0.780255647853206369414599199965e-05,
+	0.108606937076928169399952456345e-06,
+	0.439934099227318055362885145547e-09,
+	0.222939364553415129252250061603e-12,
+	// weights 30
+	0.290825470013122622941102747365e-20,
+	0.281033360275090370876277491534e-16,
+	0.287860708054870606219239791142e-13,
+	0.810618629746304420399344796173e-11,
+	0.917858042437852820850075742492e-09,
+	0.510852245077594627738963204403e-07,
+	0.157909488732471028834638794022e-05,
+	0.293872522892298764150118423412e-04,
+	0.348310124318685523420995323183e-03,
+	0.273792247306765846298942568953e-02,
+	0.147038297048266835152773557787e-01,
+	0.551441768702342511680754948183e-01,
+	0.146735847540890099751693643152e+00,
+	0.280130930839212667413493211293e+00,
+	0.386394889541813862555601849165e+00,
+	0.386394889541813862555601849165e+00,
+	0.280130930839212667413493211293e+00,
+	0.146735847540890099751693643152e+00,
+	0.551441768702342511680754948183e-01,
+	0.147038297048266835152773557787e-01,
+	0.273792247306765846298942568953e-02,
+	0.348310124318685523420995323183e-03,
+	0.293872522892298764150118423412e-04,
+	0.157909488732471028834638794022e-05,
+	0.510852245077594627738963204403e-07,
+	0.917858042437852820850075742492e-09,
+	0.810618629746304420399344796173e-11,
+	0.287860708054870606219239791142e-13,
+	0.281033360275090370876277491534e-16,
+	0.290825470013122622941102747365e-20,
+	// weights 32
+	0.731067642736e-22,
+	0.923173653649e-18,
+	0.119734401709e-14,
+	0.421501021125e-12,
+	0.593329146300e-10,
+	0.409883216476e-08,
+	0.157416779254e-06,
+	0.365058512955e-05,
+	0.541658406172e-04,
+	0.536268365526e-03,
+	0.365489032664e-02,
+	0.175534288315e-01,
+	0.604581309557e-01,
+	0.151269734076e+00,
+	0.277458142302e+00,
+	0.375238352592e+00,
+	0.375238352592e+00,
+	0.277458142302e+00,
+	0.151269734076e+00,
+	0.604581309557e-01,
+	0.175534288315e-01,
+	0.365489032664e-02,
+	0.536268365526e-03,
+	0.541658406172e-04,
+	0.365058512955e-05,
+	0.157416779254e-06,
+	0.409883216476e-08,
+	0.593329146300e-10,
+	0.421501021125e-12,
+	0.119734401709e-14,
+	0.923173653649e-18,
+	0.731067642736e-22,
+	// weights 40
+	0.259104371384e-28,
+	0.854405696375e-24,
+	0.256759336540e-20,
+	0.198918101211e-17,
+	0.600835878947e-15,
+	0.880570764518e-13,
+	0.715652805267e-11,
+	0.352562079135e-09,
+	0.112123608322e-07,
+	0.241114416359e-06,
+	0.363157615067e-05,
+	0.393693398108e-04,
+	0.313853594540e-03,
+	0.187149682959e-02,
+	0.846088800823e-02,
+	0.293125655361e-01,
+	0.784746058652e-01,
+	0.163378732713e+00,
+	0.265728251876e+00,
+	0.338643277425e+00,
+	0.338643277425e+00,
+	0.265728251876e+00,
+	0.163378732713e+00,
+	0.784746058652e-01,
+	0.293125655361e-01,
+	0.846088800823e-02,
+	0.187149682959e-02,
+	0.313853594540e-03,
+	0.393693398108e-04,
+	0.363157615067e-05,
+	0.241114416359e-06,
+	0.112123608322e-07,
+	0.352562079135e-09,
+	0.715652805267e-11,
+	0.880570764518e-13,
+	0.600835878947e-15,
+	0.198918101211e-17,
+	0.256759336540e-20,
+	0.854405696375e-24,
+	0.259104371384e-28,
+	// weights 50
+	0.183379404857e-36,
+	0.167380166790e-31,
+	0.121524412340e-27,
+	0.213765830835e-24,
+	0.141709359957e-21,
+	0.447098436530e-19,
+	0.774238295702e-17,
+	0.809426189344e-15,
+	0.546594403180e-13,
+	0.250665552389e-11,
+	0.811187736448e-10,
+	0.190904054379e-08,
+	0.334679340401e-07,
+	0.445702996680e-06,
+	0.458168270794e-05,
+	0.368401905377e-04,
+	0.234269892109e-03,
+	0.118901178175e-02,
+	0.485326382616e-02,
+	0.160319410684e-01,
+	0.430791591566e-01,
+	0.945489354768e-01,
+	0.170032455676e+00,
+	0.251130856331e+00,
+	0.305085129203e+00,
+	0.305085129203e+00,
+	0.251130856331e+00,
+	0.170032455676e+00,
+	0.945489354768e-01,
+	0.430791591566e-01,
+	0.160319410684e-01,
+	0.485326382616e-02,
+	0.118901178175e-02,
+	0.234269892109e-03,
+	0.368401905377e-04,
+	0.458168270794e-05,
+	0.445702996680e-06,
+	0.334679340401e-07,
+	0.190904054379e-08,
+	0.811187736448e-10,
+	0.250665552389e-11,
+	0.546594403180e-13,
+	0.809426189344e-15,
+	0.774238295702e-17,
+	0.447098436530e-19,
+	0.141709359957e-21,
+	0.213765830835e-24,
+	0.121524412340e-27,
+	0.167380166790e-31,
+	0.183379404857e-36,
+	// weights 60
+	0.110958724796e-44,
+	0.243974758810e-39,
+	0.377162672698e-35,
+	0.133255961176e-31,
+	0.171557314767e-28,
+	0.102940599693e-25,
+	0.334575695574e-23,
+	0.651256725748e-21,
+	0.815364047300e-19,
+	0.692324790956e-17,
+	0.415244410968e-15,
+	0.181662457614e-13,
+	0.594843051597e-12,
+	0.148895734905e-10,
+	0.289935901280e-09,
+	0.445682277521e-08,
+	0.547555461926e-07,
+	0.543351613419e-06,
+	0.439428693625e-05,
+	0.291874190415e-04,
+	0.160277334681e-03,
+	0.731773556963e-03,
+	0.279132482894e-02,
+	0.893217836028e-02,
+	0.240612727660e-01,
+	0.547189709320e-01,
+	0.105298763697e+00,
+	0.171776156918e+00,
+	0.237868904958e+00,
+	0.279853117522e+00,
+	0.279853117522e+00,
+	0.237868904958e+00,
+	0.171776156918e+00,
+	0.105298763697e+00,
+	0.547189709320e-01,
+	0.240612727660e-01,
+	0.893217836028e-02,
+	0.279132482894e-02,
+	0.731773556963e-03,
+	0.160277334681e-03,
+	0.291874190415e-04,
+	0.439428693625e-05,
+	0.543351613419e-06,
+	0.547555461926e-07,
+	0.445682277521e-08,
+	0.289935901280e-09,
+	0.148895734905e-10,
+	0.594843051597e-12,
+	0.181662457614e-13,
+	0.415244410968e-15,
+	0.692324790956e-17,
+	0.815364047300e-19,
+	0.651256725748e-21,
+	0.334575695574e-23,
+	0.102940599693e-25,
+	0.171557314767e-28,
+	0.133255961176e-31,
+	0.377162672698e-35,
+	0.243974758810e-39,
+	0.110958724796e-44,
+	// weights 64
+	0.553570653584e-48,
+	0.167974799010e-42,
+	0.342113801099e-38,
+	0.155739062462e-34,
+	0.254966089910e-31,
+	0.192910359546e-28,
+	0.786179778889e-26,
+	0.191170688329e-23,
+	0.298286278427e-21,
+	0.315225456649e-19,
+	0.235188471067e-17,
+	0.128009339117e-15,
+	0.521862372645e-14,
+	0.162834073070e-12,
+	0.395917776693e-11,
+	0.761521725012e-10,
+	0.117361674232e-08,
+	0.146512531647e-07,
+	0.149553293672e-06,
+	0.125834025103e-05,
+	0.878849923082e-05,
+	0.512592913577e-04,
+	0.250983698512e-03,
+	0.103632909950e-02,
+	0.362258697852e-02,
+	0.107560405098e-01,
+	0.272031289536e-01,
+	0.587399819634e-01,
+	0.108498349306e+00,
+	0.171685842349e+00,
+	0.232994786062e+00,
+	0.271377424940e+00,
+	0.271377424940e+00,
+	0.232994786062e+00,
+	0.171685842349e+00,
+	0.108498349306e+00,
+	0.587399819634e-01,
+	0.272031289536e-01,
+	0.107560405098e-01,
+	0.362258697852e-02,
+	0.103632909950e-02,
+	0.250983698512e-03,
+	0.512592913577e-04,
+	0.878849923082e-05,
+	0.125834025103e-05,
+	0.149553293672e-06,
+	0.146512531647e-07,
+	0.117361674232e-08,
+	0.761521725012e-10,
+	0.395917776693e-11,
+	0.162834073070e-12,
+	0.521862372645e-14,
+	0.128009339117e-15,
+	0.235188471067e-17,
+	0.315225456649e-19,
+	0.298286278427e-21,
+	0.191170688329e-23,
+	0.786179778889e-26,
+	0.192910359546e-28,
+	0.254966089910e-31,
+	0.155739062462e-34,
+	0.342113801099e-38,
+	0.167974799010e-42,
+	0.553570653584e-48
+};
+
+// points, starting with the first level
+static const double gh_points[] = {
+	// points 1
+	0.0,
+	// points 2
+	-0.707106781186547524400844362105e+00,
+	0.707106781186547524400844362105e+00,
+	// points 3
+	-0.122474487139158904909864203735e+01,
+	0.0e+00,
+	0.122474487139158904909864203735e+01,
+	// points 4
+	-0.165068012388578455588334111112e+01,
+	-0.524647623275290317884060253835e+00,
+	0.524647623275290317884060253835e+00,
+	0.165068012388578455588334111112e+01,
+	// points 5
+	-0.202018287045608563292872408814e+01,
+	-0.958572464613818507112770593893e+00,
+	0.0e+00,
+	0.958572464613818507112770593893e+00,
+	0.202018287045608563292872408814e+01,
+	// points 6
+	-0.235060497367449222283392198706e+01,
+	-0.133584907401369694971489528297e+01,
+	-0.436077411927616508679215948251e+00,
+	0.436077411927616508679215948251e+00,
+	0.133584907401369694971489528297e+01,
+	0.235060497367449222283392198706e+01,
+	// points 7
+	-0.265196135683523349244708200652e+01,
+	-0.167355162876747144503180139830e+01,
+	-0.816287882858964663038710959027e+00,
+	0.0e+00,
+	0.816287882858964663038710959027e+00,
+	0.167355162876747144503180139830e+01,
+	0.265196135683523349244708200652e+01,
+	// points 8
+	-0.293063742025724401922350270524e+01,
+	-0.198165675669584292585463063977e+01,
+	-0.115719371244678019472076577906e+01,
+	-0.381186990207322116854718885584e+00,
+	0.381186990207322116854718885584e+00,
+	0.115719371244678019472076577906e+01,
+	0.198165675669584292585463063977e+01,
+	0.293063742025724401922350270524e+01,
+	// points 9
+	-0.319099320178152760723004779538e+01,
+	-0.226658058453184311180209693284e+01,
+	-0.146855328921666793166701573925e+01,
+	-0.723551018752837573322639864579e+00,
+	0.0e+00,
+	0.723551018752837573322639864579e+00,
+	0.146855328921666793166701573925e+01,
+	0.226658058453184311180209693284e+01,
+	0.319099320178152760723004779538e+01,
+	// points 10
+	-0.343615911883773760332672549432e+01,
+	-0.253273167423278979640896079775e+01,
+	-0.175668364929988177345140122011e+01,
+	-0.103661082978951365417749191676e+01,
+	-0.342901327223704608789165025557e+00,
+	0.342901327223704608789165025557e+00,
+	0.103661082978951365417749191676e+01,
+	0.175668364929988177345140122011e+01,
+	0.253273167423278979640896079775e+01,
+	0.343615911883773760332672549432e+01,
+	// points 11
+	-0.366847084655958251845837146485e+01,
+	-0.278329009978165177083671870152e+01,
+	-0.202594801582575533516591283121e+01,
+	-0.132655708449493285594973473558e+01,
+	-0.656809566882099765024611575383e+00,
+	0.0e+00,
+	0.656809566882099765024611575383e+00,
+	0.132655708449493285594973473558e+01,
+	0.202594801582575533516591283121e+01,
+	0.278329009978165177083671870152e+01,
+	0.366847084655958251845837146485e+01,
+	// points 12
+	-0.388972489786978191927164274724e+01,
+	-0.302063702512088977171067937518e+01,
+	-0.227950708050105990018772856942e+01,
+	-0.159768263515260479670966277090e+01,
+	-0.947788391240163743704578131060e+00,
+	-0.314240376254359111276611634095e+00,
+	0.314240376254359111276611634095e+00,
+	0.947788391240163743704578131060e+00,
+	0.159768263515260479670966277090e+01,
+	0.227950708050105990018772856942e+01,
+	0.302063702512088977171067937518e+01,
+	0.388972489786978191927164274724e+01,
+	// points 13
+	-0.410133759617863964117891508007e+01,
+	-0.324660897837240998812205115236e+01,
+	-0.251973568567823788343040913628e+01,
+	-0.185310765160151214200350644316e+01,
+	-0.122005503659074842622205526637e+01,
+	-0.605763879171060113080537108602e+00,
+	0.0e+00,
+	0.605763879171060113080537108602e+00,
+	0.122005503659074842622205526637e+01,
+	0.185310765160151214200350644316e+01,
+	0.251973568567823788343040913628e+01,
+	0.324660897837240998812205115236e+01,
+	0.410133759617863964117891508007e+01,
+	// points 14
+	-0.430444857047363181262129810037e+01,
+	-0.346265693360227055020891736115e+01,
+	-0.274847072498540256862499852415e+01,
+	-0.209518325850771681573497272630e+01,
+	-0.147668273114114087058350654421e+01,
+	-0.878713787329399416114679311861e+00,
+	-0.291745510672562078446113075799e+00,
+	0.291745510672562078446113075799e+00,
+	0.878713787329399416114679311861e+00,
+	0.147668273114114087058350654421e+01,
+	0.209518325850771681573497272630e+01,
+	0.274847072498540256862499852415e+01,
+	0.346265693360227055020891736115e+01,
+	0.430444857047363181262129810037e+01,
+	// points 15
+	-0.449999070730939155366438053053e+01,
+	-0.366995037340445253472922383312e+01,
+	-0.296716692790560324848896036355e+01,
+	-0.232573248617385774545404479449e+01,
+	-0.171999257518648893241583152515e+01,
+	-0.113611558521092066631913490556e+01,
+	-0.565069583255575748526020337198e+00,
+	0.0e+00,
+	0.565069583255575748526020337198e+00,
+	0.113611558521092066631913490556e+01,
+	0.171999257518648893241583152515e+01,
+	0.232573248617385774545404479449e+01,
+	0.296716692790560324848896036355e+01,
+	0.366995037340445253472922383312e+01,
+	0.449999070730939155366438053053e+01,
+	// points 16
+	-0.468873893930581836468849864875e+01,
+	-0.386944790486012269871942409801e+01,
+	-0.317699916197995602681399455926e+01,
+	-0.254620215784748136215932870545e+01,
+	-0.195178799091625397743465541496e+01,
+	-0.138025853919888079637208966969e+01,
+	-0.822951449144655892582454496734e+00,
+	-0.273481046138152452158280401965e+00,
+	0.273481046138152452158280401965e+00,
+	0.822951449144655892582454496734e+00,
+	0.138025853919888079637208966969e+01,
+	0.195178799091625397743465541496e+01,
+	0.254620215784748136215932870545e+01,
+	0.317699916197995602681399455926e+01,
+	0.386944790486012269871942409801e+01,
+	0.468873893930581836468849864875e+01,
+	// points 17
+	-0.487134519367440308834927655662e+01,
+	-0.406194667587547430689245559698e+01,
+	-0.337893209114149408338327069289e+01,
+	-0.275776291570388873092640349574e+01,
+	-0.217350282666662081927537907149e+01,
+	-0.161292431422123133311288254454e+01,
+	-0.106764872574345055363045773799e+01,
+	-0.531633001342654731349086553718e+00,
+	0.0e+00,
+	0.531633001342654731349086553718e+00,
+	0.106764872574345055363045773799e+01,
+	0.161292431422123133311288254454e+01,
+	0.217350282666662081927537907149e+01,
+	0.275776291570388873092640349574e+01,
+	0.337893209114149408338327069289e+01,
+	0.406194667587547430689245559698e+01,
+	0.487134519367440308834927655662e+01,
+	// points 18
+	-0.504836400887446676837203757885e+01,
+	-0.424811787356812646302342016090e+01,
+	-0.357376906848626607950067599377e+01,
+	-0.296137750553160684477863254906e+01,
+	-0.238629908916668600026459301424e+01,
+	-0.183553160426162889225383944409e+01,
+	-0.130092085838961736566626555439e+01,
+	-0.776682919267411661316659462284e+00,
+	-0.258267750519096759258116098711e+00,
+	0.258267750519096759258116098711e+00,
+	0.776682919267411661316659462284e+00,
+	0.130092085838961736566626555439e+01,
+	0.183553160426162889225383944409e+01,
+	0.238629908916668600026459301424e+01,
+	0.296137750553160684477863254906e+01,
+	0.357376906848626607950067599377e+01,
+	0.424811787356812646302342016090e+01,
+	0.504836400887446676837203757885e+01,
+	// points 19
+	-0.522027169053748216460967142500e+01,
+	-0.442853280660377943723498532226e+01,
+	-0.376218735196402009751489394104e+01,
+	-0.315784881834760228184318034120e+01,
+	-0.259113378979454256492128084112e+01,
+	-0.204923170985061937575050838669e+01,
+	-0.152417061939353303183354859367e+01,
+	-0.101036838713431135136859873726e+01,
+	-0.503520163423888209373811765050e+00,
+	0.0e+00,
+	0.503520163423888209373811765050e+00,
+	0.101036838713431135136859873726e+01,
+	0.152417061939353303183354859367e+01,
+	0.204923170985061937575050838669e+01,
+	0.259113378979454256492128084112e+01,
+	0.315784881834760228184318034120e+01,
+	0.376218735196402009751489394104e+01,
+	0.442853280660377943723498532226e+01,
+	0.522027169053748216460967142500e+01,
+	// points 20
+	-0.538748089001123286201690041068e+01,
+	-0.460368244955074427307767524898e+01,
+	-0.394476404011562521037562880052e+01,
+	-0.334785456738321632691492452300e+01,
+	-0.278880605842813048052503375640e+01,
+	-0.225497400208927552308233334473e+01,
+	-0.173853771211658620678086566214e+01,
+	-0.123407621539532300788581834696e+01,
+	-0.737473728545394358705605144252e+00,
+	-0.245340708300901249903836530634e+00,
+	0.245340708300901249903836530634e+00,
+	0.737473728545394358705605144252e+00,
+	0.123407621539532300788581834696e+01,
+	0.173853771211658620678086566214e+01,
+	0.225497400208927552308233334473e+01,
+	0.278880605842813048052503375640e+01,
+	0.334785456738321632691492452300e+01,
+	0.394476404011562521037562880052e+01,
+	0.460368244955074427307767524898e+01,
+	0.538748089001123286201690041068e+01,
+	// points 30
+	-6.86334529352989158106110835756e+00,
+	-6.13827922012393462039499237854e+00,
+	-5.53314715156749572511833355558e+00,
+	-4.98891896858994394448649710633e+00,
+	-4.48305535709251834188703761971e+00,
+	-4.00390860386122881522787601332e+00,
+	-3.54444387315534988692540090217e+00,
+	-3.09997052958644174868873332237e+00,
+	-2.66713212453561720057110646422e+00,
+	-2.24339146776150407247297999483e+00,
+	-1.82674114360368803883588048351e+00,
+	-1.41552780019818851194072510555e+00,
+	-1.00833827104672346180498960870e+00,
+	-0.603921058625552307778155678757e+00,
+	-0.201128576548871485545763013244e+00,
+	0.201128576548871485545763013244e+00,
+	0.603921058625552307778155678757e+00,
+	1.00833827104672346180498960870e+00,
+	1.41552780019818851194072510555e+00,
+	1.82674114360368803883588048351e+00,
+	2.24339146776150407247297999483e+00,
+	2.66713212453561720057110646422e+00,
+	3.09997052958644174868873332237e+00,
+	3.54444387315534988692540090217e+00,
+	4.00390860386122881522787601332e+00,
+	4.48305535709251834188703761971e+00,
+	4.98891896858994394448649710633e+00,
+	5.53314715156749572511833355558e+00,
+	6.13827922012393462039499237854e+00,
+	6.86334529352989158106110835756e+00,
+	// points 32
+	-7.12581390983e+00,
+	-6.40949814927e+00,
+	-5.81222594952e+00,
+	-5.27555098652e+00,
+	-4.77716450350e+00,
+	-4.30554795335e+00,
+	-3.85375548547e+00,
+	-3.41716749282e+00,
+	-2.99249082500e+00,
+	-2.57724953773e+00,
+	-2.16949918361e+00,
+	-1.76765410946e+00,
+	-1.37037641095e+00,
+	-0.976500463590e+00,
+	-0.584978765436e+00,
+	-0.194840741569e+00,
+	0.194840741569e+00,
+	0.584978765436e+00,
+	0.976500463590e+00,
+	1.37037641095e+00,
+	1.76765410946e+00,
+	2.16949918361e+00,
+	2.57724953773e+00,
+	2.99249082500e+00,
+	3.41716749282e+00,
+	3.85375548547e+00,
+	4.30554795335e+00,
+	4.77716450350e+00,
+	5.27555098652e+00,
+	5.81222594952e+00,
+	6.40949814927e+00,
+	7.12581390983e+00,
+	// points 40
+	-8.09876113925e+00,
+	-7.41158253149e+00,
+	-6.84023730525e+00,
+	-6.32825535122e+00,
+	-5.85409505603e+00,
+	-5.40665424797e+00,
+	-4.97926097855e+00,
+	-4.56750207284e+00,
+	-4.16825706683e+00,
+	-3.77920675344e+00,
+	-3.39855826586e+00,
+	-3.02487988390e+00,
+	-2.65699599844e+00,
+	-2.29391714188e+00,
+	-1.93479147228e+00,
+	-1.57886989493e+00,
+	-1.22548010905e+00,
+	-0.874006612357e+00,
+	-0.523874713832e+00,
+	-0.174537214598e+00,
+	0.174537214598e+00,
+	0.523874713832e+00,
+	0.874006612357e+00,
+	1.22548010905e+00,
+	1.57886989493e+00,
+	1.93479147228e+00,
+	2.29391714188e+00,
+	2.65699599844e+00,
+	3.02487988390e+00,
+	3.39855826586e+00,
+	3.77920675344e+00,
+	4.16825706683e+00,
+	4.56750207284e+00,
+	4.97926097855e+00,
+	5.40665424797e+00,
+	5.85409505603e+00,
+	6.32825535122e+00,
+	6.84023730525e+00,
+	7.41158253149e+00,
+	8.09876113925e+00,
+	// points 50
+	-9.18240695813e+00,
+	-8.52277103092e+00,
+	-7.97562236821e+00,
+	-7.48640942986e+00,
+	-7.03432350977e+00,
+	-6.60864797386e+00,
+	-6.20295251927e+00,
+	-5.81299467542e+00,
+	-5.43578608722e+00,
+	-5.06911758492e+00,
+	-4.71129366617e+00,
+	-4.36097316045e+00,
+	-4.01706817286e+00,
+	-3.67867706252e+00,
+	-3.34503831394e+00,
+	-3.01549776957e+00,
+	-2.68948470227e+00,
+	-2.36649390430e+00,
+	-2.04607196869e+00,
+	-1.72780654752e+00,
+	-1.41131775490e+00,
+	-1.09625112896e+00,
+	-0.782271729555e+00,
+	-0.469059056678e+00,
+	-0.156302546889e+00,
+	0.156302546889e+00,
+	0.469059056678e+00,
+	0.782271729555e+00,
+	1.09625112896e+00,
+	1.41131775490e+00,
+	1.72780654752e+00,
+	2.04607196869e+00,
+	2.36649390430e+00,
+	2.68948470227e+00,
+	3.01549776957e+00,
+	3.34503831394e+00,
+	3.67867706252e+00,
+	4.01706817286e+00,
+	4.36097316045e+00,
+	4.71129366617e+00,
+	5.06911758492e+00,
+	5.43578608722e+00,
+	5.81299467542e+00,
+	6.20295251927e+00,
+	6.60864797386e+00,
+	7.03432350977e+00,
+	7.48640942986e+00,
+	7.97562236821e+00,
+	8.52277103092e+00,
+	9.18240695813e+00,
+	// points 60
+	-10.1591092462e+00,
+	-9.52090367701e+00,
+	-8.99239800140e+00,
+	-8.52056928412e+00,
+	-8.08518865425e+00,
+	-7.67583993750e+00,
+	-7.28627659440e+00,
+	-6.91238153219e+00,
+	-6.55125916706e+00,
+	-6.20077355799e+00,
+	-5.85929019639e+00,
+	-5.52552108614e+00,
+	-5.19842653458e+00,
+	-4.87715007747e+00,
+	-4.56097375794e+00,
+	-4.24928643596e+00,
+	-3.94156073393e+00,
+	-3.63733587617e+00,
+	-3.33620465355e+00,
+	-3.03780333823e+00,
+	-2.74180374807e+00,
+	-2.44790690231e+00,
+	-2.15583787123e+00,
+	-1.86534153123e+00,
+	-1.57617901198e+00,
+	-1.28812467487e+00,
+	-1.00096349956e+00,
+	-0.714488781673e+00,
+	-0.428500064221e+00,
+	-0.142801238703e+00,
+	0.142801238703e+00,
+	0.428500064221e+00,
+	0.714488781673e+00,
+	1.00096349956e+00,
+	1.28812467487e+00,
+	1.57617901198e+00,
+	1.86534153123e+00,
+	2.15583787123e+00,
+	2.44790690231e+00,
+	2.74180374807e+00,
+	3.03780333823e+00,
+	3.33620465355e+00,
+	3.63733587617e+00,
+	3.94156073393e+00,
+	4.24928643596e+00,
+	4.56097375794e+00,
+	4.87715007747e+00,
+	5.19842653458e+00,
+	5.52552108614e+00,
+	5.85929019639e+00,
+	6.20077355799e+00,
+	6.55125916706e+00,
+	6.91238153219e+00,
+	7.28627659440e+00,
+	7.67583993750e+00,
+	8.08518865425e+00,
+	8.52056928412e+00,
+	8.99239800140e+00,
+	9.52090367701e+00,
+	10.1591092462e+00,
+	// points 64
+	-10.5261231680e+00,
+	-9.89528758683e+00,
+	-9.37315954965e+00,
+	-8.90724909996e+00,
+	-8.47752908338e+00,
+	-8.07368728501e+00,
+	-7.68954016404e+00,
+	-7.32101303278e+00,
+	-6.96524112055e+00,
+	-6.62011226264e+00,
+	-6.28401122877e+00,
+	-5.95566632680e+00,
+	-5.63405216435e+00,
+	-5.31832522463e+00,
+	-5.00777960220e+00,
+	-4.70181564741e+00,
+	-4.39991716823e+00,
+	-4.10163447457e+00,
+	-3.80657151395e+00,
+	-3.51437593574e+00,
+	-3.22473129199e+00,
+	-2.93735082300e+00,
+	-2.65197243543e+00,
+	-2.36835458863e+00,
+	-2.08627287988e+00,
+	-1.80551717147e+00,
+	-1.52588914021e+00,
+	-1.24720015694e+00,
+	-0.969269423071e+00,
+	-0.691922305810e+00,
+	-0.414988824121e+00,
+	-0.138302244987e+00,
+	0.138302244987e+00,
+	0.414988824121e+00,
+	0.691922305810e+00,
+	0.969269423071e+00,
+	1.24720015694e+00,
+	1.52588914021e+00,
+	1.80551717147e+00,
+	2.08627287988e+00,
+	2.36835458863e+00,
+	2.65197243543e+00,
+	2.93735082300e+00,
+	3.22473129199e+00,
+	3.51437593574e+00,
+	3.80657151395e+00,
+	4.10163447457e+00,
+	4.39991716823e+00,
+	4.70181564741e+00,
+	5.00777960220e+00,
+	5.31832522463e+00,
+	5.63405216435e+00,
+	5.95566632680e+00,
+	6.28401122877e+00,
+	6.62011226264e+00,
+	6.96524112055e+00,
+	7.32101303278e+00,
+	7.68954016404e+00,
+	8.07368728501e+00,
+	8.47752908338e+00,
+	8.90724909996e+00,
+	9.37315954965e+00,
+	9.89528758683e+00,
+	10.5261231680e+00
+};
+
+// Gauss-Legendre quadrature; prefix gl
+
+// number of levels
+static const int gl_num_levels = 22;
+
+// number of points in each level
+static const int gl_num_points[] = {
+	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+	32, 64
+};
+
+// weights, starting with the first level
+static const double gl_weights[] = {
+	// weight 1
+	2.0e+00,
+	// weights 2
+	1.0e+00,
+	1.0e+00,
+	// weights 3
+	0.555555555555555555555555555555e+00,
+	0.888888888888888888888888888888e+00,
+	0.555555555555555555555555555555e+00,
+	// weights 4
+	0.347854845137453857373063949222e+00,
+	0.652145154862546142626936050778e+00,
+	0.652145154862546142626936050778e+00,
+	0.347854845137453857373063949222e+00,
+	// weights 5
+	0.236926885056189087514264040720e+00,
+	0.478628670499366468041291514836e+00,
+	0.568888888888888888888888888889e+00,
+	0.478628670499366468041291514836e+00,
+	0.236926885056189087514264040720e+00,
+	// weights 6
+	0.171324492379170345040296142173e+00,
+	0.360761573048138607569833513838e+00,
+	0.467913934572691047389870343990e+00,
+	0.467913934572691047389870343990e+00,
+	0.360761573048138607569833513838e+00,
+	0.171324492379170345040296142173e+00,
+	// weights 7
+	0.129484966168869693270611432679e+00,
+	0.279705391489276667901467771424e+00,
+	0.381830050505118944950369775489e+00,
+	0.417959183673469387755102040816e+00,
+	0.381830050505118944950369775489e+00,
+	0.279705391489276667901467771424e+00,
+	0.129484966168869693270611432679e+00,
+	// weights 8
+	0.101228536290376259152531354310e+00,
+	0.222381034453374470544355994426e+00,
+	0.313706645877887287337962201987e+00,
+	0.362683783378361982965150449277e+00,
+	0.362683783378361982965150449277e+00,
+	0.313706645877887287337962201987e+00,
+	0.222381034453374470544355994426e+00,
+	0.101228536290376259152531354310e+00,
+	// weights 9
+	0.812743883615744119718921581105e-01,
+	0.180648160694857404058472031243e+00,
+	0.260610696402935462318742869419e+00,
+	0.312347077040002840068630406584e+00,
+	0.330239355001259763164525069287e+00,
+	0.312347077040002840068630406584e+00,
+	0.260610696402935462318742869419e+00,
+	0.180648160694857404058472031243e+00,
+	0.812743883615744119718921581105e-01,
+	// weights 10
+	0.666713443086881375935688098933e-01,
+	0.149451349150580593145776339658e+00,
+	0.219086362515982043995534934228e+00,
+	0.269266719309996355091226921569e+00,
+	0.295524224714752870173892994651e+00,
+	0.295524224714752870173892994651e+00,
+	0.269266719309996355091226921569e+00,
+	0.219086362515982043995534934228e+00,
+	0.149451349150580593145776339658e+00,
+	0.666713443086881375935688098933e-01,
+	// weights 11
+	0.556685671161736664827537204425e-01,
+	0.125580369464904624634694299224e+00,
+	0.186290210927734251426097641432e+00,
+	0.233193764591990479918523704843e+00,
+	0.262804544510246662180688869891e+00,
+	0.272925086777900630714483528336e+00,
+	0.262804544510246662180688869891e+00,
+	0.233193764591990479918523704843e+00,
+	0.186290210927734251426097641432e+00,
+	0.125580369464904624634694299224e+00,
+	0.556685671161736664827537204425e-01,
+	// weights 12
+	0.471753363865118271946159614850e-01,
+	0.106939325995318430960254718194e+00,
+	0.160078328543346226334652529543e+00,
+	0.203167426723065921749064455810e+00,
+	0.233492536538354808760849898925e+00,
+	0.249147045813402785000562436043e+00,
+	0.249147045813402785000562436043e+00,
+	0.233492536538354808760849898925e+00,
+	0.203167426723065921749064455810e+00,
+	0.160078328543346226334652529543e+00,
+	0.106939325995318430960254718194e+00,
+	0.471753363865118271946159614850e-01,
+	// weights 13
+	0.404840047653158795200215922010e-01,
+	0.921214998377284479144217759538e-01,
+	0.138873510219787238463601776869e+00,
+	0.178145980761945738280046691996e+00,
+	0.207816047536888502312523219306e+00,
+	0.226283180262897238412090186040e+00,
+	0.232551553230873910194589515269e+00,
+	0.226283180262897238412090186040e+00,
+	0.207816047536888502312523219306e+00,
+	0.178145980761945738280046691996e+00,
+	0.138873510219787238463601776869e+00,
+	0.921214998377284479144217759538e-01,
+	0.404840047653158795200215922010e-01,
+	// weights 14
+	0.351194603317518630318328761382e-01,
+	0.801580871597602098056332770629e-01,
+	0.121518570687903184689414809072e+00,
+	0.157203167158193534569601938624e+00,
+	0.185538397477937813741716590125e+00,
+	0.205198463721295603965924065661e+00,
+	0.215263853463157790195876443316e+00,
+	0.215263853463157790195876443316e+00,
+	0.205198463721295603965924065661e+00,
+	0.185538397477937813741716590125e+00,
+	0.157203167158193534569601938624e+00,
+	0.121518570687903184689414809072e+00,
+	0.801580871597602098056332770629e-01,
+	0.351194603317518630318328761382e-01,
+	// weights 15
+	0.307532419961172683546283935772e-01,
+	0.703660474881081247092674164507e-01,
+	0.107159220467171935011869546686e+00,
+	0.139570677926154314447804794511e+00,
+	0.166269205816993933553200860481e+00,
+	0.186161000015562211026800561866e+00,
+	0.198431485327111576456118326444e+00,
+	0.202578241925561272880620199968e+00,
+	0.198431485327111576456118326444e+00,
+	0.186161000015562211026800561866e+00,
+	0.166269205816993933553200860481e+00,
+	0.139570677926154314447804794511e+00,
+	0.107159220467171935011869546686e+00,
+	0.703660474881081247092674164507e-01,
+	0.307532419961172683546283935772e-01,
+	// weights 16
+	0.271524594117540948517805724560e-01,
+	0.622535239386478928628438369944e-01,
+	0.951585116824927848099251076022e-01,
+	0.124628971255533872052476282192e+00,
+	0.149595988816576732081501730547e+00,
+	0.169156519395002538189312079030e+00,
+	0.182603415044923588866763667969e+00,
+	0.189450610455068496285396723208e+00,
+	0.189450610455068496285396723208e+00,
+	0.182603415044923588866763667969e+00,
+	0.169156519395002538189312079030e+00,
+	0.149595988816576732081501730547e+00,
+	0.124628971255533872052476282192e+00,
+	0.951585116824927848099251076022e-01,
+	0.622535239386478928628438369944e-01,
+	0.271524594117540948517805724560e-01,
+	// weights 17
+	0.241483028685479319601100262876e-01,
+	0.554595293739872011294401653582e-01,
+	0.850361483171791808835353701911e-01,
+	0.111883847193403971094788385626e+00,
+	0.135136368468525473286319981702e+00,
+	0.154045761076810288081431594802e+00,
+	0.168004102156450044509970663788e+00,
+	0.176562705366992646325270990113e+00,
+	0.179446470356206525458265644262e+00,
+	0.176562705366992646325270990113e+00,
+	0.168004102156450044509970663788e+00,
+	0.154045761076810288081431594802e+00,
+	0.135136368468525473286319981702e+00,
+	0.111883847193403971094788385626e+00,
+	0.850361483171791808835353701911e-01,
+	0.554595293739872011294401653582e-01,
+	0.241483028685479319601100262876e-01,
+	// weights 18
+	0.216160135264833103133427102665e-01,
+	0.497145488949697964533349462026e-01,
+	0.764257302548890565291296776166e-01,
+	0.100942044106287165562813984925e+00,
+	0.122555206711478460184519126800e+00,
+	0.140642914670650651204731303752e+00,
+	0.154684675126265244925418003836e+00,
+	0.164276483745832722986053776466e+00,
+	0.169142382963143591840656470135e+00,
+	0.169142382963143591840656470135e+00,
+	0.164276483745832722986053776466e+00,
+	0.154684675126265244925418003836e+00,
+	0.140642914670650651204731303752e+00,
+	0.122555206711478460184519126800e+00,
+	0.100942044106287165562813984925e+00,
+	0.764257302548890565291296776166e-01,
+	0.497145488949697964533349462026e-01,
+	0.216160135264833103133427102665e-01,
+	// weights 19
+	0.194617882297264770363120414644e-01,
+	0.448142267656996003328381574020e-01,
+	0.690445427376412265807082580060e-01,
+	0.914900216224499994644620941238e-01,
+	0.111566645547333994716023901682e+00,
+	0.128753962539336227675515784857e+00,
+	0.142606702173606611775746109442e+00,
+	0.152766042065859666778855400898e+00,
+	0.158968843393954347649956439465e+00,
+	0.161054449848783695979163625321e+00,
+	0.158968843393954347649956439465e+00,
+	0.152766042065859666778855400898e+00,
+	0.142606702173606611775746109442e+00,
+	0.128753962539336227675515784857e+00,
+	0.111566645547333994716023901682e+00,
+	0.914900216224499994644620941238e-01,
+	0.690445427376412265807082580060e-01,
+	0.448142267656996003328381574020e-01,
+	0.194617882297264770363120414644e-01,
+	// weights 20
+	0.176140071391521183118619623519e-01,
+	0.406014298003869413310399522749e-01,
+	0.626720483341090635695065351870e-01,
+	0.832767415767047487247581432220e-01,
+	0.101930119817240435036750135480e+00,
+	0.118194531961518417312377377711e+00,
+	0.131688638449176626898494499748e+00,
+	0.142096109318382051329298325067e+00,
+	0.149172986472603746787828737002e+00,
+	0.152753387130725850698084331955e+00,
+	0.152753387130725850698084331955e+00,
+	0.149172986472603746787828737002e+00,
+	0.142096109318382051329298325067e+00,
+	0.131688638449176626898494499748e+00,
+	0.118194531961518417312377377711e+00,
+	0.101930119817240435036750135480e+00,
+	0.832767415767047487247581432220e-01,
+	0.626720483341090635695065351870e-01,
+	0.406014298003869413310399522749e-01,
+	0.176140071391521183118619623519e-01,
+	// weights 32
+	0.701861000947009660040706373885e-02,
+	0.162743947309056706051705622064e-01,
+	0.253920653092620594557525897892e-01,
+	0.342738629130214331026877322524e-01,
+	0.428358980222266806568786466061e-01,
+	0.509980592623761761961632446895e-01,
+	0.586840934785355471452836373002e-01,
+	0.658222227763618468376500637069e-01,
+	0.723457941088485062253993564785e-01,
+	0.781938957870703064717409188283e-01,
+	0.833119242269467552221990746043e-01,
+	0.876520930044038111427714627518e-01,
+	0.911738786957638847128685771116e-01,
+	0.938443990808045656391802376681e-01,
+	0.956387200792748594190820022041e-01,
+	0.965400885147278005667648300636e-01,
+	0.965400885147278005667648300636e-01,
+	0.956387200792748594190820022041e-01,
+	0.938443990808045656391802376681e-01,
+	0.911738786957638847128685771116e-01,
+	0.876520930044038111427714627518e-01,
+	0.833119242269467552221990746043e-01,
+	0.781938957870703064717409188283e-01,
+	0.723457941088485062253993564785e-01,
+	0.658222227763618468376500637069e-01,
+	0.586840934785355471452836373002e-01,
+	0.509980592623761761961632446895e-01,
+	0.428358980222266806568786466061e-01,
+	0.342738629130214331026877322524e-01,
+	0.253920653092620594557525897892e-01,
+	0.162743947309056706051705622064e-01,
+	0.701861000947009660040706373885e-02,
+	// weights 64
+	0.178328072169643294729607914497e-02,
+	0.414703326056246763528753572855e-02,
+	0.650445796897836285611736039998e-02,
+	0.884675982636394772303091465973e-02,
+	0.111681394601311288185904930192e-01,
+	0.134630478967186425980607666860e-01,
+	0.157260304760247193219659952975e-01,
+	0.179517157756973430850453020011e-01,
+	0.201348231535302093723403167285e-01,
+	0.222701738083832541592983303842e-01,
+	0.243527025687108733381775504091e-01,
+	0.263774697150546586716917926252e-01,
+	0.283396726142594832275113052002e-01,
+	0.302346570724024788679740598195e-01,
+	0.320579283548515535854675043479e-01,
+	0.338051618371416093915654821107e-01,
+	0.354722132568823838106931467152e-01,
+	0.370551285402400460404151018096e-01,
+	0.385501531786156291289624969468e-01,
+	0.399537411327203413866569261283e-01,
+	0.412625632426235286101562974736e-01,
+	0.424735151236535890073397679088e-01,
+	0.435837245293234533768278609737e-01,
+	0.445905581637565630601347100309e-01,
+	0.454916279274181444797709969713e-01,
+	0.462847965813144172959532492323e-01,
+	0.469681828162100173253262857546e-01,
+	0.475401657148303086622822069442e-01,
+	0.479993885964583077281261798713e-01,
+	0.483447622348029571697695271580e-01,
+	0.485754674415034269347990667840e-01,
+	0.486909570091397203833653907347e-01,
+	0.486909570091397203833653907347e-01,
+	0.485754674415034269347990667840e-01,
+	0.483447622348029571697695271580e-01,
+	0.479993885964583077281261798713e-01,
+	0.475401657148303086622822069442e-01,
+	0.469681828162100173253262857546e-01,
+	0.462847965813144172959532492323e-01,
+	0.454916279274181444797709969713e-01,
+	0.445905581637565630601347100309e-01,
+	0.435837245293234533768278609737e-01,
+	0.424735151236535890073397679088e-01,
+	0.412625632426235286101562974736e-01,
+	0.399537411327203413866569261283e-01,
+	0.385501531786156291289624969468e-01,
+	0.370551285402400460404151018096e-01,
+	0.354722132568823838106931467152e-01,
+	0.338051618371416093915654821107e-01,
+	0.320579283548515535854675043479e-01,
+	0.302346570724024788679740598195e-01,
+	0.283396726142594832275113052002e-01,
+	0.263774697150546586716917926252e-01,
+	0.243527025687108733381775504091e-01,
+	0.222701738083832541592983303842e-01,
+	0.201348231535302093723403167285e-01,
+	0.179517157756973430850453020011e-01,
+	0.157260304760247193219659952975e-01,
+	0.134630478967186425980607666860e-01,
+	0.111681394601311288185904930192e-01,
+	0.884675982636394772303091465973e-02,
+	0.650445796897836285611736039998e-02,
+	0.414703326056246763528753572855e-02,
+	0.178328072169643294729607914497e-02
+};
+
+// points, starting with the first level
+static const double gl_points[] = {
+	// points 1
+	0.0e+00,
+	// points 2
+	-0.577350269189625764509148780502e+00,
+	0.577350269189625764509148780502e+00,
+	// points 3
+	-0.774596669241483377035853079956e+00,
+	0.0e+00,
+	0.774596669241483377035853079956e+00,
+	// points 4
+	-0.861136311594052575223946488893e+00,
+	-0.339981043584856264802665759103e+00,
+	0.339981043584856264802665759103e+00,
+	0.861136311594052575223946488893e+00,
+	// points 5
+	-0.906179845938663992797626878299e+00,
+	-0.538469310105683091036314420700e+00,
+	0.0e+00,
+	0.538469310105683091036314420700e+00,
+	0.906179845938663992797626878299e+00,
+	// points 6
+	-0.932469514203152027812301554494e+00,
+	-0.661209386466264513661399595020e+00,
+	-0.238619186083196908630501721681e+00,
+	0.238619186083196908630501721681e+00,
+	0.661209386466264513661399595020e+00,
+	0.932469514203152027812301554494e+00,
+	// points 7
+	-0.949107912342758524526189684048e+00,
+	-0.741531185599394439863864773281e+00,
+	-0.405845151377397166906606412077e+00,
+	0.0e+00,
+	0.405845151377397166906606412077e+00,
+	0.741531185599394439863864773281e+00,
+	0.949107912342758524526189684048e+00,
+	// points 8
+	-0.960289856497536231683560868569e+00,
+	-0.796666477413626739591553936476e+00,
+	-0.525532409916328985817739049189e+00,
+	-0.183434642495649804939476142360e+00,
+	0.183434642495649804939476142360e+00,
+	0.525532409916328985817739049189e+00,
+	0.796666477413626739591553936476e+00,
+	0.960289856497536231683560868569e+00,
+	// points 9
+	-0.968160239507626089835576202904e+00,
+	-0.836031107326635794299429788070e+00,
+	-0.613371432700590397308702039341e+00,
+	-0.324253423403808929038538014643e+00,
+	0.0e+00,
+	0.324253423403808929038538014643e+00,
+	0.613371432700590397308702039341e+00,
+	0.836031107326635794299429788070e+00,
+	0.968160239507626089835576202904e+00,
+	// points 10
+	-0.973906528517171720077964012084e+00,
+	-0.865063366688984510732096688423e+00,
+	-0.679409568299024406234327365115e+00,
+	-0.433395394129247190799265943166e+00,
+	-0.148874338981631210884826001130e+00,
+	0.148874338981631210884826001130e+00,
+	0.433395394129247190799265943166e+00,
+	0.679409568299024406234327365115e+00,
+	0.865063366688984510732096688423e+00,
+	0.973906528517171720077964012084e+00,
+	// points 11
+	-0.978228658146056992803938001123e+00,
+	-0.887062599768095299075157769304e+00,
+	-0.730152005574049324093416252031e+00,
+	-0.519096129206811815925725669459e+00,
+	-0.269543155952344972331531985401e+00,
+	0.0e+00,
+	0.269543155952344972331531985401e+00,
+	0.519096129206811815925725669459e+00,
+	0.730152005574049324093416252031e+00,
+	0.887062599768095299075157769304e+00,
+	0.978228658146056992803938001123e+00,
+	// points 12
+	-0.981560634246719250690549090149e+00,
+	-0.904117256370474856678465866119e+00,
+	-0.769902674194304687036893833213e+00,
+	-0.587317954286617447296702418941e+00,
+	-0.367831498998180193752691536644e+00,
+	-0.125233408511468915472441369464e+00,
+	0.125233408511468915472441369464e+00,
+	0.367831498998180193752691536644e+00,
+	0.587317954286617447296702418941e+00,
+	0.769902674194304687036893833213e+00,
+	0.904117256370474856678465866119e+00,
+	0.981560634246719250690549090149e+00,
+	// points 13
+	-0.984183054718588149472829448807e+00,
+	-0.917598399222977965206547836501e+00,
+	-0.801578090733309912794206489583e+00,
+	-0.642349339440340220643984606996e+00,
+	-0.448492751036446852877912852128e+00,
+	-0.230458315955134794065528121098e+00,
+	0.0e+00,
+	0.230458315955134794065528121098e+00,
+	0.448492751036446852877912852128e+00,
+	0.642349339440340220643984606996e+00,
+	0.801578090733309912794206489583e+00,
+	0.917598399222977965206547836501e+00,
+	0.984183054718588149472829448807e+00,
+	// points 14
+	-0.986283808696812338841597266704e+00,
+	-0.928434883663573517336391139378e+00,
+	-0.827201315069764993189794742650e+00,
+	-0.687292904811685470148019803019e+00,
+	-0.515248636358154091965290718551e+00,
+	-0.319112368927889760435671824168e+00,
+	-0.108054948707343662066244650220e+00,
+	0.108054948707343662066244650220e+00,
+	0.319112368927889760435671824168e+00,
+	0.515248636358154091965290718551e+00,
+	0.687292904811685470148019803019e+00,
+	0.827201315069764993189794742650e+00,
+	0.928434883663573517336391139378e+00,
+	0.986283808696812338841597266704e+00,
+	// points 15
+	-0.987992518020485428489565718587e+00,
+	-0.937273392400705904307758947710e+00,
+	-0.848206583410427216200648320774e+00,
+	-0.724417731360170047416186054614e+00,
+	-0.570972172608538847537226737254e+00,
+	-0.394151347077563369897207370981e+00,
+	-0.201194093997434522300628303395e+00,
+	0.0e+00,
+	0.201194093997434522300628303395e+00,
+	0.394151347077563369897207370981e+00,
+	0.570972172608538847537226737254e+00,
+	0.724417731360170047416186054614e+00,
+	0.848206583410427216200648320774e+00,
+	0.937273392400705904307758947710e+00,
+	0.987992518020485428489565718587e+00,
+	// points 16
+	-0.989400934991649932596154173450e+00,
+	-0.944575023073232576077988415535e+00,
+	-0.865631202387831743880467897712e+00,
+	-0.755404408355003033895101194847e+00,
+	-0.617876244402643748446671764049e+00,
+	-0.458016777657227386342419442984e+00,
+	-0.281603550779258913230460501460e+00,
+	-0.950125098376374401853193354250e-01,
+	0.950125098376374401853193354250e-01,
+	0.281603550779258913230460501460e+00,
+	0.458016777657227386342419442984e+00,
+	0.617876244402643748446671764049e+00,
+	0.755404408355003033895101194847e+00,
+	0.865631202387831743880467897712e+00,
+	0.944575023073232576077988415535e+00,
+	0.989400934991649932596154173450e+00,
+	// points 17
+	-0.990575475314417335675434019941e+00,
+	-0.950675521768767761222716957896e+00,
+	-0.880239153726985902122955694488e+00,
+	-0.781514003896801406925230055520e+00,
+	-0.657671159216690765850302216643e+00,
+	-0.512690537086476967886246568630e+00,
+	-0.351231763453876315297185517095e+00,
+	-0.178484181495847855850677493654e+00,
+	0.0e+00,
+	0.178484181495847855850677493654e+00,
+	0.351231763453876315297185517095e+00,
+	0.512690537086476967886246568630e+00,
+	0.657671159216690765850302216643e+00,
+	0.781514003896801406925230055520e+00,
+	0.880239153726985902122955694488e+00,
+	0.950675521768767761222716957896e+00,
+	0.990575475314417335675434019941e+00,
+	// points 18
+	-0.991565168420930946730016004706e+00,
+	-0.955823949571397755181195892930e+00,
+	-0.892602466497555739206060591127e+00,
+	-0.803704958972523115682417455015e+00,
+	-0.691687043060353207874891081289e+00,
+	-0.559770831073947534607871548525e+00,
+	-0.411751161462842646035931793833e+00,
+	-0.251886225691505509588972854878e+00,
+	-0.847750130417353012422618529358e-01,
+	0.847750130417353012422618529358e-01,
+	0.251886225691505509588972854878e+00,
+	0.411751161462842646035931793833e+00,
+	0.559770831073947534607871548525e+00,
+	0.691687043060353207874891081289e+00,
+	0.803704958972523115682417455015e+00,
+	0.892602466497555739206060591127e+00,
+	0.955823949571397755181195892930e+00,
+	0.991565168420930946730016004706e+00,
+	// points 19
+	-0.992406843843584403189017670253e+00,
+	-0.960208152134830030852778840688e+00,
+	-0.903155903614817901642660928532e+00,
+	-0.822714656537142824978922486713e+00,
+	-0.720966177335229378617095860824e+00,
+	-0.600545304661681023469638164946e+00,
+	-0.464570741375960945717267148104e+00,
+	-0.316564099963629831990117328850e+00,
+	-0.160358645640225375868096115741e+00,
+	0.0e+00,
+	0.160358645640225375868096115741e+00,
+	0.316564099963629831990117328850e+00,
+	0.464570741375960945717267148104e+00,
+	0.600545304661681023469638164946e+00,
+	0.720966177335229378617095860824e+00,
+	0.822714656537142824978922486713e+00,
+	0.903155903614817901642660928532e+00,
+	0.960208152134830030852778840688e+00,
+	0.992406843843584403189017670253e+00,
+	// points 20
+	-0.993128599185094924786122388471e+00,
+	-0.963971927277913791267666131197e+00,
+	-0.912234428251325905867752441203e+00,
+	-0.839116971822218823394529061702e+00,
+	-0.746331906460150792614305070356e+00,
+	-0.636053680726515025452836696226e+00,
+	-0.510867001950827098004364050955e+00,
+	-0.373706088715419560672548177025e+00,
+	-0.227785851141645078080496195369e+00,
+	-0.765265211334973337546404093988e-01,
+	0.765265211334973337546404093988e-01,
+	0.227785851141645078080496195369e+00,
+	0.373706088715419560672548177025e+00,
+	0.510867001950827098004364050955e+00,
+	0.636053680726515025452836696226e+00,
+	0.746331906460150792614305070356e+00,
+	0.839116971822218823394529061702e+00,
+	0.912234428251325905867752441203e+00,
+	0.963971927277913791267666131197e+00,
+	0.993128599185094924786122388471e+00,
+	// points 32
+	-0.997263861849481563544981128665e+00,
+	-0.985611511545268335400175044631e+00,
+	-0.964762255587506430773811928118e+00,
+	-0.934906075937739689170919134835e+00,
+	-0.896321155766052123965307243719e+00,
+	-0.849367613732569970133693004968e+00,
+	-0.794483795967942406963097298970e+00,
+	-0.732182118740289680387426665091e+00,
+	-0.663044266930215200975115168663e+00,
+	-0.587715757240762329040745476402e+00,
+	-0.506899908932229390023747474378e+00,
+	-0.421351276130635345364119436172e+00,
+	-0.331868602282127649779916805730e+00,
+	-0.239287362252137074544603209166e+00,
+	-0.144471961582796493485186373599e+00,
+	-0.483076656877383162348125704405e-01,
+	0.483076656877383162348125704405e-01,
+	0.144471961582796493485186373599e+00,
+	0.239287362252137074544603209166e+00,
+	0.331868602282127649779916805730e+00,
+	0.421351276130635345364119436172e+00,
+	0.506899908932229390023747474378e+00,
+	0.587715757240762329040745476402e+00,
+	0.663044266930215200975115168663e+00,
+	0.732182118740289680387426665091e+00,
+	0.794483795967942406963097298970e+00,
+	0.849367613732569970133693004968e+00,
+	0.896321155766052123965307243719e+00,
+	0.934906075937739689170919134835e+00,
+	0.964762255587506430773811928118e+00,
+	0.985611511545268335400175044631e+00,
+	0.997263861849481563544981128665e+00,
+	// points 64
+	-0.999305041735772139456905624346e+00,
+	-0.996340116771955279346924500676e+00,
+	-0.991013371476744320739382383443e+00,
+	-0.983336253884625956931299302157e+00,
+	-0.973326827789910963741853507352e+00,
+	-0.961008799652053718918614121897e+00,
+	-0.946411374858402816062481491347e+00,
+	-0.929569172131939575821490154559e+00,
+	-0.910522137078502805756380668008e+00,
+	-0.889315445995114105853404038273e+00,
+	-0.865999398154092819760783385070e+00,
+	-0.840629296252580362751691544696e+00,
+	-0.813265315122797559741923338086e+00,
+	-0.783972358943341407610220525214e+00,
+	-0.752819907260531896611863774886e+00,
+	-0.719881850171610826848940217832e+00,
+	-0.685236313054233242563558371031e+00,
+	-0.648965471254657339857761231993e+00,
+	-0.611155355172393250248852971019e+00,
+	-0.571895646202634034283878116659e+00,
+	-0.531279464019894545658013903544e+00,
+	-0.489403145707052957478526307022e+00,
+	-0.446366017253464087984947714759e+00,
+	-0.402270157963991603695766771260e+00,
+	-0.357220158337668115950442615046e+00,
+	-0.311322871990210956157512698560e+00,
+	-0.264687162208767416373964172510e+00,
+	-0.217423643740007084149648748989e+00,
+	-0.169644420423992818037313629748e+00,
+	-0.121462819296120554470376463492e+00,
+	-0.729931217877990394495429419403e-01,
+	-0.243502926634244325089558428537e-01,
+	0.243502926634244325089558428537e-01,
+	0.729931217877990394495429419403e-01,
+	0.121462819296120554470376463492e+00,
+	0.169644420423992818037313629748e+00,
+	0.217423643740007084149648748989e+00,
+	0.264687162208767416373964172510e+00,
+	0.311322871990210956157512698560e+00,
+	0.357220158337668115950442615046e+00,
+	0.402270157963991603695766771260e+00,
+	0.446366017253464087984947714759e+00,
+	0.489403145707052957478526307022e+00,
+	0.531279464019894545658013903544e+00,
+	0.571895646202634034283878116659e+00,
+	0.611155355172393250248852971019e+00,
+	0.648965471254657339857761231993e+00,
+	0.685236313054233242563558371031e+00,
+	0.719881850171610826848940217832e+00,
+	0.752819907260531896611863774886e+00,
+	0.783972358943341407610220525214e+00,
+	0.813265315122797559741923338086e+00,
+	0.840629296252580362751691544696e+00,
+	0.865999398154092819760783385070e+00,
+	0.889315445995114105853404038273e+00,
+	0.910522137078502805756380668008e+00,
+	0.929569172131939575821490154559e+00,
+	0.946411374858402816062481491347e+00,
+	0.961008799652053718918614121897e+00,
+	0.973326827789910963741853507352e+00,
+	0.983336253884625956931299302157e+00,
+	0.991013371476744320739382383443e+00,
+	0.996340116771955279346924500676e+00,
+	0.999305041735772139456905624346e+00
+};
+
+// this is the positive half of the normal inverse cumulative distribution
+// function starting at 0.5 and ending at 0.998, with step 0.002
+static const int normal_icdf_num = 250;
+static const double normal_icdf_end = 0.998;
+static const double normal_icdf_step = 0.002;
+static const double normal_icdf_data[] = {
+                         0,     5.013277548926632e-03,     1.002668110027482e-02,
+     1.504033667863573e-02,     2.005437035295075e-02,     2.506890825871118e-02,
+     3.008407662018906e-02,     3.510000177270896e-02,     4.011681018496811e-02,
+     4.513462848142118e-02,     5.015358346473358e-02,     5.517380213831685e-02,
+     6.019541172895673e-02,     6.521853970954372e-02,     7.024331382191684e-02,
+     7.526986209982979e-02,     8.029831289205518e-02,     8.532879488562921e-02,
+     9.036143712925872e-02,     9.539636905689193e-02,     1.004337205114700e-01,
+     1.054736217688682e-01,     1.105162035620419e-01,     1.155615971053833e-01,
+     1.206099341193073e-01,     1.256613468550742e-01,     1.307159681198632e-01,
+     1.357739313021116e-01,     1.408353703971274e-01,     1.459004200329941e-01,
+     1.509692154967774e-01,     1.560418927610502e-01,     1.611185885107454e-01,
+     1.661994401703590e-01,     1.712845859315068e-01,     1.763741647808615e-01,
+     1.814683165284770e-01,     1.865671818365194e-01,     1.916709022484199e-01,
+     1.967796202184666e-01,     2.018934791418509e-01,     2.070126233851871e-01,
+     2.121371983175242e-01,     2.172673503418634e-01,     2.224032269272064e-01,
+     2.275449766411493e-01,     2.326927491830447e-01,     2.378466954177492e-01,
+     2.430069674099821e-01,     2.481737184593126e-01,     2.533471031357997e-01,
+     2.585272773163098e-01,     2.637143982215299e-01,     2.689086244537098e-01,
+     2.741101160351471e-01,     2.793190344474543e-01,     2.845355426716215e-01,
+     2.897598052289143e-01,     2.949919882226262e-01,     3.002322593807220e-01,
+     3.054807880993972e-01,     3.107377454875922e-01,     3.160033044124830e-01,
+     3.212776395459965e-01,     3.265609274123727e-01,     3.318533464368166e-01,
+     3.371550769952773e-01,     3.424663014653906e-01,     3.477872042786273e-01,
+     3.531179719736894e-01,     3.584587932511938e-01,     3.638098590296960e-01,
+     3.691713625030897e-01,     3.745434991994428e-01,     3.799264670413076e-01,
+     3.853204664075677e-01,     3.907257001968699e-01,     3.961423738926983e-01,
+     4.015706956301487e-01,     4.070108762644656e-01,     4.124631294414047e-01,
+     4.179276716694820e-01,     4.234047223941831e-01,     4.288945040742017e-01,
+     4.343972422597815e-01,     4.399131656732339e-01,     4.454425062917200e-01,
+     4.509854994323708e-01,     4.565423838398405e-01,     4.621134017763774e-01,
+     4.676987991145082e-01,     4.732988254324370e-01,     4.789137341122557e-01,
+     4.845437824410792e-01,     4.901892317152095e-01,     4.958503473474533e-01,
+     5.015273989777081e-01,     5.072206605869456e-01,     5.129304106147284e-01,
+     5.186569320803909e-01,     5.244005127080407e-01,     5.301614450555191e-01,
+     5.359400266474903e-01,     5.417365601128169e-01,     5.475513533264015e-01,
+     5.533847195556728e-01,     5.592369776119069e-01,     5.651084520065839e-01,
+     5.709994731129874e-01,     5.769103773332714e-01,     5.828415072712163e-01,
+     5.887932119109195e-01,     5.947658468016782e-01,     6.007597742493188e-01,
+     6.067753635142652e-01,     6.128129910166273e-01,     6.188730405486286e-01,
+     6.249559034946875e-01,     6.310619790594989e-01,     6.371916745044747e-01,
+     6.433454053929173e-01,     6.495235958443252e-01,     6.557266787982537e-01,
+     6.619550962881621e-01,     6.682092997257233e-01,     6.744897501960819e-01,
+     6.807969187645747e-01,     6.871312867954694e-01,     6.934933462832894e-01,
+     6.998836001973414e-01,     7.063025628400875e-01,     7.127507602200432e-01,
+     7.192287304399239e-01,     7.257370241008051e-01,     7.322762047230997e-01,
+     7.388468491852137e-01,     7.454495481807891e-01,     7.520849066954916e-01,
+     7.587535445043710e-01,     7.654560966908778e-01,     7.721932141886847e-01,
+     7.789655643475453e-01,     7.857738315244843e-01,     7.926187177017122e-01,
+     7.995009431327367e-01,     8.064212470182405e-01,     8.133803882134047e-01,
+     8.203791459684610e-01,     8.274183207043821e-01,     8.344987348257406e-01,
+     8.416212335729145e-01,     8.487866859159668e-01,     8.559959854926823e-01,
+     8.632500515934207e-01,     8.705498301956541e-01,     8.778962950512290e-01,
+     8.852904488296417e-01,     8.927333243208563e-01,     9.002259857014339e-01,
+     9.077695298680560e-01,     9.153650878428145e-01,     9.230138262549803e-01,
+     9.307169489043392e-01,     9.384756984115684e-01,     9.462913579615760e-01,
+     9.541652531461944e-01,     9.620987539131418e-01,     9.700932766287370e-01,
+     9.781502862624715e-01,     9.862712987022384e-01,     9.944578832097529e-01,
+     1.002711665026549e+00,     1.011034328141817e+00,     1.019427618234370e+00,
+     1.027893345802143e+00,     1.036433389493790e+00,     1.045049699658389e+00,
+     1.053744302130666e+00,     1.062519302270867e+00,     1.071376889280213e+00,
+     1.080319340814956e+00,     1.089349027924277e+00,     1.098468420339863e+00,
+     1.107680092147800e+00,     1.116986727876610e+00,     1.126391129038801e+00,
+     1.135896221167312e+00,     1.145505061392697e+00,     1.155220846611952e+00,
+     1.165046922305602e+00,     1.174986792066090e+00,     1.185044127907810e+00,
+     1.195222781437427e+00,     1.205526795972518e+00,     1.215960419707319e+00,
+     1.226528120036610e+00,     1.237234599162827e+00,     1.248084811127547e+00,
+     1.259083980427072e+00,     1.270237622393149e+00,     1.281551565544601e+00,
+     1.293031976144243e+00,     1.304685385228790e+00,     1.316518718418261e+00,
+     1.328539328856810e+00,     1.340755033690217e+00,     1.353174154548003e+00,
+     1.365805562572272e+00,     1.378658728623277e+00,     1.391743779396326e+00,
+     1.405071560309632e+00,     1.418653706172739e+00,     1.432502720825812e+00,
+     1.446632067158978e+00,     1.461056269186906e+00,     1.475791028179170e+00,
+     1.490853355246661e+00,     1.506261723278244e+00,     1.522036241735856e+00,
+     1.538198858584064e+00,     1.554773594596853e+00,     1.571786816509860e+00,
+     1.589267557051392e+00,     1.607247891900218e+00,     1.625763386233235e+00,
+     1.644853626951473e+00,     1.664562861202721e+00,     1.684940767871913e+00,
+     1.706043396888962e+00,     1.727934322388419e+00,     1.750686071252170e+00,
+     1.774381910344958e+00,     1.799118106837967e+00,     1.825006821146403e+00,
+     1.852179858769047e+00,     1.880793608151250e+00,     1.911035647549119e+00,
+     1.943133751105067e+00,     1.977368428181947e+00,     2.014090812018140e+00,
+     2.053748910631823e+00,     2.096927429164343e+00,     2.144410620911840e+00,
+     2.197286376641053e+00,     2.257129244486226e+00,     2.326347874040842e+00,
+     2.408915545815460e+00,     2.512144327930459e+00,     2.652069807902199e+00,
+     2.878161739095476e+00
+};
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/integ/cc/product.cweb b/dynare++/integ/cc/product.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..39a6e846bf0f95e40ab4de8fa32c1dc88c5365ca
--- /dev/null
+++ b/dynare++/integ/cc/product.cweb
@@ -0,0 +1,213 @@
+@q $Id: product.cweb 431 2005-08-16 15:41:01Z kamenik $ @>
+@q Copyright 2005, Ondra Kamenik @>
+
+@ This is {\tt product.cpp} file.
+
+@c
+#include "product.h"
+#include "symmetry.h"
+
+@<|prodpit| empty constructor@>;
+@<|prodpit| regular constructor@>;
+@<|prodpit| copy constructor@>;
+@<|prodpit| destructor@>;
+@<|prodpit::operator==| code@>;
+@<|prodpit::operator=| code@>;
+@<|prodpit::operator++| code@>;
+@<|prodpit::setPointAndWeight| code@>;
+@<|prodpit::print| code@>;
+@<|ProductQuadrature| constructor@>;
+@<|ProductQuadrature::begin| code@>;
+@<|ProductQuadrature::designLevelForEvals| code@>;
+
+@ 
+@<|prodpit| empty constructor@>=
+prodpit::prodpit()
+	: prodq(NULL), level(0), npoints(0), jseq(NULL),
+	  end_flag(true), sig(NULL), p(NULL)
+{
+}
+
+@ This constructs a product iterator corresponding to index $(j0,0,\ldots,0)$.
+@<|prodpit| regular constructor@>=
+prodpit::prodpit(const ProductQuadrature& q, int j0, int l)
+	: prodq(&q), level(l), npoints(q.uquad.numPoints(l)), jseq(new IntSequence(q.dimen(), 0)),
+	  end_flag(false), sig(new ParameterSignal(q.dimen())), p(new Vector(q.dimen()))
+{
+	if (j0 < npoints) {
+		(*jseq)[0] = j0;
+		setPointAndWeight();
+	} else {
+		end_flag = true;
+	}
+}
+
+@ Copy constructor, clear.
+@<|prodpit| copy constructor@>=
+prodpit::prodpit(const prodpit& ppit)
+	: prodq(ppit.prodq), level(ppit.level), npoints(ppit.npoints),
+	  end_flag(ppit.end_flag), w(ppit.w)
+{
+	if (ppit.jseq)
+		jseq = new IntSequence(*(ppit.jseq));
+	else
+		jseq = NULL;
+	if (ppit.sig)
+		sig = new ParameterSignal(*(ppit.sig));
+	else
+		sig = NULL;
+	if (ppit.p)
+		p = new Vector(*(ppit.p));
+	else
+		p = NULL;
+}
+
+@ 
+@<|prodpit| destructor@>=
+prodpit::~prodpit()
+{
+	if (jseq)
+		delete jseq;
+	if (sig)
+		delete sig;
+	if (p)
+		delete p;
+}
+
+@ 
+@<|prodpit::operator==| code@>=
+bool prodpit::operator==(const prodpit& ppit) const
+{
+	bool ret = true;
+	ret = ret & prodq == ppit.prodq;
+	ret = ret & end_flag == ppit.end_flag;
+	ret = ret & ((jseq==NULL && ppit.jseq==NULL) ||
+				 (jseq!=NULL && ppit.jseq!=NULL && *jseq == *(ppit.jseq)));
+	return ret;
+}
+
+@ 
+@<|prodpit::operator=| code@>=
+const prodpit& prodpit::operator=(const prodpit& ppit)
+{
+	prodq = ppit.prodq;
+	level = ppit.level;
+	npoints = ppit.npoints;
+	end_flag = ppit.end_flag;
+	w = ppit.w;
+
+	if (jseq)
+		delete jseq;
+	if (sig)
+		delete sig;
+	if (p)
+		delete p;
+
+	if (ppit.jseq)
+		jseq = new IntSequence(*(ppit.jseq));
+	else
+		jseq = NULL;
+	if (ppit.sig)
+		sig = new ParameterSignal(*(ppit.sig));
+	else
+		sig = NULL;
+	if (ppit.p)
+		p = new Vector(*(ppit.p));
+	else
+		p = NULL;
+
+	return *this;
+}
+
+@ 
+@<|prodpit::operator++| code@>=
+prodpit& prodpit::operator++()
+{
+	// todo: throw if |prodq==NULL| or |jseq==NULL| or |sig==NULL| or |end_flag==true|
+	int i = prodq->dimen()-1;
+	(*jseq)[i]++;
+	while (i >= 0 && (*jseq)[i] == npoints) {
+		(*jseq)[i] = 0;
+		i--;
+		if (i >= 0)
+			(*jseq)[i]++;
+	}
+	sig->signalAfter(std::max(i,0));
+
+	if (i == -1)
+		end_flag = true;
+
+	if (! end_flag)
+		setPointAndWeight();
+
+	return *this;
+}
+
+
+@ This calculates the weight and sets point coordinates from the indices.
+@<|prodpit::setPointAndWeight| code@>=
+void prodpit::setPointAndWeight()
+{
+	// todo: raise if |prodq==NULL| or |jseq==NULL| or |sig==NULL| or
+	// |p==NULL| or |end_flag==true|
+	w = 1.0;
+	for (int i = 0; i < prodq->dimen(); i++) {
+		(*p)[i] = (prodq->uquad).point(level, (*jseq)[i]);
+		w *= (prodq->uquad).weight(level, (*jseq)[i]);
+	}
+}
+
+@ Debug print.
+@<|prodpit::print| code@>=
+void prodpit::print() const
+{
+	printf("j=[");
+	for (int i = 0; i < prodq->dimen(); i++)
+		printf("%2d ", (*jseq)[i]);
+	printf("] %+4.3f*(",w);
+	for (int i = 0; i < prodq->dimen()-1; i++)
+		printf("%+4.3f ", (*p)[i]);
+	printf("%+4.3f)\n",(*p)[prodq->dimen()-1]);
+}
+
+@ 
+@<|ProductQuadrature| constructor@>=
+ProductQuadrature::ProductQuadrature(int d, const OneDQuadrature& uq)
+	: QuadratureImpl<prodpit>(d), uquad(uq)
+{
+	// todo: check |d>=1|
+}
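+
+	// Illustration only, not part of Dynare++: a hypothetical caller with
+	// some user supplied VectorFunction f (here assumed to have indim()==3)
+	// might combine the pieces roughly as follows:
+	//
+	//   GaussHermite gh;               // one dimensional rule
+	//   ProductQuadrature pq(3, gh);   // three dimensional product rule
+	//   Vector out(f.outdim());
+	//   pq.integrate(f, 4, 2, out);    // level 4, split across two threads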
+
+@ This calls the |prodpit| constructor to return an iterator which points
+approximately at the |ti|-th portion out of |tn| portions. First we find
+out how many points are in the level, and then construct an iterator
+$(j0,0,\ldots,0)$ where $j0=$|ti*npoints/tn|.
+
+@<|ProductQuadrature::begin| code@>=
+prodpit ProductQuadrature::begin(int ti, int tn, int l) const
+{
+	// todo: raise if |l<dimen()|
+	// todo: check |l<=uquad.numLevels()|
+	int npoints = uquad.numPoints(l);
+	return prodpit(*this, ti*npoints/tn, l);
+}
+
+@ This just starts at the first level and goes to a higher level as
+long as the number of evaluations (which is $n_k^d$ for $k$ being the
+level) is less than the given maximum number of evaluations.
+
+@<|ProductQuadrature::designLevelForEvals| code@>=
+void ProductQuadrature::designLevelForEvals(int max_evals, int& lev, int& evals) const
+{
+	int last_evals;
+	evals = 1;
+	lev = 1;
+	do {
+		lev++;
+		last_evals = evals;
+		evals = numEvals(lev);
+	} while (lev < uquad.numLevels()-2 && evals < max_evals);
+	lev--;
+	evals = last_evals;
+
+}
+
+@ End of {\tt product.cpp} file
diff --git a/dynare++/integ/cc/product.hweb b/dynare++/integ/cc/product.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..59483a76ebb8dc34744af6e3700266dd8a418ad4
--- /dev/null
+++ b/dynare++/integ/cc/product.hweb
@@ -0,0 +1,107 @@
+@q $Id: product.hweb 431 2005-08-16 15:41:01Z kamenik $ @>
+@q Copyright 2005, Ondra Kamenik @>
+
+@*2 Product quadrature. This is {\tt product.h} file
+
+This file defines a product multidimensional quadrature. If $Q_k$
+denotes the one dimensional quadrature, then the product quadrature
+$Q$ of level $k$ and dimension $d$ takes the form
+$$Qf=\sum_{i_1=1}^{n_k}\ldots\sum_{i_d=1}^{n_k}w_{i_1}\cdot\ldots\cdot w_{i_d}
+f(x_{i_1},\ldots,x_{i_d})$$
+which can be written in terms of the one dimensional quadrature $Q_k$ as 
+$$Qf=(Q_k\otimes\ldots\otimes Q_k)f$$
+
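+For instance, for dimension $d=2$ and a level with $n_k=2$ points the
+product rule expands to
+$$Qf=w_1w_1f(x_1,x_1)+w_1w_2f(x_1,x_2)+w_2w_1f(x_2,x_1)+w_2w_2f(x_2,x_2),$$
+that is, $n_k^d=4$ evaluations of $f$.
+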
+Here we define the product quadrature iterator |prodpit| and plug it
+into |QuadratureImpl| to obtain |ProductQuadrature|.
+
+@s prodpit int
+@s ProductQuadrature int
+
+@c
+#ifndef PRODUCT_H
+#define PRODUCT_H
+
+#include "int_sequence.h"
+#include "vector_function.h"
+#include "quadrature.h"
+
+@<|prodpit| class declaration@>;
+@<|ProductQuadrature| class declaration@>;
+
+#endif
+
+@ This defines a product point iterator. We have to maintain the
+following: a pointer to product quadrature in order to know the
+dimension and the underlying one dimensional quadrature, then level,
+number of points in the level, integer sequence of indices, signal,
+the coordinates of the point and the weight.
+
+The point indices, signal, and point coordinates are implemented as
+pointers in order to allow for an empty constructor.
+
+The constructor |prodpit(const ProductQuadrature& q, int j0, int l)|
+constructs an iterator pointing to $(j0,0,\ldots,0)$, which is used by
+|begin| dictated by |QuadratureImpl|.
+
+@<|prodpit| class declaration@>=
+class ProductQuadrature;
+
+class prodpit {
+protected:@;
+	const ProductQuadrature* prodq;
+	int level;
+	int npoints;
+	IntSequence* jseq;
+	bool end_flag;
+	ParameterSignal* sig;
+	Vector* p;
+	double w;
+public:@;
+	prodpit();
+	prodpit(const ProductQuadrature& q, int j0, int l);
+	prodpit(const prodpit& ppit);
+	~prodpit();
+	bool operator==(const prodpit& ppit) const;
+	bool operator!=(const prodpit& ppit) const
+		{@+ return ! operator==(ppit);@+}
+	const prodpit& operator=(const prodpit& spit);
+	prodpit& operator++();
+	const ParameterSignal& signal() const
+		{@+ return *sig;@+}
+	const Vector& point() const
+		{@+ return *p;@+}
+	double weight() const
+		{@+ return w;@+}
+	void print() const;
+protected:@;
+	void setPointAndWeight();
+};
+
+@ The product quadrature is just |QuadratureImpl| with the product
+iterator plugged in. The object is constructed by just giving the
+underlying one dimensional quadrature, and the dimension. The only
+extra method is |designLevelForEvals| which for the given maximum
+number of evaluations (and dimension and underlying quadrature from
+the object) returns the maximum level yielding a number of evaluations
+less than the given number.
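+For instance, in three dimensions a level with $10$ points per
+dimension costs $10^3=1000$ evaluations, so with a maximum of $1000$
+evaluations the method settles (roughly) on the highest level whose
+product cost is still below that bound.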
+
+@<|ProductQuadrature| class declaration@>=
+class ProductQuadrature : public QuadratureImpl<prodpit> {
+	friend class prodpit;
+	const OneDQuadrature& uquad;
+public:@;
+	ProductQuadrature(int d, const OneDQuadrature& uq);
+	virtual ~ProductQuadrature()@+ {}
+	int numEvals(int l) const
+		{
+			int res = 1;
+			for (int i = 0; i < dimen(); i++)
+				res *= uquad.numPoints(l);
+			return res;
+		}
+	void designLevelForEvals(int max_eval, int& lev, int& evals) const;
+protected:@;
+	prodpit begin(int ti, int tn, int level) const;
+};
+
+@ End of {\tt product.h} file
diff --git a/dynare++/integ/cc/quadrature.cweb b/dynare++/integ/cc/quadrature.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..bf78fc2afedbaf5341b2e5f7c514dc63ea6be888
--- /dev/null
+++ b/dynare++/integ/cc/quadrature.cweb
@@ -0,0 +1,63 @@
+@q $Id: quadrature.cweb 431 2005-08-16 15:41:01Z kamenik $ @>
+@q Copyright 2005, Ondra Kamenik @>
+
+@ This is {\tt quadrature.cpp} file.
+
+@c
+#include "quadrature.h"
+#include "precalc_quadrature.dat"
+
+#include <cmath>
+
+@<|OneDPrecalcQuadrature::calcOffsets| code@>;
+@<|GaussHermite| constructor code@>;
+@<|GaussLegendre| constructor code@>;
+@<|NormalICDF| get code@>;
+
+@ 
+@<|OneDPrecalcQuadrature::calcOffsets| code@>=
+void OneDPrecalcQuadrature::calcOffsets()
+{
+	offsets[0] = 0;
+	for (int i = 1; i < num_levels; i++)
+		offsets[i] = offsets[i-1] + num_points[i-1];
+}
+
+@ 
+@<|GaussHermite| constructor code@>=
+GaussHermite::GaussHermite()
+	: OneDPrecalcQuadrature(gh_num_levels, gh_num_points, gh_weights, gh_points)@+ {}
+
+@ 
+@<|GaussLegendre| constructor code@>=
+GaussLegendre::GaussLegendre()
+	: OneDPrecalcQuadrature(gl_num_levels, gl_num_points, gl_weights, gl_points)@+ {}
+
+@ Here we transform a draw from the univariate uniform distribution on
+$\langle 0,1\rangle$ to a draw from the Gaussian $N(0,1)$. This is done
+by a table lookup; the table is given by |normal_icdf_step|,
+|normal_icdf_data|, |normal_icdf_num|, and a number
+|normal_icdf_end|. In order to avoid wrong tails for lookups close
+to zero or one, we rescale the input |x| by $(1-2*(1-end))=2*end-1$.
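+For example, for $x=0.75$ we get $xx=0.996\cdot 0.25=0.249$, hence
+$i=124$, and linear interpolation between |normal_icdf_data[124]| and
+|normal_icdf_data[125]| gives approximately $0.6713$, slightly below
+the exact $\Phi^{-1}(0.75)\approx 0.6745$ precisely because of this
+rescaling of the tails.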
+
+@<|NormalICDF| get code@>=
+double NormalICDF::get(double x)
+{
+	double xx = (2*normal_icdf_end-1)*std::abs(x-0.5);
+	int i = (int)floor(xx/normal_icdf_step);
+	double xx1 = normal_icdf_step*i;
+	double yy1 = normal_icdf_data[i];
+	double y;
+	if (i < normal_icdf_num-1) {
+		double yy2 = normal_icdf_data[i+1];
+		y = yy1 + (yy2-yy1)*(xx-xx1)/normal_icdf_step;
+	} else { // this should never happen
+		y = yy1;
+	}
+	if (x > 0.5)
+		return y;
+	else
+		return -y;
+}
+
+
+@ End of {\tt quadrature.cpp} file
diff --git a/dynare++/integ/cc/quadrature.hweb b/dynare++/integ/cc/quadrature.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..7aa22c2074d7bb3aee4e3608c428b3252b5f4be3
--- /dev/null
+++ b/dynare++/integ/cc/quadrature.hweb
@@ -0,0 +1,311 @@
+@q $Id: quadrature.hweb 2269 2008-11-23 14:33:22Z michel $ @>
+@q Copyright 2005, Ondra Kamenik @>
+
+@*2 Quadrature. This is {\tt quadrature.h} file
+
+This file defines an interface for one dimensional (non-nested) quadrature
+|OneDQuadrature|, and a parent for all multi-dimensional
+quadratures. This parent class |Quadrature| presents a general concept of
+quadrature, namely
+$$\int f(x){\rm d}x \approx\sum_{i=1}^N w_if(x_i)$$
+The class |Quadrature| just declares this concept. The concept is
+implemented by the class |QuadratureImpl|, which parallelizes the
+summation. All implementations wishing to use the parallel
+implementation should therefore inherit from |QuadratureImpl|; the
+integration is then done for them.
+
+The integration concept relies on a point iterator, which goes through
+all $x_i$ and $w_i$ for $i=1,\ldots,N$. All the iterators must be able
+to go through only a portion of the set $i=1,\ldots,N$. This enables
+us to implement parallelism: with two threads, for example, one iterator
+goes from the beginning (approximately) to the half, and the other
+goes from the half to the end.
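+For instance, with $N=1000$ points and two threads, one iterator would
+cover (roughly) the first $500$ points and the other the remaining
+$500$.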
+
+Besides this concept of the general quadrature, this file also defines
+a one dimensional quadrature, which is basically a scheme of points and
+weights for different levels. The class |OneDQuadrature| is a parent
+of all such objects; the classes |GaussHermite| and |GaussLegendre|
+are specific implementations for the Gauss--Hermite and Gauss--Legendre
+quadratures, respectively.
+
+@s OneDQuadrature int
+@s Quadrature int
+@s IntegrationWorker int
+@s QuadratureImpl int
+@s OneDPrecalcQuadrature int
+@s GaussHermite int
+@s GaussLegendre int
+@s NormalICDF int
+@s _Tpit int
+
+@c
+#ifndef QUADRATURE_H
+#define QUADRATURE_H
+
+#include <cstdlib>
+#include "vector_function.h"
+#include "int_sequence.h"
+#include "sthread.h"
+
+@<|OneDQuadrature| class declaration@>;
+@<|Quadrature| class declaration@>;
+@<|IntegrationWorker| class declaration@>;
+@<|QuadratureImpl| class declaration@>;
+@<|OneDPrecalcQuadrature| class declaration@>;
+@<|GaussHermite| class declaration@>;
+@<|GaussLegendre| class declaration@>;
+@<|NormalICDF| class declaration@>;
+
+#endif
+
+@ This pure virtual class represents a concept of one-dimensional
+(non-nested) quadrature. So, one dimensional quadrature must return
+number of levels, number of points in a given level, and then a point
+and a weight in a given level and given order.
+
+@<|OneDQuadrature| class declaration@>=
+class OneDQuadrature {
+public:@;
+	virtual ~OneDQuadrature()@+ {}
+	virtual int numLevels() const =0;
+	virtual int numPoints(int level) const =0;
+	virtual double point(int level, int i) const =0;
+	virtual double weight(int level, int i) const =0;
+};
+
+@ This is a general concept of multidimensional quadrature. At this
+general level, we maintain only a dimension, and declare virtual
+functions for integration. The function takes two forms: the first takes a
+constant |VectorFunction| as an argument, creates a |VectorFunctionSet|
+locally and does the calculation; the second one takes a
+|VectorFunctionSet| as its argument.
+
+Part of the interface is a method returning the number of evaluations
+for a specific level. Note two things: first, this assumes that the number of
+evaluations is known a priori and is thus not applicable to
+adaptive quadratures; second, for Monte Carlo type quadratures the
+level is the number of evaluations.
+
+@<|Quadrature| class declaration@>=
+class Quadrature {
+protected:@;
+	int dim;
+public:@;
+	Quadrature(int d) : dim(d)@+ {}
+	virtual ~Quadrature()@+ {}
+	int dimen() const
+		{@+ return dim;@+}
+	virtual void integrate(const VectorFunction& func, int level,
+						   int tn, Vector& out) const =0;
+	virtual void integrate(VectorFunctionSet& fs, int level, Vector& out) const =0;
+	virtual int numEvals(int level) const =0;
+};
+
+@ This is just an integration worker, which works over a given
+|QuadratureImpl|. It also needs the function, level, a specification
+of the subgroup of points, and output vector.
+
+See |@<|QuadratureImpl| class declaration@>| for details.
+
+@<|IntegrationWorker| class declaration@>=
+template <typename _Tpit>
+class QuadratureImpl;
+
+template <typename _Tpit>
+class IntegrationWorker : public THREAD {
+	const QuadratureImpl<_Tpit>& quad;
+	VectorFunction& func;
+	int level;
+	int ti;
+	int tn;
+	Vector& outvec;
+public:@;
+	IntegrationWorker(const QuadratureImpl<_Tpit>& q, VectorFunction& f, int l,
+					  int tii, int tnn, Vector& out)
+		: quad(q), func(f), level(l), ti(tii), tn(tnn), outvec(out) @+{}
+	@<|IntegrationWorker::operator()()| code@>;
+};
+
+
+@ This integrates the given portion of the integral. We obtain first
+and last iterators for the portion (|beg| and |end|). Then we iterate
+through the portion, and finally we add the intermediate result to the
+result |outvec|.
+
+This method just sums everything up as it comes. This might imply
+large numerical errors; perhaps in the future I will implement something
+smarter.
+
+@<|IntegrationWorker::operator()()| code@>=
+void operator()() {
+	_Tpit beg = quad.begin(ti, tn, level);
+	_Tpit end = quad.begin(ti+1, tn, level);
+	Vector tmpall(outvec.length());
+	tmpall.zeros();
+	Vector tmp(outvec.length());
+
+	// note that since beg came from begin, it has empty signal
+	// and first evaluation gets no signal
+	for (_Tpit run = beg; run != end; ++run) {
+		func.eval(run.point(), run.signal(), tmp);
+		tmpall.add(run.weight(), tmp);
+	}
+
+	{
+		SYNCHRO@, syn(&outvec, "IntegrationWorker");
+		outvec.add(1.0, tmpall);
+	}
+}
+
+
+@ This is the class which implements the integration. The class is
+templated by the iterator type. We declare a method |begin| returning
+an iterator to the beginning of the |ti|-th portion out of total |tn|
+portions for a given level.
+
+In addition, we define a method which saves all the points to a given
+file. Only for debugging purposes.
+
+@<|QuadratureImpl| class declaration@>=
+template <typename _Tpit>
+class QuadratureImpl : public Quadrature {
+	friend class IntegrationWorker<_Tpit>;
+public:@;
+	QuadratureImpl(int d) : Quadrature(d)@+ {}
+	@<|QuadratureImpl::integrate| code@>;
+	void integrate(const VectorFunction& func,
+				   int level, int tn, Vector& out) const {
+		VectorFunctionSet fs(func, tn);
+		integrate(fs, level, out);
+	}
+	@<|Quadrature::savePoints| code@>;
+	_Tpit start(int level) const
+		{@+ return begin(0,1,level);@+}
+	_Tpit end(int level) const
+		{@+ return begin(1,1,level);@+}
+protected:@;
+	virtual _Tpit begin(int ti, int tn, int level) const =0;
+};
+
+@ Just fill a thread group with workers and run it.
+@<|QuadratureImpl::integrate| code@>=
+void integrate(VectorFunctionSet& fs, int level, Vector& out) const {
+	// todo: out.length()==func.outdim()
+	// todo: dim == func.indim()
+	out.zeros();
+	THREAD_GROUP@, gr;
+	for (int ti = 0; ti < fs.getNum(); ti++) {
+		gr.insert(new IntegrationWorker<_Tpit>(*this, fs.getFunc(ti),
+											   level, ti, fs.getNum(), out));
+	}
+	gr.run();
+}
+
+
+@ Just for debugging.
+@<|Quadrature::savePoints| code@>=
+void savePoints(const char* fname, int level) const
+{
+	FILE* fd;
+	if (NULL==(fd = fopen(fname,"w"))) {
+		// todo: raise
+		fprintf(stderr, "Cannot open file %s for writing.\n", fname);
+		exit(1);
+	}
+	_Tpit beg = begin(0,1,level);
+	_Tpit end = begin(1,1,level);
+	for (_Tpit run = beg; run != end; ++run) {
+		fprintf(fd, "%16.12g", run.weight());
+		for (int i = 0;	 i < dimen(); i++)
+			fprintf(fd, "\t%16.12g", run.point()[i]);
+		fprintf(fd, "\n");
+	}
+	fclose(fd);
+}
+
+
+@ This is only an interface to precalculated data in the file {\tt
+precalc\_quadrature.dat}, which is basically C coded static data. It
+implements |OneDQuadrature|. The data file is supposed to define the
+following data: the number of levels, an array with the number of points at each
+level, an array of weights and an array of points. The latter two arrays
+store their data level by level. The offset of a specific level is stored in
+the |offsets| integer sequence.
+
+The implementing subclasses just fill in the necessary data from the
+file; the rest is calculated here.
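+For instance, if the numbers of points per level were $1$, $2$ and
+$3$, |offsets| would come out as $0$, $1$ and $3$, so the data for the
+third level would start at index $3$ of both |weights| and |points|.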
+
+@<|OneDPrecalcQuadrature| class declaration@>=
+class OneDPrecalcQuadrature : public OneDQuadrature {
+	int num_levels;
+	const int* num_points;
+	const double* weights;
+	const double* points;
+	IntSequence offsets;
+public:@;
+	OneDPrecalcQuadrature(int nlevels, const int* npoints,
+						  const double* wts, const double* pts)
+		: num_levels(nlevels),  num_points(npoints),
+		  weights(wts), points(pts), offsets(num_levels)
+		{@+ calcOffsets();@+}
+	virtual ~OneDPrecalcQuadrature()@+ {}
+	int numLevels() const
+		{@+ return num_levels;@+}
+	int numPoints(int level) const
+		{@+ return num_points[level-1];@+}
+	double point(int level, int i) const
+		{@+ return points[offsets[level-1]+i];@+}
+	double weight(int level, int i) const
+		{@+ return weights[offsets[level-1]+i];@+}
+protected:@;
+	void calcOffsets();
+};
+
+@ Just a precalculated Gauss--Hermite quadrature. This quadrature exactly integrates integrals of the form
+$$\int_{-\infty}^{\infty} x^ke^{-x^2}{\rm d}x$$
+for level $k$.
+
+Note that when plugging this one-dimensional quadrature into a product or
+Smolyak rule in order to integrate a function $f$ over normally
+distributed inputs, one has to wrap $f$ in
+|GaussConverterFunction| and apply the product or Smolyak rule to the
+new function.
+
+Check {\tt precalc\_quadrature.dat} for available levels.
+ 
+@<|GaussHermite| class declaration@>=
+class GaussHermite : public OneDPrecalcQuadrature {
+public:@;
+	GaussHermite();
+};
+
+@ Just a precalculated Gauss--Legendre quadrature. This quadrature exactly integrates integrals of the form
+$$\int_0^1x^k{\rm d}x$$
+for level $k$.
+
+Check {\tt precalc\_quadrature.dat} for available levels.
+
+@<|GaussLegendre| class declaration@>=
+class GaussLegendre : public OneDPrecalcQuadrature {
+public:@;
+	GaussLegendre();
+};
+
+@ This is just the inverse cumulative distribution function of the normal
+distribution. Its only method |get| returns for a given number
+$x\in(0,1)$ a number $y$ such that $P(z<y)=x$, where the probability
+is taken over normal distribution $N(0,1)$.
+
+Currently, the implementation is done by a table lookup, which implies
+that the tails had to be chopped off. This further implies that Monte
+Carlo quadratures using this transformation to draw points from the
+multidimensional $N(0,I)$ fail to integrate polynomial functions, for
+which the tails matter, with satisfactory precision.
+
+@<|NormalICDF| class declaration@>=
+class NormalICDF {
+public:@;
+	static double get(double x);
+};
+
+@ End of {\tt quadrature.h} file
diff --git a/dynare++/integ/cc/quasi_mcarlo.cweb b/dynare++/integ/cc/quasi_mcarlo.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..a4c480d3bc684a99a3ca61fd54a62b08ae6932bc
--- /dev/null
+++ b/dynare++/integ/cc/quasi_mcarlo.cweb
@@ -0,0 +1,341 @@
+@q $Id: quasi_mcarlo.cweb 431 2005-08-16 15:41:01Z kamenik $ @>
+@q Copyright 2005, Ondra Kamenik @>
+
+@ This is {\tt quasi\_mcarlo.cpp} file.
+
+@c
+#include "quasi_mcarlo.h"
+
+#include <math.h>
+
+@<|RadicalInverse| constructor code@>;
+@<|RadicalInverse::eval| code@>;
+@<|RadicalInverse::increase| code@>;
+@<|RadicalInverse::print| code@>;
+@<|HaltonSequence| static data@>;
+@<|HaltonSequence| constructor code@>;
+@<|HaltonSequence::operator=| code@>;
+@<|HaltonSequence::increase| code@>;
+@<|HaltonSequence::eval| code@>;
+@<|HaltonSequence::print| code@>;
+@<|qmcpit| empty constructor code@>;
+@<|qmcpit| regular constructor code@>;
+@<|qmcpit| copy constructor code@>;
+@<|qmcpit| destructor@>;
+@<|qmcpit::operator==| code@>;
+@<|qmcpit::operator=| code@>;
+@<|qmcpit::operator++| code@>;
+@<|qmcpit::weight| code@>;
+@<|qmcnpit| empty constructor code@>;
+@<|qmcnpit| regular constructor code@>;
+@<|qmcnpit| copy constructor code@>;
+@<|qmcnpit| destructor@>;
+@<|qmcnpit::operator=| code@>;
+@<|qmcnpit::operator++| code@>;
+@<|WarnockPerScheme::permute| code@>;
+@<|ReversePerScheme::permute| code@>;
+
+@ Here in the constructor, we have to calculate the maximum length of
+the |coeff| array for a given |base| and a given maximum |maxn|. After
+allocation, we calculate the coefficients.
+
+@<|RadicalInverse| constructor code@>=
+RadicalInverse::RadicalInverse(int n, int b, int mxn)
+	: num(n), base(b), maxn(mxn),
+	  coeff((int)(floor(log((double)maxn)/log((double)b))+2), 0)
+{
+	int nr = num;
+	j = -1;
+	do {
+		j++;
+		coeff[j] = nr % base;
+		nr = nr / base;
+	} while (nr > 0);
+}
+
+@ This evaluates the radical inverse. If there was no permutation, we have to calculate
+$$
+{c_0\over b}+{c_1\over b^2}+\ldots+{c_j\over b^{j+1}}
+$$
+which is evaluated as
+$$
+\left(\ldots\left(\left({c_j\over b}\cdot{1\over b}+{c_{j-1}\over b}\right)
+\cdot{1\over b}+{c_{j-2}\over b}\right)
+\ldots\right)\cdot{1\over b}+{c_0\over b}
+$$
+Now with permutation $\pi$, we have
+$$
+\left(\ldots\left(\left({\pi(c_j)\over b}\cdot{1\over b}+
+{\pi(c_{j-1})\over b}\right)\cdot{1\over b}+
+{\pi(c_{j-2})\over b}\right)
+\ldots\right)\cdot{1\over b}+{\pi(c_0)\over b}
+$$
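+For instance, with base $b=2$ and |num| equal to $6=(110)_2$ we have
+$c_0=0$, $c_1=1$ and $c_2=1$, so under the identity permutation the
+radical inverse comes out as $0/2+1/4+1/8=0.375$.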
+
+
+@<|RadicalInverse::eval| code@>=
+double RadicalInverse::eval(const PermutationScheme& p) const
+{
+	double res = 0;
+	for (int i = j; i >= 0; i--) {
+		int cper = p.permute(i, base, coeff[i]);
+		res = (cper + res)/base;
+	}
+	return res;
+}
+
+@ We just add 1 to the lowest coefficient and check for overflow with respect to the base.
+@<|RadicalInverse::increase| code@>=
+void RadicalInverse::increase()
+{
+	// todo: raise if |num+1 > maxn|
+	num++;
+	int i = 0;
+	coeff[i]++;
+	while (coeff[i] == base) {
+		coeff[i] = 0;
+		coeff[++i]++;
+	}
+	if (i > j)
+		j = i;
+}
+
+@ Debug print.
+@<|RadicalInverse::print| code@>=
+void RadicalInverse::print() const
+{
+	printf("n=%d b=%d c=", num, base);
+	coeff.print();
+}
+
+@ Here we have the first 170 primes. This means that we are not able
+to integrate in dimensions greater than 170.
+
+@<|HaltonSequence| static data@>=
+int HaltonSequence::num_primes = 170;
+int HaltonSequence::primes[] = {
+      2,     3,     5,     7,    11,    13,    17,    19,    23,    29,
+     31,    37,    41,    43,    47,    53,    59,    61,    67,    71,
+     73,    79,    83,    89,    97,   101,   103,   107,   109,   113,
+    127,   131,   137,   139,   149,   151,   157,   163,   167,   173,
+    179,   181,   191,   193,   197,   199,   211,   223,   227,   229,
+    233,   239,   241,   251,   257,   263,   269,   271,   277,   281,
+    283,   293,   307,   311,   313,   317,   331,   337,   347,   349,
+    353,   359,   367,   373,   379,   383,   389,   397,   401,   409,
+    419,   421,   431,   433,   439,   443,   449,   457,   461,   463,
+    467,   479,   487,   491,   499,   503,   509,   521,   523,   541,
+    547,   557,   563,   569,   571,   577,   587,   593,   599,   601,
+    607,   613,   617,   619,   631,   641,   643,   647,   653,   659,
+    661,   673,   677,   683,   691,   701,   709,   719,   727,   733,
+    739,   743,   751,   757,   761,   769,   773,   787,   797,   809,
+    811,   821,   823,   827,   829,   839,   853,   857,   859,   863,
+    877,   881,   883,   887,   907,   911,   919,   929,   937,   941,
+    947,   953,   967,   971,   977,   983,   991,   997,  1009,  1013
+};
+
+
+@ This takes the first |dim| primes, constructs |dim| radical inverses,
+and calls |eval|.
+
+@<|HaltonSequence| constructor code@>=
+HaltonSequence::HaltonSequence(int n, int mxn, int dim, const PermutationScheme& p)
+	: num(n), maxn(mxn), per(p), pt(dim)
+{
+	// todo: raise if |dim > num_primes|
+	// todo: raise if |n > mxn|
+	for (int i = 0; i < dim; i++)
+		ri.push_back(RadicalInverse(num, primes[i], maxn));
+	eval();
+}
+
+@ 
+@<|HaltonSequence::operator=| code@>=
+const HaltonSequence& HaltonSequence::operator=(const HaltonSequence& hs)
+{
+	num = hs.num;
+	maxn = hs.maxn;
+	ri.clear();
+	for (unsigned int i = 0; i < hs.ri.size(); i++)
+		ri.push_back(RadicalInverse(hs.ri[i]));
+	pt = hs.pt;
+	return *this;
+}
+
+
+
+@ This calls |RadicalInverse::increase| for all radical inverses and
+calls |eval|.
+
+@<|HaltonSequence::increase| code@>=
+void HaltonSequence::increase()
+{
+	for (unsigned int i = 0; i < ri.size(); i++)
+		ri[i].increase();
+	num++;
+	if (num <= maxn)
+		eval();
+}
+
+@ This sets point |pt| to radical inverse evaluations in each dimension.
+@<|HaltonSequence::eval| code@>=
+void HaltonSequence::eval()
+{
+	for (unsigned int i = 0; i < ri.size(); i++)
+		pt[i] = ri[i].eval(per);
+}
+
+@ Debug print.
+@<|HaltonSequence::print| code@>=
+void HaltonSequence::print() const
+{
+	for (unsigned int i = 0; i < ri.size(); i++)
+		ri[i].print();
+	printf("point=[ ");
+	for (unsigned int i = 0; i < ri.size(); i++)
+		printf("%7.6f ", pt[i]);
+	printf("]\n");
+}
+
+@ 
+@<|qmcpit| empty constructor code@>=
+qmcpit::qmcpit()
+	: spec(NULL), halton(NULL), sig(NULL)@+ {}
+
+@ 
+@<|qmcpit| regular constructor code@>=
+qmcpit::qmcpit(const QMCSpecification& s, int n)
+	: spec(&s), halton(new HaltonSequence(n, s.level(), s.dimen(), s.getPerScheme())),
+	  sig(new ParameterSignal(s.dimen()))
+{
+}
+
+@ 
+@<|qmcpit| copy constructor code@>=
+qmcpit::qmcpit(const qmcpit& qpit)
+	: spec(qpit.spec), halton(NULL), sig(NULL)
+{
+	if (qpit.halton)
+		halton = new HaltonSequence(*(qpit.halton));
+	if (qpit.sig)
+		sig = new ParameterSignal(qpit.spec->dimen());
+}
+
+@ 
+@<|qmcpit| destructor@>=
+qmcpit::~qmcpit()
+{
+	if (halton)
+		delete halton;
+	if (sig)
+		delete sig;
+}
+
+@ 
+@<|qmcpit::operator==| code@>=
+bool qmcpit::operator==(const qmcpit& qpit) const
+{
+	return (spec == qpit.spec) &&
+		((halton == NULL && qpit.halton == NULL) ||
+		 (halton != NULL && qpit.halton != NULL && halton->getNum() == qpit.halton->getNum())); 
+}
+
+@ 
+@<|qmcpit::operator=| code@>=
+const qmcpit& qmcpit::operator=(const qmcpit& qpit)
+{
+	spec = qpit.spec;
+	if (halton)
+		delete halton;
+	if (qpit.halton)
+		halton = new HaltonSequence(*(qpit.halton));
+	else
+		halton = NULL;
+	return *this;
+}
+
+
+@ 
+@<|qmcpit::operator++| code@>=
+qmcpit& qmcpit::operator++()
+{
+	// todo: raise if |halton == null || qmcq == NULL|
+	halton->increase();
+	return *this;
+}
+
+@ 
+@<|qmcpit::weight| code@>=
+double qmcpit::weight() const
+{
+	return 1.0/spec->level();
+}
+
+@ 
+@<|qmcnpit| empty constructor code@>=
+qmcnpit::qmcnpit()
+	: qmcpit(), pnt(NULL)@+ {}
+
+@ 
+@<|qmcnpit| regular constructor code@>=
+qmcnpit::qmcnpit(const QMCSpecification& s, int n)
+	: qmcpit(s, n), pnt(new Vector(s.dimen()))
+{
+}
+
+@ 
+@<|qmcnpit| copy constructor code@>=
+qmcnpit::qmcnpit(const qmcnpit& qpit)
+	: qmcpit(qpit), pnt(NULL)
+{
+	if (qpit.pnt)
+		pnt = new Vector(*(qpit.pnt));
+}
+
+@ 
+@<|qmcnpit| destructor@>=
+qmcnpit::~qmcnpit()
+{
+	if (pnt)
+		delete pnt;
+}
+
+@ 
+@<|qmcnpit::operator=| code@>=
+const qmcnpit& qmcnpit::operator=(const qmcnpit& qpit)
+{
+	qmcpit::operator=(qpit);
+	if (pnt)
+		delete pnt;
+	if (qpit.pnt)
+		pnt = new Vector(*(qpit.pnt));
+	else
+		pnt = NULL;
+	return *this;
+}
+
+@ Here we increase the point in the Halton sequence and then store the
+images of its coordinates under the |NormalICDF| function in |pnt|.
+
+@<|qmcnpit::operator++| code@>=
+qmcnpit& qmcnpit::operator++()
+{
+	qmcpit::operator++();
+	for (int i = 0; i < halton->point().length(); i++)
+		(*pnt)[i] = NormalICDF::get(halton->point()[i]);
+	return *this;
+}
+
+@ Clear from code.
+@<|WarnockPerScheme::permute| code@>=
+int WarnockPerScheme::permute(int i, int base, int c) const
+{
+	return (c+i) % base;
+}
+
+@ Clear from code.
+@<|ReversePerScheme::permute| code@>=
+int ReversePerScheme::permute(int i, int base, int c) const
+{
+	return (base-c) % base;
+}
+
+@ End of {\tt quasi\_mcarlo.cpp} file.
\ No newline at end of file
diff --git a/dynare++/integ/cc/quasi_mcarlo.hweb b/dynare++/integ/cc/quasi_mcarlo.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..d720d6bed5c42aefbb7786d8cd8e9caad30b990a
--- /dev/null
+++ b/dynare++/integ/cc/quasi_mcarlo.hweb
@@ -0,0 +1,286 @@
+@q $Id: quasi_mcarlo.hweb 431 2005-08-16 15:41:01Z kamenik $ @>
+@q Copyright 2005, Ondra Kamenik @>
+
+@*2 Quasi Monte Carlo quadrature. This is {\tt quasi\_mcarlo.h} file.
+
+This defines quasi Monte Carlo quadratures for cube and for a function
+multiplied by normal density. The quadrature for a cube is named
+|QMCarloCubeQuadrature| and integrates:
+$$\int_{\langle 0,1\rangle^n}f(x){\rm d}x$$
+The quadrature for a function of normally distributed parameters is
+named |QMCarloNormalQuadrature| and integrates:
+$${1\over\sqrt{(2\pi)^n}}\int_{(-\infty,\infty)^n}f(x)e^{-{1\over 2}x^Tx}{\rm d}x$$
+
+For a cube we define |qmcpit| as iterator of |QMCarloCubeQuadrature|,
+and for the normal density multiplied function we define |qmcnpit| as
+iterator of |QMCarloNormalQuadrature|.
+
+The quasi Monte Carlo method generates low discrepancy points with
+equal weights. The one dimensional low discrepancy sequences are
+generated by the |RadicalInverse| class, and the sequences are combined for
+higher dimensions by the |HaltonSequence| class. The Halton sequence can
+use a permutation scheme; |PermutationScheme| is an abstract class
+for all permutation schemes. We have three implementations:
+|WarnockPerScheme|, |ReversePerScheme|, and |IdentityPerScheme|.
+
+@s PermutationScheme int
+@s RadicalInverse int
+@s HaltonSequence int
+@s QMCSpecification int
+@s qmcpit int
+@s QMCarloCubeQuadrature int
+@s qmcnpit int
+@s QMCarloNormalQuadrature int
+@s WarnockPerScheme int
+@s ReversePerScheme int
+@s IdentityPerScheme int
+
+@c
+#ifndef QUASI_MCARLO_H
+#define QUASI_MCARLO_H
+
+#include "int_sequence.h"
+#include "quadrature.h"
+
+#include "Vector.h"
+
+#include <vector>
+
+@<|PermutationScheme| class declaration@>;
+@<|RadicalInverse| class declaration@>;
+@<|HaltonSequence| class declaration@>;
+@<|QMCSpecification| class declaration@>;
+@<|qmcpit| class declaration@>;
+@<|QMCarloCubeQuadrature| class declaration@>;
+@<|qmcnpit| class declaration@>;
+@<|QMCarloNormalQuadrature| class declaration@>;
+@<|WarnockPerScheme| class declaration@>;
+@<|ReversePerScheme| class declaration@>;
+@<|IdentityPerScheme| class declaration@>;
+
+#endif
+
+@ This abstract class declares the |permute| method, which permutes a
+coefficient |c| having index |i| for the base |base| and returns
+the permuted coefficient, which must be in $0,\ldots,base-1$.
+
+@<|PermutationScheme| class declaration@>=
+class PermutationScheme {
+public:@;
+	PermutationScheme()@+ {}
+	virtual ~PermutationScheme()@+ {}
+	virtual int permute(int i, int base, int c) const  =0;
+};
+
+
+@ This class represents an integer number |num| as
+$c_0+c_1b+c_2b^2+\ldots+c_jb^j$, where $b$ is |base| and
+$c_0,\ldots,c_j$ are stored in |coeff|. The size of the |IntSequence| |coeff|
+does not grow with growing |num|, but is fixed from the very beginning
+and is set according to the supplied maximum |maxn|.
+
+The basic method is |eval| which evaluates the |RadicalInverse| with a
+given permutation scheme and returns the point, and |increase| which
+increases |num| and recalculates the coefficients.
+
+@<|RadicalInverse| class declaration@>=
+class RadicalInverse {
+	int num;
+	int base;
+	int maxn;
+	int j;
+	IntSequence coeff;
+public:@;
+	RadicalInverse(int n, int b, int mxn);
+	RadicalInverse(const RadicalInverse& ri)
+		: num(ri.num), base(ri.base), maxn(ri.maxn), j(ri.j), coeff(ri.coeff)@+ {}
+	const RadicalInverse& operator=(const RadicalInverse& radi)
+		{
+			num = radi.num; base = radi.base; maxn = radi.maxn;
+			j = radi.j; coeff = radi.coeff;
+			return *this;
+		}
+	double eval(const PermutationScheme& p) const;
+	void increase();
+	void print() const;
+};
+
+@ This is a vector of |RadicalInverse|s, each of which has a
+different prime as its base. The static members |primes| and
+|num_primes| define a precalculated array of primes. The |increase|
+method of the class increases indices in all |RadicalInverse|s and
+sets point |pt| to contain the points in each dimension.
+
+@<|HaltonSequence| class declaration@>=
+class HaltonSequence {
+private:@;
+	static int primes[];
+	static int num_primes;
+protected:@;
+	int num;
+	int maxn;
+	vector<RadicalInverse> ri;
+	const PermutationScheme& per;
+	Vector pt;
+public:@;
+	HaltonSequence(int n, int mxn, int dim, const PermutationScheme& p);
+	HaltonSequence(const HaltonSequence& hs)
+		: num(hs.num), maxn(hs.maxn), ri(hs.ri), per(hs.per), pt(hs.pt)@+ {}
+	const HaltonSequence& operator=(const HaltonSequence& hs);
+	void increase();
+	const Vector& point() const
+		{@+ return pt;@+}
+	const int getNum() const
+		{@+ return num;@+}
+	void print() const;
+protected:@;
+	void eval();
+};
+
+@ This is a specification of quasi Monte Carlo quadrature. It consists
+of dimension |dim|, number of points (or level) |lev|, and the
+permutation scheme. This class is common to all quasi Monte Carlo
+classes.
+
+@<|QMCSpecification| class declaration@>=
+class QMCSpecification {
+protected:@;
+	int dim;
+	int lev;
+	const PermutationScheme& per_scheme;
+public:@;
+	QMCSpecification(int d, int l, const PermutationScheme& p)
+		: dim(d), lev(l), per_scheme(p)@+ {}
+	virtual ~QMCSpecification() {}
+	int dimen() const
+		{@+ return dim;@+}
+	int level() const
+		{@+ return lev;@+}
+	const PermutationScheme& getPerScheme() const
+		{@+ return per_scheme;@+}
+};
+
+
+@ This is an iterator for quasi Monte Carlo over a cube, that is for
+|QMCarloCubeQuadrature|. The iterator maintains a |HaltonSequence| of
+the same dimension as given by the specification. An iterator can be
+constructed from a given number |n|, or by a copy constructor. For
+technical reasons, there is also an empty constructor; for this
+reason, every member is a pointer.
+
+@<|qmcpit| class declaration@>=
+class qmcpit {
+protected:@;
+	const QMCSpecification* spec;
+	HaltonSequence* halton;
+	ParameterSignal* sig;
+public:@;
+	qmcpit();
+	qmcpit(const QMCSpecification& s, int n);
+	qmcpit(const qmcpit& qpit);
+	~qmcpit();
+	bool operator==(const qmcpit& qpit) const;
+	bool operator!=(const qmcpit& qpit) const
+		{@+ return ! operator==(qpit);@+}
+	const qmcpit& operator=(const qmcpit& qpit);
+	qmcpit& operator++();
+	const ParameterSignal& signal() const
+		{@+ return *sig;@+}
+	const Vector& point() const
+		{@+ return halton->point();@+}
+	double weight() const;
+	void print() const
+		{@+ halton->print();@+}
+};
+
+@ This is an easy declaration of quasi Monte Carlo quadrature for a
+cube. Everything important has been done in its iterator |qmcpit|, so
+we only inherit from general |Quadrature| and reimplement |begin| and
+|numEvals|.
+
+@<|QMCarloCubeQuadrature| class declaration@>=
+class QMCarloCubeQuadrature : public QuadratureImpl<qmcpit>, public QMCSpecification {
+public:@;
+	QMCarloCubeQuadrature(int d, int l, const PermutationScheme& p)
+		: QuadratureImpl<qmcpit>(d), QMCSpecification(d, l, p)@+ {}
+	virtual ~QMCarloCubeQuadrature()@+ {}
+	int numEvals(int l) const
+		{@+ return l;@+}
+protected:@;
+	qmcpit begin(int ti, int tn, int lev) const
+		{@+ return qmcpit(*this, ti*level()/tn + 1);@+} 
+};
+
+@ This is an iterator for |QMCarloNormalQuadrature|. It is the same as
+the iterator for the quasi Monte Carlo cube quadrature except for the
+point. The point is obtained from a point of |QMCarloCubeQuadrature|
+by a transformation $\langle
+0,1\rangle\rightarrow\langle-\infty,\infty\rangle$ applied to all
+dimensions. The transformation yields a normal distribution from a
+uniform distribution on $\langle0,1\rangle$. It is in fact
+|NormalICDF|.
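+
+In other words, if $u_i$ denotes the $i$-th coordinate of the
+underlying cube point, the coordinates of the normal point are
+$x_i=\Phi^{-1}(u_i)$, where $\Phi$ is the cumulative distribution
+function of the standard normal distribution; a (quasi) uniform $u_i$
+on $\langle0,1\rangle$ is thus mapped to a (quasi) standard normal
+$x_i$.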
+
+@<|qmcnpit| class declaration@>=
+class qmcnpit : public qmcpit {
+protected:@;
+	Vector* pnt;
+public:@;
+	qmcnpit();
+	qmcnpit(const QMCSpecification& spec, int n);
+	qmcnpit(const qmcnpit& qpit);
+	~qmcnpit();
+	bool operator==(const qmcnpit& qpit) const
+		{@+ return qmcpit::operator==(qpit);@+}
+	bool operator!=(const qmcnpit& qpit) const
+		{@+ return ! operator==(qpit);@+}
+	const qmcnpit& operator=(const qmcnpit& qpit);
+	qmcnpit& operator++();
+	const ParameterSignal& signal() const
+		{@+ return *sig;@+}
+	const Vector& point() const
+		{@+ return *pnt;@+}
+	void print() const
+		{@+ halton->print();pnt->print();@+}
+};
+
+@ This is an easy declaration of quasi Monte Carlo quadrature for the
+normal density weighted integral. Everything important has been done
+in its iterator |qmcnpit|, so we only inherit from the general
+|Quadrature| and reimplement |begin| and |numEvals|.
+
+@<|QMCarloNormalQuadrature| class declaration@>=
+class QMCarloNormalQuadrature : public QuadratureImpl<qmcnpit>, public QMCSpecification {
+public:@;
+	QMCarloNormalQuadrature(int d, int l, const PermutationScheme& p)
+		: QuadratureImpl<qmcnpit>(d), QMCSpecification(d, l, p)@+ {}
+	virtual ~QMCarloNormalQuadrature()@+ {}
+	int numEvals(int l) const
+		{@+ return l;@+}
+protected:@;
+	qmcnpit begin(int ti, int tn, int lev) const
+		{@+ return qmcnpit(*this, ti*level()/tn + 1);@+} 
+};
+
+@ Declares Warnock permutation scheme.
+@<|WarnockPerScheme| class declaration@>=
+class WarnockPerScheme : public PermutationScheme {
+public:@;
+	int permute(int i, int base, int c) const;
+};
+
+@ Declares reverse permutation scheme.
+@<|ReversePerScheme| class declaration@>=
+class ReversePerScheme : public PermutationScheme {
+public:@;
+	int permute(int i, int base, int c) const;
+};
+
+@ Declares no permutation (identity) scheme.
+@<|IdentityPerScheme| class declaration@>=
+class IdentityPerScheme : public PermutationScheme {
+public:@;
+	int permute(int i, int base, int c) const
+		{@+ return c;@+}
+};
+
+@ End of {\tt quasi\_mcarlo.h} file
diff --git a/dynare++/integ/cc/smolyak.cweb b/dynare++/integ/cc/smolyak.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..8e0a57374cb931e0eac5056fe2d850d5c43eb783
--- /dev/null
+++ b/dynare++/integ/cc/smolyak.cweb
@@ -0,0 +1,294 @@
+@q $Id: smolyak.cweb 1208 2007-03-19 21:33:12Z kamenik $ @>
+@q Copyright 2005, Ondra Kamenik @>
+
+@ This is {\tt smolyak.cpp} file.
+
+@c
+#include "smolyak.h"
+#include "symmetry.h"
+
+@<|smolpit| empty constructor@>;
+@<|smolpit| regular constructor@>;
+@<|smolpit| copy constructor@>;
+@<|smolpit| destructor@>;
+@<|smolpit::operator==| code@>;
+@<|smolpit::operator=| code@>;
+@<|smolpit::operator++| code@>;
+@<|smolpit::setPointAndWeight| code@>;
+@<|smolpit::print| code@>;
+@<|SmolyakQuadrature| constructor@>;
+@<|SmolyakQuadrature::numEvals| code@>;
+@<|SmolyakQuadrature::begin| code@>;
+@<|SmolyakQuadrature::calcNumEvaluations| code@>;
+@<|SmolyakQuadrature::designLevelForEvals| code@>;
+
+@ 
+@<|smolpit| empty constructor@>=
+smolpit::smolpit()
+	: smolq(NULL), isummand(0), jseq(NULL), sig(NULL), p(NULL)
+{
+}
+
+@ This constructs the beginning of the |isum|-th summand in |smolq|.
+We must be careful here, since |isum| can be past-the-end, in which
+case we must not index the vectors in |smolq| by |isum|.
+
+@<|smolpit| regular constructor@>=
+smolpit::smolpit(const SmolyakQuadrature& q, unsigned int isum)
+	: smolq(&q), isummand(isum), jseq(new IntSequence(q.dimen(), 0)),
+	  sig(new ParameterSignal(q.dimen())), p(new Vector(q.dimen()))
+{
+	if (isummand < q.numSummands()) {
+		setPointAndWeight();
+	}
+}
+
+@ 
+@<|smolpit| copy constructor@>=
+smolpit::smolpit(const smolpit& spit)
+	: smolq(spit.smolq), isummand(spit.isummand), w(spit.w)
+{
+	if (spit.jseq)
+		jseq = new IntSequence(*(spit.jseq));
+	else
+		jseq = NULL;
+	if (spit.sig)
+		sig = new ParameterSignal(*(spit.sig));
+	else
+		sig = NULL;
+	if (spit.p)
+		p = new Vector(*(spit.p));
+	else
+		p = NULL;
+}
+
+@ 
+@<|smolpit| destructor@>=
+smolpit::~smolpit()
+{
+	if (jseq)
+		delete jseq;
+	if (sig)
+		delete sig;
+	if (p)
+		delete p;
+}
+
+@ 
+@<|smolpit::operator==| code@>=
+bool smolpit::operator==(const smolpit& spit) const
+{
+	bool ret = true;
+	ret = ret && smolq == spit.smolq;
+	ret = ret && isummand == spit.isummand;
+	ret = ret && ((jseq==NULL && spit.jseq==NULL) ||
+				  (jseq!=NULL && spit.jseq!=NULL && *jseq == *(spit.jseq)));
+	return ret;
+}
+
+@ 
+@<|smolpit::operator=| code@>=
+const smolpit& smolpit::operator=(const smolpit& spit)
+{
+	smolq = spit.smolq;
+	isummand = spit.isummand;
+	w = spit.w;
+
+	if (jseq)
+		delete jseq;
+	if (sig)
+		delete sig;
+	if (p)
+		delete p;
+
+	if (spit.jseq)
+		jseq = new IntSequence(*(spit.jseq));
+	else
+		jseq = NULL;
+	if (spit.sig)
+		sig = new ParameterSignal(*(spit.sig));
+	else
+		sig = NULL;
+	if (spit.p)
+		p = new Vector(*(spit.p));
+	else
+		p = NULL;
+
+	return *this;
+}
+
+@ We first try to increase the index within the current summand. If we
+are at the maximum, we go to the subsequent summand. Note that in this
+case all indices in |jseq| will have been reset to zero, so no further
+change is needed.
+@<|smolpit::operator++| code@>=
+smolpit& smolpit::operator++()
+{
+	// todo: throw if |smolq==NULL| or |jseq==NULL| or |sig==NULL|
+	const IntSequence& levpts = smolq->levpoints[isummand];
+	int i = smolq->dimen()-1;
+	(*jseq)[i]++;
+	while (i >= 0 && (*jseq)[i] == levpts[i]) {
+		(*jseq)[i] = 0;
+		i--;
+		if (i >= 0)
+			(*jseq)[i]++;
+	}
+	sig->signalAfter(std::max(i,0));
+
+	if (i < 0)
+		isummand++;
+
+	if (isummand < smolq->numSummands())
+		setPointAndWeight();
+
+	return *this;
+}
+
+
+@ Here we set the point coordinates according to |jseq| and
+|isummand|. Also the weight is set here.
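+
+Concretely, for the summand given by the level sequence
+$k=$|levels[isummand]| and the index sequence $j=$|jseq|, the point
+has coordinates $p_i=x^{k_i}_{j_i}$ and the weight is
+$$w=(-1)^{l+d-\vert k\vert-1}\left(\matrix{d-1\cr\vert k\vert-l}\right)
+\prod_{i=1}^d w^{k_i}_{j_i},$$
+where $x^{k}_{j}$ and $w^{k}_{j}$ denote the $j$-th point and weight of
+the underlying one dimensional quadrature at level $k$.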
+
+@<|smolpit::setPointAndWeight| code@>=
+void smolpit::setPointAndWeight()
+{
+	// todo: raise if |smolq==NULL| or |jseq==NULL| or |sig==NULL| or
+	// |p==NULL| or |isummand>=smolq->numSummands()|
+	int l = smolq->level;
+	int d = smolq->dimen();
+	int sumk = (smolq->levels[isummand]).sum();
+	int m1exp = l + d - sumk - 1;
+	w = (2*(m1exp/2)==m1exp)? 1.0 : -1.0;
+	w *= smolq->psc.noverk(d-1, sumk-l);
+	for (int i = 0; i < d; i++) {
+		int ki = (smolq->levels[isummand])[i];
+		(*p)[i] = (smolq->uquad).point(ki, (*jseq)[i]);
+		w *= (smolq->uquad).weight(ki, (*jseq)[i]);
+	}
+}
+
+@ Debug print.
+@<|smolpit::print| code@>=
+void smolpit::print() const
+{
+	printf("isum=%-3d: [", isummand);
+	for (int i = 0; i < smolq->dimen(); i++)
+		printf("%2d ", (smolq->levels[isummand])[i]);
+	printf("] j=[");
+	for (int i = 0; i < smolq->dimen(); i++)
+		printf("%2d ", (*jseq)[i]);
+	printf("] %+4.3f*(",w);
+	for (int i = 0; i < smolq->dimen()-1; i++)
+		printf("%+4.3f ", (*p)[i]);
+	printf("%+4.3f)\n",(*p)[smolq->dimen()-1]);
+}
+
+@ Here is the constructor of |SmolyakQuadrature|. We have to set up
+|levels|, |levpoints| and |cumevals|. We have to go through all
+$d$-dimensional sequences $k$, such that $l\leq \vert k\vert\leq
+l+d-1$ and all $k_i$ are positive integers. This is equivalent to
+going through all $k$ such that $l-d\leq\vert k\vert\leq l-1$ and all
+$k_i$ are non-negative integers. This is equivalent to going through
+$d+1$ dimensional sequences $(k,x)$ such that $\vert(k,x)\vert =l-1$
+and $x=0,\ldots,d-1$. The resulting sequence of positive integers is
+obtained by adding $1$ to all $k_i$.
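+
+For instance, for $d=2$ and $l=2$ the summands correspond to the level
+sequences $(1,1)$, $(2,1)$ and $(1,2)$: these are exactly the
+sequences of positive integers with $2\leq\vert k\vert\leq 3$, and
+they are obtained from the non-negative sequences $(0,0)$, $(1,0)$ and
+$(0,1)$ by adding $1$ to each element.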
+
+@<|SmolyakQuadrature| constructor@>=
+SmolyakQuadrature::SmolyakQuadrature(int d, int l, const OneDQuadrature& uq)
+	: QuadratureImpl<smolpit>(d), level(l), uquad(uq), psc(d-1,d-1)
+{
+	// todo: check |l>1|, |l>=d|
+	// todo: check |l>=uquad.miLevel()|, |l<=uquad.maxLevel()|
+	int cum = 0;
+	SymmetrySet ss(l-1, d+1);
+	for (symiterator si(ss); !si.isEnd(); ++si) {
+		if ((*si)[d] <= d-1) {
+			IntSequence lev((const IntSequence&)*si, 0, d);
+			lev.add(1);
+			levels.push_back(lev);
+			IntSequence levpts(d);
+			for (int i = 0; i < d; i++)
+				levpts[i] = uquad.numPoints(lev[i]);
+			levpoints.push_back(levpts);
+			cum += levpts.mult();
+			cumevals.push_back(cum);
+		}
+	}
+}
+
+@ Here we return the number of evaluations of the quadrature for the
+given level. If the given level is the current one, we simply return
+the maximum cumulative number of evaluations. Otherwise we call the
+costly |calcNumEvaluations| method.
+
+@<|SmolyakQuadrature::numEvals| code@>=
+int SmolyakQuadrature::numEvals(int l) const
+{
+	if (l != level)
+		return calcNumEvaluations(l);
+	else
+		return cumevals[numSummands()-1];
+}
+
+
+@ This divides all the evaluations into |tn| approximately equal
+groups, and returns the beginning of the group specified by |ti|. The
+granularity of the division is the summand, as listed by |levels|.
+
+@<|SmolyakQuadrature::begin| code@>=
+smolpit SmolyakQuadrature::begin(int ti, int tn, int l) const
+{
+	// todo: raise if |level!=l|
+	if (ti == tn)
+		return smolpit(*this, numSummands());
+
+	int totevals = cumevals[numSummands()-1];
+	int evals = (totevals*ti)/tn;
+	unsigned int isum = 0;
+	while (isum+1 < numSummands() && cumevals[isum+1] < evals)
+		isum++;
+	return smolpit(*this, isum);
+}
+
+@ This has the same structure as |@<|SmolyakQuadrature| constructor@>|.
+We have to go through all summands and calculate
+the number of evaluations in each summand.
+
+@<|SmolyakQuadrature::calcNumEvaluations| code@>=
+int SmolyakQuadrature::calcNumEvaluations(int lev) const
+{
+	int cum = 0;
+	SymmetrySet ss(lev-1, dim+1);
+	for (symiterator si(ss); !si.isEnd(); ++si) {
+		if ((*si)[dim] <= dim-1) {
+			IntSequence lev((const IntSequence&)*si, 0, dim);
+			lev.add(1);
+			IntSequence levpts(dim);
+			for (int i = 0; i < dim; i++)
+				levpts[i] = uquad.numPoints(lev[i]);
+			cum += levpts.mult();
+		}
+	}
+	return cum;
+}
+
+@ This returns (through |lev| and |evals|) a maximum level such that
+the number of evaluations does not exceed the given |max_evals|.
+
+@<|SmolyakQuadrature::designLevelForEvals| code@>=
+void SmolyakQuadrature::designLevelForEvals(int max_evals, int& lev, int& evals) const
+{
+	int last_evals;
+	evals = 1;
+	lev = 1;
+	do {
+		lev++;
+		last_evals = evals;
+		evals = calcNumEvaluations(lev);
+	} while (lev < uquad.numLevels() && evals <= max_evals);
+	lev--;
+	evals = last_evals;
+}
+
+
+@ End of {\tt smolyak.cpp} file
diff --git a/dynare++/integ/cc/smolyak.hweb b/dynare++/integ/cc/smolyak.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..4ce9c64fa01409e5ee46746534b225065e501d4f
--- /dev/null
+++ b/dynare++/integ/cc/smolyak.hweb
@@ -0,0 +1,123 @@
+@q $Id: smolyak.hweb 431 2005-08-16 15:41:01Z kamenik $ @>
+@q Copyright 2005, Ondra Kamenik @>
+
+@*2 Smolyak quadrature. This is {\tt smolyak.h} file
+
+This file defines the Smolyak (sparse grid) multidimensional quadrature
+for a non-nested underlying one dimensional quadrature. Let $Q^1_l$ denote
+the one dimensional quadrature of level $l$, and let $n_l$ denote the
+number of points at level $l$. Then the Smolyak quadrature can be
+defined as
+$$Q^d f=\sum_{l\leq\vert k\vert\leq l+d-1}(-1)^{l+d-\vert k\vert-1}\left(\matrix{d-1\cr
+\vert k\vert-l}\right)(Q_{k_1}^1\otimes\ldots\otimes Q_{k_d}^1)f,$$
+where $d$ is the dimension, $k$ is a $d$-dimensional sequence of
+positive integers, and $\vert k\vert$ denotes the sum of the sequence.
+
+Here we define |smolpit| as Smolyak iterator and |SmolyakQuadrature|.
+
+@s smolpit int
+@s SmolyakQuadrature int
+@s PascalTriangle int
+@s SymmetrySet int
+@s symiterator int
+
+@c
+#ifndef SMOLYAK_H
+#define SMOLYAK_H
+
+#include "int_sequence.h"
+#include "tl_static.h"
+#include "vector_function.h"
+#include "quadrature.h"
+
+@<|smolpit| class declaration@>;
+@<|SmolyakQuadrature| class declaration@>;
+
+#endif
+
+@ Here we define the Smolyak point iterator. The Smolyak formula can
+be broken into a sum of product quadratures with various combinations
+of levels. The iterator follows this pattern. It maintains an index to
+a summand and then point coordinates within the summand (product
+quadrature). The array of summands to which |isummand| points is
+maintained by the |SmolyakQuadrature| class, to which the iterator
+keeps the pointer |smolq|.
+
+We provide a constructor which points to the beginning of the given
+summand. This constructor is used in the |SmolyakQuadrature::begin|
+method, which approximately divides all the iterators into subsets of
+equal size.
+
+@<|smolpit| class declaration@>=
+class SmolyakQuadrature;
+
+class smolpit {
+protected:@;
+	const SmolyakQuadrature* smolq;
+	unsigned int isummand;
+	IntSequence* jseq;
+	ParameterSignal* sig;
+	Vector* p;
+	double w;
+public:@;
+	smolpit();
+	smolpit(const SmolyakQuadrature& q, unsigned int isum);
+	smolpit(const smolpit& spit);
+	~smolpit();
+	bool operator==(const smolpit& spit) const;
+	bool operator!=(const smolpit& spit) const
+		{@+ return ! operator==(spit);@+}
+	const smolpit& operator=(const smolpit& spit);
+	smolpit& operator++();
+	const ParameterSignal& signal() const
+		{@+ return *sig;@+}
+	const Vector& point() const
+		{@+ return *p;@+}
+	double weight() const
+		{@+ return w;@+}
+	void print() const;
+protected:@;
+	void setPointAndWeight();
+};
+
+@ Here we define the class |SmolyakQuadrature|. It maintains an array
+of summands of the Smolyak quadrature formula:
+$$\sum_{l\leq\vert k\vert\leq l+d-1}(-1)^{l+d-\vert
+k\vert-1}\left(\matrix{d-1\cr
+\vert k\vert-l}\right)(Q_{k_1}^1\otimes\ldots\otimes Q_{k_d}^1)f$$
+Each summand is fully specified by the sequence $k$. The summands are
+here represented (besides $k$) also by the sequence of the number of
+points in each level selected by $k$, and also by a cumulative number
+of evaluations. The latter two are stored only for convenience.
+
+The summands in the code are given by |levels|, which is a vector of
+$k$ sequences, further by |levpoints|, which is a vector of sequences
+of the number of points in each level, and by |cumevals|, which is the
+cumulative number of points, that is $\sum_k\prod_{i=1}^dn_{k_i}$,
+where the sum runs through all $k$ before the current one.
+
+The |levels| and |levpoints| vectors are used by |smolpit|.
+
+@<|SmolyakQuadrature| class declaration@>=
+class SmolyakQuadrature : public QuadratureImpl<smolpit> {
+	friend class smolpit;
+	int level;
+	const OneDQuadrature& uquad;
+	vector<IntSequence> levels;
+	vector<IntSequence> levpoints;
+	vector<int> cumevals;
+	PascalTriangle psc;
+public:@;
+	SmolyakQuadrature(int d, int l, const OneDQuadrature& uq);
+	virtual ~SmolyakQuadrature()@+ {}
+	virtual int numEvals(int level) const;
+	void designLevelForEvals(int max_eval, int& lev, int& evals) const;
+protected:@;
+	smolpit begin(int ti, int tn, int level) const;
+	unsigned int numSummands() const
+		{@+ return levels.size();@+}
+private:@;
+	int calcNumEvaluations(int level) const;
+};
+
+@ End of {\tt smolyak.h} file
diff --git a/dynare++/integ/cc/vector_function.cweb b/dynare++/integ/cc/vector_function.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..ba0d538fc2fc5cbd3d02042e0d02b82574c437d4
--- /dev/null
+++ b/dynare++/integ/cc/vector_function.cweb
@@ -0,0 +1,177 @@
+@q $Id: vector_function.cweb 431 2005-08-16 15:41:01Z kamenik $ @>
+@q Copyright 2005, Ondra Kamenik @>
+
+@ This is {\tt vector\_function.cpp} file
+
+@c
+
+#include "vector_function.h"
+#include "cpplapack.h"
+
+#include <math.h>
+
+#include <string.h>
+#include <algorithm>
+
+@<|ParameterSignal| constructor code@>;
+@<|ParameterSignal| copy constructor code@>;
+@<|ParameterSignal::signalAfter| code@>;
+@<|VectorFunctionSet| constructor 1 code@>;
+@<|VectorFunctionSet| constructor 2 code@>;
+@<|VectorFunctionSet| destructor code@>;
+@<|GaussConverterFunction| constructor code 1@>;
+@<|GaussConverterFunction| constructor code 2@>;
+@<|GaussConverterFunction| copy constructor code@>;
+@<|GaussConverterFunction::eval| code@>;
+@<|GaussConverterFunction::multiplier| code@>;
+@<|GaussConverterFunction::calcCholeskyFactor| code@>;
+
+@ Just an easy constructor of a sequence of booleans, defaulting to
+``changed'' everywhere.
+
+@<|ParameterSignal| constructor code@>=
+ParameterSignal::ParameterSignal(int n)
+	: data(new bool[n]), num(n)
+{
+	for (int i = 0; i < num; i++)
+		data[i] = true;
+}
+
+@ 
+@<|ParameterSignal| copy constructor code@>=
+ParameterSignal::ParameterSignal(const ParameterSignal& sig)
+	: data(new bool[sig.num]), num(sig.num)
+{
+	memcpy(data, sig.data, num*sizeof(bool));
+}
+
+@ This sets |false| (no change) before the given parameter, and |true|
+(change) from the given parameter onwards (inclusive).
+
+@<|ParameterSignal::signalAfter| code@>=
+void ParameterSignal::signalAfter(int l)
+{
+	for (int i = 0; i < std::min(l,num); i++)
+		data[i] = false;
+	for (int i = l; i < num; i++)
+		data[i] = true;
+}
+
+@ This constructs a function set, hard-copying also the first function.
+@<|VectorFunctionSet| constructor 1 code@>=
+VectorFunctionSet::VectorFunctionSet(const VectorFunction& f, int n)
+	: funcs(n), first_shallow(false)
+{
+	for (int i = 0; i < n; i++)
+		funcs[i] = f.clone();
+}
+
+@ This constructs a function set with a shallow copy in the first
+position and hard copies in the others.
+
+@<|VectorFunctionSet| constructor 2 code@>=
+VectorFunctionSet::VectorFunctionSet(VectorFunction& f, int n)
+	: funcs(n), first_shallow(true)
+{
+	if (n > 0)
+		funcs[0] = &f;
+	for (int i = 1; i < n; i++)
+		funcs[i] = f.clone();
+}
+
+@ This deletes the functions. The first is deleted only if it was not
+a shallow copy.
+
+@<|VectorFunctionSet| destructor code@>=
+VectorFunctionSet::~VectorFunctionSet()
+{
+	unsigned int start = first_shallow ? 1 : 0;
+	for (unsigned int i = start; i < funcs.size(); i++)
+		delete funcs[i];
+}
+
+@ Here we construct the object from the given function $f$ and given
+variance-covariance matrix $\Sigma=$|vcov|. The matrix $A$ is
+calculated as lower triangular and yields $\Sigma=AA^T$.
+
+@<|GaussConverterFunction| constructor code 1@>=
+GaussConverterFunction::GaussConverterFunction(VectorFunction& f, const GeneralMatrix& vcov)
+	: VectorFunction(f), func(&f), delete_flag(false), A(vcov.numRows(), vcov.numRows()),
+	  multiplier(calcMultiplier()) 
+{
+	// todo: raise if |A.numRows() != indim()|
+	calcCholeskyFactor(vcov);
+}
+
+@ Here we construct the object in the same way, however we mark the
+function as to be deleted.
+
+@<|GaussConverterFunction| constructor code 2@>=
+GaussConverterFunction::GaussConverterFunction(VectorFunction* f, const GeneralMatrix& vcov)
+	: VectorFunction(*f), func(f), delete_flag(true), A(vcov.numRows(), vcov.numRows()),
+	  multiplier(calcMultiplier()) 
+{
+	// todo: raise if |A.numRows() != indim()|
+	calcCholeskyFactor(vcov);
+}
+
+
+@ 
+@<|GaussConverterFunction| copy constructor code@>=
+GaussConverterFunction::GaussConverterFunction(const GaussConverterFunction& f)
+	: VectorFunction(f), func(f.func->clone()), delete_flag(true), A(f.A),
+	  multiplier(f.multiplier)
+{
+} 
+
+@ Here we evaluate the function
+$g(y)={1\over\sqrt{\pi^n}}f\left(\sqrt{2}Ay\right)$. Since the matrix $A$ is lower
+triangular, the change signal for the function $f$ will look like
+$(0,\ldots,0,1,\ldots,1)$ where the first $1$ is in the same position
+as the first change in the given signal |sig| of the input
+$y=$|point|.
+
+@<|GaussConverterFunction::eval| code@>=
+void GaussConverterFunction::eval(const Vector& point, const ParameterSignal& sig, Vector& out)
+{
+	ParameterSignal s(sig);
+	int i = 0;
+	while (i < indim() && !sig[i])
+		i++;
+	s.signalAfter(i);
+
+	Vector x(indim());
+	x.zeros();
+	A.multaVec(x, point);
+	x.mult(sqrt(2.0));
+
+	func->eval(x, s, out);
+
+	out.mult(multiplier);
+}
+
+@ This returns $1\over\sqrt{\pi^n}$.
+@<|GaussConverterFunction::multiplier| code@>=
+double GaussConverterFunction::calcMultiplier() const
+{
+	return sqrt(pow(M_PI, -1*indim()));
+}
+
+@ 
+@<|GaussConverterFunction::calcCholeskyFactor| code@>=
+void GaussConverterFunction::calcCholeskyFactor(const GeneralMatrix& vcov)
+{
+	A = vcov;
+
+	int rows = A.numRows();
+	for (int i = 0; i < rows; i++)
+		for (int j = i+1; j < rows; j++)
+			A.get(i,j) = 0.0;
+
+	int info;
+	LAPACK_dpotrf("L", &rows, A.base(), &rows, &info);
+	// todo: raise if |info!=0|
+}
+
+
+@ End of {\tt vector\_function.cpp} file
diff --git a/dynare++/integ/cc/vector_function.hweb b/dynare++/integ/cc/vector_function.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..840b5892b093fa95983b0406d06c104005fe0e29
--- /dev/null
+++ b/dynare++/integ/cc/vector_function.hweb
@@ -0,0 +1,156 @@
+@q $Id: vector_function.hweb 431 2005-08-16 15:41:01Z kamenik $ @>
+@q Copyright 2005, Ondra Kamenik @>
+
+@*2 Vector function. This is {\tt vector\_function.h} file
+
+This file defines the interface for functions taking a vector as an
+input and returning a vector (possibly of a different size) as an
+output. We also introduce parameter signalling; this is a boolean
+vector which tracks which parameters have changed since the previous
+call. A |VectorFunction| implementation can exploit this information
+and evaluate the function more efficiently. The information can also
+be completely ignored.
+
+For this signalling reason, among others, the function evaluation is
+not |const|.
+
+@s ParameterSignal int
+@s VectorFunction int
+@s VectorFunctionSet int
+@s GaussConverterFunction int
+
+@c
+#ifndef VECTOR_FUNCTION_H
+#define VECTOR_FUNCTION_H
+
+#include "Vector.h"
+#include "GeneralMatrix.h"
+
+#include <vector>
+
+@<|ParameterSignal| class declaration@>;
+@<|VectorFunction| class declaration@>;
+@<|VectorFunctionSet| class declaration@>;
+@<|GaussConverterFunction| class declaration@>;
+
+#endif
+
+@ This is a simple class representing a vector of booleans. The items
+might be retrieved or changed, or can be set to |true| after some
+point. This is useful when we multiply the vector by a lower
+triangular matrix.
+
+|true| means that a parameter was changed.
+
+@<|ParameterSignal| class declaration@>=
+class ParameterSignal {
+protected:@;
+	bool* data;
+	int num;
+public:@;
+	ParameterSignal(int n);
+	ParameterSignal(const ParameterSignal& sig);
+	~ParameterSignal()
+		{@+ delete [] data;@+}
+	void signalAfter(int l);
+	const bool& operator[](int i) const
+		{@+ return data[i];@+} 
+	bool& operator[](int i)
+		{@+ return data[i];@+} 
+};
+
+@ This is the abstract class for a vector function. At this level of
+abstraction we only need to know the size of the input vector and the
+size of the output vector.
+
+The important thing here is the |clone| method; we will need to make
+hard copies of vector functions since the evaluations are not |const|.
+The hard copies are needed for parallelization.
+
+@<|VectorFunction| class declaration@>=
+class VectorFunction {
+protected:@;
+	int in_dim;
+	int out_dim;
+public:@;
+	VectorFunction(int idim, int odim)
+		: in_dim(idim), out_dim(odim)@+ {}
+	VectorFunction(const VectorFunction& func)
+		: in_dim(func.in_dim), out_dim(func.out_dim)@+ {}
+	virtual ~VectorFunction()@+ {}
+	virtual VectorFunction* clone() const =0;
+	virtual void eval(const Vector& point, const ParameterSignal& sig, Vector& out) =0;
+	int indim() const
+		{@+ return in_dim;@+}
+	int outdim() const
+		{@+ return out_dim;@+}
+};
+
+@ This makes |n| copies of a |VectorFunction|. The first constructor
+makes exactly |n| new copies, the second constructor copies only the
+pointer to the first one and the others are hard (real) copies.
+
+The class is useful for making a given number of copies at once, and
+this set can be reused many times if we need multiple copies of the
+function (for example for parallelizing the code).
+
+@<|VectorFunctionSet| class declaration@>=
+class VectorFunctionSet {
+protected:@;
+	std::vector<VectorFunction*> funcs;
+	bool first_shallow;
+public:@;
+	VectorFunctionSet(const VectorFunction& f, int n);
+	VectorFunctionSet(VectorFunction& f, int n);
+	~VectorFunctionSet();
+	VectorFunction& getFunc(int i)
+		{@+ return *(funcs[i]);@+}
+	int getNum() const
+		{@+ return funcs.size(); @+}
+};
+
+@ This class wraps another |VectorFunction| to allow integration of a
+function through normally distributed inputs. Namely, if one wants to
+integrate
+$${1\over\sqrt{(2\pi)^n\vert\Sigma\vert}}\int f(x)e^{-{1\over2}x^T\Sigma^{-1}x}{\rm d}x$$
+then if we write $\Sigma=AA^T$ and $x=\sqrt{2}Ay$, we get integral
+$${1\over\sqrt{(2\pi)^n\vert\Sigma\vert}}
+\int f\left(\sqrt{2}Ay\right)e^{-y^Ty}\sqrt{2^n}\vert A\vert{\rm d}y=
+{1\over\sqrt{\pi^n}}\int f\left(\sqrt{2}Ay\right)e^{-y^Ty}{\rm d}y,$$
+which means that we have to wrap a given function $f$ to yield the function
+$$g(y)={1\over\sqrt{\pi^n}}f\left(\sqrt{2}Ay\right).$$
+This is exactly what this class does. The transformation is
+useful since the Gauss--Hermite points and weights are defined for the
+weighting function $e^{-y^2}$, so it allows using
+Gauss--Hermite quadratures seamlessly in a context of integration through
+normally distributed inputs.
+
+The class maintains a pointer to the function $f$. When the object is
+constructed by the first constructor, the $f$ is not copied. If the
+object of this class is copied, then $f$ is copied and we need to
+remember to destroy it in the destructor; hence |delete_flag|. The
+second constructor takes a pointer to the function and differs from
+the first only by setting |delete_flag| to |true|.
+
+@<|GaussConverterFunction| class declaration@>=
+class GaussConverterFunction : public VectorFunction {
+protected:@;
+	VectorFunction* func;
+	bool delete_flag;
+	GeneralMatrix A;
+	double multiplier;
+public:@;
+	GaussConverterFunction(VectorFunction& f, const GeneralMatrix& vcov);
+	GaussConverterFunction(VectorFunction* f, const GeneralMatrix& vcov);
+	GaussConverterFunction(const GaussConverterFunction& f);
+	virtual ~GaussConverterFunction()
+		{@+ if (delete_flag) delete func; @+}
+	virtual VectorFunction* clone() const
+		{@+ return new GaussConverterFunction(*this);@+}
+	virtual void eval(const Vector& point, const ParameterSignal& sig, Vector& out);	
+private:@;
+	double calcMultiplier() const;
+	void calcCholeskyFactor(const GeneralMatrix& vcov);
+};
+
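+@ For orientation only, a hedged usage sketch (it assumes a
+hypothetical user supplied |VectorFunction| called |MyFunction| and
+already defined |dim|, |odim|, |vcov|, |level| and |num_threads|; it
+mirrors the way the wrapper is exercised in the testing code and is
+not part of this header): wrap the function together with the
+variance-covariance matrix and hand the wrapper to a quadrature
+defined for the weighting function $e^{-y^Ty}$, for instance a Smolyak
+quadrature built on Gauss--Hermite points.
+
+	MyFunction f(dim, odim);                  // hypothetical user function
+	GaussConverterFunction wrapped(f, vcov);  // vcov plays the role of Sigma
+	GaussHermite gh;
+	SmolyakQuadrature quad(dim, level, gh);
+	Vector out(odim);
+	quad.integrate(wrapped, level, num_threads, out);
+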
+@ End of {\tt vector\_function.h} file
diff --git a/dynare++/integ/src/Makefile b/dynare++/integ/src/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..2e59ea3105026332359848dba3ff90163ee8246f
--- /dev/null
+++ b/dynare++/integ/src/Makefile
@@ -0,0 +1,103 @@
+CC_FLAGS := -I../.. -I../../sylv/cc -I../../integ/cc -I../../tl/cc $(CC_FLAGS)
+
+#LDFLAGS = -llapack -lcblas -lf77blas -latlas -lg2c -lstdc++
+
+ifeq ($(CC),)
+	CC := gcc
+endif
+
+ifneq ($(LD_LIBRARY_PATH),)	# use LD_LIBRARY_PATH from environment
+	LDFLAGS := -Wl,--library-path $(LD_LIBRARY_PATH) $(LDFLAGS)
+endif
+
+ifeq ($(DEBUG),yes)
+	CC_FLAGS := $(CC_FLAGS) -g -DTL_DEBUG=2
+else
+	CC_FLAGS := $(CC_FLAGS) -O3 -DPOSIX_THREADS
+endif
+
+ifeq ($(OS),Windows_NT)
+	CC_FLAGS := -mno-cygwin -mthreads $(CC_FLAGS)
+	LDFLAGS := -mno-cygwin -mthreads $(LDFLAGS) -lpthreadGC2
+	ARCH := w32
+else
+	LDFLAGS := $(LDFLAGS) -lpthread
+	ARCH := linux
+endif
+
+
+sylvcppsource := $(wildcard ../../sylv/cc/*.cpp)
+sylvhsource := $(wildcard ../../sylv/cc/*.h)
+sylvobjects := $(patsubst %.cpp, %.o, $(sylvcppsource))
+
+
+tlcwebsource := $(wildcard ../../tl/cc/*.cweb)
+tlcppsource := $(patsubst %.cweb,%.cpp,$(tlcwebsource))
+tlhwebsource := $(wildcard ../../tl/cc/*.hweb)
+tlhsource := $(patsubst %.hweb,%.h,$(tlhwebsource))
+tlobjects := $(patsubst %.cweb,%.o,$(tlcwebsource))
+
+integcwebsource := $(wildcard ../../integ/cc/*.cweb)
+integcppsource := $(patsubst %.cweb,%.cpp,$(integcwebsource))
+integhwebsource := $(wildcard ../../integ/cc/*.hweb)
+integhsource := $(patsubst %.hweb,%.h,$(integhwebsource))
+integobjects := $(patsubst %.cweb,%.o,$(integcwebsource))
+
+parserhsource := $(wildcard ../../parser/cc/*.h)
+parsercppsource := $(wildcard ../../parser/cc/*.cpp)
+
+utilshsource := $(wildcard ../../utils/cc/*.h)
+utilscppsource := $(wildcard ../../utils/cc/*.cpp)
+utilsobjects := $(patsubst %.cpp,%.o,$(utilscppsource))
+
+../../tl/cc/dummy.ch:
+	make -C ../../tl/cc dummy.ch
+
+../../tl/cc/%.cpp: ../../tl/cc/%.cweb ../../tl/cc/dummy.ch
+	make -C ../../tl/cc $*.cpp
+
+../../tl/cc/%.h: ../../tl/cc/%.hweb ../../tl/cc/dummy.ch
+	make -C ../../tl/cc $*.h
+
+../../tl/cc/%.o: ../../tl/cc/%.cpp $(tlhsource)
+	make -C ../../tl/cc $*.o
+
+../../integ/cc/dummy.ch:
+	make -C ../../integ/cc dummy.ch
+
+../../integ/cc/%.cpp: ../../integ/cc/%.cweb ../../integ/cc/dummy.ch
+	make -C ../../integ/cc $*.cpp
+
+../../integ/cc/%.h: ../../integ/cc/%.hweb ../../integ/cc/dummy.ch
+	make -C ../../integ/cc $*.h
+
+../../integ/cc/%.o: ../../integ/cc/%.cpp $(integhsource) $(tlhsource)
+	make -C ../../integ/cc $*.o
+
+
+../../sylv/cc/%.o: ../../sylv/cc/%.cpp $(sylvhsource)
+	make -C ../../sylv/cc $*.o
+
+../../utils/cc/%.o: ../../utils/cc/%.cpp $(utilshsource)
+	make -C ../../utils/cc $*.o
+
+../../parser/cc/%.o: ../../parser/cc/%.cpp $(parserhsource)
+	make -C ../../parser/cc $*.o
+
+../../parser/cc/matrix_tab.o:
+	make -C ../../parser/cc matrix_tab.o
+
+../../parser/cc/matrix_ll.o:
+	make -C ../../parser/cc matrix_ll.o
+
+quadrature-points: quadrature-points.cpp $(sylvhsource) $(sylvobjects) $(integhsource) $(integobjects) $(parserhsource) $(utilshsource) $(tlhsource) $(tlobjects) $(utilsobjects)
+	$(CC) $(CC_FLAGS) quadrature-points.cpp -o quadrature-points ../../integ/cc/quadrature.o ../../integ/cc/smolyak.o ../../integ/cc/product.o ../../integ/cc/vector_function.o ../../tl/cc/sthread.o ../../tl/cc/symmetry.o ../../tl/cc/equivalence.o ../../tl/cc/int_sequence.o ../../tl/cc/tl_static.o ../../tl/cc/permutation.o ../../parser/cc/matrix_parser.o ../../parser/cc/matrix_tab.o ../../parser/cc/matrix_ll.o ../../parser/cc/parser_exception.o ../../sylv/cc/GeneralMatrix.o ../../sylv/cc/Vector.o ../../sylv/cc/SymSchurDecomp.o ../../sylv/cc/SylvException.o ../../utils/cc/memory_file.o $(LDFLAGS)
+ 
+
+clear:
+	make -C ../../tl/cc/ clear
+	make -C ../../integ/cc clear
+	make -C ../../parser/cc clear
+	make -C ../../utils/cc clear
+	make -C ../../sylv/cc clear
+	rm -rf quadrature-points
diff --git a/dynare++/integ/src/quadrature-points.cpp b/dynare++/integ/src/quadrature-points.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..bbc35f6e93d7ddb3c1ab620c63c2106672aed7cf
--- /dev/null
+++ b/dynare++/integ/src/quadrature-points.cpp
@@ -0,0 +1,192 @@
+
+
+#include "parser/cc/matrix_parser.h"
+#include "utils/cc/memory_file.h"
+#include "utils/cc/exception.h"
+#include "sylv/cc/GeneralMatrix.h"
+#include "sylv/cc/Vector.h"
+#include "sylv/cc/SymSchurDecomp.h"
+#include "sylv/cc/SylvException.h"
+#include "integ/cc/quadrature.h"
+#include "integ/cc/smolyak.h"
+#include "integ/cc/product.h"
+
+#include <getopt.h>
+#include <stdio.h>
+
+#include <cmath>
+#include <vector>
+#include <algorithm>
+
+struct QuadParams {
+	const char* outname;
+	const char* vcovname;
+	int max_level;
+	double discard_weight;
+	QuadParams(int argc, char** argv);
+	void check_consistency() const;
+private:
+	enum {opt_max_level, opt_discard_weight, opt_vcov};
+};
+
+QuadParams::QuadParams(int argc, char** argv)
+	: outname(NULL), vcovname(NULL), max_level(3), discard_weight(0.0)
+{
+	if (argc == 1) {
+		// print the help and exit
+		exit(1);
+	}
+
+	outname = argv[argc-1];
+	argc--;
+
+	struct option const opts [] = {
+		{"max-level", required_argument, NULL, opt_max_level},
+		{"discard-weight", required_argument, NULL, opt_discard_weight},
+		{"vcov", required_argument, NULL, opt_vcov},
+		{NULL, 0, NULL, 0}
+	};
+
+	int ret;
+	int index;
+	while (-1 != (ret = getopt_long(argc, argv, "", opts, &index))) {
+		switch (ret) {
+		case opt_max_level:
+			if (1 != sscanf(optarg, "%d", &max_level))
+				fprintf(stderr, "Couldn't parse integer %s, ignored\n", optarg);
+			break;
+		case opt_discard_weight:
+			if (1 != sscanf(optarg, "%lf", &discard_weight))
+				fprintf(stderr, "Couldn't parse float %s, ignored\n", optarg);
+			break;
+		case opt_vcov:
+			vcovname = optarg;
+			break;
+		}
+	}
+
+	check_consistency();
+}
+
+void QuadParams::check_consistency() const
+{
+	if (outname == NULL) {
+		fprintf(stderr, "Error: output name not set\n");
+		exit(1);
+	}
+
+	if (vcovname == NULL) {
+		fprintf(stderr, "Error: vcov file name not set\n");
+		exit(1);
+	}
+}
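+
+/* A hypothetical invocation (the file names are placeholders only):
+ *
+ *    quadrature-points --max-level 5 --discard-weight 1e-6 --vcov vcov.txt points.txt
+ *
+ * reads the variance-covariance matrix from vcov.txt, builds a
+ * Gauss-Hermite based Smolyak quadrature of the given level, and
+ * writes the rescaled weights and transformed nodes to points.txt,
+ * the last argument. */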
+
+/** Utility predicate for ordering pointers to vectors according to
+ * the ordering of the vectors they point to. */
+struct OrderVec {
+	bool operator()(const Vector* a, const Vector* b) const
+		{return *a < *b;}
+};
+
+int main(int argc, char** argv)
+{
+	QuadParams params(argc, argv);
+
+	// open output file for writing
+	FILE* fout;
+	if (NULL == (fout=fopen(params.outname, "w"))) {
+		fprintf(stderr, "Could not open %s for writing\n", params.outname);
+		exit(1);
+	}
+
+	try {
+
+		// open memory file for vcov
+		ogu::MemoryFile vcov_mf(params.vcovname);
+	
+		// parse the vcov matrix
+		ogp::MatrixParser mp;
+		mp.parse(vcov_mf.length(), vcov_mf.base());
+		if (mp.nrows() != mp.ncols())
+			throw ogu::Exception(__FILE__,__LINE__,
+								 "VCOV matrix not square");
+		// and put to the GeneralMatrix
+		GeneralMatrix vcov(mp.nrows(), mp.ncols());
+		vcov.zeros();
+		for (ogp::MPIterator it = mp.begin(); it != mp.end(); ++it)
+			vcov.get(it.row(), it.col()) = *it;
+	
+		// calculate the factor A of vcov, so that A*A^T=VCOV
+		GeneralMatrix A(vcov.numRows(), vcov.numRows());
+		SymSchurDecomp ssd(vcov);
+		ssd.getFactor(A);
+
+		// construct Gauss-Hermite quadrature
+		GaussHermite ghq;
+		// construct Smolyak quadrature
+		int level = params.max_level;
+		SmolyakQuadrature sq(vcov.numRows(), level, ghq);
+
+		printf("Dimension:                %d\n", vcov.numRows());
+		printf("Maximum level:            %d\n", level);
+		printf("Total number of nodes:    %d\n", sq.numEvals(level));
+
+		// put the points to the vector
+		std::vector<Vector*> points;
+		for (smolpit qit = sq.start(level); qit != sq.end(level); ++qit)
+			points.push_back(new Vector((const Vector&)qit.point()));
+		// sort by value and remove duplicate nodes (the pointers are
+		// all distinct, so we must compare the pointed-to vectors)
+		OrderVec ordvec;
+		std::sort(points.begin(), points.end(), ordvec);
+		std::vector<Vector*> unique_points;
+		for (std::vector<Vector*>::iterator it = points.begin(); it != points.end(); ++it)
+			if (unique_points.empty() || ordvec(unique_points.back(), *it))
+				unique_points.push_back(*it);
+			else
+				delete *it;
+		points = unique_points;
+
+		printf("Duplicate nodes removed:  %d\n", (int)(sq.numEvals(level)-points.size()));
+
+		// calculate weights and mass
+		double mass = 0.0;
+		std::vector<double> weights;
+		for (int i = 0; i < (int)points.size(); i++) {
+			weights.push_back(std::exp(-points[i]->dot(*(points[i]))));
+			mass += weights.back();
+		}
+
+		// calculate discarded mass
+		double discard_mass = 0.0;
+		for (int i = 0; i < (int)weights.size(); i++)
+			if (weights[i]/mass < params.discard_weight)
+				discard_mass += weights[i];
+
+		printf("Total mass discarded:     %f\n", discard_mass/mass);
+	
+		// dump the results
+		int npoints = 0;
+		double upscale_weight = 1/(mass-discard_mass);
+		Vector x(vcov.numRows());
+		for (int i = 0; i < (int)weights.size(); i++)
+			if (weights[i]/mass >= params.discard_weight) {
+				// print the upscaled weight
+				fprintf(fout, "%20.16g", upscale_weight*weights[i]);
+				// multiply point with the factor A and sqrt(2)
+				A.multVec(0.0, x, std::sqrt(2.), *(points[i]));
+				// print the coordinates
+				for (int j = 0; j < x.length(); j++)
+					fprintf(fout, " %20.16g", x[j]);
+				fprintf(fout, "\n");
+				npoints++;
+			}
+
+		printf("Final number of points:   %d\n", npoints);
+
+		fclose(fout);
+
+	} catch (const SylvException& e) {
+		e.printMessage();
+		return 1;
+	} catch (const ogu::Exception& e) {
+		e.print();
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/dynare++/integ/testing/Makefile b/dynare++/integ/testing/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..7f57fe3dafa7c2b2fb6b37b01a7550f8204c6e41
--- /dev/null
+++ b/dynare++/integ/testing/Makefile
@@ -0,0 +1,62 @@
+# $Id: Makefile 843 2006-07-28 08:54:19Z tamas $
+# Copyright 2005, Ondra Kamenik
+
+
+LD_LIBS := -llapack -lcblas -lf77blas -latlas -lg2c -lpthread
+CC_FLAGS := -Wall -I../cc -I ../../tl/cc -I../../sylv/cc
+ifeq ($(DEBUG),yes)
+	CC_FLAGS := $(CC_FLAGS) -g -DTL_DEBUG=2
+else
+	CC_FLAGS := $(CC_FLAGS) -O2 -DPOSIX_THREADS
+endif
+
+matrix_interface := GeneralMatrix Vector SylvException 
+matobjs := $(patsubst %, ../../sylv/cc/%.o, $(matrix_interface))
+cwebsource := $(wildcard ../cc/*.cweb)
+cppsource := $(patsubst %.cweb,%.cpp,$(cwebsource)) 
+objects := $(patsubst %.cweb,%.o,$(cwebsource))
+hwebsource := $(wildcard ../cc/*.hweb)
+hsource := $(patsubst %.hweb,%.h,$(hwebsource))
+tlcwebsource := $(wildcard ../../tl/cc/*.cweb)
+tlcppsource := $(patsubst %.cweb,%.cpp,$(tlcwebsource)) 
+tlobjects := $(patsubst %.cweb,%.o,$(tlcwebsource))
+tlhwebsource := $(wildcard ../../tl/cc/*.hweb)
+tlhsource := $(patsubst %.hweb,%.h,$(tlhwebsource))
+
+../cc/dummy.ch:
+	make -C ../cc dummy.ch
+
+../cc/%.cpp: ../cc/%.cweb ../cc/dummy.ch
+	make -C ../cc $*.cpp
+
+../cc/%.h: ../cc/%.hweb ../cc/dummy.ch
+	make -C ../cc $*.h
+
+../cc/%.o: ../cc/%.cpp $(hsource)
+	make -C ../cc $*.o
+
+../../tl/cc/dummy.ch:
+	make -C ../../tl/cc dummy.ch
+
+../../tl/cc/%.cpp: ../../tl/cc/%.cweb ../../tl/cc/dummy.ch
+	make -C ../../tl/cc $*.cpp
+
+../../tl/cc/%.h: ../../tl/cc/%.hweb ../../tl/cc/dummy.ch
+	make -C ../../tl/cc $*.h
+
+../../tl/cc/%.o: ../../tl/cc/%.cpp $(tlhsource)
+	make -C ../../tl/cc $*.o
+
+%.o: %.cpp $(hwebsource) $(hsource) $(tlhwebsource) $(tlhsource) 
+	$(CC) $(CC_FLAGS) -c $*.cpp
+
+tests: $(hwebsource) $(cwebsource) $(hsource) $(cppsource) \
+       $(tlhwebsource) $(tlcwebsource) $(tlhsource) $(tlcppsource) \
+       tests.o $(objects) $(tlobjects) 
+	$(CC) $(CC_FLAGS) $(objects) $(tlobjects) $(matobjs) tests.o -o tests $(LD_LIBS) 
+
+clear:
+	rm -f *.o
+	rm -f tests
+	make -C ../cc clear
+	make -C ../../tl/cc clear
diff --git a/dynare++/integ/testing/tests.cpp b/dynare++/integ/testing/tests.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ec2dda97be02855f1fb6eda7fad0e96720b15b6c
--- /dev/null
+++ b/dynare++/integ/testing/tests.cpp
@@ -0,0 +1,544 @@
+/* $Id: tests.cpp 431 2005-08-16 15:41:01Z kamenik $ */
+/* Copyright 2005, Ondra Kamenik */
+
+#include "GeneralMatrix.h"
+#include "cpplapack.h"
+#include "SylvException.h"
+
+#include "rfs_tensor.h"
+#include "normal_moments.h"
+
+#include "vector_function.h"
+#include "quadrature.h"
+#include "smolyak.h"
+#include "product.h"
+#include "quasi_mcarlo.h"
+
+#include <stdio.h>
+#include <string.h>
+#include <sys/time.h>
+#include <math.h>
+
+const int num_threads = 2; // does nothing if DEBUG defined
+
+// evaluates unfolded (Dx)^k power, where x is a vector, D is a
+// Cholesky factor (lower triangular)
+class MomentFunction : public VectorFunction {
+	GeneralMatrix D;
+	int k;
+public:
+	MomentFunction(const GeneralMatrix& inD, int kk)
+		: VectorFunction(inD.numRows(), UFSTensor::calcMaxOffset(inD.numRows(), kk)),
+		  D(inD), k(kk) {}
+	MomentFunction(const MomentFunction& func)
+		: VectorFunction(func), D(func.D), k(func.k) {}
+	VectorFunction* clone() const
+		{return new MomentFunction(*this);}
+	void eval(const Vector& point, const ParameterSignal& sig, Vector& out);
+};
+
+void MomentFunction::eval(const Vector& point, const ParameterSignal& sig, Vector& out)
+{
+	if (point.length() != indim() || out.length() != outdim()) {
+		printf("Wrong length of vectors in MomentFunction::eval\n");
+		exit(1);
+	}
+	Vector y(point);
+	y.zeros();
+	D.multaVec(y, point);
+	URSingleTensor ypow(y, k);
+	out.zeros();
+	out.add(1.0, ypow.getData());
+}
+
+class TensorPower : public VectorFunction {
+	int k;
+public:
+	TensorPower(int nvar, int kk)
+		: VectorFunction(nvar, UFSTensor::calcMaxOffset(nvar, kk)), k(kk) {}
+	TensorPower(const TensorPower& func)
+		: VectorFunction(func), k(func.k) {}
+	VectorFunction* clone() const
+		{return new TensorPower(*this);}
+	void eval(const Vector& point, const ParameterSignal& sig, Vector& out);
+};
+
+void TensorPower::eval(const Vector& point, const ParameterSignal& sig, Vector& out)
+{
+	if (point.length() != indim() || out.length() != outdim()) {
+		printf("Wrong length of vectors in TensorPower::eval\n");
+		exit(1);
+	}
+	URSingleTensor ypow(point, k);
+	out.zeros();
+	out.add(1.0, ypow.getData());
+}
+
+
+// evaluates (1+1/d)^d*(x_1*...*x_d)^(1/d), its integral over <0,1>^d
+// is 1.0, and its variation grows exponentially
+// d = dim
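+// (sanity check of the claim: int_0^1 x^(1/d) dx = d/(d+1), hence the
+// integral over the cube is ((d+1)/d)^d * (d/(d+1))^d = 1)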
+class Function1 : public VectorFunction {
+	int dim;
+public:
+	Function1(int d)
+		: VectorFunction(d, 1), dim(d) {}
+	Function1(const Function1& f)
+		: VectorFunction(f.indim(), f.outdim()), dim(f.dim) {}
+	VectorFunction* clone() const
+		{return new Function1(*this);}
+	virtual void eval(const Vector& point, const ParameterSignal& sig, Vector& out);
+};
+
+void Function1::eval(const Vector& point, const ParameterSignal& sig, Vector& out)
+{
+	if (point.length() != dim || out.length() != 1) {
+		printf("Wrong length of vectors in Function1::eval\n");
+		exit(1);
+	}
+	double r = 1;
+	for (int i = 0; i < dim; i++)
+		r *= point[i];
+	r = pow(r, 1.0/dim);
+	r *= pow(1.0 + 1.0/dim, (double)dim);
+	out[0] = r;
+}
+
+// evaluates Function1 but with transformation x_i=0.5(y_i+1)
+// this makes the new function integrate over <-1,1>^d to 1.0
+class Function1Trans : public Function1 {
+public:
+	Function1Trans(int d)
+		: Function1(d) {}
+	Function1Trans(const Function1Trans& func)
+		: Function1(func) {}
+	VectorFunction* clone() const
+		{return new Function1Trans(*this);}
+	virtual void eval(const Vector& point, const ParameterSignal& sig, Vector& out);
+};
+
+void Function1Trans::eval(const Vector& point, const ParameterSignal& sig, Vector& out)
+{
+	Vector p(point.length());
+	for (int i = 0; i < p.length(); i++)
+		p[i] = 0.5*(point[i]+1);
+	Function1::eval(p, sig, out);
+	out.mult(pow(0.5,indim()));
+}
+
+
+// WallTimer class. The constructor saves the wall time; the destructor
+// subtracts the saved time from the current one and prints the message
+// together with the elapsed time.
+class WallTimer {
+	char mes[100];
+	struct timeval start;
+	bool new_line;
+public:
+	WallTimer(const char* m, bool nl = true)
+		{strcpy(mes, m);new_line = nl; gettimeofday(&start, NULL);}
+	~WallTimer()
+		{
+			struct timeval end;
+			gettimeofday(&end, NULL);
+			printf("%s%8.4g", mes,
+				   end.tv_sec-start.tv_sec + (end.tv_usec-start.tv_usec)*1.0e-6);
+			if (new_line)
+				printf("\n");
+		}
+};
+
+/****************************************************/
+/*     declaration of TestRunnable class            */
+/****************************************************/
+class TestRunnable {
+	char name[100];
+public:
+	int dim; // dimension of the solved problem
+	int nvar; // number of variables of the solved problem
+	TestRunnable(const char* n, int d, int nv)
+		: dim(d), nvar(nv)
+		{strncpy(name, n, 100);}
+	// virtual destructor: tests are deleted through base class pointers in main()
+	virtual ~TestRunnable() {}
+	bool test() const;
+	virtual bool run() const =0;
+	const char* getName() const
+		{return name;}
+protected:
+	static bool smolyak_normal_moments(const GeneralMatrix& m, int imom, int level);
+	static bool product_normal_moments(const GeneralMatrix& m, int imom, int level);
+	static bool qmc_normal_moments(const GeneralMatrix& m, int imom, int level);
+	static bool smolyak_product_cube(const VectorFunction& func, const Vector& res,
+									 double tol, int level);
+	static bool qmc_cube(const VectorFunction& func, double res, double tol, int level);
+};
+
+bool TestRunnable::test() const
+{
+	printf("Running test <%s>\n",name);
+	bool passed;
+	{
+		WallTimer tim("Wall clock time ", false);
+		passed = run();
+	}
+	if (passed) {
+		printf("............................ passed\n\n");
+		return passed;
+	} else {
+		printf("............................ FAILED\n\n");
+		return passed;
+	}
+}
+
+
+/****************************************************/
+/*     definition of TestRunnable static methods    */
+/****************************************************/
+bool TestRunnable::smolyak_normal_moments(const GeneralMatrix& m, int imom, int level)
+{
+	// first make m*m' and then Cholesky factor
+	GeneralMatrix mtr(m, "transpose");
+	GeneralMatrix msq(m, mtr);
+
+	// make vector function
+	int dim = m.numRows();
+	TensorPower tp(dim, imom);
+	GaussConverterFunction func(tp, msq);
+
+	// smolyak quadrature
+	Vector smol_out(UFSTensor::calcMaxOffset(dim, imom));
+	{
+		WallTimer tim("\tSmolyak quadrature time:         ");
+		GaussHermite gs;
+		SmolyakQuadrature quad(dim, level, gs);
+		quad.integrate(func, level, num_threads, smol_out);
+		printf("\tNumber of Smolyak evaluations:    %d\n", quad.numEvals(level));
+	}
+
+	// check against theoretical moments
+	UNormalMoments moments(imom, msq);
+	smol_out.add(-1.0, (moments.get(Symmetry(imom)))->getData());
+	printf("\tError:                         %16.12g\n", smol_out.getMax());
+	return smol_out.getMax() < 1.e-7;
+}
+
+bool TestRunnable::product_normal_moments(const GeneralMatrix& m, int imom, int level)
+{
+	// first make m*m' and then Cholesky factor
+	GeneralMatrix mtr(m, "transpose");
+	GeneralMatrix msq(m, mtr);
+
+	// make vector function
+	int dim = m.numRows();
+	TensorPower tp(dim, imom);
+	GaussConverterFunction func(tp, msq);
+
+	// product quadrature
+	Vector prod_out(UFSTensor::calcMaxOffset(dim, imom));
+	{
+		WallTimer tim("\tProduct quadrature time:         ");
+		GaussHermite gs;
+		ProductQuadrature quad(dim, gs);
+		quad.integrate(func, level, num_threads, prod_out);
+		printf("\tNumber of product evaluations:    %d\n", quad.numEvals(level));
+	}
+
+	// check against theoretical moments
+	UNormalMoments moments(imom, msq);
+	prod_out.add(-1.0, (moments.get(Symmetry(imom)))->getData());
+	printf("\tError:                         %16.12g\n", prod_out.getMax());
+	return prod_out.getMax() < 1.e-7;
+}
+
+bool TestRunnable::qmc_normal_moments(const GeneralMatrix& m, int imom, int level)
+{
+	// first make m*m' and then Cholesky factor
+	GeneralMatrix mtr(m, "transpose");
+	GeneralMatrix msq(m, mtr);
+	GeneralMatrix mchol(msq);
+	int rows = mchol.numRows();
+	for (int i = 0; i < rows; i++)
+		for (int j = i+1; j < rows; j++)
+			mchol.get(i,j) = 0.0;
+	int info;
+	LAPACK_dpotrf("L", &rows, mchol.base(), &rows, &info);
+
+	// make vector function
+	MomentFunction func(mchol, imom);
+
+	// permutation schemes
+	WarnockPerScheme wps;
+	ReversePerScheme rps;
+	IdentityPerScheme ips;
+	PermutationScheme* scheme[] = {&wps, &rps, &ips};
+	const char* labs[] = {"Warnock", "Reverse", "Identity"};
+
+	// theoretical result
+	int dim = mchol.numRows();
+	UNormalMoments moments(imom, msq);
+	Vector res((const Vector&)((moments.get(Symmetry(imom)))->getData()));
+
+	// quasi monte carlo normal quadrature
+	double max_error = 0.0;
+	Vector qmc_out(UFSTensor::calcMaxOffset(dim, imom));
+	for (int i = 0; i < 3; i++) {
+		{
+			char mes[100];
+			sprintf(mes, "\tQMC normal quadrature time %8s:         ", labs[i]);
+			WallTimer tim(mes);
+			QMCarloNormalQuadrature quad(dim, level, *(scheme[i]));
+			quad.integrate(func, level, num_threads, qmc_out);
+		}
+		qmc_out.add(-1.0, res);
+		printf("\tError %8s:                         %16.12g\n", labs[i], qmc_out.getMax());
+		if (qmc_out.getMax() > max_error) {
+			max_error = qmc_out.getMax();
+		}
+	}
+
+	return max_error < 1.e-7;
+}
+
+
+bool TestRunnable::smolyak_product_cube(const VectorFunction& func, const Vector& res,
+										double tol, int level)
+{
+	if (res.length() != func.outdim()) {
+		fprintf(stderr, "Incompatible dimensions of check value and function.\n");
+		exit(1);
+	}
+
+	GaussLegendre glq;
+	Vector out(func.outdim());
+	double smol_error;
+	double prod_error;
+	{
+		WallTimer tim("\tSmolyak quadrature time:         ");
+		SmolyakQuadrature quad(func.indim(), level, glq);
+		quad.integrate(func, level, num_threads, out);
+		out.add(-1.0, res);
+		smol_error = out.getMax();
+		printf("\tNumber of Smolyak evaluations:    %d\n", quad.numEvals(level));
+		printf("\tError:                            %16.12g\n", smol_error);
+	}
+	{
+		WallTimer tim("\tProduct quadrature time:         ");
+		ProductQuadrature quad(func.indim(), glq);
+		quad.integrate(func, level, num_threads, out);
+		out.add(-1.0, res);
+		prod_error = out.getMax();
+		printf("\tNumber of product evaluations:    %d\n", quad.numEvals(level));
+		printf("\tError:                            %16.12g\n", prod_error);
+	}
+
+	return smol_error < tol && prod_error < tol;
+}
+
+bool TestRunnable::qmc_cube(const VectorFunction& func, double res, double tol, int level)
+{
+	Vector r(1);
+	double error1;
+	{
+		WallTimer tim("\tQuasi-Monte Carlo (Warnock scrambling) time:  ");
+		WarnockPerScheme wps;
+		QMCarloCubeQuadrature qmc(func.indim(), level, wps);
+//		qmc.savePoints("warnock.txt", level);
+		qmc.integrate(func, level, num_threads, r);
+		error1 = std::max(res - r[0], r[0] - res);
+		printf("\tQuasi-Monte Carlo (Warnock scrambling) error: %16.12g\n",
+			   error1);
+	}
+	double error2;
+	{
+		WallTimer tim("\tQuasi-Monte Carlo (reverse scrambling) time:  ");
+		ReversePerScheme rps;
+		QMCarloCubeQuadrature qmc(func.indim(), level, rps);
+//		qmc.savePoints("reverse.txt", level);
+		qmc.integrate(func, level, num_threads, r);
+		error2 = std::max(res - r[0], r[0] - res);
+		printf("\tQuasi-Monte Carlo (reverse scrambling) error: %16.12g\n",
+			   error2);
+	}
+	double error3;
+	{
+		WallTimer tim("\tQuasi-Monte Carlo (no scrambling) time:       ");
+		IdentityPerScheme ips;
+		QMCarloCubeQuadrature qmc(func.indim(), level, ips);
+//		qmc.savePoints("identity.txt", level);
+		qmc.integrate(func, level, num_threads, r);
+		error3 = std::max(res - r[0], r[0] - res);
+		printf("\tQuasi-Monte Carlo (no scrambling) error:      %16.12g\n",
+			   error3);
+	}
+
+
+	return error1 < tol && error2 < tol && error3 < tol;
+}
+
+/****************************************************/
+/*     definition of TestRunnable subclasses        */
+/****************************************************/
+class SmolyakNormalMom1 : public TestRunnable {
+public:
+	SmolyakNormalMom1()
+		: TestRunnable("Smolyak normal moments (dim=2, level=4, order=4)", 4, 2) {}
+
+	bool run() const
+		{
+			GeneralMatrix m(2,2);
+			m.zeros(); m.get(0,0)=1; m.get(1,1)=1;
+			return smolyak_normal_moments(m, 4, 4);
+		}
+};
+
+class SmolyakNormalMom2 : public TestRunnable {
+public:
+	SmolyakNormalMom2()
+		: TestRunnable("Smolyak normal moments (dim=3, level=8, order=8)", 8, 3) {}
+
+	bool run() const
+		{
+			GeneralMatrix m(3,3);
+			m.zeros();
+			m.get(0,0)=1; m.get(0,2)=0.5; m.get(1,1)=1;
+			m.get(1,0)=0.5;m.get(2,2)=2;m.get(2,1)=4;
+			return smolyak_normal_moments(m, 8, 8);
+		}
+};
+
+class ProductNormalMom1 : public TestRunnable {
+public:
+	ProductNormalMom1()
+		: TestRunnable("Product normal moments (dim=2, level=4, order=4)", 4, 2) {}
+
+	bool run() const
+		{
+			GeneralMatrix m(2,2);
+			m.zeros(); m.get(0,0)=1; m.get(1,1)=1;
+			return product_normal_moments(m, 4, 4);
+		}
+};
+
+class ProductNormalMom2 : public TestRunnable {
+public:
+	ProductNormalMom2()
+		: TestRunnable("Product normal moments (dim=3, level=8, order=8)", 8, 3) {}
+
+	bool run() const
+		{
+			GeneralMatrix m(3,3);
+			m.zeros();
+			m.get(0,0)=1; m.get(0,2)=0.5; m.get(1,1)=1;
+			m.get(1,0)=0.5;m.get(2,2)=2;m.get(2,1)=4;
+			return product_normal_moments(m, 8, 8);
+		}
+};
+
+class QMCNormalMom1 : public TestRunnable {
+public:
+	QMCNormalMom1()
+		: TestRunnable("QMC normal moments (dim=2, level=1000, order=4)", 4, 2) {}
+
+	bool run() const
+		{
+			GeneralMatrix m(2,2);
+			m.zeros(); m.get(0,0)=1; m.get(1,1)=1;
+			return qmc_normal_moments(m, 4, 1000);
+		}
+};
+
+class QMCNormalMom2 : public TestRunnable {
+public:
+	QMCNormalMom2()
+		: TestRunnable("QMC normal moments (dim=3, level=10000, order=8)", 8, 3) {}
+
+	bool run() const
+		{
+			GeneralMatrix m(3,3);
+			m.zeros();
+			m.get(0,0)=1; m.get(0,2)=0.5; m.get(1,1)=1;
+			m.get(1,0)=0.5;m.get(2,2)=2;m.get(2,1)=4;
+			return qmc_normal_moments(m, 8, 10000);
+		}
+};
+
+
+
+// note that here we pass 1,1 to tls since smolyak has its own PascalTriangle
+class F1GaussLegendre : public TestRunnable {
+public:
+	F1GaussLegendre()
+		: TestRunnable("Function1 Gauss-Legendre (dim=6, level=13)", 1, 1) {}
+
+	bool run() const
+		{
+			Function1Trans f1(6);
+			Vector res(1); res[0] = 1.0;
+			return smolyak_product_cube(f1, res, 1e-2, 13);
+		}
+};
+
+
+class F1QuasiMCarlo : public TestRunnable {
+public:
+	F1QuasiMCarlo()
+		: TestRunnable("Function1 Quasi-Monte Carlo (dim=6, level=1000000)", 1, 1) {}
+
+	bool run() const
+		{
+			Function1 f1(6);
+			return qmc_cube(f1, 1.0, 1.e-4, 1000000);
+		}
+};
+
+int main()
+{
+	TestRunnable* all_tests[50];
+	// fill in vector of all tests
+	int num_tests = 0;
+	all_tests[num_tests++] = new SmolyakNormalMom1();
+	all_tests[num_tests++] = new SmolyakNormalMom2();
+	all_tests[num_tests++] = new ProductNormalMom1();
+	all_tests[num_tests++] = new ProductNormalMom2();
+	all_tests[num_tests++] = new QMCNormalMom1();
+	all_tests[num_tests++] = new QMCNormalMom2();
+/*
+	all_tests[num_tests++] = new F1GaussLegendre();
+	all_tests[num_tests++] = new F1QuasiMCarlo();
+*/
+	// find maximum dimension and maximum nvar
+	int dmax=0;
+	int nvmax = 0;
+	for (int i = 0; i < num_tests; i++) {
+		if (dmax < all_tests[i]->dim)
+			dmax = all_tests[i]->dim;
+		if (nvmax < all_tests[i]->nvar)
+			nvmax = all_tests[i]->nvar;
+	}
+	tls.init(dmax, nvmax); // initialize library
+	THREAD_GROUP::max_parallel_threads = num_threads;
+
+	// launch the tests
+	int success = 0;
+	for (int i = 0; i < num_tests; i++) {
+		try {
+			if (all_tests[i]->test())
+				success++;
+		} catch (const TLException& e) {
+			printf("Caught TL exception in <%s>:\n", all_tests[i]->getName());
+			e.print();
+		} catch (SylvException& e) {
+			printf("Caught Sylv exception in <%s>:\n", all_tests[i]->getName());
+			e.printMessage();
+		}
+	}
+
+	printf("There were %d tests that failed out of %d tests run.\n",
+		   num_tests - success, num_tests);
+
+	// destroy
+	for (int i = 0; i < num_tests; i++) {
+		delete all_tests[i];
+	}
+
+	return 0;
+}
diff --git a/dynare++/kord/Makefile b/dynare++/kord/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..2e43fd8c575d2c24910ecbde707f04da5348b2ac
--- /dev/null
+++ b/dynare++/kord/Makefile
@@ -0,0 +1,78 @@
+# $Id: Makefile 2344 2009-02-09 20:36:08Z michel $
+# Copyright 2004, Ondra Kamenik
+
+include ../Makefile.include
+
+CC_FLAGS := -I../tl/cc -I../sylv/cc -I../integ/cc $(CC_FLAGS)
+
+sylvcppsource := $(wildcard ../sylv/cc/*.cpp)
+sylvhsource := $(wildcard ../sylv/cc/*.h)
+sylvobjects := $(patsubst %.cpp, %.o, $(sylvcppsource)) 
+tlcwebsource := $(wildcard ../tl/cc/*.cweb)
+tlcppsource := $(patsubst %.cweb,%.cpp,$(tlcwebsource))
+tlhwebsource := $(wildcard ../tl/cc/*.hweb)
+tlhsource := $(patsubst %.hweb,%.h,$(tlhwebsource))
+tlobjects := $(patsubst %.cweb,%.o,$(tlcwebsource))
+integhwebsource := $(wildcard ../integ/cc/*.hweb)
+integhsource := $(patsubst %.hweb,%.h,$(integhwebsource))
+cwebsource := $(wildcard *.cweb)
+cppsource := $(patsubst %.cweb,%.cpp,$(cwebsource)) 
+objects := $(patsubst %.cweb,%.o,$(cwebsource))
+hwebsource := $(wildcard *.hweb)
+hsource := $(patsubst %.hweb,%.h,$(hwebsource))
+
+../integ/cc/dummy.ch:
+	make -C ../integ/cc dummy.ch
+
+../tl/cc/dummy.ch:
+	make -C ../tl/cc dummy.ch
+
+../tl/cc/%.cpp: ../tl/cc/%.cweb ../tl/cc/dummy.ch
+	make -C ../tl/cc $*.cpp
+
+../tl/cc/%.h: ../tl/cc/%.hweb ../tl/cc/dummy.ch
+	make -C ../tl/cc $*.h
+
+../integ/cc/%.h: ../integ/cc/%.hweb ../integ/cc/dummy.ch
+	make -C ../integ/cc $*.h
+
+../tl/cc/%.o: ../tl/cc/%.cpp $(tlhsource)
+	make -C ../tl/cc $*.o
+
+../sylv/cc/%.o: ../sylv/cc/%.cpp $(sylvhsource)
+	make -C ../sylv/cc $*.o
+
+dummy.ch:
+	touch dummy.ch
+
+%.cpp: %.cweb dummy.ch
+	ctangle -bhp $*.cweb dummy.ch $*.cpp
+
+%.h: %.hweb dummy.ch
+	ctangle -bhp $*.hweb dummy.ch $*.h
+
+%.o: %.cpp $(hwebsource) $(hsource) $(tlhwebsource) $(tlhsource) \
+     $(integhwebsource) $(integhsource)
+	$(CC) $(CC_FLAGS) -c $*.cpp
+
+tests: $(hwebsource) $(cwebsource) $(hsource) $(cppsource) \
+       $(tlhwebsource) $(tlcwebsource) $(tlhsource) $(tlcppsource) \
+       $(sylvhsource) $(sylvcppsource) \
+       tests.o $(objects) $(tlobjects) $(sylvobjects)
+	$(CC) $(CC_FLAGS) $(objects) $(tlobjects) $(sylvobjects) tests.o -o tests $(LD_LIBS) 
+
+kord.pdf: doc
+
+doc: main.web $(hwebsource) $(cwebsource)
+	cweave -bhp main.web
+	pdftex main
+	mv main.pdf kord.pdf 
+
+clear:
+	rm -f $(hsource)
+	rm -f $(cppsource)
+	rm -f *.o
+	rm -f tests
+	make -C ../tl/cc clear
+	make -C ../sylv/cc clear
+	rm -f main.{idx,dvi,pdf,scn,log,tex,toc}
diff --git a/dynare++/kord/approximation.cweb b/dynare++/kord/approximation.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..4fdd8a0033d5eb4b696979a5e83fde7f62215660
--- /dev/null
+++ b/dynare++/kord/approximation.cweb
@@ -0,0 +1,421 @@
+@q $Id: approximation.cweb 2344 2009-02-09 20:36:08Z michel $ @>
+@q Copyright 2005, Ondra Kamenik @>
+
+@ Start of {\tt approximation.cpp} file.
+
+@c
+#include "kord_exception.h"
+#include "approximation.h"
+#include "first_order.h"
+#include "korder_stoch.h"
+
+@<|ZAuxContainer| constructor code@>;
+@<|ZAuxContainer::getType| code@>;
+@<|Approximation| constructor code@>;
+@<|Approximation| destructor code@>;
+@<|Approximation::getFoldDecisionRule| code@>;
+@<|Approximation::getUnfoldDecisionRule| code@>;
+@<|Approximation::approxAtSteady| code@>;
+@<|Approximation::walkStochSteady| code@>;
+@<|Approximation::saveRuleDerivs| code@>;
+@<|Approximation::calcStochShift| code@>;
+@<|Approximation::check| code@>;
+@<|Approximation::calcYCov| code@>;
+
+@ 
+@<|ZAuxContainer| constructor code@>=
+ZAuxContainer::ZAuxContainer(const _Ctype* gss, int ngss, int ng, int ny, int nu)
+	: StackContainer<FGSTensor>(4,1)
+{
+	stack_sizes[0] = ngss; stack_sizes[1] = ng;
+	stack_sizes[2] = ny; stack_sizes[3] = nu;
+	conts[0] = gss;
+	calculateOffsets();
+}
+
+
+@ The |getType| method corresponds to
+$f(g^{**}(y^*,u',\sigma),0,0,0)$. For the first argument we return
+|matrix|, for other three we return |zero|.
+
+@<|ZAuxContainer::getType| code@>=
+ZAuxContainer::itype ZAuxContainer::getType(int i, const Symmetry& s) const
+{
+	if (i == 0)
+		if (s[2] > 0)
+			return zero;
+		else
+			return matrix;
+	return zero;
+}
+
+
+@ 
+@<|Approximation| constructor code@>=
+Approximation::Approximation(DynamicModel& m, Journal& j, int ns, bool dr_centr, double qz_crit)
+	: model(m), journal(j), rule_ders(NULL), rule_ders_ss(NULL), fdr(NULL), udr(NULL),
+	  ypart(model.nstat(), model.npred(), model.nboth(), model.nforw()),
+	  mom(UNormalMoments(model.order(), model.getVcov())), nvs(4), steps(ns),
+	  dr_centralize(dr_centr), qz_criterium(qz_crit), ss(ypart.ny(), steps+1)
+{
+	nvs[0] = ypart.nys(); nvs[1] = model.nexog();
+	nvs[2] = model.nexog(); nvs[3] = 1;
+
+	ss.nans();
+}
+
+@ 
+@<|Approximation| destructor code@>=
+Approximation::~Approximation()
+{
+	if (rule_ders_ss) delete rule_ders_ss;
+	if (rule_ders) delete rule_ders;
+	if (fdr) delete fdr;
+	if (udr) delete udr;
+}
+
+@ This just returns |fdr| with a check that it is created.
+@<|Approximation::getFoldDecisionRule| code@>=
+const FoldDecisionRule& Approximation::getFoldDecisionRule() const
+{
+	KORD_RAISE_IF(fdr == NULL,
+				  "Folded decision rule has not been created in Approximation::getFoldDecisionRule");
+	return *fdr;
+}
+
+
+@ This just returns |udr| with a check that it is created.
+@<|Approximation::getUnfoldDecisionRule| code@>=
+const UnfoldDecisionRule& Approximation::getUnfoldDecisionRule() const
+{
+	KORD_RAISE_IF(udr == NULL,
+				  "Unfolded decision rule has not been created in Approximation::getUnfoldDecisionRule");
+	return *udr;
+}
+
+
+@ This method assumes that the deterministic steady state is
+|model.getSteady()|. It makes an approximation about it, stores the
+derivatives to |rule_ders| and |rule_ders_ss|, and runs a |check|
+for $\sigma=0$.
+
+@<|Approximation::approxAtSteady| code@>=
+void Approximation::approxAtSteady()
+{
+	model.calcDerivativesAtSteady();
+	FirstOrder fo(model.nstat(), model.npred(), model.nboth(), model.nforw(),
+				  model.nexog(), *(model.getModelDerivatives().get(Symmetry(1))),
+				  journal, qz_criterium);
+	KORD_RAISE_IF_X(! fo.isStable(),
+					"The model is not Blanchard-Kahn stable",
+					KORD_MD_NOT_STABLE);
+
+	if (model.order() >= 2) {
+		KOrder korder(model.nstat(), model.npred(), model.nboth(), model.nforw(),
+					  model.getModelDerivatives(), fo.getGy(), fo.getGu(),
+					  model.getVcov(), journal);
+		korder.switchToFolded();
+		for (int k = 2; k <= model.order(); k++)
+			korder.performStep<KOrder::fold>(k);
+		
+		saveRuleDerivs(korder.getFoldDers());
+	} else {
+		FirstOrderDerivs<KOrder::fold> fo_ders(fo);
+		saveRuleDerivs(fo_ders);
+	}
+	check(0.0);
+}
+
+@ This is the core routine of |Approximation| class.
+
+First we solve for the approximation about the deterministic steady
+state. Then we perform |steps| cycles toward the stochastic steady
+state. Each cycle moves the size of shocks by |dsigma=1.0/steps|. At
+the end of a cycle, we have |rule_ders| being the derivatives at
+stochastic steady state for $\sigma=sigma\_so\_far+dsigma$ and
+|model.getSteady()| being the steady state.
+
+If the number of |steps| is zero, the decision rule |dr| at the bottom
+is created from derivatives about deterministic steady state, with
+size of $\sigma=1$. Otherwise, the |dr| is created from the
+approximation about stochastic steady state with $\sigma=0$.
+
+Within each cycle, we first make a backup of the last steady (from
+initialization or from a previous cycle), then we calculate the fix
+point of the last rule with $\sigma=dsigma$. This becomes a new steady
+state at the $\sigma=sigma\_so\_far+dsigma$. We calculate expectations
+of $g^{**}(y,\sigma\eta_{t+1},\sigma)$ expressed as a Taylor expansion
+around the new $\sigma$ and the new steady state. Then we solve for
+the decision rule with explicit $g^{**}$ at $t+1$ and save the rule.
+
+After we reached $\sigma=1$, the decision rule is formed.
+
+The byproduct of this method is the matrix |ss|, whose columns are
+steady states for subsequent $\sigma$s. The first column is the
+deterministic steady state, the last column is the stochastic steady
+state for a full size of shocks ($\sigma=1$). There are |steps+1|
+columns.
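+For instance, with |steps| equal to $4$, the cycles calculate
+approximations at $\sigma=0.25$, $0.5$, $0.75$ and $1$, and |ss| ends up
+with $5$ columns, the first one being the deterministic steady state.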
+
+@<|Approximation::walkStochSteady| code@>=
+void Approximation::walkStochSteady()
+{
+	@<initial approximation at deterministic steady@>;
+	double sigma_so_far = 0.0;
+	double dsigma = (steps == 0)? 0.0 : 1.0/steps;
+	for (int i = 1; i <= steps; i++) {
+		JournalRecordPair pa(journal);
+		pa << "Approximation about stochastic steady for sigma=" << sigma_so_far+dsigma << endrec;
+
+		Vector last_steady((const Vector&)model.getSteady());
+
+		@<calculate fix-point of the last rule for |dsigma|@>;
+		@<calculate |hh| as expectations of the last $g^{**}$@>;
+		@<form |KOrderStoch|, solve and save@>;
+
+		check(sigma_so_far+dsigma);
+		sigma_so_far += dsigma;
+	}
+
+	@<construct the resulting decision rules@>;
+}
+
+@ Here we solve for the deterministic steady state, calculate
+approximation at the deterministic steady and save the steady state
+to |ss|.
+
+@<initial approximation at deterministic steady@>=
+	model.solveDeterministicSteady();
+	approxAtSteady();
+	Vector steady0(ss, 0);
+	steady0 = (const Vector&)model.getSteady();
+
+@ We form the |DRFixPoint| object from the last rule with
+$\sigma=dsigma$. Then we save the steady state to |ss|. The new steady
+is also put to |model.getSteady()|.
+
+@<calculate fix-point of the last rule for |dsigma|@>=
+	DRFixPoint<KOrder::fold> fp(*rule_ders, ypart, model.getSteady(), dsigma);
+	bool converged = fp.calcFixPoint(DecisionRule::horner, model.getSteady());
+	JournalRecord rec(journal);
+	rec << "Fix point calcs: iter=" << fp.getNumIter() << ", newton_iter="
+		<< fp.getNewtonTotalIter() << ", last_newton_iter=" << fp.getNewtonLastIter() << ".";
+	if (converged)
+		rec << " Converged." << endrec;
+	else {
+		rec << " Not converged!!" << endrec;
+		KORD_RAISE_X("Fix point calculation not converged", KORD_FP_NOT_CONV);
+	}
+	Vector steadyi(ss, i);
+	steadyi = (const Vector&)model.getSteady();
+
+@ We form the steady state shift |dy|, which is the new steady state
+minus the old steady state. Then we create |StochForwardDerivs|
+object, which calculates the derivatives of $g^{**}$ expectations at
+new sigma and new steady.
+
+@<calculate |hh| as expectations of the last $g^{**}$@>=
+	Vector dy((const Vector&)model.getSteady());
+	dy.add(-1.0, last_steady);
+
+	StochForwardDerivs<KOrder::fold> hh(ypart, model.nexog(), *rule_ders_ss, mom, dy,
+										dsigma, sigma_so_far);
+	JournalRecord rec1(journal);
+	rec1 << "Calculation of g** expectations done" << endrec;
+
+
+@ We calculate derivatives of the model at the new steady, form
+|KOrderStoch| object and solve, and save the rule.
+
+@<form |KOrderStoch|, solve and save@>=
+	model.calcDerivativesAtSteady();
+	KOrderStoch korder_stoch(ypart, model.nexog(), model.getModelDerivatives(),
+							 hh, journal);
+	for (int d = 1; d <= model.order(); d++) {
+		korder_stoch.performStep<KOrder::fold>(d);
+	}
+	saveRuleDerivs(korder_stoch.getFoldDers());
+
+
+@ 
+@<construct the resulting decision rules@>=
+	if (fdr) {
+		delete fdr;
+		fdr = NULL;
+	}
+	if (udr) {
+		delete udr;
+		udr = NULL;
+	}
+
+	fdr = new FoldDecisionRule(*rule_ders, ypart, model.nexog(),
+							   model.getSteady(), 1.0-sigma_so_far);
+	if (steps == 0 && dr_centralize) {
+		@<centralize decision rule for zero steps@>;
+	}
+
+
+@ 
+@<centralize decision rule for zero steps@>=
+	DRFixPoint<KOrder::fold> fp(*rule_ders, ypart, model.getSteady(), 1.0);
+	bool converged = fp.calcFixPoint(DecisionRule::horner, model.getSteady());
+	JournalRecord rec(journal);
+	rec << "Fix point calcs: iter=" << fp.getNumIter() << ", newton_iter="
+		<< fp.getNewtonTotalIter() << ", last_newton_iter=" << fp.getNewtonLastIter() << ".";
+	if (converged)
+		rec << " Converged." << endrec;
+	else {
+		rec << " Not converged!!" << endrec;
+		KORD_RAISE_X("Fix point calculation not converged", KORD_FP_NOT_CONV);
+	}
+
+	{
+		JournalRecordPair recp(journal);
+		recp << "Centralizing about fix-point." << endrec;
+		FoldDecisionRule* dr_backup = fdr;
+		fdr = new FoldDecisionRule(*dr_backup, model.getSteady());
+		delete dr_backup;
+	}
+
+
+@ Here we simply make a new hard copy of the given rule |rule_ders|,
+and make a new container of in-place subtensors of the derivatives
+corresponding to forward looking variables. The given container comes
+from a temporary object and will be destroyed.
+ 
+@<|Approximation::saveRuleDerivs| code@>=
+void Approximation::saveRuleDerivs(const FGSContainer& g)
+{
+	if (rule_ders) {
+		delete rule_ders;
+		delete rule_ders_ss;
+	}
+	rule_ders = new FGSContainer(g);
+	rule_ders_ss = new FGSContainer(4);
+	for (FGSContainer::iterator run = (*rule_ders).begin(); run != (*rule_ders).end(); ++run) {
+		FGSTensor* ten = new FGSTensor(ypart.nstat+ypart.npred, ypart.nyss(), *((*run).second));
+		rule_ders_ss->insert(ten);
+	}
+}
+
+@ This method calculates a shift of the system equations due to
+integrating shocks at a given $\sigma$ and current steady state. More precisely, if
+$$F(y,u,u',\sigma)=f(g^{**}(g^*(y,u,\sigma),u',\sigma),g(y,u,\sigma),y,u)$$
+then the method returns a vector
+$$\sum_{d=1}{1\over d!}\sigma^d\left[F_{u'^d}\right]_{\alpha_1\ldots\alpha_d}
+\Sigma^{\alpha_1\ldots\alpha_d}$$
+
+For a calculation of $\left[F_{u'^d}\right]$ we use |@<|ZAuxContainer|
+class declaration@>|, so we create its object. In each cycle we
+calculate $\left[F_{u'^d}\right]$@q'@>, contract it with the shock moments,
+and add the ${\sigma^d\over d!}$ multiple to the result.
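+Since the odd moments of the shocks vanish, only even $d$ contribute; for
+instance, the lowest term of the sum is the $d=2$ contribution
+$${\sigma^2\over 2}\left[F_{u'u'}\right]_{\alpha_1\alpha_2}
+\Sigma^{\alpha_1\alpha_2}.$$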
+
+@<|Approximation::calcStochShift| code@>=
+void Approximation::calcStochShift(Vector& out, double at_sigma) const
+{
+	KORD_RAISE_IF(out.length() != ypart.ny(),
+				  "Wrong length of output vector for Approximation::calcStochShift");
+	out.zeros();
+
+	ZAuxContainer zaux(rule_ders_ss, ypart.nyss(), ypart.ny(),
+					   ypart.nys(), model.nexog());
+
+	int dfac = 1;
+	for (int d = 1; d <= rule_ders->getMaxDim(); d++, dfac*=d) {
+		if ( KOrder::is_even(d)) {
+			Symmetry sym(0,d,0,0);
+			@<calculate $F_{u'^d}$ via |ZAuxContainer|@>;@q'@>
+			@<multiply with shocks and add to result@>;
+		}
+	}
+}
+
+@ 
+@<calculate $F_{u'^d}$ via |ZAuxContainer|@>=
+	FGSTensor* ten = new FGSTensor(ypart.ny(), TensorDimens(sym, nvs));
+	ten->zeros();
+	for (int l = 1; l <= d; l++) {
+		const FSSparseTensor* f = model.getModelDerivatives().get(Symmetry(l));
+		zaux.multAndAdd(*f, *ten);
+	}
+
+@
+@<multiply with shocks and add to result@>=
+	FGSTensor* tmp = new FGSTensor(ypart.ny(), TensorDimens(Symmetry(0,0,0,0), nvs));
+	tmp->zeros();
+	ten->contractAndAdd(1, *tmp, *(mom.get(Symmetry(d))));
+
+	out.add(pow(at_sigma,d)/dfac, tmp->getData());
+	delete ten;
+	delete tmp;
+
+
+@ This method calculates and reports
+$$f(\bar y)+\sum_{d=1}{1\over d!}\sigma^d\left[F_{u'^d}\right]_{\alpha_1\ldots\alpha_d}
+\Sigma^{\alpha_1\ldots\alpha_d}$$
+at $\bar y$, zero shocks and $\sigma$. This number should be zero.
+
+We evaluate the error both at a given $\sigma$ and $\sigma=1.0$.
+
+@<|Approximation::check| code@>=
+void Approximation::check(double at_sigma) const
+{
+	Vector stoch_shift(ypart.ny());
+	Vector system_resid(ypart.ny());
+	Vector xx(model.nexog());
+	xx.zeros();
+	model.evaluateSystem(system_resid, model.getSteady(), xx);
+	calcStochShift(stoch_shift, at_sigma);
+	stoch_shift.add(1.0, system_resid);
+	JournalRecord rec1(journal);
+	rec1 << "Error of current approximation for shocks at sigma " << at_sigma
+		 << " is " << stoch_shift.getMax() << endrec;
+	calcStochShift(stoch_shift, 1.0);
+	stoch_shift.add(1.0, system_resid);
+	JournalRecord rec2(journal);
+	rec2 << "Error of current approximation for full shocks is " << stoch_shift.getMax() << endrec;
+}
+
+@ The method returns unconditional variance of endogenous variables
+based on the first order. The first order approximation looks like
+$$\hat y_t=g_{y^*}\hat y^*_{t-1}+g_uu_t$$
+where $\hat y$ denotes a deviation from the steady state. It can be written as
+$$\hat y_t=\left[0\, g_{y^*}\, 0\right]\hat y_{t-1}+g_uu_t$$
+which yields unconditional covariance $V$ for which
+$$V=GVG^T + g_u\Sigma g_u^T,$$
+where $G=[0\, g_{y^*}\, 0]$ and $\Sigma$ is the covariance of the shocks. 
+
+For solving this Lyapunov equation we use the Sylvester module, which
+solves equation of the type
+$$AX+BX(C\otimes\cdots\otimes C)=D$$
+So we invoke the Sylvester solver for the first dimension with $A=I$,
+$B=-G$, $C=G^T$ and $D=g_u\Sigma g_u^T$.
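+Indeed, with a single dimension these choices turn the Sylvester equation into
+$$X - GXG^T = g_u\Sigma g_u^T,$$
+which is exactly the Lyapunov equation above with $X=V$.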
+
+
+@<|Approximation::calcYCov| code@>=
+TwoDMatrix* Approximation::calcYCov() const
+{
+	const TwoDMatrix& gy = *(rule_ders->get(Symmetry(1,0,0,0)));
+	const TwoDMatrix& gu = *(rule_ders->get(Symmetry(0,1,0,0)));
+	TwoDMatrix G(model.numeq(), model.numeq());
+	G.zeros();
+	G.place(gy, 0, model.nstat());
+	TwoDMatrix B((const TwoDMatrix&)G);
+	B.mult(-1.0);
+	TwoDMatrix C(G, "transpose");
+	TwoDMatrix A(model.numeq(), model.numeq());
+	A.zeros();
+	for (int i = 0; i < model.numeq(); i++)
+		A.get( i,i)	= 1.0;
+
+	TwoDMatrix guSigma(gu, model.getVcov());
+	TwoDMatrix guTrans(gu, "transpose");
+	TwoDMatrix* X = new TwoDMatrix(guSigma, guTrans);
+	
+	GeneralSylvester gs(1, model.numeq(), model.numeq(), 0,
+						A.base(), B.base(), C.base(), X->base());
+	gs.solve();
+
+	return X;
+}
+
+@ End of {\tt approximation.cpp} file.
diff --git a/dynare++/kord/approximation.hweb b/dynare++/kord/approximation.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..8412b53ffbd3c829d9bf111a08657fff7f831131
--- /dev/null
+++ b/dynare++/kord/approximation.hweb
@@ -0,0 +1,157 @@
+@q $Id: approximation.hweb 2352 2009-09-03 19:18:15Z michel $ @>
+@q Copyright 2005, Ondra Kamenik @>
+
+@*2 Approximating model solution. Start of {\tt approximation.h} file.
+
+The class |Approximation| in this file is a main interface to the
+algorithms calculating approximations to the decision rule about
+deterministic and stochastic steady states.
+
+The approximation about a deterministic steady state is solved by
+classes |@<|FirstOrder| class declaration@>| and |@<|KOrder| class
+declaration@>|. The approximation about the stochastic steady state is
+solved by class |@<|KOrderStoch| class declaration@>| together with a
+method of |Approximation| class |@<|Approximation::walkStochSteady|
+code@>|.
+
+The approximation about the stochastic steady state is done with
+explicit expression of forward derivatives of $g^{**}$. More formally,
+we have to solve the decision rule $g$ from the implicit system:
+$$E_t(f(g^{**}(g^*(y^*,u_t,\sigma),u_{t+1},\sigma),g(y^*,u_t,\sigma),y_t,u_t))=0$$
+The term within the expectations can be Taylor expanded, and the
+expectation can be driven into the formula. However, when doing this
+at $\sigma\not=0$, the term $g^{**}$ at $\sigma\not=0$ is dependent on
+$u_{t+1}$ and thus the integral of its approximation includes all
+derivatives wrt. $u$ of $g^{**}$. Note that for $\sigma=0$, the
+derivatives of $g^{**}$ in this context are constant. This is the main
+difference between the approximation at deterministic steady
+($\sigma=0$) and stochastic steady ($\sigma\not=0$). This means that
+the $k$-order derivative of the above equation at $\sigma\not=0$ depends on
+all derivatives of $g^{**}$ (including those with order greater than
+$k$).
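+For instance, integrating the quadratic term of the expansion of $g^{**}$
+in $u_{t+1}$ turns (up to the scaling by $\sigma$)
+${1\over2}\left[g^{**}_{u'u'}\right]_{\alpha\beta}u_{t+1}^\alpha u_{t+1}^\beta$
+into ${1\over2}\left[g^{**}_{u'u'}\right]_{\alpha\beta}\Sigma^{\alpha\beta}$,
+so the forward derivatives enter the equation through the moments of the shocks.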
+
+The explicit expression of the forward $g^{**}$ means that the
+derivatives of $g$ are not solved simultaneously, but that the forward
+derivatives of $g^{**}$ are calculated as an extrapolation based on
+the approximation at lower $\sigma$. This is exactly what
+|@<|Approximation::walkStochSteady| code@>| does. It starts at the
+deterministic steady state, and in a few steps it gradually increases $\sigma$,
+at each step explicitly expressing the forward $g^{**}$ from the previous step.
+
+Further details on both solution methods are given in (todo: put
+references here when they exist).
+
+Very important note: all classes here used for calculation of decision
+rule approximation are folded. For the time being, it seems that the faa
+Di Bruno formula is quicker for folded tensors, and that is why we
+stick to folded tensors here. However, when the calculations are done, we
+also calculate the unfolded version, so that it is available for simulations
+and so on.
+
+@s ZAuxContainer int
+@s Approximation int
+@c
+#ifndef APPROXIMATION_H
+#define APPROXIMATION_H
+
+#include "dynamic_model.h"
+#include "decision_rule.h"
+#include "korder.h"
+#include "journal.h"
+
+@<|ZAuxContainer| class declaration@>;
+@<|Approximation| class declaration@>;
+
+#endif
+
+
+@ This class is used to calculate derivatives by faa Di Bruno of the
+$$f(g^{**}(g^*(y^*,u,\sigma),u',\sigma),g(y^*,u,\sigma),y^*,u)$$ with
+respect to $u'$. In order to keep it as simple as possible, the class
+represents an equivalent (with respect to $u'$) container for
+$f(g^{**}(y^*,u',\sigma),0,0,0)$. The class is used only for
+evaluation of approximation error in |Approximation| class, which is
+calculated in |Approximation::calcStochShift| method.
+
+Since it is a folded version, we inherit from
+|StackContainer<FGSTensor>| and |FoldedStackContainer|. To construct
+it, we need only the $g^{**}$ container and size of stacks.
+
+@<|ZAuxContainer| class declaration@>=
+class ZAuxContainer : public StackContainer<FGSTensor>, public FoldedStackContainer {
+public:@;
+	typedef StackContainer<FGSTensor>::_Ctype _Ctype;
+	typedef StackContainer<FGSTensor>::itype itype;
+	ZAuxContainer(const _Ctype* gss, int ngss, int ng, int ny, int nu);
+	itype getType(int i, const Symmetry& s) const;
+};
+
+
+
+@ This class provides an interface to approximation algorithms. The
+core method is |walkStochSteady| which calculates the approximation
+about stochastic steady state in a given number of steps. The number
+is given as a parameter |ns| of the constructor. If the number is
+equal to zero, the resulted approximation is about the deterministic
+steady state.
+
+An object is constructed from the |DynamicModel|, and the number of
+steps |ns|. Also, we pass a reference to journal. That's all. The
+result of the core method |walkStochSteady| is a decision rule |dr|
+and a matrix |ss| whose columns are steady states for increasing
+$\sigma$ during the walk. Both can be retrieved by public methods. The
+first column of the matrix is the deterministic steady state, the last
+is the stochastic steady state for the full size of shocks.
+
+The method |walkStochSteady| calls the following methods:
+|approxAtSteady| calculates an initial approximation about the
+deterministic steady, |saveRuleDerivs| saves derivatives of a rule for
+the following step in |rule_ders| and |rule_ders_ss| (see
+|@<|Approximation::saveRuleDerivs| code@>| for their description),
+|check| reports an error of the current approximation and
+|calcStochShift| (called from |check|) calculates a shift of the
+system equations due to uncertainty.
+
+|dr_centralize| is a new option. Dynare++ was automatically expressing
+results around the fixed point instead of the deterministic steady
+state; |dr_centralize| controls this behavior.
+
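+As an illustration only, a minimal usage sketch (it assumes a
+|DynamicModel| object |model| and a |Journal| object |journal| already
+exist; the number of steps and the QZ criterium below are arbitrary,
+not defaults):
+
+	// hypothetical sketch, not part of the library
+	#include "approximation.h"
+
+	void approximation_sketch(DynamicModel& model, Journal& journal)
+	{
+		// 5 steps towards the stochastic steady state, no centralization
+		Approximation app(model, journal, 5, false, 1.0 + 1e-6);
+		app.walkStochSteady();
+		// folded decision rule about the resulting steady state;
+		// it would then be passed on to simulations
+		const FoldDecisionRule& dr = app.getFoldDecisionRule();
+		// first order unconditional covariance of the endogenous variables
+		TwoDMatrix* ycov = app.calcYCov();
+		// columns of app.getSS() are the steady states for increasing sigma
+		delete ycov;
+	}
+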
+
+@<|Approximation| class declaration@>=
+class Approximation {
+	DynamicModel& model;
+	Journal& journal;
+	FGSContainer* rule_ders;
+	FGSContainer* rule_ders_ss;
+	FoldDecisionRule* fdr;
+	UnfoldDecisionRule* udr;
+	const PartitionY ypart;
+	const FNormalMoments mom;
+	IntSequence nvs;
+	int steps;
+	bool dr_centralize;
+	double qz_criterium;
+	TwoDMatrix ss;
+public:@;
+	Approximation(DynamicModel& m, Journal& j, int ns, bool dr_centr, double qz_crit);
+	virtual ~Approximation();
+
+	const FoldDecisionRule& getFoldDecisionRule() const;
+	const UnfoldDecisionRule& getUnfoldDecisionRule() const;
+	const TwoDMatrix& getSS() const
+		{@+ return ss;@+}
+	const DynamicModel& getModel() const
+		{@+ return model;@+}
+
+	void walkStochSteady();
+	TwoDMatrix* calcYCov() const;
+protected:@;
+	void approxAtSteady();
+	void calcStochShift(Vector& out, double at_sigma) const;
+	void saveRuleDerivs(const FGSContainer& g);
+	void check(double at_sigma) const;
+};
+
+
+@ End of {\tt approximation.h} file.
+
diff --git a/dynare++/kord/decision_rule.cweb b/dynare++/kord/decision_rule.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..c38f5c2843396306a72c2c032c3b43e591af345b
--- /dev/null
+++ b/dynare++/kord/decision_rule.cweb
@@ -0,0 +1,690 @@
+@q $Id: decision_rule.cweb 1896 2008-06-24 04:01:05Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@ Start of {\tt decision\_rule.cpp} file.
+@c
+
+#include "kord_exception.h"
+#include "decision_rule.h"
+#include "dynamic_model.h"
+
+#include "SymSchurDecomp.h"
+#include "cpplapack.h"
+
+#include <limits>
+
+template <>
+int DRFixPoint<KOrder::fold>::max_iter = 10000;
+template <>
+int DRFixPoint<KOrder::unfold>::max_iter = 10000;
+template <>
+double DRFixPoint<KOrder::fold>::tol = 1.e-10;
+template <>
+double DRFixPoint<KOrder::unfold>::tol = 1.e-10;
+template <>
+int DRFixPoint<KOrder::fold>::max_newton_iter = 50;
+template <>
+int DRFixPoint<KOrder::unfold>::max_newton_iter = 50;
+template <>
+int DRFixPoint<KOrder::fold>::newton_pause = 100;
+template <>
+int DRFixPoint<KOrder::unfold>::newton_pause = 100;
+@#
+@<|FoldDecisionRule| conversion from |UnfoldDecisionRule|@>;
+@<|UnfoldDecisionRule| conversion from |FoldDecisionRule|@>;
+@<|SimResults| destructor@>;
+@<|SimResults::simulate| code1@>;
+@<|SimResults::simulate| code2@>;
+@<|SimResults::addDataSet| code@>;
+@<|SimResults::writeMat4| code1@>;
+@<|SimResults::writeMat4| code2@>;
+@<|SimResultsStats::simulate| code@>;
+@<|SimResultsStats::writeMat4| code@>;
+@<|SimResultsStats::calcMean| code@>;
+@<|SimResultsStats::calcVcov| code@>;
+@<|SimResultsDynamicStats::simulate| code@>;
+@<|SimResultsDynamicStats::writeMat4| code@>;
+@<|SimResultsDynamicStats::calcMean| code@>;
+@<|SimResultsDynamicStats::calcVariance| code@>;
+@<|SimResultsIRF::simulate| code1@>;
+@<|SimResultsIRF::simulate| code2@>;
+@<|SimResultsIRF::calcMeans| code@>;
+@<|SimResultsIRF::calcVariances| code@>;
+@<|SimResultsIRF::writeMat4| code@>;
+@<|RTSimResultsStats::simulate| code1@>;
+@<|RTSimResultsStats::simulate| code2@>;
+@<|RTSimResultsStats::writeMat4| code@>;
+@<|IRFResults| constructor@>;
+@<|IRFResults| destructor@>;
+@<|IRFResults::writeMat4| code@>;
+@<|SimulationWorker::operator()()| code@>;
+@<|SimulationIRFWorker::operator()()| code@>;
+@<|RTSimulationWorker::operator()()| code@>;
+@<|RandomShockRealization::choleskyFactor| code@>;
+@<|RandomShockRealization::schurFactor| code@>;
+@<|RandomShockRealization::get| code@>;
+@<|ExplicitShockRealization| constructor code@>;
+@<|ExplicitShockRealization::get| code@>;
+@<|ExplicitShockRealization::addToShock| code@>;
+@<|GenShockRealization::get| code@>;
+
+@ 
+@<|FoldDecisionRule| conversion from |UnfoldDecisionRule|@>=
+FoldDecisionRule::FoldDecisionRule(const UnfoldDecisionRule& udr)
+	: DecisionRuleImpl<KOrder::fold>(ctraits<KOrder::fold>::Tpol(udr.nrows(), udr.nvars()),
+									 udr.ypart, udr.nu, udr.ysteady)
+{
+	for (ctraits<KOrder::unfold>::Tpol::const_iterator it = udr.begin();
+		 it != udr.end(); ++it) {
+		insert(new ctraits<KOrder::fold>::Ttensym(*((*it).second)));
+	}
+}
+
+@ 
+@<|UnfoldDecisionRule| conversion from |FoldDecisionRule|@>=
+UnfoldDecisionRule::UnfoldDecisionRule(const FoldDecisionRule& fdr)
+	: DecisionRuleImpl<KOrder::unfold>(ctraits<KOrder::unfold>::Tpol(fdr.nrows(), fdr.nvars()),
+									 fdr.ypart, fdr.nu, fdr.ysteady)
+{
+	for (ctraits<KOrder::fold>::Tpol::const_iterator it = fdr.begin();
+		 it != fdr.end(); ++it) {
+		insert(new ctraits<KOrder::unfold>::Ttensym(*((*it).second)));
+	}
+}
+
+@ 
+@<|SimResults| destructor@>=
+SimResults::~SimResults()
+{
+	for (int i = 0; i < getNumSets(); i++) {
+		delete data[i];
+		delete shocks[i];
+	}
+}
+
+@ This runs simulations with an output to journal file. Note that we
+report how many simulations had to be thrown out due to Nan or Inf.
+
+@<|SimResults::simulate| code1@>=
+void SimResults::simulate(int num_sim, const DecisionRule& dr, const Vector& start,
+						  const TwoDMatrix& vcov, Journal& journal)
+{
+	JournalRecordPair paa(journal);
+	paa << "Performing " << num_sim << " stochastic simulations for "
+		<< num_per << " periods" << endrec;
+	simulate(num_sim, dr, start, vcov);
+	int thrown = num_sim - data.size();
+	if (thrown > 0) {
+		JournalRecord rec(journal);
+		rec << "I had to throw " << thrown << " simulations away due to Nan or Inf" << endrec;
+	}
+}
+
+@ This runs a given number of simulations by creating
+|SimulationWorker| for each simulation and inserting them to the
+thread group.
+
+@<|SimResults::simulate| code2@>=
+void SimResults::simulate(int num_sim, const DecisionRule& dr, const Vector& start,
+						  const TwoDMatrix& vcov)
+{
+	std::vector<RandomShockRealization> rsrs;
+	rsrs.reserve(num_sim);
+
+	THREAD_GROUP gr;
+	for (int i = 0; i < num_sim; i++) {
+		RandomShockRealization sr(vcov, system_random_generator.int_uniform());
+		rsrs.push_back(sr);
+		THREAD* worker = new
+			SimulationWorker(*this, dr, DecisionRule::horner,
+							 num_per, start, rsrs.back());
+		gr.insert(worker);
+	}
+	gr.run();
+}
+
+@ This adds the data with the realized shocks. If the data is not
+finite, both the data and the shocks are thrown away.
+
+@<|SimResults::addDataSet| code@>=
+bool SimResults::addDataSet(TwoDMatrix* d, ExplicitShockRealization* sr)
+{
+	KORD_RAISE_IF(d->nrows() != num_y,
+				  "Incompatible number of rows for SimResults::addDataSets");
+	KORD_RAISE_IF(d->ncols() != num_per,
+				  "Incompatible number of cols for SimResults::addDataSets");
+	if (d->isFinite()) {
+		data.push_back(d);
+		shocks.push_back(sr);
+		return true;
+	} else {
+		delete d;
+		delete sr;
+		return false;
+	}
+}
+
+@ 
+@<|SimResults::writeMat4| code1@>=
+void SimResults::writeMat4(const char* base, const char* lname) const
+{
+	char matfile_name[100];
+	sprintf(matfile_name, "%s.mat", base);
+	FILE* out;
+	if (NULL != (out=fopen(matfile_name, "wb"))) {
+		writeMat4(out, lname);
+		fclose(out);
+	}
+}
+
+@ This saves the results as matrices with a given prefix and an index
+appended. If there is only one matrix, the index is not appended.
+
+@<|SimResults::writeMat4| code2@>=
+void SimResults::writeMat4(FILE* fd, const char* lname) const
+{
+	char tmp[100];
+	for (int i = 0; i < getNumSets(); i++) {
+		if (getNumSets() > 1)
+			sprintf(tmp, "%s_data%d", lname, i+1);
+		else
+			sprintf(tmp, "%s_data", lname);
+		ConstTwoDMatrix m(*(data[i]));
+		m.writeMat4(fd, tmp);
+	}
+}
+
+@ 
+@<|SimResultsStats::simulate| code@>=
+void SimResultsStats::simulate(int num_sim, const DecisionRule& dr,
+							   const Vector& start,
+							   const TwoDMatrix& vcov, Journal& journal)
+{
+	SimResults::simulate(num_sim, dr, start, vcov, journal);
+	{
+		JournalRecordPair paa(journal);
+		paa << "Calculating means from the simulations." << endrec;
+		calcMean();
+	}
+	{
+		JournalRecordPair paa(journal);
+		paa << "Calculating covariances from the simulations." << endrec;
+		calcVcov();
+	}
+}
+
+
+@ Here we do not save the data itself, we save only mean and vcov.
+@<|SimResultsStats::writeMat4| code@>=
+void SimResultsStats::writeMat4(FILE* fd, const char* lname) const
+{
+	char tmp[100];
+	sprintf(tmp, "%s_mean", lname);
+	ConstTwoDMatrix m(num_y, 1, mean.base());
+	m.writeMat4(fd, tmp);
+	sprintf(tmp, "%s_vcov", lname);
+	ConstTwoDMatrix(vcov).writeMat4(fd, tmp);
+}
+
+@ 
+@<|SimResultsStats::calcMean| code@>=
+void SimResultsStats::calcMean()
+{
+	mean.zeros();
+	if (data.size()*num_per > 0) {
+		double mult = 1.0/data.size()/num_per;
+		for (unsigned int i = 0; i < data.size(); i++) {
+			for (int j = 0; j < num_per; j++) {
+				ConstVector col(*data[i], j);
+				mean.add(mult, col);
+			}
+		}
+	}
+}
+
+@ 
+@<|SimResultsStats::calcVcov| code@>=
+void SimResultsStats::calcVcov()
+{
+	if (data.size()*num_per > 1) {
+		vcov.zeros();
+		double mult = 1.0/(data.size()*num_per - 1);
+		for (unsigned int i = 0; i < data.size(); i++) {
+			const TwoDMatrix& d = *(data[i]);
+			for (int j = 0; j < num_per; j++) {
+				for (int m = 0; m < num_y; m++) {
+					for (int n = m; n < num_y; n++) {
+						double s = (d.get(m,j)-mean[m])*(d.get(n,j)-mean[n]);
+						vcov.get(m,n) += mult*s;
+						if (m != n)
+							vcov.get(n,m) += mult*s;
+					}
+				}
+			}
+		}
+	} else {
+		vcov.infs();
+	}
+}
+
+@ 
+@<|SimResultsDynamicStats::simulate| code@>=
+void SimResultsDynamicStats::simulate(int num_sim, const DecisionRule& dr,
+									  const Vector& start,
+									  const TwoDMatrix& vcov, Journal& journal)
+{
+	SimResults::simulate(num_sim, dr, start, vcov, journal);
+	{
+		JournalRecordPair paa(journal);
+		paa << "Calculating means of the conditional simulations." << endrec;
+		calcMean();
+	}
+	{
+		JournalRecordPair paa(journal);
+		paa << "Calculating variances of the conditional simulations." << endrec;
+		calcVariance();
+	}
+}
+
+@ 
+@<|SimResultsDynamicStats::writeMat4| code@>=
+void SimResultsDynamicStats::writeMat4(FILE* fd, const char* lname) const
+{
+	char tmp[100];
+	sprintf(tmp, "%s_cond_mean", lname);
+	ConstTwoDMatrix(mean).writeMat4(fd, tmp);
+	sprintf(tmp, "%s_cond_variance", lname);
+	ConstTwoDMatrix(variance).writeMat4(fd, tmp);
+}
+
+@ 
+@<|SimResultsDynamicStats::calcMean| code@>=
+void SimResultsDynamicStats::calcMean()
+{
+	mean.zeros();
+	if (data.size() > 0) {
+		double mult = 1.0/data.size();
+		for (int j = 0; j < num_per; j++) {
+			Vector meanj(mean, j);
+			for (unsigned int i = 0; i < data.size(); i++) {
+				ConstVector col(*data[i], j);
+				meanj.add(mult, col);
+			}
+		}
+	}
+}
+
+@ 
+@<|SimResultsDynamicStats::calcVariance| code@>=
+void SimResultsDynamicStats::calcVariance()
+{
+	if (data.size() > 1) {
+		variance.zeros();
+		double mult = 1.0/(data.size()-1);
+		for (int j = 0; j < num_per; j++) {
+			ConstVector meanj(mean, j);
+			Vector varj(variance, j);
+			for (int i = 0; i < (int)data.size(); i++) {
+				Vector col(ConstVector((*data[i]), j));
+				col.add(-1.0, meanj);
+				for (int k = 0; k < col.length(); k++)
+					col[k] = col[k]*col[k];
+				varj.add(mult, col);
+			}
+		}
+	} else {
+		variance.infs();
+	}
+}
+
+
+@ 
+@<|SimResultsIRF::simulate| code1@>=
+void SimResultsIRF::simulate(const DecisionRule& dr, const Vector& start,
+							 Journal& journal)
+{
+	JournalRecordPair paa(journal);
+	paa << "Performing " << control.getNumSets() << " IRF simulations for "
+		<< num_per << " periods; shock=" << ishock << ", impulse=" << imp << endrec;
+	simulate(dr, start);
+	int thrown = control.getNumSets() - data.size();
+	if (thrown > 0) {
+		JournalRecord rec(journal);
+		rec << "I had to throw " << thrown
+			<< " simulations away due to Nan or Inf" << endrec;
+	}	
+	calcMeans();
+	calcVariances();
+}
+
+@ 
+@<|SimResultsIRF::simulate| code2@>=
+void SimResultsIRF::simulate(const DecisionRule& dr, const Vector& start)
+{
+	THREAD_GROUP gr;
+	for (int idata = 0; idata < control.getNumSets(); idata++) {
+		THREAD* worker = new
+			SimulationIRFWorker(*this, dr, DecisionRule::horner,
+								num_per, start, idata, ishock, imp);
+		gr.insert(worker);
+	}
+	gr.run();
+}
+
+@ 
+@<|SimResultsIRF::calcMeans| code@>=
+void SimResultsIRF::calcMeans()
+{
+	means.zeros();
+	if (data.size() > 0) {
+		for (unsigned int i = 0; i < data.size(); i++)
+			means.add(1.0, *(data[i]));
+		means.mult(1.0/data.size());
+	}
+}
+
+@ 
+@<|SimResultsIRF::calcVariances| code@>=
+void SimResultsIRF::calcVariances()
+{
+	if (data.size() > 1) {
+		variances.zeros();
+		for (unsigned int i = 0; i < data.size(); i++) {
+			TwoDMatrix d((const TwoDMatrix&)(*(data[i])));
+			d.add(-1.0, means);
+			for (int j = 0; j < d.nrows(); j++)
+				for (int k = 0;	k < d.ncols(); k++)
+					variances.get(j,k) += d.get(j,k)*d.get(j,k);
+		}
+		variances.mult(1.0/(data.size()-1));
+	} else {
+		variances.infs();
+	}
+}
+
+@ 
+@<|SimResultsIRF::writeMat4| code@>=
+void SimResultsIRF::writeMat4(FILE* fd, const char* lname) const
+{
+	char tmp[100];
+	sprintf(tmp, "%s_mean", lname);
+	means.writeMat4(fd, tmp);
+	sprintf(tmp, "%s_var", lname);
+	variances.writeMat4(fd, tmp);
+}
+
+@ 
+@<|RTSimResultsStats::simulate| code1@>=
+void RTSimResultsStats::simulate(int num_sim, const DecisionRule& dr, const Vector& start,
+								 const TwoDMatrix& v, Journal& journal)
+{
+	JournalRecordPair paa(journal);
+	paa << "Performing " << num_sim << " real-time stochastic simulations for "
+		<< num_per << " periods" << endrec;
+	simulate(num_sim, dr, start, v);
+	mean = nc.getMean();
+	mean.add(1.0, dr.getSteady());
+	nc.getVariance(vcov);
+	if (thrown_periods > 0) {
+		JournalRecord rec(journal);
+		rec << "I had to throw " << thrown_periods << " periods away due to Nan or Inf" << endrec;
+		JournalRecord rec1(journal);
+		rec1 << "This affected " << incomplete_simulations << " out of "
+			 << num_sim << " simulations" << endrec;
+	}
+}
+
+@ 
+@<|RTSimResultsStats::simulate| code2@>=
+void RTSimResultsStats::simulate(int num_sim, const DecisionRule& dr, const Vector& start,
+								 const TwoDMatrix& vcov)
+{
+	std::vector<RandomShockRealization> rsrs;
+	rsrs.reserve(num_sim);
+
+	THREAD_GROUP gr;
+	for (int i = 0; i < num_sim; i++) {
+		RandomShockRealization sr(vcov, system_random_generator.int_uniform());
+		rsrs.push_back(sr);
+		THREAD* worker = new
+			RTSimulationWorker(*this, dr, DecisionRule::horner,
+							   num_per, start, rsrs.back());
+		gr.insert(worker);
+	}
+	gr.run();
+}
+
+@ 
+@<|RTSimResultsStats::writeMat4| code@>=
+void RTSimResultsStats::writeMat4(FILE* fd, const char* lname)
+{
+	char tmp[100];
+	sprintf(tmp, "%s_rt_mean", lname);
+	ConstTwoDMatrix m(nc.getDim(), 1, mean.base());
+	m.writeMat4(fd, tmp);
+	sprintf(tmp, "%s_rt_vcov", lname);
+	ConstTwoDMatrix(vcov).writeMat4(fd, tmp);
+}
+
+@ 
+@<|IRFResults| constructor@>=
+IRFResults::IRFResults(const DynamicModel& mod, const DecisionRule& dr,
+					   const SimResults& control, const vector<int>& ili,
+					   Journal& journal)
+	: model(mod), irf_list_ind(ili)
+{
+	int num_per = control.getNumPer();
+	JournalRecordPair pa(journal);
+	pa << "Calculating IRFs against control for " << (int)irf_list_ind.size() << " shocks and for "
+	   << num_per << " periods" << endrec;
+	const TwoDMatrix& vcov = mod.getVcov();
+	for (unsigned int ii = 0; ii < irf_list_ind.size(); ii++) {
+		int ishock = irf_list_ind[ii];
+		double stderror = sqrt(vcov.get(ishock,ishock));
+		irf_res.push_back(new SimResultsIRF(control, model.numeq(), num_per,
+											ishock, stderror));
+		irf_res.push_back(new SimResultsIRF(control, model.numeq(), num_per,
+											ishock, -stderror));
+	}
+
+	for (unsigned int ii = 0; ii < irf_list_ind.size(); ii++) {
+		irf_res[2*ii]->simulate(dr, model.getSteady(), journal);
+		irf_res[2*ii+1]->simulate(dr, model.getSteady(), journal);
+	}
+}
+
+@ 
+@<|IRFResults| destructor@>=
+IRFResults::~IRFResults()
+{
+	for (unsigned int i = 0; i < irf_res.size(); i++)
+		delete irf_res[i];
+}
+
+@ 
+@<|IRFResults::writeMat4| code@>=
+void IRFResults::writeMat4(FILE* fd, const char* prefix) const
+{
+	for (unsigned int i = 0; i < irf_list_ind.size(); i++) {
+		char tmp[100];
+		int ishock = irf_list_ind[i];
+		const char* shockname = model.getExogNames().getName(ishock);
+		sprintf(tmp, "%s_irfp_%s", prefix, shockname);
+		irf_res[2*i]->writeMat4(fd, tmp);
+		sprintf(tmp, "%s_irfm_%s", prefix, shockname);
+		irf_res[2*i+1]->writeMat4(fd, tmp);
+	}
+}
+
+@ 
+@<|SimulationWorker::operator()()| code@>=
+void SimulationWorker::operator()()
+{
+	ExplicitShockRealization* esr = new ExplicitShockRealization(sr, np);
+	TwoDMatrix* m = dr.simulate(em, np, st, *esr);
+	{
+		SYNCHRO syn(&res, "simulation");
+		res.addDataSet(m, esr);
+	}
+}
+
+@ Here we create a new instance of |ExplicitShockRealization| of the
+corresponding control, add the impulse, and simulate.
+
+@<|SimulationIRFWorker::operator()()| code@>=
+void SimulationIRFWorker::operator()()
+{
+	ExplicitShockRealization* esr =
+	    new ExplicitShockRealization(res.control.getShocks(idata));
+	esr->addToShock(ishock, 0, imp);
+	TwoDMatrix* m = dr.simulate(em, np, st, *esr);
+	m->add(-1.0, res.control.getData(idata));
+	{
+		SYNCHRO syn(&res, "simulation");
+		res.addDataSet(m, esr);
+	}
+}
+
+@ 
+@<|RTSimulationWorker::operator()()| code@>=
+void RTSimulationWorker::operator()()
+{
+	NormalConj nc(res.nc.getDim());
+	const PartitionY& ypart = dr.getYPart();
+	int nu = dr.nexog();
+	const Vector& ysteady = dr.getSteady();
+
+	@<initialize vectors and subvectors for simulation@>;
+	@<simulate the first real-time period@>;
+    @<simulate other real-time periods@>;
+	{
+		SYNCHRO syn(&res, "rtsimulation");
+		res.nc.update(nc);
+		if (res.num_per-ip > 0) {
+			res.incomplete_simulations++;
+			res.thrown_periods += res.num_per-ip;
+		}		
+	}
+}
+
+@ 
+@<initialize vectors and subvectors for simulation@>=
+	Vector dyu(ypart.nys()+nu);
+	ConstVector ystart_pred(ystart, ypart.nstat, ypart.nys());
+	ConstVector ysteady_pred(ysteady, ypart.nstat, ypart.nys());
+	Vector dy(dyu, 0, ypart.nys());
+	Vector u(dyu, ypart.nys(), nu);
+	Vector y(nc.getDim());
+	ConstVector ypred(y, ypart.nstat, ypart.nys());
+
+@ 
+@<simulate the first real-time period@>=
+	int ip = 0;
+	dy = ystart_pred;
+	dy.add(-1.0, ysteady_pred);
+	sr.get(ip, u);
+	dr.eval(em, y, dyu);
+	nc.update(y);
+
+@
+@<simulate other real-time periods@>=
+while (y.isFinite() && ip < res.num_per) {
+	ip++;
+	dy = ypred;
+	sr.get(ip, u);
+	dr.eval(em, y, dyu);
+	nc.update(y);
+}
+
+@ This calculates factorization $FF^T=V$ in the Cholesky way. It does
+not work for semidefinite matrices.
+ 
+@<|RandomShockRealization::choleskyFactor| code@>=
+void RandomShockRealization::choleskyFactor(const TwoDMatrix& v)
+{
+	factor = v;
+	int rows = factor.nrows();
+	for (int i = 0; i < rows; i++)
+		for (int j = i+1; j < rows; j++)
+			factor.get(i,j) = 0.0;
+	int info;
+
+	LAPACK_dpotrf("L", &rows, factor.base(), &rows, &info);
+	KORD_RAISE_IF(info != 0,
+				  "Info!=0 in RandomShockRealization::choleskyFactor");
+}
+
+@ This calculates $FF^T=V$ factorization by symmetric Schur
+decomposition. It works for semidefinite matrices.
+ 
+@<|RandomShockRealization::schurFactor| code@>=
+void RandomShockRealization::schurFactor(const TwoDMatrix& v)
+{
+	SymSchurDecomp ssd(v);
+	ssd.getFactor(factor);
+}
+
+@ 
+@<|RandomShockRealization::get| code@>=
+void RandomShockRealization::get(int n, Vector& out)
+{
+ 	KORD_RAISE_IF(out.length() != numShocks(),
+				  "Wrong length of out vector in RandomShockRealization::get");
+	Vector d(out.length());
+	for (int i = 0; i < d.length(); i++) {
+		d[i] = mtwister.normal();
+	}
+	out.zeros();
+	factor.multaVec(out, ConstVector(d));
+}
+
+@ 
+@<|ExplicitShockRealization| constructor code@>=
+ExplicitShockRealization::ExplicitShockRealization(ShockRealization& sr,
+												   int num_per)
+	: shocks(sr.numShocks(), num_per)
+{
+	for (int j = 0; j < num_per; j++) {
+		Vector jcol(shocks, j);
+		sr.get(j, jcol);
+	}
+}
+
+@ 
+@<|ExplicitShockRealization::get| code@>=
+void ExplicitShockRealization::get(int n, Vector& out)
+{
+ 	KORD_RAISE_IF(out.length() != numShocks(),
+				  "Wrong length of out vector in ExplicitShockRealization::get");
+	int i = n % shocks.ncols();
+	ConstVector icol(shocks, i);
+	out = icol;
+}
+
+@ 
+@<|ExplicitShockRealization::addToShock| code@>=
+void ExplicitShockRealization::addToShock(int ishock, int iper, double val)
+{
+	KORD_RAISE_IF(ishock < 0 || ishock >= numShocks(),
+				  "Wrong index of shock in ExplicitShockRealization::addToShock");
+	int j = iper % shocks.ncols();
+	shocks.get(ishock, j) += val;
+}
+
+
+@ 
+@<|GenShockRealization::get| code@>=
+void GenShockRealization::get(int n, Vector& out)
+{
+	KORD_RAISE_IF(out.length() != numShocks(),
+				  "Wrong length of out vector in GenShockRealization::get");
+	ExplicitShockRealization::get(n, out);
+	Vector r(numShocks());
+	RandomShockRealization::get(n, r);
+	for (int j = 0; j < numShocks(); j++)
+		if (! isfinite(out[j]))
+			out[j] = r[j];
+}
+
+
+@ End of {\tt decision\_rule.cpp} file.
diff --git a/dynare++/kord/decision_rule.hweb b/dynare++/kord/decision_rule.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..caf7315faab80877bd9ac969f39a1c4fdf9a2ae7
--- /dev/null
+++ b/dynare++/kord/decision_rule.hweb
@@ -0,0 +1,988 @@
+@q $Id: decision_rule.hweb 2336 2009-01-14 10:37:02Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@*2 Decision rule and simulation. Start of {\tt decision\_rule.h} file.
+
+The main purpose of this file is a decision rule representation which
+can run a simulation. So we define an interface for classes providing
+realizations of random shocks, and define the class
+|DecisionRule|. The latter basically takes tensor container of
+derivatives of policy rules, and adds them up with respect to
+$\sigma$. The class allows specifying a $\sigma$ different from $1$.
+
+In addition, we provide classes for running simulations and storing
+the results, calculating some statistics and generating IRF. The class
+|DRFixPoint| allows for calculation of the fix point of a given
+decision rule.
+
+@s DecisionRule int
+@s DecisionRuleImpl int
+@s FoldDecisionRule int
+@s UnfoldDecisionRule int
+@s ShockRealization int
+@s DRFixPoint int
+@s SimResults int
+@s SimResultsStats int
+@s SimResultsDynamicStats int
+@s RTSimResultsStats int
+@s SimResultsIRF int
+@s IRFResults int
+@s SimulationWorker int
+@s RTSimulationWorker int
+@s SimulationIRFWorker int
+@s RandomShockRealization int
+@s ExplicitShockRealization int
+@s GenShockRealization int
+@s IRFShockRealization int
+
+@c
+#ifndef DECISION_RULE_H
+#define DECISION_RULE_H
+
+#include "kord_exception.h"
+#include "korder.h"
+#include "normal_conjugate.h"
+#include "mersenne_twister.h"
+
+@<|ShockRealization| class declaration@>;
+@<|DecisionRule| class declaration@>;
+@<|DecisionRuleImpl| class declaration@>;
+@<|FoldDecisionRule| class declaration@>;
+@<|UnfoldDecisionRule| class declaration@>;
+@<|DRFixPoint| class declaration@>;
+@<|SimResults| class declaration@>;
+@<|SimResultsStats| class declaration@>;
+@<|SimResultsDynamicStats| class declaration@>;
+@<|SimResultsIRF| class declaration@>;
+@<|RTSimResultsStats| class declaration@>;
+@<|IRFResults| class declaration@>;
+@<|SimulationWorker| class declaration@>;
+@<|SimulationIRFWorker| class declaration@>;
+@<|RTSimulationWorker| class declaration@>;
+@<|RandomShockRealization| class declaration@>;
+@<|ExplicitShockRealization| class declaration@>;
+@<|GenShockRealization| class declaration@>;
+
+#endif
+
+@ This is a general interface to shock realizations. The interface
+has a method returning the shock realization at a given
+time. This method is not constant, since it may change the state of the
+object.
+
+@<|ShockRealization| class declaration@>=
+class ShockRealization {
+public:@;
+	virtual ~ShockRealization()@+ {}
+	virtual void get(int n, Vector& out) =0;
+	virtual int numShocks() const =0;
+};
+
+@ This class is an abstract interface to a decision rule. Its main
+purpose is to define a common interface for simulation of a decision
+rule. We need only simulate, evaluate, centralized clone and output
+methods. The |simulate| method simulates the rule for a given
+realization of the shocks. |eval| is a primitive evaluation (it takes
+a vector of state variables (predetermined, both and shocks) and
+returns the next period variables); both input and output are in
+deviations from the rule's steady. The |evaluate| method makes only one
+step of simulation (in terms of absolute values, not
+deviations). |centralizedClone| returns a new copy of the decision
+rule, which is centralized about the provided fix-point. And finally,
+|writeMat4| writes the decision rule to the Matlab MAT-4 file.
+
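+As an illustration only, a minimal usage sketch (it assumes a decision
+rule |dr|, a shock covariance matrix |vcov| and an integer |seed|
+already exist; the number of periods is arbitrary):
+
+	// hypothetical sketch, not part of the library
+	#include "decision_rule.h"
+
+	TwoDMatrix* simulation_sketch(const DecisionRule& dr, const TwoDMatrix& vcov, int seed)
+	{
+		// random shocks with covariance vcov
+		RandomShockRealization sr(vcov, seed);
+		// simulate 100 periods starting from the rule's steady state;
+		// columns of the result are levels of the endogenous variables
+		return dr.simulate(DecisionRule::horner, 100, dr.getSteady(), sr);
+	}
+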
+@<|DecisionRule| class declaration@>=
+class DecisionRule {
+public:@;
+	enum emethod {@+ horner, trad @+};
+	virtual ~DecisionRule()@+ {}
+	virtual TwoDMatrix* simulate(emethod em, int np, const Vector& ystart,
+								 ShockRealization& sr) const =0;
+	virtual void eval(emethod em, Vector& out, const ConstVector& v) const =0;
+	virtual void evaluate(emethod em, Vector& out, const ConstVector& ys,
+						  const ConstVector& u) const =0;
+	virtual void writeMat4(FILE* fd, const char* prefix) const =0;
+	virtual DecisionRule* centralizedClone(const Vector& fixpoint) const =0;
+	virtual const Vector& getSteady() const =0;
+	virtual int nexog() const =0;
+	virtual const PartitionY& getYPart() const =0;
+};
+
+@ The main purpose of this class is to implement |DecisionRule|
+interface, which is a simulation. To be able to do this we have to
+know the partitioning of the state vector $y$, since we will need to pick
+only the predetermined part $y^*$. Also, we need to know the steady state.
+
+The decision rule will take the form: $$y_t-\bar
+y=\sum_{i=0}^n\left[g_{(yu)^i}\right]_{\alpha_1\ldots\alpha_i}\prod_{m=1}^i
+\left[\matrix{y^*_{t-1}-\bar y^*\cr u_t}\right]^{\alpha_m},$$ where
+the tensors $\left[g_{(yu)^i}\right]$ are tensors of the constructed
+container, and $\bar y$ is the steady state.
+
+If we know the fix point of the rule (conditional on zero shocks)
+$\tilde y$, the rule can be transformed to so called ``centralized''
+form. This is very similar to the form above but the zero dimensional
+tensor is zero:
+$$y_t-\tilde y=\sum_{i=1}^n
+\left[\tilde g_{(yu)^i}\right]_{\alpha_1\ldots\alpha_i}\prod_{m=1}^i
+\left[\matrix{y^*_{t-1}-\tilde y^*\cr u_t}\right]^{\alpha_m}.$$
+We provide a method and a constructor to transform a rule to the centralized form.
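+For example, at first order the rule is simply
+$$y_t-\bar y=\left[g_{(yu)^0}\right]+\left[g_{(yu)}\right]_{\alpha}
+\left[\matrix{y^*_{t-1}-\bar y^*\cr u_t}\right]^{\alpha},$$
+while in the centralized form (about the fix point $\tilde y$) the
+constant term is zero.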
+
+The class is templated, the template argument is either |KOrder::fold|
+or |KOrder::unfold|. So, there are two implementations of |DecisionRule| interface.
+
+@<|DecisionRuleImpl| class declaration@>=
+template <int t>
+class DecisionRuleImpl : public ctraits<t>::Tpol, public DecisionRule {
+protected:@;
+	typedef typename ctraits<t>::Tpol _Tparent;
+	const Vector ysteady;
+	const PartitionY ypart;
+	const int nu;
+public:@;
+	DecisionRuleImpl(const _Tparent& pol, const PartitionY& yp, int nuu,
+					 const Vector& ys)
+		: ctraits<t>::Tpol(pol), ysteady(ys), ypart(yp), nu(nuu)@+ {}
+	DecisionRuleImpl(_Tparent& pol, const PartitionY& yp, int nuu,
+					 const Vector& ys)
+		: ctraits<t>::Tpol(0, yp.ny(), pol), ysteady(ys), ypart(yp),
+		nu(nuu)@+ {}
+	DecisionRuleImpl(const _Tg& g, const PartitionY& yp, int nuu,
+					 const Vector& ys, double sigma)
+		: ctraits<t>::Tpol(yp.ny(), yp.nys()+nuu), ysteady(ys), ypart(yp), nu(nuu)
+		{@+ fillTensors(g, sigma);@+}
+	DecisionRuleImpl(const DecisionRuleImpl<t>& dr, const ConstVector& fixpoint)
+		: ctraits<t>::Tpol(dr.ypart.ny(), dr.ypart.nys()+dr.nu),
+	  	ysteady(fixpoint), ypart(dr.ypart), nu(dr.nu)
+		{@+ centralize(dr);@+}
+	const Vector& getSteady() const
+		{@+ return ysteady;@+}
+	@<|DecisionRuleImpl::simulate| code@>;
+	@<|DecisionRuleImpl::evaluate| code@>;
+	@<|DecisionRuleImpl::centralizedClone| code@>;
+	@<|DecisionRuleImpl::writeMat4| code@>;
+	int nexog() const
+		{@+ return nu;@+}
+	const PartitionY& getYPart() const
+		{@+ return ypart;}
+protected:@;
+	@<|DecisionRuleImpl::fillTensors| code@>;
+	@<|DecisionRuleImpl::centralize| code@>;
+	@<|DecisionRuleImpl::eval| code@>;
+};
+
+@ Here we have to fill the tensor polynomial. This involves two
+separated actions. First is to evaluate the approximation at a given
+$\sigma$, the second is to compile the tensors $[g_{{(yu)}^{i+j}}]$ from
+$[g_{y^iu^j}]$. The first action is done here, the second is done by
+method |addSubTensor| of a full symmetry tensor.
+
+The evaluation is done as follows:
+
+The $q-$order approximation to the solution can be written as:
+
+$$
+\eqalign{
+y_t-\bar y &= \sum_{l=1}^q{1\over l!}\left[\sum_{i+j+k=l}
+\left(\matrix{l\cr i,j,k}\right)\left[g_{y^iu^j\sigma^k}\right]
+_{\alpha_1\ldots\alpha_i\beta_1\ldots\beta_j}
+\prod_{m=1}^i[y^*_{t-1}-\bar y^*]^{\alpha_m}
+\prod_{n=1}^j[u_t]^{\beta_n}\sigma^k\right]\cr
+ &= \sum_{l=1}^q\left[\sum_{i+j\leq l}\left(\matrix{i+j\cr i}\right)
+\left[\sum_{k=0}^{l-i-j}{1\over l!}
+\left(\matrix{l\cr k}\right)\left[g_{y^iu^j\sigma^k}\right]\sigma^k\right]
+\prod_{m=1}^i[y^*_{t-1}-\bar y^*]^{\alpha_m}
+\prod_{n=1}^j[u_t]^{\beta_n}\right]
+}
+$$
+
+This means that for each $i+j+k=l$ we have to add
+$${1\over l!}\left(\matrix{l\cr
+k}\right)\left[g_{y^iu^j\sigma^k}\right]\cdot\sigma^k=
+{1\over (i+j)!k!}\left[g_{y^iu^j\sigma^k}\right]\cdot\sigma^k$$ to
+$g_{(yu)^{i+j}}$. In addition, note that the multiplier
+$\left(\matrix{i+j\cr i}\right)$ is applied when the fully symmetric
+tensor $[g_{(yu)^{i+j}}]$ is evaluated.
+
+So we go through $i+j=d=0\ldots q$ and in each loop we form the fully
+symmetric tensor $[g_{(yu)^d}]$ and insert it into the container.
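+For example, for $d=2$ the tensor $\left[g_{(yu)^2}\right]$ collects
+$${1\over 2!}\left(\left[g_{y^2}\right]+\left[g_{y^2\sigma}\right]\sigma
++{1\over 2!}\left[g_{y^2\sigma^2}\right]\sigma^2+\cdots\right),\quad
+{1\over 2!}\left(\left[g_{yu}\right]+\left[g_{yu\sigma}\right]\sigma+\cdots\right),\quad
+{1\over 2!}\left(\left[g_{u^2}\right]+\left[g_{u^2\sigma}\right]\sigma+\cdots\right),$$
+placed into its $y^2$, $yu$ and $u^2$ parts respectively.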
+
+@<|DecisionRuleImpl::fillTensors| code@>=
+void fillTensors(const _Tg& g, double sigma)
+{
+	IntSequence tns(2);
+	tns[0] = ypart.nys(); tns[1] = nu;
+	int dfact = 1;
+	for (int d = 0; d <= g.getMaxDim(); d++, dfact*=d) {
+		_Ttensym* g_yud = new _Ttensym(ypart.ny(), ypart.nys()+nu, d);
+		g_yud->zeros();
+		@<fill tensor of |g_yud| of dimension |d|@>;
+		insert(g_yud);
+	}
+}
+
+@ Here we have to fill the tensor $\left[g_{(yu)^d}\right]$. So we go
+through all pairs $(i,j)$ giving $i+j=d$, and through all $k$ from
+zero up to maximal dimension minus $d$. In this way we go through all
+symmetries of $g_{y^iu^j\sigma^k}$ which will be added to $g_{(yu)^d}$.
+
+Note that at the beginning, |dfact| is the factorial of |d|. We
+maintain |kfact| equal to $k!$. As indicated in
+|@<|DecisionRuleImpl::fillTensors| code@>|, the added tensor is thus
+multiplied with ${1\over d!k!}\sigma^k$.
+
+@<fill tensor of |g_yud| of dimension |d|@>=
+	for (int i = 0; i <= d; i++) {
+		int j = d-i;
+		int kfact = 1;
+		_Ttensor tmp(ypart.ny(),
+					 TensorDimens(Symmetry(i,j), tns));
+		tmp.zeros();
+		for (int k = 0; k+d <= g.getMaxDim(); k++, kfact*=k) {
+			Symmetry sym(i,j,0,k);
+			if (g.check(sym)) {
+				double mult = pow(sigma,k)/dfact/kfact;
+				tmp.add(mult,*(g.get(sym)));
+			}
+		}
+		g_yud->addSubTensor(tmp);
+	}
+
+@ The centralization is straightforward. We suppose here that the
+object's steady state is the fix point $\tilde y$. It is clear that
+the new derivatives $\left[\tilde g_{(yu)^i}\right]$ will be equal to
+the derivatives of the original decision rule |dr| at the new steady
+state $\tilde y$. So, the new derivatives are obtained by differentiating the
+given decision rule |dr| and evaluating its polynomial at
+$$dstate=\left[\matrix{\tilde y^*-\bar y^*\cr 0}\right],$$
+where $\bar y$ is the steady state of the original rule |dr|.
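+In other words, for each $d$ the new tensor is
+$$\left[\tilde g_{(yu)^d}\right]={1\over d!}
+\left[{\partial^d g\over\partial(yu)^d}\right](dstate),$$
+which the code below computes via |derivative|, |evalPartially| and the
+$1/d!$ scaling.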
+
+@<|DecisionRuleImpl::centralize| code@>=
+void centralize(const DecisionRuleImpl& dr)
+{
+	Vector dstate(ypart.nys() + nu);
+	dstate.zeros();
+	Vector dstate_star(dstate, 0, ypart.nys());
+	ConstVector newsteady_star(ysteady, ypart.nstat, ypart.nys());
+	ConstVector oldsteady_star(dr.ysteady, ypart.nstat, ypart.nys());
+	dstate_star.add(1.0, newsteady_star);
+	dstate_star.add(-1.0, oldsteady_star);
+
+	_Tpol pol(dr);
+	int dfac = 1;
+	for (int d = 1; d <= dr.getMaxDim(); d++, dfac *= d) {
+		pol.derivative(d-1);
+		_Ttensym* der = pol.evalPartially(d, dstate);
+		der->mult(1.0/dfac);
+		insert(der);
+	}
+}
+
+@ Here we evaluate repeatedly the polynomial storing results in the
+created matrix. For exogenous shocks, we use the |ShockRealization|
+class; for predetermined variables, we use |ystart| as the first
+state. The |ystart| vector is required to be all state variables
+|ypart.ny()|, although only the predetermined part of |ystart| is
+used.
+
+We simulate in terms of $\Delta y$, this is, at the beginning the
+|ysteady| is canceled from |ystart|, we simulate, and at the end
+|ysteady| is added to all columns of the result.
+
+@<|DecisionRuleImpl::simulate| code@>=
+TwoDMatrix* simulate(emethod em, int np, const Vector& ystart,
+					 ShockRealization& sr) const
+{
+	KORD_RAISE_IF(ysteady.length() != ystart.length(),
+				  "Start and steady lengths differ in DecisionRuleImpl::simulate");
+	TwoDMatrix* res = new TwoDMatrix(ypart.ny(), np);
+
+	@<initialize vectors and subvectors for simulation@>;
+	@<perform the first step of simulation@>;
+	@<perform all other steps of simulations@>;
+	@<add the steady state to all columns of |res|@>;
+	return res;
+}
+
+@ Here we allocate the stacked vector $(\Delta y^*, u)$, define the
+subvectors |dy| and |u|, and then pick up the predetermined parts of
+|ystart| and |ysteady|.
+
+@<initialize vectors and subvectors for simulation@>=
+	Vector dyu(ypart.nys()+nu);
+	ConstVector ystart_pred(ystart, ypart.nstat, ypart.nys());
+	ConstVector ysteady_pred(ysteady, ypart.nstat, ypart.nys());
+	Vector dy(dyu, 0, ypart.nys());
+	Vector u(dyu, ypart.nys(), nu);
+
+
+@ We cancel |ysteady| from |ystart|, get the shock realization into |u|,
+and evaluate the polynomial.
+
+@<perform the first step of simulation@>=
+	dy = ystart_pred;
+	dy.add(-1.0, ysteady_pred);
+	sr.get(0, u);
+	Vector out(*res, 0);
+	eval(em, out, dyu);
+
+@ Also clear. If the result at some period is not finite, we pad the
+rest of the matrix with zeros and return immediately.
+
+@<perform all other steps of simulations@>=
+	for (int i = 1; i < np; i++) {
+		ConstVector ym(*res, i-1);
+		ConstVector dym(ym, ypart.nstat, ypart.nys());
+		dy = dym;
+		sr.get(i, u);
+		Vector out(*res, i);
+		eval(em, out, dyu);
+		if (! out.isFinite()) {
+			if (i+1 < np) {
+				TwoDMatrix rest(*res, i+1, np-i-1);
+				rest.zeros();
+			}
+			return res;
+		}
+	}
+
+@ Even clearer.
+@<add the steady state to all columns of |res|@>=
+	for (int i = 0; i < res->ncols(); i++) {
+		Vector col(*res, i);
+		col.add(1.0, ysteady);
+	}
+
+
+@ This is a one-period evaluation of the decision rule. The simulation
+is a sequence of repeated one-period evaluations, with the difference
+that the steady state (fix point) is cancelled and added only once.
+Hence we have two separate methods.
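+Schematically, a single evaluation computes
+$$y=\bar y+g\left(\left[\matrix{y^*-\bar y^*\cr u}\right]\right),$$
+where $\bar y$ is |ysteady|; a simulation applies the same formula
+period by period but cancels and adds $\bar y$ only once.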
+
+@<|DecisionRuleImpl::evaluate| code@>=
+void evaluate(emethod em, Vector& out, const ConstVector& ys,
+			  const ConstVector& u) const
+{
+	KORD_RAISE_IF(ys.length() != ypart.nys() || u.length() != nu,
+				  "Wrong dimensions of input vectors in DecisionRuleImpl::evaluate");
+	KORD_RAISE_IF(out.length() != ypart.ny(),
+				  "Wrong dimension of output vector in DecisionRuleImpl::evaluate");
+	ConstVector ysteady_pred(ysteady, ypart.nstat, ypart.nys());
+	Vector ys_u(ypart.nys()+nu);
+	Vector ys_u1(ys_u, 0, ypart.nys());
+	ys_u1 = ys;
+	ys_u1.add(-1.0, ysteady_pred);
+	Vector ys_u2(ys_u, ypart.nys(), nu);
+	ys_u2 = u;
+	eval(em, out, ys_u);
+	out.add(1.0, ysteady);
+}
+
+@ This is easy. We just return the newly created copy using the
+centralized constructor.
+
+@<|DecisionRuleImpl::centralizedClone| code@>=
+DecisionRule* centralizedClone(const Vector& fixpoint) const
+{
+	return new DecisionRuleImpl<t>(*this, fixpoint);
+}
+
+@ Here we only encapsulate the two evaluation implementations into one
+method, deciding according to the parameter.
+
+@<|DecisionRuleImpl::eval| code@>=
+void eval(emethod em, Vector& out, const ConstVector& v) const
+{
+	if (em == DecisionRule::horner)
+		_Tparent::evalHorner(out, v);
+	else
+		_Tparent::evalTrad(out, v);
+}
+
+@ Write the decision rule and steady state to the MAT--4 file.
+@<|DecisionRuleImpl::writeMat4| code@>=
+void writeMat4(FILE* fd, const char* prefix) const
+{
+	ctraits<t>::Tpol::writeMat4(fd, prefix);
+	TwoDMatrix dum(ysteady.length(), 1);
+	dum.getData() = ysteady;
+	char tmp[100];
+	sprintf(tmp, "%s_ss", prefix);
+	ConstTwoDMatrix(dum).writeMat4(fd, tmp);
+}
+
+@ This is exactly the same as |DecisionRuleImpl<KOrder::fold>|. The
+only difference is that we have a conversion from
+|UnfoldDecisionRule|, which is exactly
+|DecisionRuleImpl<KOrder::unfold>|.
+
+@<|FoldDecisionRule| class declaration@>=
+class UnfoldDecisionRule;
+class FoldDecisionRule : public DecisionRuleImpl<KOrder::fold> {
+	friend class UnfoldDecisionRule;
+public:@;
+	FoldDecisionRule(const ctraits<KOrder::fold>::Tpol& pol, const PartitionY& yp, int nuu,
+					 const Vector& ys)
+		: DecisionRuleImpl<KOrder::fold>(pol, yp, nuu, ys) {}
+	FoldDecisionRule(ctraits<KOrder::fold>::Tpol& pol, const PartitionY& yp, int nuu,
+					 const Vector& ys)
+		: DecisionRuleImpl<KOrder::fold>(pol, yp, nuu, ys) {}
+	FoldDecisionRule(const ctraits<KOrder::fold>::Tg& g, const PartitionY& yp, int nuu,
+					 const Vector& ys, double sigma)
+		: DecisionRuleImpl<KOrder::fold>(g, yp, nuu, ys, sigma) {}
+	FoldDecisionRule(const DecisionRuleImpl<KOrder::fold>& dr, const ConstVector& fixpoint)
+		: DecisionRuleImpl<KOrder::fold>(dr, fixpoint) {}
+	FoldDecisionRule(const UnfoldDecisionRule& udr);
+};
+
+@ This is exactly the same as |DecisionRuleImpl<KOrder::unfold>|, but
+with a conversion from |FoldDecisionRule|, which is exactly
+|DecisionRuleImpl<KOrder::fold>|.
+
+@<|UnfoldDecisionRule| class declaration@>=
+class UnfoldDecisionRule : public DecisionRuleImpl<KOrder::unfold> {
+	friend class FoldDecisionRule; 
+public:@;
+	UnfoldDecisionRule(const ctraits<KOrder::unfold>::Tpol& pol, const PartitionY& yp, int nuu,
+					 const Vector& ys)
+		: DecisionRuleImpl<KOrder::unfold>(pol, yp, nuu, ys) {}
+	UnfoldDecisionRule(ctraits<KOrder::unfold>::Tpol& pol, const PartitionY& yp, int nuu,
+					 const Vector& ys)
+		: DecisionRuleImpl<KOrder::unfold>(pol, yp, nuu, ys) {}
+	UnfoldDecisionRule(const ctraits<KOrder::unfold>::Tg& g, const PartitionY& yp, int nuu,
+					 const Vector& ys, double sigma)
+		: DecisionRuleImpl<KOrder::unfold>(g, yp, nuu, ys, sigma) {}
+	UnfoldDecisionRule(const DecisionRuleImpl<KOrder::unfold>& dr, const ConstVector& fixpoint)
+		: DecisionRuleImpl<KOrder::unfold>(dr, fixpoint) {}
+	UnfoldDecisionRule(const FoldDecisionRule& udr);
+};
+
+
+@ This class serves for calculation of the fix point of the decision
+rule given that the shocks are zero. The class is very similar to the
+|DecisionRuleImpl|. Besides the calculation of the fix point, the only
+difference between |DRFixPoint| and |DecisionRuleImpl| is that the
+derivatives with respect to shocks are ignored (since the shocks are zero
+during the calculations). That is why we have a different |fillTensors|
+method.
+
+The solution algorithm is Newton and is described in
+|@<|DRFixPoint::solveNewton| code@>|. It solves $F(y)=0$, where
+$F=g(y,0)-y$. The function $F$ is represented by its derivatives stored
+in |bigf|. The Jacobian of the solved system is given by the derivatives
+stored in |bigfder|.
+
+@<|DRFixPoint| class declaration@>=
+template <int t>
+class DRFixPoint : public ctraits<t>::Tpol {
+	typedef typename ctraits<t>::Tpol _Tparent;
+	static int max_iter;
+	static int max_newton_iter;
+	static int newton_pause;
+	static double tol;
+	const Vector ysteady;
+	const PartitionY ypart;
+	_Tparent* bigf;
+	_Tparent* bigfder;
+public:@;
+	typedef typename DecisionRule::emethod emethod;
+	@<|DRFixPoint| constructor code@>;
+	@<|DRFixPoint| destructor code@>;
+	@<|DRFixPoint::calcFixPoint| code@>;
+	int getNumIter() const
+		{@+ return iter;@+}
+	int getNewtonLastIter() const
+		{@+ return newton_iter_last;@+}
+	int getNewtonTotalIter() const
+		{@+ return newton_iter_total;@+}
+protected:@;
+	@<|DRFixPoint::fillTensors| code@>;
+	@<|DRFixPoint::solveNewton| code@>;
+private:@;
+	int iter;
+	int newton_iter_last;
+	int newton_iter_total;	
+};
+
+
+@ Here we have to set up the function $F=g(y,0)-y$ and ${\partial
+F\over\partial y}$. The former is taken from the given derivatives of
+$g$ where a unit matrix is subtracted from the first derivative
+(|Symmetry(1)|). Then the derivative of the $F$ polynomial is
+calculated.
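+Written out, the two polynomials set up below are
+$$F(y)=g(y,0)-y,\qquad
+{\partial F\over\partial y}=g_y-I,$$
+hence the unit matrix subtracted from the |Symmetry(1)| tensor of |bigf|.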
+
+@<|DRFixPoint| constructor code@>=
+DRFixPoint(const _Tg& g, const PartitionY& yp,
+					   const Vector& ys, double sigma)
+	: ctraits<t>::Tpol(yp.ny(), yp.nys()),
+	  ysteady(ys), ypart(yp), bigf(NULL), bigfder(NULL)
+{
+	fillTensors(g, sigma);
+	_Tparent yspol(ypart.nstat, ypart.nys(), *this);
+	bigf = new _Tparent((const _Tparent&) yspol);
+	_Ttensym* frst = bigf->get(Symmetry(1));
+	for (int i = 0; i < ypart.nys(); i++)
+		frst->get(i,i) = frst->get(i,i) - 1;
+	bigfder = new _Tparent(*bigf, 0);
+}
+
+@ 
+@<|DRFixPoint| destructor code@>=
+virtual ~DRFixPoint()
+{
+	if (bigf)
+		delete bigf;
+	if (bigfder)
+		delete bigfder;
+}
+
+@ Here we fill the tensors for the |DRFixPoint| class. We ignore the
+derivatives $g_{y^iu^j\sigma^k}$ for which $j>0$. So we go through all
+dimensions |d|, and all |k| such that |d+k| does not exceed the maximum
+dimension, and add ${\sigma^k\over d!k!}g_{y^d\sigma^k}$ to
+the tensor $g_{y^d}$.
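+In other words, with $K$ again denoting the maximum dimension of |g|,
+$$\left[g_{y^d}\right]=\sum_{k=0}^{K-d}{\sigma^k\over d!k!}
+\left[g_{y^d\sigma^k}\right],$$
+which is what the loop over |k| below accumulates.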
+
+@<|DRFixPoint::fillTensors| code@>=
+void fillTensors(const _Tg& g, double sigma)
+{
+	int dfact = 1;
+	for (int d = 0; d <= g.getMaxDim(); d++, dfact*=d) {
+		_Ttensym* g_yd = new _Ttensym(ypart.ny(), ypart.nys(), d);
+		g_yd->zeros();
+		int kfact = 1;
+		for (int k = 0; d+k <= g.getMaxDim(); k++, kfact*=k) {
+			if (g.check(Symmetry(d,0,0,k))) {
+				const _Ttensor* ten = g.get(Symmetry(d,0,0,k));
+				double mult = pow(sigma,k)/dfact/kfact;
+				g_yd->add(mult, *ten);
+			}
+		}
+		insert(g_yd);
+	}
+}
+
+@ This tries to solve the polynomial equation $F(y)=0$, where the
+polynomial $F$ is |bigf| and its derivative is in |bigfder|. It returns
+true if the Newton iterations converged. The method takes the given
+vector as an initial guess and overwrites it with the solution. The
+method guarantees to return a vector with a smaller residual norm than
+the initial guess. That is why the input/output vector |y| is always
+changed.
+
+The method proceeds with a Newton step only if the step improves the
+residual error. So we track the residual errors in |flastnorm| and
+|fnorm| (former and current). In addition, at each step we search for
+an underrelaxation parameter |urelax| which improves the residual. If
+|urelax| is less than |urelax_threshold|, we stop searching and stop
+the Newton iterations.
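+Each accepted iteration thus performs the damped Newton update
+$$y\leftarrow y-urelax\cdot
+\left[{\partial F\over\partial y}(y)\right]^{-1}F(y),$$
+where the residual is evaluated via |bigf| and the Jacobian via
+|bigfder|.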
+
+@<|DRFixPoint::solveNewton| code@>=
+bool solveNewton(Vector& y)
+{
+	const double urelax_threshold = 1.e-5;
+	Vector sol((const Vector&) y);
+	Vector delta(y.length());
+	newton_iter_last = 0;
+	bool delta_finite = true;
+	double flastnorm = 0.0;
+	double fnorm = 0.0;
+	bool converged = false;
+	double urelax = 1.0;
+
+	do {
+		_Ttensym* jacob = bigfder->evalPartially(1, sol);
+		bigf->evalHorner(delta, sol);
+		if (newton_iter_last == 0)
+			flastnorm = delta.getNorm();
+		delta_finite = delta.isFinite();
+		if (delta_finite) {
+			ConstTwoDMatrix(*jacob).multInvLeft(delta);
+			@<find |urelax| improving residual@>;
+			sol.add(-urelax, delta);
+			delta_finite = delta.isFinite();
+		}
+		delete jacob;
+		newton_iter_last++;
+		converged = delta_finite && fnorm < tol;
+		flastnorm = fnorm;
+	} while (!converged && newton_iter_last < max_newton_iter
+			 && urelax > urelax_threshold);
+
+	newton_iter_total += newton_iter_last;
+	if (! converged)
+		newton_iter_last = 0;
+	y = (const Vector&)sol; 
+	return converged;
+}
+
+@ Here we find |urelax|. We cycle as long as the new residual norm
+|fnorm| is greater than the last residual norm |flastnorm|. If |urelax|
+drops below |urelax_threshold| we give up. The |urelax| is damped by
+the ratio of |flastnorm| to |fnorm|. If the ratio is close to one, we
+damp by one half.
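+The damping rule implemented in the loop below is therefore
+$$urelax\leftarrow urelax\cdot\min\left(0.5,{flastnorm\over fnorm}\right).$$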
+
+@<find |urelax| improving residual@>=
+	bool urelax_found = false;
+	urelax = 1.0;
+	while (!urelax_found && urelax > urelax_threshold) {
+		Vector soltmp((const Vector&)sol);
+		soltmp.add(-urelax, delta);
+		Vector f(sol.length());
+		bigf->evalHorner(f, soltmp);
+		fnorm = f.getNorm();
+		if (fnorm <= flastnorm)
+			urelax_found = true;
+		else
+			urelax *= std::min(0.5, flastnorm/fnorm);
+	}
+
+
+@ This method solves for the fix point of the no-shocks rule
+$y_{t+1}=f(y_t)$. It combines dull steps with Newton attempts. The
+dull steps correspond to evaluations setting $y_{t+1}=f(y_t)$. For
+reasonable models the dull steps converge to the fix point, but very
+slowly. That is why we make a Newton attempt from time to time. The
+frequency of the Newton attempts is given by |newton_pause|. We
+perform the calculations in deviations from the steady state, so at
+the end we have to add the steady state back.
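+A dull step thus updates the deviation vector $y^*$ as
+$$y^*\leftarrow y^*+F(y^*),$$
+which, since $F(y)=g(y,0)-y$, is just one evaluation of the zero-shock
+rule.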
+
+The method also sets the members |iter|, |newton_iter_last| and
+|newton_iter_total|. These numbers can be examined later.
+
+The |out| vector is not touched if the algorithm has not converged.
+
+@<|DRFixPoint::calcFixPoint| code@>=
+bool calcFixPoint(emethod em, Vector& out)
+{
+	KORD_RAISE_IF(out.length() != ypart.ny(),
+				  "Wrong length of out in DRFixPoint::calcFixPoint");
+
+	Vector delta(ypart.nys());
+	Vector ystar(ypart.nys());
+	ystar.zeros();
+
+	iter = 0;
+	newton_iter_last = 0;
+	newton_iter_total = 0;
+	bool converged = false;
+	do {
+		if ((iter/newton_pause)*newton_pause == iter)
+			converged = solveNewton(ystar);
+		if (! converged) {
+			bigf->evalHorner(delta, ystar);
+			KORD_RAISE_IF_X(! delta.isFinite(),
+							"NaN or Inf asserted in DRFixPoint::calcFixPoint",
+							KORD_FP_NOT_FINITE);
+			ystar.add(1.0, delta);
+			converged = delta.getNorm() < tol;
+		}
+		iter++;
+	} while (iter < max_iter && ! converged);
+
+	if (converged) {
+		_Tparent::evalHorner(out, ystar);
+		out.add(1.0, ysteady);
+	}
+
+	return converged;
+}
+
+
+@ This is basically a number of matrices of the same dimensions,
+which can be obtained as simulation results from a given decision rule
+and shock realizations. We also store the realizations of shocks.
+
+@<|SimResults| class declaration@>=
+class ExplicitShockRealization;
+class SimResults {
+protected:@;
+	int num_y;
+	int num_per;
+	vector<TwoDMatrix*> data;
+	vector<ExplicitShockRealization*> shocks;
+public:@;
+	SimResults(int ny, int nper)
+		: num_y(ny), num_per(nper)@+ {}
+	virtual ~SimResults();
+	void simulate(int num_sim, const DecisionRule& dr, const Vector& start,
+				  const TwoDMatrix& vcov, Journal& journal);
+	void simulate(int num_sim, const DecisionRule& dr, const Vector& start,
+				  const TwoDMatrix& vcov);
+	int getNumPer() const
+		{@+ return num_per;@+}
+	int getNumSets() const
+		{@+ return (int)data.size();@+}
+	const TwoDMatrix& getData(int i) const
+		{@+ return *(data[i]);@+}
+	const ExplicitShockRealization& getShocks(int i) const
+		{ @+ return *(shocks[i]);@+}
+	bool addDataSet(TwoDMatrix* d, ExplicitShockRealization* sr);
+	void writeMat4(const char* base, const char* lname) const;
+	void writeMat4(FILE* fd, const char* lname) const;
+};
+
+@ This does the same as |SimResults| plus it calculates means and
+covariances of the simulated data.
+
+@<|SimResultsStats| class declaration@>=
+class SimResultsStats : public SimResults {
+protected:@;
+	Vector mean;
+	TwoDMatrix vcov;
+public:@;
+	SimResultsStats(int ny, int nper)
+		: SimResults(ny, nper), mean(ny), vcov(ny,ny)@+ {}
+	void simulate(int num_sim, const DecisionRule& dr, const Vector& start,
+				  const TwoDMatrix& vcov, Journal& journal);
+	void writeMat4(FILE* fd, const char* lname) const;
+protected:@;
+	void calcMean();
+	void calcVcov();
+};
+
+@ This does a similar thing as |SimResultsStats|, but the statistics are
+not calculated over all periods, only within each period. Therefore we
+do not calculate covariances across periods, only variances.
+
+@<|SimResultsDynamicStats| class declaration@>=
+class SimResultsDynamicStats : public SimResults {
+protected:@;
+	TwoDMatrix mean;
+	TwoDMatrix variance;
+public:@;
+	SimResultsDynamicStats(int ny, int nper)
+		: SimResults(ny, nper), mean(ny,nper), variance(ny,nper)@+ {}
+	void simulate(int num_sim, const DecisionRule& dr, const Vector& start,
+				  const TwoDMatrix& vcov, Journal& journal);
+	void writeMat4(FILE* fd, const char* lname) const; 
+protected:@;
+	void calcMean();
+	void calcVariance();
+};
+
+
+@ This goes through control simulation results, and for each control
+it adds a given impulse to a given shock and runs a simulation. The
+control simulation is then cancelled and the result is stored. After
+that, these results are averaged and their variances are calculated.
+
+The means and the variances are then written to the MAT-4 file.
+
+@<|SimResultsIRF| class declaration@>=
+class SimulationIRFWorker;
+class SimResultsIRF : public SimResults {
+	friend class SimulationIRFWorker;
+protected:@;
+	const SimResults& control;
+	int ishock;
+	double imp;
+	TwoDMatrix means;
+	TwoDMatrix variances;
+public:@;
+	SimResultsIRF(const SimResults& cntl, int ny, int nper, int i, double impulse)
+		: SimResults(ny, nper), control(cntl),
+		  ishock(i), imp(impulse),
+		  means(ny, nper), variances(ny, nper)@+ {}
+	void simulate(const DecisionRule& dr, const Vector& start,
+				 Journal& journal);
+	void simulate(const DecisionRule& dr, const Vector& start);
+	void writeMat4(FILE* fd, const char* lname) const;
+protected:@;
+	void calcMeans();
+	void calcVariances();
+};
+
+@ This simulates and gathers all statistics from the real time
+simulations. In the |simulate| method, it runs |RTSimulationWorker|s
+which accumulate information from their own estimates. The estimation
+is done by means of the |NormalConj| class, which is a conjugate family
+of densities for normal distributions.
+
+@<|RTSimResultsStats| class declaration@>=
+class RTSimulationWorker;
+class RTSimResultsStats {
+	friend class RTSimulationWorker;
+protected:@;
+	Vector mean;
+	TwoDMatrix vcov;
+	int num_per;
+	NormalConj nc;
+	int incomplete_simulations;
+	int thrown_periods;
+public:@;
+	RTSimResultsStats(int ny, int nper)
+		: mean(ny), vcov(ny, ny),
+		  num_per(nper), nc(ny),
+		  incomplete_simulations(0), thrown_periods(0)@+ {}
+	void simulate(int num_sim, const DecisionRule& dr, const Vector& start,
+				  const TwoDMatrix& vcov, Journal& journal);
+	void simulate(int num_sim, const DecisionRule& dr, const Vector& start,
+				  const TwoDMatrix& vcov);
+	void writeMat4(FILE* fd, const char* lname);
+};
+
+@ For each shock, this simulates a plus and a minus impulse. The class
+maintains a vector of simulation results, each corresponding to a
+particular shock and sign (positive/negative). The results of type
+|SimResultsIRF| are stored in a vector so that even indices are positive
+and odd indices are negative.
+
+The constructor takes a reference to the control simulations, which
+must be finished before the constructor is called. The control
+simulations are passed to all |SimResultsIRF|s.
+
+The constructor also takes the vector of indices of exogenous
+variables (|ili|) for which the IRFs are generated. The list is kept
+(as |irf_list_ind|) for other methods.
+ 
+@<|IRFResults| class declaration@>=
+class DynamicModel;
+class IRFResults {
+	vector<SimResultsIRF*> irf_res;
+	const DynamicModel& model;
+	vector<int> irf_list_ind;
+public:@;
+	IRFResults(const DynamicModel& mod, const DecisionRule& dr,
+			   const SimResults& control, const vector<int>& ili,
+			   Journal& journal);
+	~IRFResults();
+	void writeMat4(FILE* fd, const char* prefix) const;
+};
+
+@ This worker simulates the given decision rule and inserts the result
+to |SimResults|.
+
+@<|SimulationWorker| class declaration@>=
+class SimulationWorker : public THREAD {
+protected:@;
+	SimResults& res;
+	const DecisionRule& dr;
+	DecisionRule::emethod em;
+	int np;
+	const Vector& st;
+	ShockRealization& sr;
+public:@;
+	SimulationWorker(SimResults& sim_res,
+					 const DecisionRule& dec_rule,
+					 DecisionRule::emethod emet, int num_per,
+					 const Vector& start, ShockRealization& shock_r)
+		: res(sim_res), dr(dec_rule), em(emet), np(num_per), st(start), sr(shock_r) {}
+	void operator()();
+};
+
+@ This worker simulates a given impulse |imp| to a given shock
+|ishock| based on a given control simulation with index |idata|. The
+control simulations are contained in |SimResultsIRF| which is passed
+to the constructor.
+
+@<|SimulationIRFWorker| class declaration@>=
+class SimulationIRFWorker : public THREAD {
+	SimResultsIRF& res;
+	const DecisionRule& dr;
+	DecisionRule::emethod em;
+	int np;
+	const Vector& st;
+	int idata;
+	int ishock;
+	double imp;	
+public:@;
+	SimulationIRFWorker(SimResultsIRF& sim_res,
+						const DecisionRule& dec_rule,
+						DecisionRule::emethod emet, int num_per,
+						const Vector& start, int id,
+						int ishck, double impulse)
+		: res(sim_res), dr(dec_rule), em(emet), np(num_per), st(start),
+		  idata(id), ishock(ishck), imp(impulse)@+ {}
+	void operator()();
+};
+
+@ This class does the real time simulation job for
+|RTSimResultsStats|. It simulates the model period by period. It
+accumulates the information in |RTSimResultsStats::nc|. If NaN or
+Inf is observed, it ends the simulation and adds to the
+|thrown_periods| of |RTSimResultsStats|.
+
+@<|RTSimulationWorker| class declaration@>=
+class RTSimulationWorker : public THREAD {
+protected:@;
+	RTSimResultsStats& res;
+	const DecisionRule& dr;
+	DecisionRule::emethod em;
+	int np;
+	const Vector& ystart;
+	ShockRealization& sr;
+public:@;
+	RTSimulationWorker(RTSimResultsStats& sim_res,
+					   const DecisionRule& dec_rule,
+					   DecisionRule::emethod emet, int num_per,
+					   const Vector& start, ShockRealization& shock_r)
+		: res(sim_res), dr(dec_rule), em(emet), np(num_per), ystart(start), sr(shock_r) {}
+	void operator()();
+};
+
+@ This class generates draws from a Gaussian distribution with zero mean
+and the given variance-covariance matrix. It stores a factor $F$ of the
+vcov matrix $V$, satisfying $FF^T = V$.
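+A draw of the shock vector can then presumably be formed as $F\epsilon$,
+where $\epsilon$ is a vector of independent standard normal numbers from
+the Mersenne twister, since ${\rm cov}(F\epsilon)=FF^T=V$.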
+ 
+@<|RandomShockRealization| class declaration@>=
+class RandomShockRealization : virtual public ShockRealization {
+protected:@;
+	MersenneTwister mtwister;
+	TwoDMatrix factor;
+public:@;
+	RandomShockRealization(const TwoDMatrix& v, unsigned int iseed)
+		: mtwister(iseed), factor(v.nrows(),v.nrows())
+		{@+schurFactor(v);@+}
+	RandomShockRealization(const RandomShockRealization& sr)
+		: mtwister(sr.mtwister), factor(sr.factor)@+ {}
+	virtual ~RandomShockRealization() @+{}
+	void get(int n, Vector& out);
+	int numShocks() const
+		{@+ return factor.nrows();@+}
+protected:@;
+	void choleskyFactor(const TwoDMatrix& v);
+	void schurFactor(const TwoDMatrix& v);
+};
+
+@ This is just a matrix of finite numbers. It can be constructed from
+any |ShockRealization| with a given number of periods.
+
+@<|ExplicitShockRealization| class declaration@>=
+class ExplicitShockRealization : virtual public ShockRealization {
+	TwoDMatrix shocks;
+public:@;
+	ExplicitShockRealization(const TwoDMatrix& sh)
+		: shocks(sh)@+ {}
+	ExplicitShockRealization(const ExplicitShockRealization& sr)
+		: shocks(sr.shocks)@+ {}
+	ExplicitShockRealization(ShockRealization& sr, int num_per);
+	void get(int n, Vector& out);
+	int numShocks() const
+		{@+ return shocks.nrows();@+}
+	void addToShock(int ishock, int iper, double val);
+	void print() const
+		{@+ shocks.print();@+}
+};
+
+@ This represents a user-given shock realization. The first matrix of
+the constructor is a covariance matrix of the shocks; the second matrix
+is a rectangular matrix where columns correspond to periods and rows to
+shocks. If an element of the matrix is {\tt NaN}, or {\tt Inf}, or
+{\tt -Inf}, then the random shock is taken instead of that element.
+
+In this way it is a generalization of both |RandomShockRealization|
+and |ExplicitShockRealization|.
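+For illustration only (the numbers are made up), with two shocks and
+three periods the matrix
+$$\left[\matrix{0.01&{\rm NaN}&0\cr{\rm NaN}&{\rm NaN}&-0.02}\right]$$
+would fix the first shock in periods one and three and the second shock
+in period three, while the remaining entries would be drawn randomly.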
+
+@<|GenShockRealization| class declaration@>=
+class GenShockRealization : public RandomShockRealization, public ExplicitShockRealization {
+public:@;
+	GenShockRealization(const TwoDMatrix& v, const TwoDMatrix& sh, int seed)
+		: RandomShockRealization(v, seed), ExplicitShockRealization(sh)@+
+		{
+			KORD_RAISE_IF(sh.nrows() != v.nrows() || v.nrows() != v.ncols(),
+						  "Wrong dimension of input matrix in GenShockRealization constructor");
+		}
+	void get(int n, Vector& out);
+	int numShocks() const
+		{@+ return RandomShockRealization::numShocks();@+}
+};
+
+@ End of {\tt decision\_rule.h} file.
diff --git a/dynare++/kord/dynamic_model.cweb b/dynare++/kord/dynamic_model.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..d8dd17159ecf0c122ab1c81f8aed421327e1668e
--- /dev/null
+++ b/dynare++/kord/dynamic_model.cweb
@@ -0,0 +1,59 @@
+@q $Id: dynamic_model.cweb 431 2005-08-16 15:41:01Z kamenik $ @>
+@q Copyright 2005, Ondra Kamenik @>
+
+@ Start of {\tt dynamic\_model.cpp} file.
+
+@c
+#include "dynamic_model.h"
+
+@<|NameList::print| code@>;
+@<|NameList::writeMat4| code@>;
+@<|NameList::writeMat4Indices| code@>;
+
+@ 
+@<|NameList::print| code@>=
+void NameList::print() const
+{
+	for (int i = 0; i < getNum(); i++)
+		printf("%s\n", getName(i));
+}
+
+@ 
+@<|NameList::writeMat4| code@>=
+void NameList::writeMat4(FILE* fd, const char* vname) const
+{
+	int maxlen = 0;
+	for (int i = 0; i < getNum(); i++)
+		if (maxlen < (int)strlen(getName(i)))
+			maxlen = (int)strlen(getName(i));
+
+	if (maxlen == 0)
+		return;
+
+	TwoDMatrix m(getNum(), maxlen);
+	for (int i = 0; i < getNum(); i++)
+		for (int j = 0; j < maxlen; j++)
+			if (j < (int)strlen(getName(i)))
+				m.get(i,j) = (double)(getName(i)[j]);
+			else
+				m.get(i,j) = (double)(' ');
+
+	Mat4Header header(m, vname, "text matrix");
+	header.write(fd);
+	fwrite(m.getData().base(), sizeof(double), m.nrows()*m.ncols(), fd);
+}
+
+@ 
+@<|NameList::writeMat4Indices| code@>=
+void NameList::writeMat4Indices(FILE* fd, const char* prefix) const
+{
+	char tmp[100];
+	TwoDMatrix aux(1,1);
+	for (int i = 0; i < getNum(); i++) {
+		sprintf(tmp, "%s_i_%s", prefix, getName(i));
+		aux.get(0,0) = i+1;
+		aux.writeMat4(fd, tmp);
+	}
+}
+
+@ End of {\tt dynamic\_model.cpp} file.
diff --git a/dynare++/kord/dynamic_model.hweb b/dynare++/kord/dynamic_model.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..9a0a7ae5e0856111a5538d2d687be4d5652c6ce3
--- /dev/null
+++ b/dynare++/kord/dynamic_model.hweb
@@ -0,0 +1,120 @@
+@q $Id: dynamic_model.hweb 378 2005-07-21 15:50:20Z kamenik $ @>
+@q Copyright 2005, Ondra Kamenik @>
+
+@*2 Dynamic model abstraction. Start of {\tt dynamic\_model.h} file.
+
+This file only defines a generic interface to an SDGE model. The model
+takes the form:
+$$E_t\left[f(g^{**}(g^*(y,u_t),u_{t+1}),g(y,u),y,u_t)\right]=0$$
+The interface is defined via pure virtual class |DynamicModel|.
+
+@s NameList int
+@s DynamicModel int
+@c
+#ifndef DYNAMIC_MODEL_H
+#define DYNAMIC_MODEL_H
+
+#include "t_container.h"
+#include "sparse_tensor.h"
+
+#include "Vector.h"
+
+@<|NameList| class declaration@>;
+@<|DynamicModel| class declaration@>;
+
+#endif
+
+@ The class is a pure virtual class which provides access to the names
+of the variables.
+@<|NameList| class declaration@>=
+class NameList {
+public:@;
+	virtual ~NameList() {}
+	virtual int getNum() const =0;
+	virtual const char* getName(int i) const=0;
+	void print() const;
+	void writeMat4(FILE* fd, const char* vname) const;
+	void writeMat4Indices(FILE* fd, const char* prefix) const;
+};
+
+@ This is the interface to an information on a generic SDGE
+model. It is sufficient for calculations of policy rule Taylor
+approximations at some (not necessarily deterministic) steady state.
+
+We need to know a partitioning of endogenous variables $y$. We suppose
+that $y$ is partitioned as
+$$y=\left[\matrix{\hbox{static}\cr\hbox{pred}\cr\hbox{both}\cr\hbox{forward}}\right]$$
+of which we define
+$$y^*=\left[\matrix{\hbox{pred}\cr\hbox{both}}\right]\quad
+y^{**}=\left[\matrix{\hbox{both}\cr\hbox{forward}}\right]$$
+where by ``static'' we mean those variables which appear only at time
+$t$; by ``pred'' those which appear only at $t$ and
+$t-1$; by ``both'' those which appear at least at
+$t-1$ and $t+1$; and by ``forward'' those which
+appear only at $t$ and $t+1$. This partitioning is given by the methods
+|nstat()|, |npred()|, |nboth()|, and |nforw()|. The number of
+equations |numeq()| must be the same as the number of endogenous
+variables.
+
+In order to complete description, we need to know a number of
+exogenous variables, which is a size of $u$, hence |nexog()| method.
+
+The model contains an information about names of variables, the
+variance-covariance matrix of the shocks, the derivatives of equations
+of $f$ at some steady state, and the steady state. These can be
+retrieved by the corresponding methods.
+
+The derivatives of the system are calculated with respect to the stacked
+variables; the stack looks as follows:
+$$\left[\matrix{y^{**}_{t+1}\cr y_t\cr y^*_{t-1}\cr u_t}\right].$$
+
+There are only three operations. The first
+|solveDeterministicSteady()| solves the deterministic steady state
+which can be retrieved by |getSteady()| later. The method
+|evaluateSystem| calculates $f(y^{**},y,y^*,u)$, where $y$ and $u$ are
+passed, or $f(y^{**}_{t+1}, y_t, y^*_{t-1}, u)$, where $y^{**}_{t+1}$,
+$y_t$, $y^*_{t-1}$, $u$ are passed. Finally, the method
+|calcDerivativesAtSteady()| calculates derivatives of $f$ at the
+current steady state, and zero shocks. The derivatives can be
+retrieved with |getModelDerivatives()|. All the derivatives are done
+up to a given order in the model, which can be retrieved by |order()|.
+
+The model initialization is done in a constructor of the implementing
+class. The constructor usually calls a parser, which parses a given
+file (usually a text file), and retrieves all necessary information
+about the model, including variables, partitioning, variance-covariance
+matrix, information helpful for calculation of the deterministic
+steady state, and so on.
+
+@<|DynamicModel| class declaration@>=
+class DynamicModel {
+public:@;
+	virtual DynamicModel* clone() const =0;
+	virtual ~DynamicModel() {}
+
+	virtual int nstat() const =0;
+	virtual int nboth() const =0;
+	virtual int npred() const =0;
+	virtual int nforw() const =0;
+	virtual int nexog() const =0;
+	virtual int order() const =0;
+	int numeq() const
+		{@+ return nstat()+nboth()+npred()+nforw(); @+}
+
+	virtual const NameList& getAllEndoNames() const =0;
+	virtual const NameList& getStateNames() const =0;
+	virtual const NameList& getExogNames() const =0;
+	virtual const TwoDMatrix& getVcov() const =0;
+	virtual const TensorContainer<FSSparseTensor>& getModelDerivatives() const =0;
+	virtual const Vector& getSteady() const =0;
+	virtual Vector& getSteady() =0;
+
+	virtual void solveDeterministicSteady() =0;
+	virtual void evaluateSystem(Vector& out, const Vector& yy, const Vector& xx) =0;
+	virtual void evaluateSystem(Vector& out, const Vector& yym, const Vector& yy,
+								const Vector& yyp, const Vector& xx) =0;
+	virtual void calcDerivativesAtSteady() =0;
+};
+
+
+@ End of {\tt dynamic\_model.h} file.
diff --git a/dynare++/kord/faa_di_bruno.cweb b/dynare++/kord/faa_di_bruno.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..4973798459c7d4b340abf1cdb4e84e6686b86a2b
--- /dev/null
+++ b/dynare++/kord/faa_di_bruno.cweb
@@ -0,0 +1,158 @@
+@q $Id: faa_di_bruno.cweb 744 2006-05-09 13:16:07Z kamenik $ @>
+@q Copyright 2005, Ondra Kamenik @>
+
+@ Start of {\tt faa\_di\_bruno.cpp} file.
+
+@s FoldedFineContainer int
+@s UnfoldedFineContainer int
+
+@c
+#include "faa_di_bruno.h"
+#include "fine_container.h"
+
+#include <math.h>
+
+double FaaDiBruno::magic_mult = 1.5;
+@<|FaaDiBruno::calculate| folded sparse code@>;
+@<|FaaDiBruno::calculate| folded dense code@>;
+@<|FaaDiBruno::calculate| unfolded sparse code@>;
+@<|FaaDiBruno::calculate| unfolded dense code@>;
+@<|FaaDiBruno::estimRefinment| code@>;
+
+@ We take an opportunity to refine the stack container to avoid
+allocation of more memory than available.
+
+@<|FaaDiBruno::calculate| folded sparse code@>=
+void FaaDiBruno::calculate(const StackContainer<FGSTensor>& cont,
+						   const TensorContainer<FSSparseTensor>& f,
+						   FGSTensor& out)
+{
+	out.zeros();
+	for (int l = 1; l <= out.dimen(); l++) {
+		int mem_mb, p_size_mb;
+		int max = estimRefinment(out.getDims(), out.nrows(), l, mem_mb, p_size_mb);
+		FoldedFineContainer fine_cont(cont, max);
+		fine_cont.multAndAdd(l, f, out);
+		JournalRecord recc(journal);
+		recc << "dim=" << l << " avmem=" << mem_mb << " tmpmem=" << p_size_mb << " max=" << max
+			 << " stacks=" << cont.numStacks() << "->" << fine_cont.numStacks() << endrec;
+	}
+}
+
+@ Here we simply evaluate |multAndAdd| for the dense
+container. There is no opportunity for tuning.
+
+@<|FaaDiBruno::calculate| folded dense code@>=
+void FaaDiBruno::calculate(const FoldedStackContainer& cont, const FGSContainer& g,
+						   FGSTensor& out)
+{
+	out.zeros();
+	for (int l = 1; l <= out.dimen(); l++) {
+		long int mem = SystemResources::availableMemory();
+		cont.multAndAdd(l, g, out);
+		JournalRecord rec(journal);
+		int mem_mb = mem/1024/1024;
+		rec << "dim=" << l << " avmem=" << mem_mb << endrec;
+	}
+}
+
+@ This is the same as |@<|FaaDiBruno::calculate| folded sparse
+code@>|. The only difference is that we construct an unfolded fine
+container.
+
+@<|FaaDiBruno::calculate| unfolded sparse code@>=
+void FaaDiBruno::calculate(const StackContainer<UGSTensor>& cont,
+						   const TensorContainer<FSSparseTensor>& f,
+						   UGSTensor& out)
+{
+	out.zeros();
+	for (int l = 1; l <= out.dimen(); l++) {
+		int mem_mb, p_size_mb;
+		int max = estimRefinment(out.getDims(), out.nrows(), l, mem_mb, p_size_mb);
+		UnfoldedFineContainer fine_cont(cont, max);
+		fine_cont.multAndAdd(l, f, out);
+		JournalRecord recc(journal);
+		recc << "dim=" << l << " avmem=" << mem_mb << " tmpmem=" << p_size_mb << " max=" << max
+			 << " stacks=" << cont.numStacks() << "->" << fine_cont.numStacks() << endrec;
+	}
+}
+
+@ Again, no tuning opportunity here.
+@<|FaaDiBruno::calculate| unfolded dense code@>=
+void FaaDiBruno::calculate(const UnfoldedStackContainer& cont, const UGSContainer& g,
+					   UGSTensor& out)
+{
+	out.zeros();
+	for (int l = 1; l <= out.dimen(); l++) {
+		long int mem = SystemResources::availableMemory();
+		cont.multAndAdd(l, g, out);
+		JournalRecord rec(journal);
+		int mem_mb = mem/1024/1024;
+		rec << "dim=" << l << " avmem=" << mem_mb << endrec;
+	}
+}
+
+@ This function returns the maximum number of rows used for refinement of
+the stacked container. We want to set the maximum so that the expected
+memory consumption for the given number of parallel threads stays below
+the available memory. On the other hand we do not want to be too
+pessimistic, since a very fine refinement can be very slow.
+
+Besides the memory needed for a dense unfolded slice of a tensor from
+|f|, each thread needs |magic_mult*per_size| bytes of memory. In the
+worst case, |magic_mult| would be equal to 2; this means memory
+|per_size| for the target temporary (permuted symmetry) tensor plus one
+copy for an intermediate result. However, this turns out to be too
+pessimistic, so we set |magic_mult| to 1.5. The memory for the permuted
+symmetry temporary tensor |per_size| is estimated as a weighted
+average of the unfolded memory of the |out| tensor and the unfolded
+memory of a symmetric tensor with the largest coordinate size. Some
+experiments showed that the best combination of the two is to take
+100\% of the latter, so we set |lambda| to zero.
+
+The |max| number of rows in the refined |cont| must be such that each
+slice fits into the remaining memory. The number of columns of a slice
+is never greater than $max^l$. (This is not exactly true, since stacks
+corresponding to unit/zero matrices cannot be further refined.) We get
+the equation:
+
+$$nthreads\cdot max^l\cdot 8\cdot r = mem -
+magic\_mult\cdot nthreads\cdot per\_size,$$
+where |mem| is available memory in bytes, |nthreads| is a number of
+threads, $r$ is a number of rows, and $8$ is |sizeof(double)|.
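+Solving for |max| and rounding down gives
+$$max=\left\lfloor\left({mem-magic\_mult\cdot nthreads\cdot per\_size
+\over nthreads\cdot 8\cdot r}\right)^{1/l}\right\rfloor,$$
+which is what the code below computes.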
+
+If the right hand side is less than zero, we set |max| to 10, just to
+let it do something.
+
+@<|FaaDiBruno::estimRefinment| code@>=
+int FaaDiBruno::estimRefinment(const TensorDimens& tdims, int nr, int l,
+							   int& avmem_mb, int& tmpmem_mb)
+{
+	int nthreads = THREAD_GROUP::max_parallel_threads;
+	long int per_size1 = tdims.calcUnfoldMaxOffset();
+	long int per_size2 = (long int)pow((double)tdims.getNVS().getMax(), l);
+	double lambda = 0.0;
+	long int per_size = sizeof(double)*nr
+		*(long int)(lambda*per_size1+(1-lambda)*per_size2);
+	long int mem = SystemResources::availableMemory();
+	int max = 0;
+	double num_cols = ((double)(mem-magic_mult*nthreads*per_size))
+		/nthreads/sizeof(double)/nr;
+	if (num_cols > 0) {
+		double maxd = pow(num_cols, ((double)1)/l);
+		max = (int)floor(maxd);
+	}
+	if (max == 0) {
+		max = 10;
+		JournalRecord rec(journal);
+		rec << "dim=" << l << " run out of memory, imposing max=" << max;
+		if (nthreads > 1)
+			rec << " (decrease number of threads)";
+		rec << endrec;
+	}
+	avmem_mb = mem/1024/1024;
+	tmpmem_mb = (nthreads*per_size)/1024/1024;
+	return max;
+}
+
+
+@ End of {\tt faa\_di\_bruno.cpp} file.
diff --git a/dynare++/kord/faa_di_bruno.hweb b/dynare++/kord/faa_di_bruno.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..773ed0d263ab2df5aa927cec8e6176b6325138e5
--- /dev/null
+++ b/dynare++/kord/faa_di_bruno.hweb
@@ -0,0 +1,49 @@
+@q $Id: faa_di_bruno.hweb 744 2006-05-09 13:16:07Z kamenik $ @>
+@q Copyright 2005, Ondra Kamenik @>
+
+@*2 Faa Di Bruno evaluator. Start of {\tt faa\_di\_bruno.h} file.
+
+This defines a class which implements Faa Di Bruno Formula
+$$\left[B_{s^k}\right]_{\alpha_1\ldots\alpha_l}=\left[f_{z^l}\right]_{\beta_1\ldots\beta_l}
+\sum_{c\in M_{l,k}}\prod_{m=1}^l\left[z_{s^k(c_m)}\right]^{\beta_m}_{c_m(\alpha)}$$
+where $s^k$ is a general symmetry of dimension $k$ and $z$ is a stack of functions.
+
+@s FaaDiBruno int
+
+@c
+#ifndef FAA_DI_BRUNO_H
+#define FAA_DI_BRUNO_H
+
+#include "journal.h"
+#include "stack_container.h"
+#include "t_container.h"
+#include "sparse_tensor.h"
+#include "gs_tensor.h"
+
+@<|FaaDiBruno| class declaration@>;
+
+#endif
+
+@ Nothing special here. See |@<|FaaDiBruno::calculate| folded sparse
+code@>| for the rationale behind |magic_mult|.
+
+@<|FaaDiBruno| class declaration@>=
+class FaaDiBruno {
+	Journal& journal;
+public:@;
+	FaaDiBruno(Journal& jr)
+		: journal(jr)@+ {}
+	void calculate(const StackContainer<FGSTensor>& cont, const TensorContainer<FSSparseTensor>& f,
+				   FGSTensor& out);
+	void calculate(const FoldedStackContainer& cont, const FGSContainer& g,
+				   FGSTensor& out);
+	void calculate(const StackContainer<UGSTensor>& cont, const TensorContainer<FSSparseTensor>& f,
+				   UGSTensor& out);
+	void calculate(const UnfoldedStackContainer& cont, const UGSContainer& g,
+				   UGSTensor& out);
+protected:@;
+	int estimRefinment(const TensorDimens& tdims, int nr, int l, int& avmem_mb, int& tmpmem_mb);
+	static double magic_mult;
+};
+
+@ End of {\tt faa\_di\_bruno.h} file.
diff --git a/dynare++/kord/first_order.cweb b/dynare++/kord/first_order.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..40deafe55c2541cc8ee63ada25c18ef3ca3ff648
--- /dev/null
+++ b/dynare++/kord/first_order.cweb
@@ -0,0 +1,302 @@
+@q $Id: first_order.cweb 2351 2009-09-03 14:58:03Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@ Start of {\tt first\_order.cpp} file.
+
+@c
+
+#include "kord_exception.h"
+#include "first_order.h"
+#include "cpplapack.h"
+
+double qz_criterium = 1.000001;
+@<|order_eigs| function code@>;
+@<|FirstOrder::solve| code@>;
+@<|FirstOrder::journalEigs| code@>;
+
+@ This is a function which selects the eigenvalue pairs used by
+|LAPACK_dgges|. See the documentation of DGGES for details. Here we want
+to select (return true) the pairs for which
+$\vert\alpha\vert<\vert\beta\vert$ (up to the |qz_criterium| tolerance).
+
+@<|order_eigs| function code@>=
+int order_eigs(const double* alphar, const double* alphai, const double* beta)
+{
+	return (*alphar * *alphar + *alphai * *alphai < *beta * *beta * qz_criterium);
+}
+
+
+@ Here we solve the linear approximation. The result are the matrices
+$g_{y^*}$ and $g_u$. The method solves the first derivatives of $g$ so
+that the following equation would be true:
+$$E_t[F(y^*_{t-1},u_t,u_{t+1},\sigma)] =
+E_t[f(g^{**}(g^*(y_{t-1}^*,u_t,\sigma), u_{t+1}, \sigma), g(y_{t-1}^*,u_t,\sigma),
+y^*_{t-1},u_t)]=0$$
+where $f$ is a given system of equations.
+
+It is known that $g_{y^*}$ is given by $F_{y^*}=0$, $g_u$ is given by
+$F_u=0$, and $g_\sigma$ is zero. The only input to the method are the
+derivatives |fd| of the system $f$, and partitioning of the vector $y$
+(from object).
+
+@<|FirstOrder::solve| code@>=
+void FirstOrder::solve(const TwoDMatrix& fd)
+{
+	JournalRecordPair pa(journal);
+	pa << "Recovering first order derivatives " << endrec;
+
+	::qz_criterium = FirstOrder::qz_criterium;
+
+	@<solve derivatives |gy|@>;
+	@<solve derivatives |gu|@>;
+	journalEigs();
+
+	if (! gy.isFinite() || ! gu.isFinite()) {
+		throw KordException(__FILE__, __LINE__,
+						  "NaN or Inf asserted in first order derivatives in FirstOrder::solve");
+	}
+}
+
+@ The derivatives $g_{y^*}$ are retrieved from the equation
+$F_{y^*}=0$. The calculation proceeds as follows:
+
+\orderedlist
+
+\li For each variable appearing at both $t-1$ and $t+1$ we add a dummy
+variable, so that the predetermined and forward looking variables would
+be disjoint. That is, the matrix of the first derivatives of the
+system written as:
+$$\left[\matrix{f_{y^{**}_+}&f_{ys}&f_{yp}&f_{yb}&f_{yf}&f_{y^*_-}}\right],$$
+where $f_{ys}$, $f_{yp}$, $f_{yb}$, and $f_{yf}$ are the derivatives with
+respect to the static, predetermined, both, and forward looking variables
+at time $t$, is rewritten as the matrix:
+$$\left[
+\matrix{f_{y^{**}_+}&f_{ys}&f_{yp}&f_{yb}&0&f_{yf}&f_{y^*_-}\cr
+        0           &0     &0     &I   &-I&0    &0}
+ \right],$$
+where the second line has number of rows equal to the number of both variables.
+
+\li Next, provided that forward looking and predetermined are
+disjoint, the equation $F_{y^*}=0$ is written as:
+$$\left[f_{y^{**}_+}\right]\left[g_{y^*}^{**}\right]\left[g_{y^*}^*\right]
++\left[f_{ys}\right]\left[g^s_{y^*}\right]
++\left[f_{y^*}\right]\left[g^*_{y^*}\right]
++\left[f_{y^{**}}\right]\left[g^{**}_{y^*}\right]+\left[f_{y^*_-}\right]=0$$
+This is rewritten as
+$$\left[\matrix{f_{y^*}&0&f_{y^{**}_+}}\right]
+\left[\matrix{I\cr g^s_{y^*}\cr g^{**}_{y^*}}\right]\left[g_{y^*}^*\right]+ 
+\left[\matrix{f_{y^*_-}&f_{ys}&f_{y^{**}}}\right]
+\left[\matrix{I\cr g^s_{y^*}\cr g^{**}_{y^*}}\right]=0
+$$
+Now, in the above equation, there are the auxiliary variables standing
+for copies of both variables at time $t+1$. This equation is then
+rewritten as:
+$$
+\left[\matrix{f_{yp}&f_{yb}&0&f_{y^{**}_+}\cr 0&I&0&0}\right]
+\left[\matrix{I\cr g^s_{y^*}\cr g^{**}_{y^*}}\right]\left[g_{y^*}^*\right]+ 
+\left[\matrix{f_{y^*_-}&f_{ys}&0&f_{yf}\cr 0&0&-I&0}\right]
+\left[\matrix{I\cr g^s_{y^*}\cr g^{**}_{y^*}}\right]=0
+$$
+The two matrices are denoted as $D$ and $-E$, so the equation takes the form:
+$$D\left[\matrix{I\cr g^s_{y^*}\cr g^{**}_{y^*}}\right]\left[g_{y^*}^*\right]=
+E\left[\matrix{I\cr g^s_{y^*}\cr g^{**}_{y^*}}\right]$$
+
+\li Next we solve the equation by Generalized Schur decomposition:
+$$
+\left[\matrix{T_{11}&T_{12}\cr 0&T_{22}}\right]
+\left[\matrix{Z_{11}^T&Z_{21}^T\cr Z_{12}^T&Z_{22}^T}\right]
+\left[\matrix{I\cr X}\right]\left[g_{y^*}^*\right]=
+\left[\matrix{S_{11}&S_{12}\cr 0&S_{22}}\right]
+\left[\matrix{Z_{11}^T&Z_{21}^T\cr Z_{12}^T&Z_{22}^T}\right]
+\left[\matrix{I\cr X}\right]
+$$
+We reorder the eigenvalue pairs so that those $S_{ii}/T_{ii}$ with modulus
+less than one would be in the left-upper part.
+
+\li The Blanchard--Kahn stability argument implies that the pairs
+with modulus less than one will be in, and only in, $S_{11}/T_{11}$.
+The exploding paths are then eliminated by requiring
+$$
+\left[\matrix{Z_{11}^T&Z_{21}^T\cr Z_{12}^T&Z_{22}^T}\right]
+\left[\matrix{I\cr X}\right]\left[g_{y^*}^*\right]=
+\left[\matrix{Y\cr 0}\right]
+$$
+From this we have, $Y=Z_{11}^{-1}$, and $X=Z_{21}Y$, or equivalently
+$X=-Z_{22}^{-T}Z_{12}^T$.  From the equation, we get
+$\left[g_{y^*}^*\right]=Y^{-1}T_{11}^{-1}S_{11}Y$, which is
+$Z_{11}T_{11}^{-1}S_{11}Z_{11}^{-1}$.
+
+\li We then copy the derivatives to storage |gy|. Note that the
+derivatives of both variables are in $X$ and in
+$\left[g_{y^*}^*\right]$, so we check whether the two submatrices are
+the same. The difference is only numerical error.
+
+\endorderedlist
+
+@<solve derivatives |gy|@>=
+	@<setup submatrices of |f|@>;
+	@<form matrix $D$@>;
+	@<form matrix $E$@>;
+	@<solve generalized Schur@>;
+	@<make submatrices of right space@>;
+	@<calculate derivatives of static and forward@>;
+	@<calculate derivatives of predetermined@>;
+	@<copy derivatives to |gy|@>;
+	@<check difference for derivatives of both@>;
+
+
+@ Here we set up the submatrices of the derivatives |fd|.
+@<setup submatrices of |f|@>=
+	int off = 0;
+	ConstTwoDMatrix fyplus(fd, off, ypart.nyss());
+	off += ypart.nyss();
+	ConstTwoDMatrix fyszero(fd, off, ypart.nstat);
+	off += ypart.nstat;
+	ConstTwoDMatrix fypzero(fd, off, ypart.npred);
+	off += ypart.npred;
+	ConstTwoDMatrix fybzero(fd, off, ypart.nboth);
+	off += ypart.nboth;
+	ConstTwoDMatrix fyfzero(fd, off, ypart.nforw);
+	off += ypart.nforw;
+	ConstTwoDMatrix fymins(fd, off, ypart.nys());
+	off += ypart.nys();
+	ConstTwoDMatrix fuzero(fd, off, nu);
+	off += nu;
+
+@ 
+@<form matrix $D$@>=
+	int n = ypart.ny()+ypart.nboth;
+	TwoDMatrix matD(n, n);
+	matD.zeros();
+	matD.place(fypzero, 0, 0);
+	matD.place(fybzero, 0, ypart.npred);
+	matD.place(fyplus, 0, ypart.nys()+ypart.nstat);
+	for (int i = 0; i < ypart.nboth; i++)
+		matD.get(ypart.ny()+i, ypart.npred+i) = 1.0;
+
+@ 
+@<form matrix $E$@>=
+	TwoDMatrix matE(n, n);
+	matE.zeros();
+	matE.place(fymins, 0, 0);
+	matE.place(fyszero, 0, ypart.nys());
+	matE.place(fyfzero, 0, ypart.nys()+ypart.nstat+ypart.nboth);
+	for (int i = 0; i < ypart.nboth; i++)
+		matE.get(ypart.ny()+i, ypart.nys()+ypart.nstat+i) = -1.0;
+	matE.mult(-1.0);
+
+@ 
+@<solve generalized Schur@>=
+	TwoDMatrix vsl(n, n);
+	TwoDMatrix vsr(n, n);
+	int lwork = 100*n+16;
+	Vector work(lwork);
+	IntSequence bwork(n);
+	int info;
+	LAPACK_dgges("N", "V", "S", order_eigs, &n, matE.getData().base(), &n,
+				 matD.getData().base(), &n, &sdim, alphar.base(), alphai.base(),
+				 beta.base(), vsl.getData().base(), &n, vsr.getData().base(), &n,
+				 work.base(), &lwork, &(bwork[0]), &info);
+	bk_cond = (sdim == ypart.nys());
+
+
+@ Here we set up the submatrices of the matrix $Z$.
+@<make submatrices of right space@>=
+	ConstGeneralMatrix z11(vsr, 0, 0, ypart.nys(), ypart.nys());
+	ConstGeneralMatrix z12(vsr, 0, ypart.nys(), ypart.nys(), n-ypart.nys());
+	ConstGeneralMatrix z21(vsr, ypart.nys(), 0, n-ypart.nys(), ypart.nys());
+	ConstGeneralMatrix z22(vsr, ypart.nys(), ypart.nys(), n-ypart.nys(), n-ypart.nys());
+	
+@ Here we calculate $X=-Z_{22}^{-T}Z_{12}^T$, where $X$ is |sfder| in the code.
+@<calculate derivatives of static and forward@>=
+	GeneralMatrix sfder(z12, "transpose");
+	z22.multInvLeftTrans(sfder);
+	sfder.mult(-1);
+
+@ Here we calculate
+$g_{y^*}^*=Z_{11}T^{-1}_{11}S_{11}Z_{11}^{-1}
+=Z_{11}T^{-1}_{11}(Z_{11}^{-T}S^T_{11})^T$.
+
+@<calculate derivatives of predetermined@>=
+    ConstGeneralMatrix s11(matE, 0, 0, ypart.nys(), ypart.nys());
+	ConstGeneralMatrix t11(matD, 0, 0, ypart.nys(), ypart.nys());
+	GeneralMatrix dumm(s11, "transpose");
+	z11.multInvLeftTrans(dumm);
+	GeneralMatrix preder(dumm, "transpose");
+	t11.multInvLeft(preder);
+	preder.multLeft(z11);
+
+@ 
+@<copy derivatives to |gy|@>=
+	gy.place(preder, ypart.nstat, 0);
+	GeneralMatrix sder(sfder, 0, 0, ypart.nstat, ypart.nys());
+	gy.place(sder, 0, 0);
+	GeneralMatrix fder(sfder, ypart.nstat+ypart.nboth, 0, ypart.nforw, ypart.nys());
+	gy.place(fder, ypart.nstat+ypart.nys(), 0);
+
+@ 
+@<check difference for derivatives of both@>=
+	GeneralMatrix bder((const GeneralMatrix&)sfder, ypart.nstat, 0, ypart.nboth, ypart.nys());
+	GeneralMatrix bder2(preder, ypart.npred, 0, ypart.nboth, ypart.nys());
+	bder.add(-1, bder2);
+	b_error = bder.getData().getMax();
+
+@ The equation $F_u=0$ can be written as
+$$
+\left[f_{y^{**}_+}\right]\left[g^{**}_{y^*}\right]\left[g_u^*\right]+
+\left[f_y\right]\left[g_u\right]+\left[f_u\right]=0
+$$
+and rewritten as
+$$
+\left[f_y +
+\left[\matrix{0&f_{y^{**}_+}g^{**}_{y^*}&0}\right]\right]g_u=-f_u
+$$
+This is exactly what is done here. The matrix
+$\left[f_y +\left[\matrix{0&f_{y^{**}_+}g^{**}_{y^*}&0}\right]\right]$ is |matA|
+in the code.
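+Equivalently,
+$$g_u=-\left[f_y+\left[\matrix{0&f_{y^{**}_+}g^{**}_{y^*}&0}\right]\right]^{-1}f_u,$$
+which the code computes by filling |gu| with $-f_u$ and then applying
+|multInvLeft| of |matA|.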
+
+@<solve derivatives |gu|@>=
+	GeneralMatrix matA(ypart.ny(), ypart.ny());
+	matA.zeros();
+	ConstGeneralMatrix gss(gy, ypart.nstat+ypart.npred, 0, ypart.nyss(), ypart.nys());
+	GeneralMatrix aux(fyplus, gss);
+	matA.place(aux, 0, ypart.nstat);
+	ConstGeneralMatrix fyzero(fd, 0, ypart.nyss(), ypart.ny(), ypart.ny());
+	matA.add(1.0, fyzero);
+	gu.zeros();
+	gu.add(-1.0, fuzero);
+	ConstGeneralMatrix(matA).multInvLeft(gu);
+
+@ 
+@<|FirstOrder::journalEigs| code@>=
+void FirstOrder::journalEigs()
+{
+	if (bk_cond) {
+		JournalRecord jr(journal);
+		jr << "Blanchard-Kahn condition satisfied, model stable" << endrec;
+	} else {
+		JournalRecord jr(journal);
+		jr << "Blanchard-Kahn condition not satisfied, model not stable: sdim=" << sdim 
+		   << " " << "npred=" << ypart.nys() << endrec;
+	}
+	if (!bk_cond) {
+		for (int i = 0; i < alphar.length(); i++) {
+			if (i == sdim || i == ypart.nys()) {
+				JournalRecord jr(journal);
+				jr << "---------------------------------------------------- ";
+				if (i == sdim)
+					jr << "sdim";
+				else
+					jr << "npred";
+				jr << endrec;
+			}
+			JournalRecord jr(journal);
+			double mod = sqrt(alphar[i]*alphar[i]+alphai[i]*alphai[i]);
+			mod = mod/round(100000*std::abs(beta[i]))*100000;
+			jr << i << "\t(" << alphar[i] << "," << alphai[i] << ") / " << beta[i]
+			   << "  \t" << mod << endrec; 
+		}
+	}
+}
+
+
+@ End of {\tt first\_order.cpp} file.
diff --git a/dynare++/kord/first_order.hweb b/dynare++/kord/first_order.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..3b5e78ca7909382b968503f425bfa783eca13c85
--- /dev/null
+++ b/dynare++/kord/first_order.hweb
@@ -0,0 +1,85 @@
+@q $Id: first_order.hweb 2345 2009-03-24 11:50:48Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@*2 First order at deterministic steady. Start of {\tt first\_order.h} file.
+
+@s GeneralMatrix int
+@s ConstGeneralMatrix int
+@s FirstOrder int
+@s FirstOrderDerivs int
+@c
+
+#ifndef FIRST_ORDER_H
+#define FIRST_ORDER_H
+
+#include "korder.h"
+
+@<|FirstOrder| class declaration@>;
+@<|FirstOrderDerivs| class declaration@>;
+
+#endif
+
+@ 
+@<|FirstOrder| class declaration@>=
+template<int> class FirstOrderDerivs;
+class FirstOrder {
+	template <int> friend class FirstOrderDerivs;
+	PartitionY ypart;
+	int nu;
+	TwoDMatrix gy;
+	TwoDMatrix gu;
+	bool bk_cond;
+	double b_error;
+	int sdim;
+	Vector alphar;
+	Vector alphai;
+	Vector beta;
+	double qz_criterium;
+	Journal& journal;
+public:@;
+	FirstOrder(int num_stat, int num_pred, int num_both, int num_forw,
+			   int num_u, const FSSparseTensor& f, Journal& jr, double qz_crit)
+		: ypart(num_stat, num_pred, num_both, num_forw),
+		  nu(num_u),
+		  gy(ypart.ny(), ypart.nys()),
+		  gu(ypart.ny(), nu),
+		  alphar(ypart.ny()+ypart.nboth),
+		  alphai(ypart.ny()+ypart.nboth),
+		  beta(ypart.ny()+ypart.nboth),
+		  qz_criterium(qz_crit),
+		  journal(jr)
+		{@+ solve(FFSTensor(f)); @+}
+	bool isStable() const
+		{@+ return bk_cond;@+}
+	const TwoDMatrix& getGy() const
+		{@+ return gy;@+}
+	const TwoDMatrix& getGu() const
+		{@+ return gu;@+}
+protected:@;
+	void solve(const TwoDMatrix& f);
+	void journalEigs();
+};
+
+@ This class only converts the derivatives $g_{y^*}$ and $g_u$ to a
+folded or unfolded container.
+
+@<|FirstOrderDerivs| class declaration@>=
+template <int t>
+class FirstOrderDerivs : public ctraits<t>::Tg {
+public:@;
+	FirstOrderDerivs(const FirstOrder& fo)
+		: ctraits<t>::Tg(4)
+		{
+			IntSequence nvs(4);
+			nvs[0] = fo.ypart.nys(); nvs[1] = fo.nu; nvs[2] = fo.nu; nvs[3] = 1;
+			_Ttensor* ten = new _Ttensor(fo.ypart.ny(), TensorDimens(Symmetry(1,0,0,0),nvs));
+			ten->zeros(); ten->add(1.0, fo.gy);
+			insert(ten);
+			ten = new _Ttensor(fo.ypart.ny(), TensorDimens(Symmetry(0,1,0,0), nvs));
+			ten->zeros(); ten->add(1.0, fo.gu);
+			insert(ten);
+		}
+};
+
+
+@ End of {\tt first\_order.h} file.
diff --git a/dynare++/kord/global_check.cweb b/dynare++/kord/global_check.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..299ddbb65b617190e1df77c55dcbfec0d7b80464
--- /dev/null
+++ b/dynare++/kord/global_check.cweb
@@ -0,0 +1,443 @@
+@q $Id: global_check.cweb 1830 2008-05-18 20:06:40Z kamenik $ @>
+@q Copyright 2005, Ondra Kamenik @>
+
+@ Start of {\tt global\_check.cpp} file.
+
+@c
+#include "SymSchurDecomp.h"
+
+#include "global_check.h"
+
+#include "smolyak.h"
+#include "product.h"
+#include "quasi_mcarlo.h"
+
+#include "cpplapack.h"
+
+@<|ResidFunction| constructor code@>;
+@<|ResidFunction| copy constructor code@>;
+@<|ResidFunction| destructor code@>;
+@<|ResidFunction::setYU| code@>;
+@<|ResidFunction::eval| code@>;
+@<|GlobalChecker::check| vector code@>;
+@<|GlobalChecker::check| matrix code@>;
+@<|GlobalChecker::checkAlongShocksAndSave| code@>;
+@<|GlobalChecker::checkOnEllipseAndSave| code@>;
+@<|GlobalChecker::checkAlongSimulationAndSave| code@>;
+
+@ Here we just set a reference to the approximation, and create a new
+|DynamicModel|.
+
+@<|ResidFunction| constructor code@>=
+ResidFunction::ResidFunction(const Approximation& app)
+	: VectorFunction(app.getModel().nexog(), app.getModel().numeq()), approx(app),
+	  model(app.getModel().clone()),
+	  yplus(NULL), ystar(NULL), u(NULL), hss(NULL)
+{
+}
+
+@ 
+@<|ResidFunction| copy constructor code@>=
+ResidFunction::ResidFunction(const ResidFunction& rf)
+	: VectorFunction(rf), approx(rf.approx), model(rf.model->clone()),
+	  yplus(NULL), ystar(NULL), u(NULL), hss(NULL)
+{
+	if (rf.yplus)
+		yplus = new Vector(*(rf.yplus));
+	if (rf.ystar)
+		ystar = new Vector(*(rf.ystar));
+	if (rf.u)
+		u = new Vector(*(rf.u));
+	if (rf.hss)
+		hss = new FTensorPolynomial(*(rf.hss));
+}
+
+
+@ 
+@<|ResidFunction| destructor code@>=
+ResidFunction::~ResidFunction()
+{		
+	delete model;
+	@<delete |y| and |u| dependent data@>;
+}
+
+@ 
+@<delete |y| and |u| dependent data@>=
+	if (yplus)
+		delete yplus;
+	if (ystar)
+		delete ystar;
+	if (u)
+		delete u;
+	if (hss)
+		delete hss;
+
+
+@ This sets $y^*$ and $u$. We have to create |ystar|, |u|, |yplus| and
+|hss|.
+
+@<|ResidFunction::setYU| code@>=
+void ResidFunction::setYU(const Vector& ys, const Vector& xx)
+{
+	@<delete |y| and |u| dependent data@>;
+
+	ystar = new Vector(ys);
+	u = new Vector(xx);
+	yplus = new Vector(model->numeq());
+	approx.getFoldDecisionRule().evaluate(DecisionRule::horner,
+										  *yplus, *ystar, *u);
+
+	@<make a tensor polynomial of in-place subtensors from decision rule@>;
+	@<make |ytmp_star| be a difference of |yplus| from steady@>;
+	@<make |hss| and add steady to it@>;
+}
+
+@ Here we use a dirty trick of converting |const| to non-|const| in
+order to obtain a polynomial of in-place subtensors corresponding to the
+non-predetermined variables. However, this new non-|const| polynomial
+will only be used for the construction of |hss| and will be used in a
+|const| context. So this dirty trick is safe.
+
+Note that there is always a folded decision rule in |Approximation|.
+
+@<make a tensor polynomial of in-place subtensors from decision rule@>=
+	union {const FoldDecisionRule* c; FoldDecisionRule* n;} dr;
+	dr.c = &(approx.getFoldDecisionRule());
+	FTensorPolynomial dr_ss(model->nstat()+model->npred(), model->nboth()+model->nforw(),
+							*(dr.n));
+
+@ 
+@<make |ytmp_star| be a difference of |yplus| from steady@>=
+	Vector ytmp_star(ConstVector(*yplus, model->nstat(), model->npred()+model->nboth()));
+	ConstVector ysteady_star(dr.c->getSteady(), model->nstat(),
+							 model->npred()+model->nboth());
+	ytmp_star.add(-1.0, ysteady_star);
+
+@ Here is the |const| context of |dr_ss|.
+@<make |hss| and add steady to it@>=
+	hss = new FTensorPolynomial(dr_ss, ytmp_star);
+	ConstVector ysteady_ss(dr.c->getSteady(), model->nstat()+model->npred(),
+						   model->nboth()+model->nforw());
+	if (hss->check(Symmetry(0))) {
+		hss->get(Symmetry(0))->getData().add(1.0, ysteady_ss);
+	} else {
+		FFSTensor* ten = new FFSTensor(hss->nrows(), hss->nvars(), 0);
+		ten->getData() = ysteady_ss;
+		hss->insert(ten);
+	}
+
+@ Here we evaluate the residual $F(y^*,u,u')$. We have to evaluate |hss|
+for $u'=$|point| and then we evaluate the system $f$.
+
+@<|ResidFunction::eval| code@>=
+void ResidFunction::eval(const Vector& point, const ParameterSignal& sig, Vector& out)
+{
+	KORD_RAISE_IF(point.length() != hss->nvars(),
+				  "Wrong dimension of input vector in ResidFunction::eval");
+	KORD_RAISE_IF(out.length() != model->numeq(),
+				  "Wrong dimension of output vector in ResidFunction::eval");
+	Vector yss(hss->nrows());
+	hss->evalHorner(yss, point);
+	model->evaluateSystem(out, *ystar, *yplus, yss, *u);
+}
+
+@ This checks the $E[F(y^*,u,u')]$ for a given $y^*$ and $u$ by
+integrating with a given quadrature. Note that the input |ys| is $y^*$
+not whole $y$.
+
+@<|GlobalChecker::check| vector code@>=
+void GlobalChecker::check(const Quadrature& quad, int level,
+						  const ConstVector& ys, const ConstVector& x, Vector& out)
+{
+	for (int ifunc = 0; ifunc < vfs.getNum(); ifunc++)
+		((GResidFunction&)(vfs.getFunc(ifunc))).setYU(ys, x);
+	quad.integrate(vfs, level, out);
+}
+
+@ This method is a bulk version of |@<|GlobalChecker::check| vector
+code@>|. It decides between Smolyak and product quadrature according
+to the |max_evals| constraint.
+
+Note that |y| can be either full (all endogenous variables including
+static and forward looking), or just $y^*$ (state variables). The
+method is able to recognize it.
+
+@<|GlobalChecker::check| matrix code@>=
+void GlobalChecker::check(int max_evals, const ConstTwoDMatrix& y,
+						  const ConstTwoDMatrix& x, TwoDMatrix& out)
+{
+	JournalRecordPair pa(journal);
+	pa << "Checking approximation error for " << y.ncols()
+	   << " states with at most " << max_evals << " evaluations" << endrec;
+
+	@<decide about type of quadrature@>;
+	Quadrature* quad;
+	int lev;
+	@<create the quadrature and report the decision@>;
+	@<check all column of |y| and |x|@>;
+	delete quad;
+}
+
+@ 
+@<decide about type of quadrature@>=
+	GaussHermite gh;
+
+	SmolyakQuadrature dummy_sq(model.nexog(), 1, gh);
+	int smol_evals;
+	int smol_level;
+	dummy_sq.designLevelForEvals(max_evals, smol_level, smol_evals);
+
+	ProductQuadrature dummy_pq(model.nexog(), gh);
+	int prod_evals;
+	int prod_level;
+	dummy_pq.designLevelForEvals(max_evals, prod_level, prod_evals);
+
+	bool take_smolyak = (smol_evals < prod_evals) && (smol_level >= prod_level-1);
+
+@ 
+@<create the quadrature and report the decision@>=
+	if (take_smolyak) {
+		quad = new SmolyakQuadrature(model.nexog(), smol_level, gh);
+		lev = smol_level;
+		JournalRecord rec(journal);
+		rec << "Selected Smolyak (level,evals)=(" << smol_level << ","
+			<< smol_evals << ") over product (" << prod_level << ","
+			<< prod_evals << ")" << endrec;
+	} else {
+		quad = new ProductQuadrature(model.nexog(), gh);
+		lev = prod_level;
+		JournalRecord rec(journal);
+		rec << "Selected product (level,evals)=(" << prod_level << ","
+			<< prod_evals << ") over Smolyak (" << smol_level << ","
+			<< smol_evals << ")" << endrec;
+	}
+
+@ 
+@<check all column of |y| and |x|@>=
+	int first_row = (y.nrows() == model.numeq())? model.nstat() : 0;
+	ConstTwoDMatrix ysmat(y, first_row, 0, model.npred()+model.nboth(), y.ncols());
+	for (int j = 0; j < y.ncols(); j++) {
+		ConstVector yj(ysmat, j);
+		ConstVector xj(x, j);
+		Vector outj(out, j);
+		check(*quad, lev, yj, xj, outj);
+	}
+
+
+
+@ This method checks the error of the approximation by evaluating the
+residual $E[F(y^*,u,u')\vert y^*,u]$ with $y^*$ set to the steady state
+and $u$ varying. We go through all elements of $u$ and vary each of them
+from $-mult\cdot\sigma$ to $mult\cdot\sigma$ in |m| steps on each side
+of zero.
+
+@<|GlobalChecker::checkAlongShocksAndSave| code@>=
+void GlobalChecker::checkAlongShocksAndSave(FILE* fd, const char* prefix,
+											int m, double mult, int max_evals)
+{
+	JournalRecordPair pa(journal);
+	pa << "Calculating errors along shocks +/- "
+	   << mult << " std errors, granularity " << m << endrec;
+	@<setup |y_mat| of steady states for checking@>;
+	@<setup |exo_mat| for checking@>;
+
+	TwoDMatrix errors(model.numeq(), 2*m*model.nexog()+1);
+	check(max_evals, y_mat, exo_mat, errors);
+
+	@<report errors along shock and save them@>;
+}
+
+@ 
+@<setup |y_mat| of steady states for checking@>=
+	TwoDMatrix y_mat(model.numeq(), 2*m*model.nexog()+1);
+	for (int j = 0; j < 2*m*model.nexog()+1; j++) {
+		Vector yj(y_mat, j);
+		yj = (const Vector&)model.getSteady();
+	}
+
+@ 
+@<setup |exo_mat| for checking@>=
+	TwoDMatrix exo_mat(model.nexog(), 2*m*model.nexog()+1);
+	exo_mat.zeros();
+	for (int ishock = 0; ishock < model.nexog(); ishock++) {
+		double max_sigma = sqrt(model.getVcov().get(ishock,ishock));
+		for (int j = 0; j < 2*m; j++) {
+			int jmult = (j < m)? j-m: j-m+1;
+			exo_mat.get(ishock, 1+2*m*ishock+j) = 
+				mult*jmult*max_sigma/m;
+		}
+	}
+
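+@ For illustration (the numbers here are purely hypothetical), take
+|m|=2 and |mult|=3. Column $0$ of |exo_mat| is the shared zero-shock
+point, and for shock $i$ the columns $1+4i,\ldots,4+4i$ hold the
+values $-3\sigma_i$, $-1.5\sigma_i$, $1.5\sigma_i$ and $3\sigma_i$,
+where $\sigma_i$ is the standard error of that shock.
+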
+@ 
+@<report errors along shock and save them@>=
+	TwoDMatrix res(model.nexog(), 2*m+1);
+	JournalRecord rec(journal);
+	rec << "Shock    value         error" << endrec;
+	ConstVector err0(errors,0);
+	char shock[9];
+	char erbuf[17];
+	for (int ishock = 0; ishock < model.nexog(); ishock++) {
+		TwoDMatrix err_out(model.numeq(), 2*m+1);
+		sprintf(shock, "%-8s", model.getExogNames().getName(ishock));
+		for (int j = 0; j < 2*m+1; j++) {
+			int jj;
+			Vector error(err_out, j);
+			if (j != m) {
+				if (j < m)
+					jj = 1 + 2*m*ishock+j;
+				else
+					jj = 1 + 2*m*ishock+j-1;
+				ConstVector coljj(errors,jj);
+				error = coljj;
+			} else {
+				jj = 0;
+				error = err0;
+			}
+			JournalRecord rec1(journal);
+			sprintf(erbuf,"%12.7g    ", error.getMax());
+			rec1 << shock << " " << exo_mat.get(ishock,jj)
+				<< "\t" << erbuf << endrec;
+		}
+		char tmp[100];
+		sprintf(tmp, "%s_shock_%s_errors", prefix, model.getExogNames().getName(ishock));
+		err_out.writeMat4(fd, tmp);
+	}
+
+
+@ This method checks the errors on an ellipse of the endogenous states
+(predetermined variables). The ellipse is shaped according to the
+covariance matrix of the endogenous variables based on the first order
+approximation and scaled by |mult|. The points on the
+ellipse are chosen as polar images of a low discrepancy grid in a
+cube.
+
+The method works as follows. First, we calculate the symmetric Schur
+factor of the covariance matrix of the states. Second, we generate low
+discrepancy points on the unit sphere. Third, we transform the sphere
+with the variance-covariance matrix factor and the multiplier |mult|,
+and initialize the matrix of $u_t$ to zeros. Fourth, we run the
+|check| method and save the results.
+
+@<|GlobalChecker::checkOnEllipseAndSave| code@>=
+void GlobalChecker::checkOnEllipseAndSave(FILE* fd, const char* prefix,
+										  int m, double mult, int max_evals)
+{
+	JournalRecordPair pa(journal);
+	pa << "Calculating errors at " << m
+	   << " ellipse points scaled by " << mult << endrec; 
+	@<make factor of covariance of variables@>;
+	@<put low discrepancy sphere points to |ymat|@>;
+	@<transform sphere |ymat| and prepare |umat| for checking@>;
+	@<check on ellipse and save@>;
+}
+
+
+@ Here we set |ycovfac| to the symmetric Schur decomposition factor of
+a submatrix of covariances of all endogenous variables. The submatrix
+corresponds to state variables (predetermined plus both).
+
+@<make factor of covariance of variables@>=
+	TwoDMatrix* ycov = approx.calcYCov();
+	TwoDMatrix ycovpred((const TwoDMatrix&)*ycov, model.nstat(), model.nstat(),
+						model.npred()+model.nboth(), model.npred()+model.nboth());
+	delete ycov;
+	SymSchurDecomp ssd(ycovpred);
+	ssd.correctDefinitness(1.e-05);
+	TwoDMatrix ycovfac(ycovpred.nrows(), ycovpred.ncols());
+	KORD_RAISE_IF(! ssd.isPositiveSemidefinite(),
+				  "Covariance matrix of the states not positive "
+				  "semidefinite in GlobalChecker::checkOnEllipseAndSave");
+	ssd.getFactor(ycovfac);
+
+
+@ Here we first calculate the dimension |d| of the sphere, which is the
+number of state variables minus one. We go through the |d|-dimensional
+cube $\langle 0,1\rangle^d$ by |QMCarloCubeQuadrature| and make a
+polar transformation to the sphere. The polar transformation $f^i$ can
+be written recursively with respect to the dimension $i$ as:
+$$\eqalign{
+f^0() &= \left[1\right]\cr
+f^i(x_1,\ldots,x_i) &=
+\left[\matrix{\cos(2\pi x_i)\cdot f^{i-1}(x_1,\ldots,x_{i-1})\cr \sin(2\pi x_i)}\right]
+}$$
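+For instance, for $d=2$ the recursion unwinds to
+$$f^2(x_1,x_2)=\left[\matrix{\cos(2\pi x_2)\cos(2\pi x_1)\cr
+\cos(2\pi x_2)\sin(2\pi x_1)\cr \sin(2\pi x_2)}\right],$$
+which indeed has unit Euclidean norm for any $(x_1,x_2)$.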
+
+@<put low discrepancy sphere points to |ymat|@>=
+   	int d = model.npred()+model.nboth()-1;
+	TwoDMatrix ymat(model.npred()+model.nboth(), (d==0)? 2:m);
+	if (d == 0) {
+		ymat.get(0,0) = 1;
+		ymat.get(0,1) = -1;
+	} else {
+		int icol = 0;
+		ReversePerScheme ps;
+		QMCarloCubeQuadrature qmc(d, m, ps);
+		qmcpit beg = qmc.start(m);
+		qmcpit end = qmc.end(m);
+		for (qmcpit run = beg; run != end; ++run, icol++) {
+			Vector ycol(ymat, icol);
+			Vector x(run.point());
+			x.mult(2*M_PI);
+			ycol[0] = 1;
+			for (int i = 0; i < d; i++) {
+				Vector subsphere(ycol, 0, i+1);
+				subsphere.mult(cos(x[i]));
+				ycol[i+1] = sin(x[i]);
+			}
+		}
+	}
+
+@ Here we multiply the sphere points in |ymat| with the covariance
+factor |ycovfac| to obtain the ellipse, scale the ellipse by the given
+|mult|, shift the points by the steady state of the state variables,
+and initialize the matrix of shocks |umat| to zero.
+
+@<transform sphere |ymat| and prepare |umat| for checking@>=
+	TwoDMatrix umat(model.nexog(), ymat.ncols());
+	umat.zeros();
+	ymat.mult(mult);
+	ymat.multLeft(ycovfac);
+	ConstVector ys(model.getSteady(), model.nstat(),
+				   model.npred()+model.nboth());
+	for (int icol = 0; icol < ymat.ncols(); icol++) {
+		Vector ycol(ymat, icol);
+		ycol.add(1.0, ys);
+	}
+
+@ Here we check the points and save the results to MAT-4 file.
+@<check on ellipse and save@>=
+	TwoDMatrix out(model.numeq(), ymat.ncols());
+	check(max_evals, ymat, umat, out);
+
+	char tmp[100];
+	sprintf(tmp, "%s_ellipse_points", prefix);
+	ymat.writeMat4(fd, tmp);
+	sprintf(tmp, "%s_ellipse_errors", prefix);
+	out.writeMat4(fd, tmp);
+
+@ Here we check the errors along a simulation path. We simulate |m|
+periods, set the shock matrix |x| to zeros, run the check, and save
+the results.
+
+@<|GlobalChecker::checkAlongSimulationAndSave| code@>=
+void GlobalChecker::checkAlongSimulationAndSave(FILE* fd, const char* prefix,
+												int m, int max_evals)
+{
+	JournalRecordPair pa(journal);
+	pa << "Calculating errors at " << m
+	   << " simulated points" << endrec; 
+	RandomShockRealization sr(model.getVcov(), system_random_generator.int_uniform());
+	TwoDMatrix* y = approx.getFoldDecisionRule().simulate(DecisionRule::horner,
+														  m, model.getSteady(), sr);
+	TwoDMatrix x(model.nexog(), m);
+	x.zeros();
+	TwoDMatrix out(model.numeq(), m);
+	check(max_evals, *y, x, out);
+
+	char tmp[100];
+	sprintf(tmp, "%s_simul_points", prefix);
+	y->writeMat4(fd, tmp);
+	sprintf(tmp, "%s_simul_errors", prefix);
+	out.writeMat4(fd, tmp);
+
+	delete y;
+}
+
+
+@ End of {\tt global\_check.cpp} file.
diff --git a/dynare++/kord/global_check.hweb b/dynare++/kord/global_check.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..c54b57a73e205a63528a68ebe7dc27cb0c7ba0f4
--- /dev/null
+++ b/dynare++/kord/global_check.hweb
@@ -0,0 +1,167 @@
+@q $Id: global_check.hweb 431 2005-08-16 15:41:01Z kamenik $ @>
+@q Copyright 2005, Ondra Kamenik @>
+
+@*2 Global check. Start of {\tt global\_check.h} file.
+
+The purpose of this file is to provide classes for checking the error
+of the approximation. If $y_t=g(y^*_{t-1},u)$ is an approximate
+solution, then we check the error through the residuals of the system
+equations. Let
+$F(y^*,u,u')=f(g^{**}(g^*(y^*,u),u'),g(y^*,u),y^*,u)$; then we
+calculate the integral
+$$E[F(y^*,u,u')]$$@q'@>
+which we want to be zero for all $y^*$ and $u$.
+
+There are a few possibilities for how and where the integral is
+evaluated. Currently we offer the following:
+
+\numberedlist
+\li Along shocks. The $y^*$ is set to the steady state, and $u$ is set
+to zero except for one element, which moves from negative to positive
+shocks in a few steps. The user gives the scaling factor, for instance
+the interval $\langle-3\sigma,3\sigma\rangle$ (where $\sigma$ is the
+standard error of the shock), and a number of steps. This is repeated
+for each shock (element of the $u$ vector).
+\li Along simulation. Some random simulation is run, and for each
+realization of $y^*$ and $u$ along the path we evaluate the residual.
+\li On ellipse. Let $V=AA^T$ be the covariance matrix of the
+predetermined variables $y^*$ based on the linear approximation; then
+we calculate the integral for points on the ellipse $\{Ax\vert\, \Vert
+x\Vert_2=1\}$. The points are selected by means of a low discrepancy
+method and a polar transformation. The shocks $u$ are zero.
+
+\li Unconditional distribution. 
+
+\endnumberedlist
+
+
+@s ResidFunction int
+@s GResidFunction int
+@s GlobalChecker int
+@s VectorFunction int
+@s ResidFunctionSig int
+@s GaussHermite int
+@s SmolyakQuadrature int
+@s ProductQuadrature int
+@s ParameterSignal int
+@s Quadrature int
+@s QMCarloCubeQuadrature int
+
+@c
+#ifndef GLOBAL_CHECK_H
+#define GLOBAL_CHECK_H
+
+#include "vector_function.h"
+#include "quadrature.h"
+
+#include "dynamic_model.h"
+#include "journal.h"
+#include "approximation.h"
+
+@<|ResidFunction| class declaration@>;
+@<|GResidFunction| class declaration@>;
+@<|GlobalChecker| class declaration@>;
+@<|ResidFunctionSig| class declaration@>;
+
+#endif
+
+@ This is a class implementing the |VectorFunction| interface, which
+evaluates the residual of the equations, that is
+$$F(y^*,u,u')=f(g^{**}(g^*(y^*,u),u'),g(y^*,u),y^*,u)$$
+viewed as a function of $u'$.
+
+When the object is constructed, one has to specify $(y^*,u)$; this is
+done by the |setYU| method. The object has basically two states: one
+after construction and before the call to |setYU|, and the second
+after the call to |setYU|. We distinguish between the two states; an
+object in the second state contains |yplus|, |ystar|, |u|, and |hss|.
+
+The vector |yplus| is the decision rule evaluated at $(y^*,u)$,
+|ystar| is $y^*$, and the polynomial |hss| is $g^{**}$ partially
+evaluated at the state part of |yplus|.
+
+The pointer to |DynamicModel| is important, since the |DynamicModel|
+evaluates the function $f$. When copying the object, we also have to
+make a copy of the |DynamicModel|.
+
+@<|ResidFunction| class declaration@>=
+class ResidFunction : public VectorFunction {
+protected:@;
+	const Approximation& approx;
+	DynamicModel* model;
+	Vector* yplus;
+	Vector* ystar;
+	Vector* u;
+	FTensorPolynomial* hss;
+public:@;
+	ResidFunction(const Approximation& app);
+	ResidFunction(const ResidFunction& rf);
+	virtual ~ResidFunction();
+	virtual VectorFunction* clone() const
+		{@+ return new ResidFunction(*this);@+}
+	virtual void eval(const Vector& point, const ParameterSignal& sig, Vector& out);
+	void setYU(const Vector& ys, const Vector& xx);
+};
+
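+@ A minimal usage sketch follows; the |Approximation| |approx|, the
+vectors |ys|, |u| and |up|, the signal |sig| and the output vector
+|res| are hypothetical local variables of appropriate dimensions.
+
+	ResidFunction rf(approx);  // state one: $(y^*,u)$ not set yet
+	rf.setYU(ys, u);           // state two: |yplus|, |ystar|, |u| and |hss| exist
+	rf.eval(up, sig, res);     // evaluates $F(y^*,u,u')$ at $u'=$|up|
+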
+@ This is a |ResidFunction| wrapped with |GaussConverterFunction|.
+
+@<|GResidFunction| class declaration@>=
+class GResidFunction : public GaussConverterFunction {
+public:@;
+	GResidFunction(const Approximation& app)
+		: GaussConverterFunction(new ResidFunction(app), app.getModel().getVcov())@+ {}
+	GResidFunction(const GResidFunction& rf)
+		: GaussConverterFunction(rf)@+ {}
+	virtual ~GResidFunction()@+ {}
+	virtual VectorFunction* clone() const
+		{@+ return new GResidFunction(*this);@+}
+	void setYU(const Vector& ys, const Vector& xx)
+		{@+ ((ResidFunction*)func)->setYU(ys, xx);}
+};
+
+
+@ This is a class encapsulating the checking algorithms. Its core
+routine is |check|, which calculates the integral
+$E[F(y^*,u,u')\vert y^*,u]$ for given realizations of $y^*$ and
+$u$. Both are given as matrices. The methods checking along shocks, on
+an ellipse, and along a simulation path just fill the matrices and
+call the core |check|.
+
+The method |checkUnconditionalAndSave| evaluates the unconditional
+$E[F(y,u,u')]$.
+
+The object also maintains a set of |GResidFunction| functions |vfs| in
+order to save (possibly expensive) copying of |DynamicModel|s.
+
+@<|GlobalChecker| class declaration@>=
+class GlobalChecker {
+	const Approximation& approx;
+	const DynamicModel& model;
+	Journal& journal;
+	GResidFunction rf;
+	VectorFunctionSet vfs;
+public:@;
+	GlobalChecker(const Approximation& app, int n, Journal& jr)
+		: approx(app), model(approx.getModel()), journal(jr),
+		  rf(approx), vfs(rf, n)@+ {}
+	void check(int max_evals, const ConstTwoDMatrix& y,
+			   const ConstTwoDMatrix& x, TwoDMatrix& out);
+	void checkAlongShocksAndSave(FILE* fd, const char* prefix,
+								 int m, double mult, int max_evals);
+	void checkOnEllipseAndSave(FILE* fd, const char* prefix,
+							   int m, double mult, int max_evals);
+	void checkAlongSimulationAndSave(FILE* fd, const char* prefix,
+									 int m, int max_evals);
+	void checkUnconditionalAndSave(FILE* fd, const char* prefix,
+								   int m, int max_evals);
+protected:@;
+	void check(const Quadrature& quad, int level,
+			   const ConstVector& y, const ConstVector& x, Vector& out);
+};
+
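+@ A minimal usage sketch, assuming an |Approximation| |approx|, a
+|Journal| |journal|, an already opened MAT-4 file handle |fd|, and
+purely illustrative numeric arguments:
+
+	GlobalChecker gcheck(approx, 2, journal); // 2: illustrative number of function copies
+	gcheck.checkAlongShocksAndSave(fd, "mod", 10, 3.0, 1000);
+	gcheck.checkOnEllipseAndSave(fd, "mod", 50, 2.0, 1000);
+	gcheck.checkAlongSimulationAndSave(fd, "mod", 100, 1000);
+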
+
+@ Signalled residual function. Not implemented yet; left as a todo.
+@<|ResidFunctionSig| class declaration@>=
+class ResidFunctionSig : public ResidFunction {
+public:@;
+	ResidFunctionSig(const Approximation& app, const Vector& ys, const Vector& xx);
+};
+
+@ End of {\tt global\_check.h} file.
diff --git a/dynare++/kord/journal.cweb b/dynare++/kord/journal.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..dcf22e26375dff95faa1457935777802b5803c56
--- /dev/null
+++ b/dynare++/kord/journal.cweb
@@ -0,0 +1,333 @@
+@q $Id: journal.cweb 413 2005-08-16 14:39:55Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@ Start of {\tt journal.cpp} file.
+
+@c
+#include "journal.h"
+#include "kord_exception.h"
+
+#ifndef __MINGW32__
+# include <sys/resource.h>
+# include <sys/utsname.h>
+#endif
+#include <stdlib.h>
+#include <unistd.h>
+#include <time.h>
+
+SystemResources _sysres;
+#ifdef __MINGW32__
+@<|gettimeofday| Win32 implementation@>;
+@<|sysconf| Win32 implementation@>;
+#endif
+
+@<|SystemResources| constructor code@>;
+@<|SystemResources::pageSize| code@>;
+@<|SystemResources::physicalPages| code@>;
+@<|SystemResources::onlineProcessors| code@>;
+@<|SystemResources::availableMemory| code@>;
+@<|SystemResources::getRUS| code@>;
+@<|SystemResourcesFlash| constructor code@>;
+@<|SystemResourcesFlash::diff| code@>;
+@<|JournalRecord::operator<<| symmetry code@>;
+@<|JournalRecord::writePrefix| code@>;
+@<|JournalRecord::writePrefixForEnd| code@>;
+@<|JournalRecordPair| destructor code@>;
+@<|endrec| code@>;
+@<|Journal::printHeader| code@>;
+
+@ 
+@<|SystemResources| constructor code@>=
+SystemResources::SystemResources()
+{
+	gettimeofday(&start, NULL);
+}
+
+
+@ 
+@<|SystemResources::pageSize| code@>=
+long int SystemResources::pageSize()
+{
+	return sysconf(_SC_PAGESIZE);
+}
+
+@ 
+@<|SystemResources::physicalPages| code@>=
+long int SystemResources::physicalPages()
+{
+	return sysconf(_SC_PHYS_PAGES);
+}
+
+@ 
+@<|SystemResources::onlineProcessors| code@>=
+long int SystemResources::onlineProcessors()
+{
+	return sysconf(_SC_NPROCESSORS_ONLN);
+}
+
+@ 
+@<|SystemResources::availableMemory| code@>=
+long int SystemResources::availableMemory()
+{
+	return pageSize()*sysconf(_SC_AVPHYS_PAGES);
+}
+
+@ Here we read the current values of resource usage. For MinGW, we
+implement only the number of available physical memory pages.
+
+@<|SystemResources::getRUS| code@>=
+void SystemResources::getRUS(double& load_avg, long int& pg_avail,
+							 double& utime, double& stime, double& elapsed,
+							 long int& idrss, long int& majflt)
+{
+	struct timeval now;
+	gettimeofday(&now, NULL);
+	elapsed = now.tv_sec-start.tv_sec + (now.tv_usec-start.tv_usec)*1.0e-6;
+
+#ifndef __MINGW32__
+	struct rusage rus;
+	getrusage(RUSAGE_SELF, &rus);
+	utime = rus.ru_utime.tv_sec+rus.ru_utime.tv_usec*1.0e-6;
+	stime = rus.ru_stime.tv_sec+rus.ru_stime.tv_usec*1.0e-6;
+	idrss = rus.ru_idrss;
+	majflt = rus.ru_majflt;
+
+	getloadavg(&load_avg, 1);
+#else
+	utime = -1.0;
+	stime = -1.0;
+	idrss = -1;
+	majflt = -1;
+	load_avg = -1.0;
+#endif
+	pg_avail = sysconf(_SC_AVPHYS_PAGES);
+}
+
+@ 
+@<|SystemResourcesFlash| constructor code@>=
+SystemResourcesFlash::SystemResourcesFlash()
+{
+	_sysres.getRUS(load_avg, pg_avail, utime, stime,
+				   elapsed, idrss, majflt);
+}
+
+@ 
+@<|SystemResourcesFlash::diff| code@>=
+void SystemResourcesFlash::diff(const SystemResourcesFlash& pre)
+{
+	utime -= pre.utime;
+	stime -= pre.stime;
+	elapsed -= pre.elapsed;
+	idrss -= pre.idrss;
+	majflt -= pre.majflt;
+}
+
+@ 
+@<|JournalRecord::operator<<| symmetry code@>=
+JournalRecord& JournalRecord::operator<<(const IntSequence& s)
+{
+	operator<<("[");
+	for (int i = 0; i < s.size(); i++) {
+		operator<<(s[i]);
+		if (i < s.size()-1)
+			operator<<(",");
+	}
+	operator<<("]");
+	return *this;
+}
+
+@ 
+@<|JournalRecord::writePrefix| code@>=
+void JournalRecord::writePrefix(const SystemResourcesFlash& f)
+{
+	for (int i = 0; i < MAXLEN; i++)
+		prefix[i] = ' ';
+	double mb = 1024*1024;
+	sprintf(prefix, "%07.6g", f.elapsed);
+	sprintf(prefix+7, ":%c%05d", recChar, ord);
+	sprintf(prefix+14, ":%1.1f", f.load_avg);
+	sprintf(prefix+18, ":%05.4g", f.pg_avail*_sysres.pageSize()/mb);
+	sprintf(prefix+24, "%s", ":      : ");
+	for (int i = 0; i < 2*journal.getDepth(); i++)
+		prefix[i+33]=' ';
+	prefix[2*journal.getDepth()+33]='\0';
+}
+
+@ 
+@<|JournalRecord::writePrefixForEnd| code@>=
+void JournalRecordPair::writePrefixForEnd(const SystemResourcesFlash& f)
+{
+	for (int i = 0; i < MAXLEN; i++)
+		prefix_end[i] = ' ';
+	double mb = 1024*1024;
+	SystemResourcesFlash difnow;
+	difnow.diff(f);
+	sprintf(prefix_end, "%07.6g", f.elapsed+difnow.elapsed);
+	sprintf(prefix_end+7, ":E%05d", ord);
+	sprintf(prefix_end+14, ":%1.1f", difnow.load_avg);
+	sprintf(prefix_end+18, ":%05.4g", difnow.pg_avail*_sysres.pageSize()/mb);
+	sprintf(prefix_end+24, ":%06.5g", difnow.majflt*_sysres.pageSize()/mb);
+	sprintf(prefix_end+31, "%s", ": ");
+	for (int i = 0; i < 2*journal.getDepth(); i++)
+		prefix_end[i+33]=' ';
+	prefix_end[2*journal.getDepth()+33]='\0';
+}
+
+@ 
+@<|JournalRecordPair| destructor code@>=
+JournalRecordPair::~JournalRecordPair()
+{
+	journal.decrementDepth();
+	writePrefixForEnd(flash);
+	journal << prefix_end;
+	journal << mes;
+	journal << endl;
+	journal.flush();
+}
+
+@ 
+@<|endrec| code@>=
+JournalRecord& endrec(JournalRecord& rec)
+{
+	rec.journal << rec.prefix;
+	rec.journal << rec.mes;
+	rec.journal << endl;
+	rec.journal.flush();
+	rec.journal.incrementOrd();
+	return rec;
+}
+
+@ 
+@<|Journal::printHeader| code@>=
+void Journal::printHeader()
+{
+	(*this)<< "This is Dynare++, Copyright (C) 2004,2005 Michel Juillard, Ondra Kamenik\n";
+	(*this)<< "Dynare++ comes with ABSOLUTELY NO WARRANTY and is distributed under\n";
+	(*this)<< "General Public License, see http://www.gnu.org/license/gpl.html\n";
+	(*this)<< "\n\n";
+
+#ifndef __MINGW32__
+	utsname info;
+	uname(&info);
+	(*this)<< "System info: ";
+	(*this)<< info.sysname << " " << info.release << " " << info.version << " ";
+	(*this)<< info.machine << ", processors online: " << _sysres.onlineProcessors();
+
+	(*this)<< "\n\nStart time: ";
+	char ts[100];
+	time_t curtime = time(NULL);
+	tm loctime;
+	localtime_r(&curtime, &loctime);
+	asctime_r(&loctime, ts);
+	(*this)<< ts << "\n";
+#else
+	(*this) << "System info: (not implemented for MINGW)\n";
+	(*this) << "Start time:  (not implemented for MINGW)\n\n";
+#endif
+	
+	(*this)<< "  ------ elapsed time (seconds)                     \n";
+	(*this)<< "  |       ------ record unique identifier           \n";
+	(*this)<< "  |       |     ------ load average                 \n";
+	(*this)<< "  |       |     |    ------ available memory (MB)   \n";
+	(*this)<< "  |       |     |    |     ------  major faults (MB)\n";
+	(*this)<< "  |       |     |    |     |                        \n";
+	(*this)<< "  V       V     V    V     V                        \n";
+	(*this)<< "\n";
+}
+
+
+@ Taken from the {\tt gdb@@sources.redhat.com} list; the author is Danny Smith.
+
+|_W32_FT_OFFSET| is the time from 1 Jan 1601 to 1 Jan 1970 in 100ns
+units, and the |filetime| structure is taken from the {\tt windows.h} file.
+
+@s filetime int
+@s __stdcall int
+@d _W32_FT_OFFSET (116444736000000000LL) 
+@s w32_ftv int
+
+@<|gettimeofday| Win32 implementation@>=
+typedef struct _filetime {
+	unsigned long dwLowDateTime;
+	unsigned long dwHighDateTime;
+} filetime;
+@#
+extern "C" {
+	void __stdcall GetSystemTimeAsFileTime(filetime*);
+};
+@#
+typedef union {
+	long long ns100; // time since 1 Jan 1601 in 100ns units
+	filetime ft;
+} w32_ftv;
+@#
+void gettimeofday(struct timeval* p, struct timezone* tz)
+{
+	w32_ftv _now;
+	GetSystemTimeAsFileTime( &(_now.ft) );
+	p->tv_usec=(long)((_now.ns100 / 10LL) % 1000000LL );
+	p->tv_sec= (long)((_now.ns100-_W32_FT_OFFSET)/10000000LL);
+	return;
+}
+
+@ Here we implement |sysconf| for MinGW. We implement only the page
+size, the number of physical pages, and the number of available
+physical pages. The page size is set to 1024 bytes; the real page size
+can differ, but it is not important here. We can do this since the
+Windows kernel32 |GlobalMemoryStatus| call returns numbers of bytes.
+
+The number of online processors is not implemented and returns $-1$,
+since the Windows kernel32 |GetSystemInfo| call is too complicated.
+
+@<|sysconf| Win32 implementation@>=
+#define _SC_PAGESIZE 1
+#define _SC_PHYS_PAGES 2
+#define _SC_AVPHYS_PAGES 3
+#define _SC_NPROCESSORS_ONLN 4
+@#
+struct Win32MemoryStatus {
+	unsigned long dwLength;
+	unsigned long dwMemoryLoad;
+	unsigned int dwTotalPhys;
+	unsigned int dwAvailPhys;
+	unsigned int dwTotalPageFile;
+	unsigned int dwAvailPageFile;
+	unsigned int dwTotalVirtual;
+	unsigned int dwAvailVirtual;
+	Win32MemoryStatus();
+};
+@#
+extern "C" {
+	void __stdcall GlobalMemoryStatus(Win32MemoryStatus *);
+};
+@#
+Win32MemoryStatus::Win32MemoryStatus()
+{
+	dwLength = sizeof(Win32MemoryStatus);
+	GlobalMemoryStatus(this);
+}
+@#
+long sysconf(int name)
+{
+	switch (name) {
+	case _SC_PAGESIZE:@;
+		return 1024;
+	case _SC_PHYS_PAGES:@;
+		{
+			Win32MemoryStatus memstat;
+			return memstat.dwTotalPhys/1024;
+		}
+	case _SC_AVPHYS_PAGES:@;
+		{
+			Win32MemoryStatus memstat;
+			return memstat.dwAvailPhys/1024;
+		}
+	case _SC_NPROCESSORS_ONLN:@;
+		return -1;
+	default:@; 
+		KORD_RAISE("Not implemented in Win32 sysconf.");
+		return -1;
+	}
+}
+
+@ End of {\tt journal.cpp} file.
\ No newline at end of file
diff --git a/dynare++/kord/journal.hweb b/dynare++/kord/journal.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..71680bb8d03816a295a214aca9acf3e0628ac9d9
--- /dev/null
+++ b/dynare++/kord/journal.hweb
@@ -0,0 +1,137 @@
+@q $Id: journal.hweb 417 2005-08-16 15:04:24Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@*2 Resource usage journal. Start of {\tt journal.h} file.
+
+@s timeval int
+@s rusage int
+@s SystemResources int
+@s SystemResourcesFlash int
+@s Journal int
+@s JournalRecord int
+@s JournalRecordPair int
+
+@c
+#ifndef JOURNAL_H
+#define JOURNAL_H
+
+#include "int_sequence.h"
+
+#include <sys/time.h>
+#include <stdio.h>
+#include <iostream>
+#include <fstream>
+
+@<|SystemResources| class declaration@>;
+@<|SystemResourcesFlash| struct declaration@>;
+@<|Journal| class declaration@>;
+@<|JournalRecord| class declaration@>;
+@<|JournalRecordPair| class declaration@>;
+
+#endif
+
+@ 
+@<|SystemResources| class declaration@>=
+class SystemResources {
+	timeval start;
+public:@;
+	SystemResources();
+	static long int pageSize();
+	static long int physicalPages();
+	static long int onlineProcessors();
+	static long int availableMemory();
+	void getRUS(double& load_avg, long int& pg_avail, double& utime,
+				double& stime, double& elapsed, long int& idrss,
+				long int& majflt);
+};
+
+@ 
+@<|SystemResourcesFlash| struct declaration@>=
+struct SystemResourcesFlash {
+	double load_avg;
+	long int pg_avail;
+	double utime;
+	double stime;
+	double elapsed;
+	long int idrss;
+	long int majflt;
+	SystemResourcesFlash();
+	void diff(const SystemResourcesFlash& pre);
+};
+
+
+@ 
+@s stringstream int
+@d MAXLEN 1000
+
+@<|JournalRecord| class declaration@>=
+class JournalRecord;
+JournalRecord& endrec(JournalRecord&);
+
+class JournalRecord {
+protected:@;
+	char recChar;
+	int ord;
+public:@;
+	Journal& journal;
+	char prefix[MAXLEN];
+	char mes[MAXLEN];
+	SystemResourcesFlash flash;
+	typedef JournalRecord& (*_Tfunc)(JournalRecord&);
+
+	JournalRecord(Journal& jr, char rc = 'M')
+		: recChar(rc), ord(jr.getOrd()), journal(jr) 
+		{@+ prefix[0]='\0';mes[0]='\0';writePrefix(flash); @+}
+	virtual ~JournalRecord() @+{}
+	JournalRecord& operator<<(const IntSequence& s);
+	JournalRecord& operator<<(_Tfunc f)
+		{@+ (*f)(*this); return *this;@+}
+	JournalRecord& operator<<(const char* s)
+		{@+ strcat(mes, s); return *this; @+}
+	JournalRecord& operator<<(int i)
+		{@+ sprintf(mes+strlen(mes), "%d", i); return *this;@+}
+	JournalRecord& operator<<(double d)
+		{@+ sprintf(mes+strlen(mes), "%f", d); return *this;@+}
+protected:@;
+	void writePrefix(const SystemResourcesFlash& f);
+};
+
+@ 
+@<|JournalRecordPair| class declaration@>=
+class JournalRecordPair : public JournalRecord {
+	char prefix_end[MAXLEN];
+public:@;
+	JournalRecordPair(Journal& jr)
+		: JournalRecord(jr, 'S')
+		{@+ prefix_end[0] = '\0'; journal.incrementDepth(); @+}
+	~JournalRecordPair();
+private:@;
+	void writePrefixForEnd(const SystemResourcesFlash& f);
+};
+
+@ 
+@<|Journal| class declaration@>=
+class Journal : public ofstream {
+	int ord;
+	int depth;
+public:@;
+	Journal(const char* fname)
+		: ofstream(fname), ord(0), depth(0)
+		{@+ printHeader();@+}
+	~Journal()
+		{@+ flush();@+}
+	void printHeader();
+	void incrementOrd()
+		{@+ ord++; @+}
+	int getOrd() const
+		{@+ return ord; @+}
+	void incrementDepth()
+		{@+ depth++; @+}
+	void decrementDepth()
+		{@+ depth--; @+}
+	int getDepth() const
+		{return depth;}
+};
+
+
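+@ A minimal usage sketch; the journal file name and the messages are
+hypothetical:
+
+	Journal journal("model.jnl");
+	{
+		JournalRecordPair pa(journal);
+		pa << "Solving the model" << endrec;
+		JournalRecord rec(journal);
+		rec << "order " << 2 << " done" << endrec;
+	} // destructor of |pa| writes the closing `E' record
+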
+@ End of {\tt journal.h} file.
\ No newline at end of file
diff --git a/dynare++/kord/kord_exception.hweb b/dynare++/kord/kord_exception.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..e447aa615094bed034252505333c054e48b6fa0b
--- /dev/null
+++ b/dynare++/kord/kord_exception.hweb
@@ -0,0 +1,64 @@
+@q $Id: kord_exception.hweb 1452 2007-11-21 11:33:30Z kamenik $ @>
+@q Copyright 2005, Ondra Kamenik @>
+
+@*2 Exception. Start of {\tt kord\_exception.h} file.
+
+This is a simple piece of code defining an exception and a few convenience
+macros for raising it.
+
+@s KordException int
+@c
+#ifndef KORD_EXCEPTION_H
+#define KORD_EXCEPTION_H
+
+#include <string.h>
+#include <stdio.h>
+
+#define KORD_RAISE(mes) \
+throw KordException(__FILE__, __LINE__, mes);
+
+#define KORD_RAISE_IF(expr, mes) \
+if (expr) throw KordException(__FILE__, __LINE__, mes);
+
+#define KORD_RAISE_X(mes, c) \
+throw KordException(__FILE__, __LINE__, mes, c);
+
+#define KORD_RAISE_IF_X(expr, mes, c) \
+if (expr) throw KordException(__FILE__, __LINE__, mes, c);
+
+@<|KordException| class definition@>;
+@<|KordException| error code definitions@>;
+ 
+#endif
+
+@ 
+@<|KordException| class definition@>=
+class KordException {
+protected:@;
+	char fname[50];
+	int lnum;
+	char message[500];
+	int cd;
+public:@;
+	KordException(const char* f, int l, const char* mes, int c=255)
+		{
+			strncpy(fname, f, 50);@+ fname[49] = '\0';
+			strncpy(message, mes, 500);@+ message[499] = '\0';
+			lnum = l;
+			cd = c;
+		}
+	virtual ~KordException()@+ {}
+	virtual void print() const
+		{@+ printf("At %s:%d:(%d):%s\n", fname, lnum, cd, message);@+}
+	virtual int code() const
+		{@+ return cd; @+}
+	const char* get_message() const
+		{@+ return message; @+}
+};
+
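+@ A minimal usage sketch; the checked variable |order| and the message
+are hypothetical:
+
+	try {
+		KORD_RAISE_IF(order < 2, "Order must be at least 2");
+	} catch (const KordException& e) {
+		e.print();  // prints file, line, error code and message
+	}
+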
+@ 
+@<|KordException| error code definitions@>=
+#define KORD_FP_NOT_CONV 254
+#define KORD_FP_NOT_FINITE 253
+#define KORD_MD_NOT_STABLE 252
+
+@ End of {\tt kord\_exception.h} file.
diff --git a/dynare++/kord/korder.cweb b/dynare++/kord/korder.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..8b32c63612ae717968659867fcc1f0b6c8c66d5b
--- /dev/null
+++ b/dynare++/kord/korder.cweb
@@ -0,0 +1,340 @@
+@q $Id: korder.cweb 1831 2008-05-18 20:13:42Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@ Start of {\tt korder.cpp} file.
+
+@c
+
+#include "kord_exception.h"
+#include "korder.h"
+
+#include "cpplapack.h"
+
+@<|PLUMatrix| copy constructor@>;
+@<|PLUMatrix::calcPLU| code@>;
+@<|PLUMatrix::multInv| code@>;
+@<|MatrixA| constructor code@>;
+@<|MatrixS| constructor code@>;
+@<|KOrder| member access method specializations@>;
+@<|KOrder::sylvesterSolve| unfolded specialization@>;
+@<|KOrder::sylvesterSolve| folded specialization@>;
+@<|KOrder::switchToFolded| code@>;
+@<|KOrder| constructor code@>;
+
+@ 
+@<|PLUMatrix| copy constructor@>=
+PLUMatrix::PLUMatrix(const PLUMatrix& plu)
+	: TwoDMatrix(plu), inv(plu.inv), ipiv(new int[nrows()])
+{
+	memcpy(ipiv, plu.ipiv, nrows()*sizeof(int));
+}
+
+
+@ Here we set |ipiv| and |inv| members of the |PLUMatrix| depending on
+its content. It is assumed that subclasses will call this method at
+the end of their constructors.
+
+@<|PLUMatrix::calcPLU| code@>=
+void PLUMatrix::calcPLU()
+{
+	int info;
+	int rows = nrows();
+	inv = (const Vector&)getData();
+	LAPACK_dgetrf(&rows, &rows, inv.base(), &rows, ipiv, &info);
+}
+
+@ Here we just call the LAPACK machinery to multiply by the inverse.
+
+@<|PLUMatrix::multInv| code@>=
+void PLUMatrix::multInv(TwoDMatrix& m) const
+{
+	KORD_RAISE_IF(m.nrows() != ncols(),
+				  "The matrix is not square in PLUMatrix::multInv");
+	int info;
+	int mcols = m.ncols();
+	int mrows = m.nrows();
+	double* mbase = m.getData().base();
+	LAPACK_dgetrs("N", &mrows, &mcols, inv.base(), &mrows, ipiv,
+				  mbase, &mrows, &info);
+	KORD_RAISE_IF(info != 0,
+				  "Info!=0 in PLUMatrix::multInv");
+}
+
+@ Here we construct the matrix $A$. Its dimension is |ny|, and it is
+$$A=\left[f_{y}\right]+
+\left[0\quad \left[f_{y^{**}_+}\right]\cdot\left[g^{**}_{y^*}\right]\quad 0\right],$$
+where the first zero spans |nstat| columns, and the last zero spans
+|nforw| columns.
+
+@<|MatrixA| constructor code@>=
+MatrixA::MatrixA(const FSSparseTensor& f, const IntSequence& ss,
+				 const TwoDMatrix& gy, const PartitionY& ypart)
+	: PLUMatrix(ypart.ny())
+{
+	zeros();
+
+	IntSequence c(1); c[0] = 1;
+	FGSTensor f_y(f, ss, c, TensorDimens(ss, c));
+	add(1.0, f_y);
+
+	ConstTwoDMatrix gss_ys(ypart.nstat+ypart.npred, ypart.nyss(), gy);
+	c[0] = 0;
+	FGSTensor f_yss(f, ss, c, TensorDimens(ss, c));
+	TwoDMatrix sub(*this, ypart.nstat, ypart.nys());
+	sub.multAndAdd(ConstTwoDMatrix(f_yss), gss_ys);
+
+	calcPLU();
+}
+
+@ Here we construct the matrix $S$. Its dimension is |ny|, and it is
+$$S=\left[f_{y}\right]+
+\left[0\quad\left[f_{y^{**}_+}\right]\cdot\left[g^{**}_{y^*}\right]\quad
+0\right]+ \left[0\quad 0\quad\left[f_{y^{**}_+}\right]\right]$$
+It is, in fact, the matrix $A$ plus the third summand. The first zero
+in the summand spans |nstat| columns, the second zero spans |npred|
+columns.
+
+@<|MatrixS| constructor code@>=
+MatrixS::MatrixS(const FSSparseTensor& f, const IntSequence& ss,
+				 const TwoDMatrix& gy, const PartitionY& ypart)
+	: PLUMatrix(ypart.ny())
+{
+	zeros();
+
+	IntSequence c(1); c[0] = 1;
+	FGSTensor f_y(f, ss, c, TensorDimens(ss, c));
+	add(1.0, f_y);
+
+	ConstTwoDMatrix gss_ys(ypart.nstat+ypart.npred, ypart.nyss(), gy);
+	c[0] = 0;
+	FGSTensor f_yss(f, ss, c, TensorDimens(ss, c));
+	TwoDMatrix sub(*this, ypart.nstat, ypart.nys());
+	sub.multAndAdd(ConstTwoDMatrix(f_yss), gss_ys);
+
+	TwoDMatrix sub2(*this, ypart.nstat+ypart.npred, ypart.nyss());
+	sub2.add(1.0, f_yss);
+
+	calcPLU();
+}
+
+
+@ Here is the constructor of the |KOrder| class. We pass everything we
+need: the partitioning of the $y$ vector, a sparse container with the
+model derivatives, the first order approximation given by the $g_y$
+and $g_u$ matrices, and the covariance matrix of exogenous shocks |v|.
+
+We build the members; it is nothing difficult. Note that we do not
+make a physical copy of the sparse tensors, so while the class is in
+use, the outer world must not change them.
+
+In the body, we have to set the |nvs| array, and initialize the $g$
+and $G$ containers to comply with the preconditions of |performStep|.
+
+@<|KOrder| constructor code@>=
+KOrder::KOrder(int num_stat, int num_pred, int num_both, int num_forw,
+			   const TensorContainer<FSSparseTensor>& fcont,
+			   const TwoDMatrix& gy, const TwoDMatrix& gu, const TwoDMatrix& v,
+			   Journal& jr)
+	: ypart(num_stat, num_pred, num_both, num_forw),@/
+	  ny(ypart.ny()), nu(gu.ncols()), maxk(fcont.getMaxDim()),@/
+	  nvs(4),@/
+	  _ug(4), _fg(4), _ugs(4), _fgs(4), _ugss(4), _fgss(4), @/
+	  _uG(4), _fG(4),@/
+	  _uZstack(&_uG, ypart.nyss(), &_ug, ny, ypart.nys(), nu),@/
+	  _fZstack(&_fG, ypart.nyss(), &_fg, ny, ypart.nys(), nu),@/
+	  _uGstack(&_ugs, ypart.nys(), nu),@/
+	  _fGstack(&_fgs, ypart.nys(), nu),@/
+	  _um(maxk, v), _fm(_um), f(fcont),@/
+	  matA(*(f.get(Symmetry(1))), _uZstack.getStackSizes(), gy, ypart),@/
+	  matS(*(f.get(Symmetry(1))), _uZstack.getStackSizes(), gy, ypart),@/
+	  matB(*(f.get(Symmetry(1))), _uZstack.getStackSizes()),@/
+	  journal(jr)@/
+{
+	KORD_RAISE_IF(gy.ncols() != ypart.nys(),
+				  "Wrong number of columns in gy in KOrder constructor");
+	KORD_RAISE_IF(v.ncols() != nu,
+				  "Wrong number of columns of Vcov in KOrder constructor");
+	KORD_RAISE_IF(nu != v.nrows(),
+				  "Wrong number of rows of Vcov in KOrder constructor");
+	KORD_RAISE_IF(maxk < 2,
+				  "Order of approximation must be at least 2 in KOrder constructor");
+	KORD_RAISE_IF(gy.nrows() != ypart.ny(),
+				  "Wrong number of rows in gy in KOrder constructor");
+	KORD_RAISE_IF(gu.nrows() != ypart.ny(),
+				  "Wrong number of rows in gu in KOrder constructor");
+	KORD_RAISE_IF(gu.ncols() != nu,
+				  "Wrong number of columns in gu in KOrder constructor");
+
+	// set nvs:
+	nvs[0] = ypart.nys(); nvs[1] = nu; nvs[2] = nu; nvs[3] = 1;
+
+	@<put $g_y$ and $g_u$ to the container@>;
+	@<put $G_y$, $G_u$ and $G_{u'}$ to the container@>;@q'@>
+}
+
+@ Note that $g_\sigma$ is zero by nature, so we do not insert it into
+the container. We insert new physical copies.
+
+@<put $g_y$ and $g_u$ to the container@>=
+	UGSTensor* tgy = new UGSTensor(ny, TensorDimens(Symmetry(1,0,0,0), nvs));
+	tgy->getData() = gy.getData();
+	insertDerivative<unfold>(tgy);
+	UGSTensor* tgu = new UGSTensor(ny, TensorDimens(Symmetry(0,1,0,0), nvs));
+	tgu->getData() = gu.getData();
+	insertDerivative<unfold>(tgu);
+
+@ Also note that since $g_\sigma$ is zero, so is $G_\sigma$.
+@<put $G_y$, $G_u$ and $G_{u'}$ to the container@>=
+	UGSTensor* tGy = faaDiBrunoG<unfold>(Symmetry(1,0,0,0));
+	G<unfold>().insert(tGy);
+	UGSTensor* tGu = faaDiBrunoG<unfold>(Symmetry(0,1,0,0));
+	G<unfold>().insert(tGu);
+	UGSTensor* tGup = faaDiBrunoG<unfold>(Symmetry(0,0,1,0));
+	G<unfold>().insert(tGup);
+
+
+
+@ Here we have the unfolded specialization of |sylvesterSolve|. We
+simply create the Sylvester object and solve it. Note that $g^*_y$
+is not contiguous in memory as assumed by the Sylvester code, so we
+make a temporary copy and pass it as the matrix $C$.
+
+If the $B$ matrix is empty, in other words if there are no forward
+looking variables, then the system becomes $AX=D$, which is solved by
+a simple |matA.multInv()|.
+
+If one wants to display the diagnostic messages from the Sylvester
+module, then after the |sylv.solve()| one needs to call
+|sylv.getParams().print("")|.
+
+@<|KOrder::sylvesterSolve| unfolded specialization@>=
+template<>@/
+void KOrder::sylvesterSolve<KOrder::unfold>(ctraits<unfold>::Ttensor& der) const
+{
+	JournalRecordPair pa(journal);
+	pa << "Sylvester equation for dimension = " << der.getSym()[0] << endrec;
+	if (ypart.nys() > 0 && ypart.nyss() > 0) {
+		KORD_RAISE_IF(! der.isFinite(),
+					  "RHS of Sylvester is not finite");
+		TwoDMatrix gs_y(*(gs<unfold>().get(Symmetry(1,0,0,0))));
+		GeneralSylvester sylv(der.getSym()[0], ny, ypart.nys(),
+							  ypart.nstat+ypart.npred,
+							  matA.getData().base(), matB.getData().base(),
+							  gs_y.getData().base(), der.getData().base());
+		sylv.solve();
+	} else if (ypart.nys() > 0 && ypart.nyss() == 0) {
+		matA.multInv(der);
+	}
+}
+
+@ Here is the folded specialization of the Sylvester solver. We unfold
+the right hand side, solve it by the unfolded version of
+|sylvesterSolve|, then fold the result back and copy it to the output.
+
+@<|KOrder::sylvesterSolve| folded specialization@>=
+template<>@/
+void KOrder::sylvesterSolve<KOrder::fold>(ctraits<fold>::Ttensor& der) const
+{
+	ctraits<unfold>::Ttensor tmp(der);
+	sylvesterSolve<unfold>(tmp);
+	ctraits<fold>::Ttensor ftmp(tmp);
+	der.getData() = (const Vector&)(ftmp.getData());
+}
+
+@ 
+@<|KOrder::switchToFolded| code@>=
+void KOrder::switchToFolded()
+{
+	JournalRecordPair pa(journal);
+	pa << "Switching from unfolded to folded" << endrec;
+
+	int maxdim = g<unfold>().getMaxDim();
+	for (int dim = 1; dim <= maxdim; dim++) {
+		SymmetrySet ss(dim, 4);
+		for (symiterator si(ss); !si.isEnd(); ++si) {
+			if ((*si)[2] == 0 && g<unfold>().check(*si)) {
+				FGSTensor* ft = new FGSTensor(*(g<unfold>().get(*si)));
+				insertDerivative<fold>(ft);
+				if (dim > 1) {
+					gss<unfold>().remove(*si);
+					gs<unfold>().remove(*si);
+					g<unfold>().remove(*si);
+				}
+			}
+			if (G<unfold>().check(*si)) {
+				FGSTensor* ft = new FGSTensor(*(G<unfold>().get(*si)));
+				G<fold>().insert(ft);
+				if (dim > 1) {
+					G<unfold>().remove(*si);
+				}
+			}
+		}
+	}
+}
+
+
+
+@ These are the specializations of container access methods. Nothing
+interesting here.
+
+@<|KOrder| member access method specializations@>=
+	template<> ctraits<KOrder::unfold>::Tg& KOrder::g<KOrder::unfold>()
+		{@+ return _ug;@+}
+	template<>@; const ctraits<KOrder::unfold>::Tg& KOrder::g<KOrder::unfold>()@+const@;
+		{@+ return _ug;@+}
+	template<> ctraits<KOrder::fold>::Tg& KOrder::g<KOrder::fold>()
+		{@+ return _fg;@+}
+	template<> const ctraits<KOrder::fold>::Tg& KOrder::g<KOrder::fold>()@+const@;
+		{@+ return _fg;@+}
+	template<> ctraits<KOrder::unfold>::Tgs& KOrder::gs<KOrder::unfold>()
+		{@+ return _ugs;@+}
+	template<> const ctraits<KOrder::unfold>::Tgs& KOrder::gs<KOrder::unfold>()@+const@;
+		{@+ return _ugs;@+}
+	template<> ctraits<KOrder::fold>::Tgs& KOrder::gs<KOrder::fold>()
+		{@+ return _fgs;@+}
+	template<> const ctraits<KOrder::fold>::Tgs& KOrder::gs<KOrder::fold>()@+const@;
+		{@+ return _fgs;@+}
+	template<> ctraits<KOrder::unfold>::Tgss& KOrder::gss<KOrder::unfold>()
+		{@+ return _ugss;@+}
+	template<> const ctraits<KOrder::unfold>::Tgss& KOrder::gss<KOrder::unfold>()@+const@;
+		{@+ return _ugss;@+}
+	template<> ctraits<KOrder::fold>::Tgss& KOrder::gss<KOrder::fold>()
+		{@+ return _fgss;@+}
+	template<> const ctraits<KOrder::fold>::Tgss& KOrder::gss<KOrder::fold>()@+const@;
+		{@+ return _fgss;@+}
+	template<> ctraits<KOrder::unfold>::TG& KOrder::G<KOrder::unfold>()
+		{@+ return _uG;@+}
+	template<> const ctraits<KOrder::unfold>::TG& KOrder::G<KOrder::unfold>()@+const@;
+		{@+ return _uG;@+}
+	template<> ctraits<KOrder::fold>::TG& KOrder::G<KOrder::fold>()
+		{@+ return _fG;@+}
+	template<> const ctraits<KOrder::fold>::TG& KOrder::G<KOrder::fold>()@+const@;
+		{@+ return _fG;@+}
+	template<> ctraits<KOrder::unfold>::TZstack& KOrder::Zstack<KOrder::unfold>()
+		{@+ return _uZstack;@+}
+	template<> const ctraits<KOrder::unfold>::TZstack& KOrder::Zstack<KOrder::unfold>()@+const@;
+		{@+ return _uZstack;@+}
+	template<> ctraits<KOrder::fold>::TZstack& KOrder::Zstack<KOrder::fold>()
+		{@+ return _fZstack;@+}
+	template<> const ctraits<KOrder::fold>::TZstack& KOrder::Zstack<KOrder::fold>()@+const@;
+		{@+ return _fZstack;@+}
+	template<> ctraits<KOrder::unfold>::TGstack& KOrder::Gstack<KOrder::unfold>()
+		{@+ return _uGstack;@+}
+	template<> const ctraits<KOrder::unfold>::TGstack& KOrder::Gstack<KOrder::unfold>()@+const@;
+		{@+ return _uGstack;@+}
+	template<> ctraits<KOrder::fold>::TGstack& KOrder::Gstack<KOrder::fold>()
+		{@+ return _fGstack;@+}
+	template<> const ctraits<KOrder::fold>::TGstack& KOrder::Gstack<KOrder::fold>()@+const@;
+		{@+ return _fGstack;@+}
+	template<> ctraits<KOrder::unfold>::Tm& KOrder::m<KOrder::unfold>()
+		{@+ return _um;@+}
+	template<> const ctraits<KOrder::unfold>::Tm& KOrder::m<KOrder::unfold>()@+const@;
+		{@+ return _um;@+}
+	template<> ctraits<KOrder::fold>::Tm& KOrder::m<KOrder::fold>()
+		{@+ return _fm;@+}
+	template<> const ctraits<KOrder::fold>::Tm& KOrder::m<KOrder::fold>()@+const@;
+		{@+ return _fm;@+}
+
+
+@ End of {\tt korder.cpp} file.
diff --git a/dynare++/kord/korder.hweb b/dynare++/kord/korder.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..a96716fb63cb6db8d881fe32fa59bb6fa290f2bf
--- /dev/null
+++ b/dynare++/kord/korder.hweb
@@ -0,0 +1,956 @@
+@q $Id: korder.hweb 2332 2009-01-14 10:26:54Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@*2 Higher order at deterministic steady. Start of {\tt korder.h} file.
+
+The main purpose of this file is to implement a perturbation method
+algorithm for an SDGE model yielding higher order approximations. The
+inputs of the algorithm are sparse tensors with the derivatives of the
+dynamic system, the dimensions of the variable vectors, the first
+order approximation to the decision rule, and finally the covariance
+matrix of exogenous shocks. The outputs are higher order derivatives
+of the decision rule $y_t=g(y^*_{t-1},u_t,\sigma)$. The class also
+provides a method for checking the size of the residuals of the
+solved equations.
+
+The algorithm is implemented in the |KOrder| class. The class contains
+both unfolded and folded containers to allow for switching (usually
+from unfold to fold) during the calculations. The algorithm is
+implemented in a few templated methods. To do this, we need some
+container type traits, which are in the |ctraits| struct. Also, the
+|KOrder| class contains some information encapsulated in other
+classes, which are defined here. These include |PartitionY|,
+|MatrixA|, |MatrixS|, and |MatrixB|.
+
+@s KOrder int
+@s ctraits int
+@s PartitionY int
+@s MatrixA int
+@s MatrixB int
+@s MatrixS int
+@s PLUMatrix int
+@s FGSTensor int
+@s UGSTensor int
+@s FGSContainer int
+@s UGSContainer int
+@s FSSparseTensor int
+@s TensorContainer int
+@s UNormalMoments int
+@s FNormalMoments int
+@s FoldedZContainer int
+@s UnfoldedZContainer int
+@s FoldedGContainer int
+@s UnfoldedGContainer int
+@s FoldedZXContainer int
+@s UnfoldedZXContainer int
+@s FoldedGXContainer int
+@s UnfoldedGXContainer int
+@s TwoDMatrix int
+@s ConstTwoDMatrix int
+@s IntSequence int
+@s Symmetry int
+@s SymmetrySet int
+@s symiterator int
+@s TensorDimens int
+@s Vector int
+@s ConstVector int
+@s UTensorPolynomial int
+@s FTensorPolynomial int
+@s UFSTensor int
+@s FFSTensor int
+@s GeneralSylvester int
+
+@c
+#ifndef KORDER_H
+#define KORDER_H
+
+#include "int_sequence.h"
+#include "fs_tensor.h"
+#include "gs_tensor.h"
+#include "t_container.h"
+#include "stack_container.h"
+#include "normal_moments.h"
+#include "t_polynomial.h"
+#include "faa_di_bruno.h"
+#include "journal.h"
+
+#include "kord_exception.h"
+#include "GeneralSylvester.h"
+
+#include <cmath>
+
+#define TYPENAME typename
+
+@<|ctraits| type traits declaration@>;
+@<|PartitionY| struct declaration@>;
+@<|PLUMatrix| class declaration@>;
+@<|MatrixA| class declaration@>;
+@<|MatrixS| class declaration@>;
+@<|MatrixB| class declaration@>;
+@<|KOrder| class declaration@>;
+
+
+#endif
+
+@ Here we use a classical IF template, and in |ctraits| we define a
+number of types. We have a tensor type |Ttensor|, and types for
+each pair of folded/unfolded containers used as members in |KOrder|.
+
+Note that we have the enumeration values |fold| and |unfold|. These
+must have the same values as the corresponding enumeration in |KOrder|.
+
+@s IF int
+@s Then int
+@s Else int
+@s RET int
+
+@<|ctraits| type traits declaration@>=
+class FoldedZXContainer;
+class UnfoldedZXContainer;
+class FoldedGXContainer;
+class UnfoldedGXContainer;
+
+template<bool condition, class Then, class Else>
+struct IF {
+	typedef Then RET;
+};
+
+template<class Then, class Else>
+struct IF<false, Then, Else> {
+	typedef Else RET;
+};
+
+template <int type>
+class ctraits {
+public:@;
+	enum {@+ fold, unfold@+};
+	typedef TYPENAME IF<type==fold, FGSTensor, UGSTensor>::RET Ttensor;
+	typedef TYPENAME IF<type==fold, FFSTensor, UFSTensor>::RET Ttensym;
+	typedef TYPENAME IF<type==fold, FGSContainer, UGSContainer>::RET Tg;
+	typedef TYPENAME IF<type==fold, FGSContainer, UGSContainer>::RET Tgs;
+	typedef TYPENAME IF<type==fold, FGSContainer, UGSContainer>::RET Tgss;
+	typedef TYPENAME IF<type==fold, FGSContainer, UGSContainer>::RET TG;
+	typedef TYPENAME IF<type==fold, FoldedZContainer, UnfoldedZContainer>::RET TZstack;
+	typedef TYPENAME IF<type==fold, FoldedGContainer, UnfoldedGContainer>::RET TGstack;
+	typedef TYPENAME IF<type==fold, FNormalMoments, UNormalMoments>::RET Tm;
+	typedef TYPENAME IF<type==fold, FTensorPolynomial, UTensorPolynomial>::RET Tpol;
+	typedef TYPENAME IF<type==fold, FoldedZXContainer, UnfoldedZXContainer>::RET TZXstack;
+	typedef TYPENAME IF<type==fold, FoldedGXContainer, UnfoldedGXContainer>::RET TGXstack;
+};
+
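+@ For example, once the |fold|/|unfold| enumeration of |KOrder|
+(declared below) is in scope, the traits resolve as follows; the
+typedef names are hypothetical:
+
+	typedef ctraits<KOrder::fold>::Ttensor FoldTens;     // |FGSTensor|
+	typedef ctraits<KOrder::unfold>::Ttensor UnfoldTens; // |UGSTensor|
+	typedef ctraits<KOrder::fold>::Tpol FoldPol;         // |FTensorPolynomial|
+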
+
+@ The |PartitionY| class defines the partitioning of the endogenous
+variables $y$. The vector $y$ and the subvectors $y^*$ and $y^{**}$ are defined as
+$$y=\left[\matrix{\hbox{static}\cr\hbox{predeter}\cr\hbox{both}\cr
+				  \hbox{forward}}\right],\quad
+  y^*=\left[\matrix{\hbox{predeter}\cr\hbox{both}}\right],\quad
+  y^{**}=\left[\matrix{\hbox{both}\cr\hbox{forward}}\right],$$
+where ``static'' means variables appearing only at time $t$,
+``predeter'' means variables appearing at time $t-1$, but not at
+$t+1$, ``both'' means variables appearing both at $t-1$ and $t+1$
+(regardless of appearance at $t$), and ``forward'' means variables
+appearing at $t+1$, but not at $t-1$.
+
+The class maintains the four lengths, and returns the whole length,
+the length of $y^*$, and the length of $y^{**}$.
+
+@<|PartitionY| struct declaration@>=
+struct PartitionY {
+	const int nstat;
+	const int npred;
+	const int nboth;
+	const int nforw;
+	PartitionY(int num_stat, int num_pred,
+			   int num_both, int num_forw)
+		: nstat(num_stat), npred(num_pred),
+		  nboth(num_both), nforw(num_forw)
+		{}
+	int ny() const
+		{@+ return nstat+npred+nboth+nforw;@+}
+	int nys() const
+		{@+ return npred+nboth;@+}
+	int nyss() const
+		{@+ return nboth+nforw;@+}
+};
+
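+@ For instance, with the hypothetical sizes |num_stat|=2, |num_pred|=3,
+|num_both|=1 and |num_forw|=4, we get |ny()|=10, |nys()|=4
+(predetermined plus both) and |nyss()|=5 (both plus forward).
+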
+
+@ This is an abstraction for a square matrix with an attached PLU
+factorization. It can calculate the PLU factorization and apply the
+inverse to a given matrix.
+
+We use LAPACK $PLU$ decomposition for the inverse. We store the $L$
+and $U$ in the |inv| array and |ipiv| is the permutation $P$.
+
+@<|PLUMatrix| class declaration@>=
+class PLUMatrix : public TwoDMatrix {
+public:@;
+	PLUMatrix(int n)
+		: TwoDMatrix(n,n),
+		  inv(nrows()*ncols()),
+		  ipiv(new int[nrows()]) {}
+	PLUMatrix(const PLUMatrix& plu);
+	virtual ~PLUMatrix()
+		{delete [] ipiv;}
+	void multInv(TwoDMatrix& m) const;
+private:@;
+	Vector inv;
+	int* ipiv;
+protected:@;
+	void calcPLU();
+};
+
+@ The class |MatrixA| is used for matrix $\left[f_{y}\right]+ \left[0
+\left[f_{y^{**}_+}\right]\cdot\left[g^{**}_{y^*}\right] 0\right]$,
+which is central for the perturbation method step.
+
+@<|MatrixA| class declaration@>=
+class MatrixA : public PLUMatrix {
+public:@;
+	MatrixA(const FSSparseTensor& f, const IntSequence& ss,
+			const TwoDMatrix& gy, const PartitionY& ypart);
+};
+
+@ The class |MatrixS| slightly differs from |MatrixA|. It is used for
+matrix $$\left[f_{y}\right]+ \left[0
+\quad\left[f_{y^{**}_+}\right]\cdot\left[g^{**}_{y^*}\right]\quad
+0\right]+\left[0\quad 0\quad\left[f_{y^{**}_+}\right]\right]$$, which is
+needed when recovering $g_{\sigma^k}$.
+
+@<|MatrixS| class declaration@>=
+class MatrixS : public PLUMatrix {
+public:@;
+	MatrixS(const FSSparseTensor& f, const IntSequence& ss,
+			const TwoDMatrix& gy, const PartitionY& ypart);
+};
+
+
+@ The $B$ matrix is equal to $\left[f_{y^{**}_+}\right]$. We have just
+a constructor.
+
+@<|MatrixB| class declaration@>=
+class MatrixB : public TwoDMatrix {
+public:@;
+	MatrixB(const FSSparseTensor& f, const IntSequence& ss)
+		: TwoDMatrix(FGSTensor(f, ss, IntSequence(1,0),
+							   TensorDimens(ss, IntSequence(1,0))))
+		{}
+};
+
+@ Here we have the class for the higher order approximations. It
+contains the following data:
+
+\halign{\kern\parindent\vrule height12pt width0pt
+\vtop{\hsize=4cm\noindent\raggedright #}&\kern0.5cm\vtop{\hsize=10cm\noindent #}\cr
+variable sizes ypart& |PartitionY| struct maintaining partitions of
+$y$, see |@<|PartitionY| struct declaration@>|\cr
+tensor variable dimension |nvs|& variable sizes of all tensors in
+containers, sizes of $y^*$, $u$, $u'$@q'@> and $\sigma$\cr
+tensor containers & folded and unfolded containers for $g$, $g_{y^*}$,
+$g_{y^{**}}$ (the latter two collect appropriate subtensors of $g$; they
+do not allocate any new space), $G$, $G$ stack, $Z$ stack\cr
+dynamic model derivatives & just a reference to the container of
+sparse tensors of the system derivatives, lives outside the class\cr
+moments & both folded and unfolded normal moment containers, both are
+calculated at initialization\cr
+matrices & matrix $A$, matrix $S$, and matrix $B$, see |@<|MatrixA| class
+ declaration@>| and |@<|MatrixB| class declaration@>|\cr
+}
+
+\kern 0.4cm
+
+The methods are the following:
+\halign{\kern\parindent\vrule height12pt width0pt
+\vtop{\hsize=4cm\noindent\raggedright #}&\kern0.5cm\vtop{\hsize=10cm\noindent #}\cr
+member access & we declare template methods for accessing containers
+depending on |fold| and |unfold| flag, we implement their
+specializations\cr
+|performStep| & this performs the $k$-order step provided that $k=2$ or
+ the $(k-1)$-th step has been run; this is the core method\cr
+|check| & this calculates residuals of all solved equations at the
+ $k$-order and reports their sizes; it is runnable after the $k$-order
+ |performStep| has been run\cr
+|insertDerivative| & inserts a $g$ derivative to the $g$ container and
+ also creates subtensors and insert them to $g_{y^*}$ and $g_{y^{**}}$
+ containers\cr
+|sylvesterSolve| & solves the Sylvester equation (templated for fold
+ and unfold)\cr
+|faaDiBrunoZ| & calculates derivatives of $F$ by Faa Di Bruno for the
+sparse container of system derivatives and $Z$ stack container\cr
+|faaDiBrunoG| & calculates derivatives of $G$ by Faa Di Bruno for the
+ dense container $g^{**}$ and $G$ stack\cr
+|recover_y| & recovers $g_{y^{*i}}$\cr
+|recover_yu| & recovers $g_{y^{*i}u^j}$\cr
+|recover_ys| & recovers $g_{y^{*i}\sigma^j}$\cr
+|recover_yus| & recovers $g_{y^{*i}u^j\sigma^k}$\cr
+|recover_s| & recovers $g_{\sigma^i}$\cr
+|fillG| & calculates specified derivatives of $G$ and inserts them to
+the container\cr
+|calcE_ijk|& calculates $E_{ijk}$\cr
+|calcD_ijk|& calculates $D_{ijk}$\cr
+ }
+
+\kern 0.3cm
+
+Most of the code is templated, and the template types are calculated
+in |ctraits|. So all templated methods get a template argument |t|,
+which can be either |fold| or |unfold|. To shorten a reference to a
+type calculated by |ctraits| for a particular |t|, we define the
+following macros.
+
+@s _Ttensor int
+@s _Ttensym int
+@s _Tg int
+@s _Tgs int
+@s _Tgss int
+@s _TG int
+@s _TZstack int
+@s _TGstack int
+@s _TZXstack int
+@s _TGXstack int
+@s _Tm int
+@s _Tpol int
+@d _Ttensor TYPENAME ctraits<t>::Ttensor 
+@d _Ttensym TYPENAME ctraits<t>::Ttensym 
+@d _Tg TYPENAME ctraits<t>::Tg
+@d _Tgs TYPENAME ctraits<t>::Tgs
+@d _Tgss TYPENAME ctraits<t>::Tgss
+@d _TG TYPENAME ctraits<t>::TG
+@d _TZstack TYPENAME ctraits<t>::TZstack
+@d _TGstack TYPENAME ctraits<t>::TGstack
+@d _TZXstack TYPENAME ctraits<t>::TZXstack
+@d _TGXstack TYPENAME ctraits<t>::TGXstack
+@d _Tm TYPENAME ctraits<t>::Tm
+@d _Tpol TYPENAME ctraits<t>::Tpol
+
+
+@<|KOrder| class declaration@>=
+class KOrder {
+protected:@;
+	const PartitionY ypart;
+	const int ny;
+	const int nu;
+	const int maxk;
+	IntSequence nvs;
+	@<|KOrder| container members@>;
+	const MatrixA matA;
+	const MatrixS matS;
+	const MatrixB matB;
+	@<|KOrder| member access method declarations@>;
+	Journal& journal;
+public:@;
+	KOrder(int num_stat, int num_pred, int num_both, int num_forw,
+		   const TensorContainer<FSSparseTensor>& fcont,
+		   const TwoDMatrix& gy, const TwoDMatrix& gu, const TwoDMatrix& v,
+		   Journal& jr);
+	enum {@+ fold, unfold@+ };
+	@<|KOrder::performStep| templated code@>;
+	@<|KOrder::check| templated code@>;
+	@<|KOrder::calcStochShift| templated code@>;
+	void switchToFolded();
+	const PartitionY& getPartY() const
+		{@+ return ypart;@+}
+	const FGSContainer& getFoldDers() const
+		{@+ return _fg;@+}
+	const UGSContainer& getUnfoldDers() const
+		{@+ return _ug;@+}
+	static bool is_even(int i)
+		{@+ return (i/2)*2 == i;@+}
+protected:@;
+	@<|KOrder::insertDerivative| templated code@>;
+	template<int t>
+	void sylvesterSolve(_Ttensor& der) const;
+
+	@<|KOrder::faaDiBrunoZ| templated code@>;
+	@<|KOrder::faaDiBrunoG| templated code@>;
+
+	@<|KOrder::recover_y| templated code@>;
+	@<|KOrder::recover_yu| templated code@>;
+	@<|KOrder::recover_ys| templated code@>;
+	@<|KOrder::recover_yus| templated code@>;
+	@<|KOrder::recover_s| templated code@>;
+	@<|KOrder::fillG| templated code@>;
+
+	@<|KOrder::calcD_ijk| templated code@>;
+	@<|KOrder::calcD_ik| templated code@>;
+	@<|KOrder::calcD_k| templated code@>;
+
+	@<|KOrder::calcE_ijk| templated code@>;
+	@<|KOrder::calcE_ik| templated code@>;
+	@<|KOrder::calcE_k| templated code@>;
+};
+
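+@ A minimal usage sketch; |nstat|, |npred|, |nboth|, |nforw|, |fcont|,
+|gy|, |gu|, |vcov|, |journal| and |order| are hypothetical, and the
+sketch assumes that |performStep| takes the order as its single
+argument (its exact signature is given later in this file):
+
+	KOrder korder(nstat, npred, nboth, nforw, fcont, gy, gu, vcov, journal);
+	for (int k = 2; k <= order; k++)
+		korder.performStep<KOrder::unfold>(k);
+	korder.switchToFolded(); // folded derivatives then available via |getFoldDers()|
+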
+
+
+@ Here we insert the result into the container. Along with the
+insertion, we also create the subtensors and insert them as well.
+
+@<|KOrder::insertDerivative| templated code@>=
+template <int t>
+void insertDerivative(_Ttensor* der)
+{
+	g<t>().insert(der);
+	gs<t>().insert(new _Ttensor(ypart.nstat, ypart.nys(), *der));
+	gss<t>().insert(new _Ttensor(ypart.nstat+ypart.npred,
+								 ypart.nyss(), *der));
+}
+
+
+@ Here we implement the Faa Di Bruno formula
+$$\sum_{l=1}^k\left[f_{z^l}\right]_{\gamma_1\ldots\gamma_l}
+\sum_{c\in M_{l,k}}\prod_{m=1}^l\left[z_{s(c_m)}\right]^{\gamma_m},
+$$
+where $s$ is a given outer symmetry and $k$ is the dimension of the
+symmetry.
+
+@<|KOrder::faaDiBrunoZ| templated code@>=
+template <int t>
+_Ttensor* faaDiBrunoZ(const Symmetry& sym) const
+{
+	JournalRecordPair pa(journal);
+	pa << "Faa Di Bruno Z container for " << sym << endrec;
+	_Ttensor* res = new _Ttensor(ny, TensorDimens(sym, nvs));
+	FaaDiBruno bruno(journal);
+	bruno.calculate(Zstack<t>(), f, *res);
+	return res;
+}
+
+@ The same as |@<|KOrder::faaDiBrunoZ| templated code@>|, but for
+$g^{**}$ and the $G$ stack.
+
+@<|KOrder::faaDiBrunoG| templated code@>=
+template <int t>
+_Ttensor* faaDiBrunoG(const Symmetry& sym) const
+{
+	JournalRecordPair pa(journal);
+	pa << "Faa Di Bruno G container for " << sym << endrec;
+	TensorDimens tdims(sym, nvs);
+	_Ttensor* res = new _Ttensor(ypart.nyss(), tdims);
+	FaaDiBruno bruno(journal);
+	bruno.calculate(Gstack<t>(), gss<t>(), *res);
+	return res;
+}
+
+@ Here we solve $\left[F_{y^i}\right]=0$. First we calculate the
+conditional $G_{y^i}$ (it misses the terms for $l=1$ and $l=i$, since
+$g_{y^i}$ does not exist yet). Then we calculate the conditional
+$F_{y^i}$, and we have the right hand side of the equation. Since we
+miss two orders, we solve by the Sylvester equation, and insert the
+solution as the derivative $g_{y^i}$. Then we need to update $G_{y^i}$
+by running |multAndAdd| for both dimensions $1$ and $i$.
+
+{\bf Requires:} everything at order $\leq i-1$
+
+{\bf Provides:} $g_{y^i}$, and $G_{y^i}$
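+
+Written out (cf.\ the analogous equation displayed for $g_{\sigma^i}$
+below), the equation solved for $g_{y^i}$ takes the form
+$$\left[f_y\right]\left[g_{y^i}\right]+
+\left[f_{y^{**}_+}\right]\left[g^{**}_{y^*}\right]\left[g^*_{y^i}\right]+
+\left[f_{y^{**}_+}\right]\left[g^{**}_{y^i}\right]\left(\left[g^*_y\right]
+\otimes\cdots\otimes\left[g^*_y\right]\right)=\hbox{RHS}$$
+with $i$ copies of $\left[g^*_y\right]$ in the Kronecker product; the
+last two terms come from the two missing dimensions $l=1$ and $l=i$ of
+$G_{y^i}$, which is why $G_{y^i}$ is updated by |multAndAdd| afterwards.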
+
+@<|KOrder::recover_y| templated code@>=
+template<int t>
+void recover_y(int i)
+{
+	Symmetry sym(i,0,0,0);
+	JournalRecordPair pa(journal);
+	pa << "Recovering symmetry " << sym << endrec;
+
+	_Ttensor* G_yi = faaDiBrunoG<t>(sym);
+	G<t>().insert(G_yi);
+
+	_Ttensor* g_yi = faaDiBrunoZ<t>(sym);
+	g_yi->mult(-1.0);
+
+	sylvesterSolve<t>(*g_yi);
+
+	insertDerivative<t>(g_yi);
+
+	_Ttensor* gss_y = gss<t>().get(Symmetry(1,0,0,0));
+	gs<t>().multAndAdd(*gss_y, *G_yi);
+	_Ttensor* gss_yi = gss<t>().get(sym);
+	gs<t>().multAndAdd(*gss_yi, *G_yi);
+}
+
+
+@ Here we solve $\left[F_{y^iu^j}\right]=0$ to obtain $g_{y^iu^j}$ for
+$j>0$. We calculate the conditional $G_{y^iu^j}$ (this misses only the
+term for $l=1$) and the conditional $F_{y^iu^j}$, which gives the right
+hand side. It is solved by multiplication by the inverse of $A$. Then we
+insert the result, and update $G_{y^iu^j}$ by |multAndAdd| for $l=1$.
+
+{\bf Requires:} everything at order $\leq i+j-1$, $G_{y^{i+j}}$, and
+$g_{y^{i+j}}$.
+
+{\bf Provides:} $g_{y^iu^j}$, and $G_{y^iu^j}$
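+
+Written out, the equation solved here is
+$$\left(\left[f_y\right]+
+\left[\matrix{0&\left[f_{y^{**}_+}\right]\left[g^{**}_{y^*}\right]&0}\right]\right)
+\left[g_{y^iu^j}\right]=\hbox{RHS},$$
+the matrix in parentheses being $A$ (|matA| in the code), so a single
+multiplication by $A^{-1}$ yields $g_{y^iu^j}$.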
+
+@<|KOrder::recover_yu| templated code@>=
+template <int t>
+void recover_yu(int i, int j)
+{
+	Symmetry sym(i,j,0,0);
+	JournalRecordPair pa(journal);
+	pa << "Recovering symmetry " << sym << endrec;
+
+	_Ttensor* G_yiuj = faaDiBrunoG<t>(sym);
+	G<t>().insert(G_yiuj);
+
+	_Ttensor* g_yiuj = faaDiBrunoZ<t>(sym);
+	g_yiuj->mult(-1.0);
+	matA.multInv(*g_yiuj);
+	insertDerivative<t>(g_yiuj);
+
+	gs<t>().multAndAdd(*(gss<t>().get(Symmetry(1,0,0,0))), *G_yiuj);
+}
+
+@ Here we solve
+$\left[F_{y^i\sigma^j}\right]+\left[D_{ij}\right]+\left[E_{ij}\right]=0$
+to obtain $g_{y^i\sigma^j}$. We calculate the conditional
+$G_{y^i\sigma^j}$ (missing dimensions $1$ and $i+j$) and the
+conditional $F_{y^i\sigma^j}$. Before we can calculate $D_{ij}$ and
+$E_{ij}$, we have to calculate $G_{y^iu'^m\sigma^{j-m}}$ for
+$m=1,\ldots,j$. Then we add $D_{ij}$ and $E_{ij}$ to obtain the
+right hand side. Then we solve the Sylvester equation to obtain
+$g_{y^i\sigma^j}$. Finally we update $G_{y^i\sigma^j}$ for $l=1$ and
+$l=i+j$.
+
+{\bf Requires:} everything at order $\leq i+j-1$, $g_{y^{i+j}}$,
+$G_{y^iu'^j}$ and $g_{y^iu^j}$ through $D_{ij}$,
+$g_{y^iu^m\sigma^{j-m}}$ for
+$m=1,\ldots,j-1$ through $E_{ij}$.
+
+{\bf Provides:} $g_{y^i\sigma^j}$ and $G_{y^i\sigma^j}$, and finally
+$G_{y^iu'^m\sigma^{j-m}}$ for $m=1,\ldots,j$. The latter is calculated
+by |fillG| before the actual calculation.
+
+@<|KOrder::recover_ys| templated code@>=
+template <int t>
+void recover_ys(int i, int j)
+{
+	Symmetry sym(i,0,0,j);
+	JournalRecordPair pa(journal);
+	pa << "Recovering symmetry " << sym << endrec;
+
+	fillG<t>(i, 0, j);
+
+	if (is_even(j)) {
+		_Ttensor* G_yisj = faaDiBrunoG<t>(sym);
+		G<t>().insert(G_yisj);
+
+		_Ttensor* g_yisj = faaDiBrunoZ<t>(sym);
+
+		{
+			_Ttensor* D_ij = calcD_ik<t>(i, j);
+			g_yisj->add(1.0, *D_ij);
+			delete D_ij;
+		}
+
+		if (j >= 3) {
+			_Ttensor* E_ij = calcE_ik<t>(i, j);
+			g_yisj->add(1.0, *E_ij);
+			delete E_ij;
+		}
+
+		g_yisj->mult(-1.0);
+
+		sylvesterSolve<t>(*g_yisj);
+
+		insertDerivative<t>(g_yisj);
+
+		Gstack<t>().multAndAdd(1, gss<t>(), *G_yisj);
+		Gstack<t>().multAndAdd(i+j, gss<t>(), *G_yisj);
+	}
+}
+
+@ Here we solve
+$\left[F_{y^iu^j\sigma^k}\right]+\left[D_{ijk}\right]+\left[E_{ijk}\right]=0$
+to obtain $g_{y^iu^j\sigma^k}$. First we calculate the conditional
+$G_{y^iu^j\sigma^k}$ (missing only the term for dimension $l=1$), then we
+evaluate the conditional $F_{y^iu^j\sigma^k}$. Before we can calculate
+$D_{ijk}$ and $E_{ijk}$, we need to insert
+$G_{y^iu^ju'^m\sigma^{k-m}}$ for $m=1,\ldots, k$. This is done by
+|fillG|. Then we have the right hand side and we multiply by $A^{-1}$ to
+obtain $g_{y^iu^j\sigma^k}$. Finally we have to update
+$G_{y^iu^j\sigma^k}$ by |multAndAdd| for dimension $l=1$.
+
+{\bf Requires:} everything at order $\leq i+j+k-1$, $g_{y^{i+j}\sigma^k}$
+through $G_{y^iu^j\sigma^k}$ involved in the right hand side, then
+$g_{y^iu^{j+k}}$ through $D_{ijk}$, and $g_{y^iu^{j+m}\sigma^{k-m}}$
+for $m=1,\ldots,k-1$ through $E_{ijk}$.
+
+{\bf Provides:} $g_{y^iu^j\sigma^k}$, $G_{y^iu^j\sigma^k}$, and
+$G_{y^iu^ju'^m\sigma^{k-m}}$ for $m=1,\ldots, k$
+
+@<|KOrder::recover_yus| templated code@>=
+template <int t>
+void recover_yus(int i, int j, int k)
+{
+	Symmetry sym(i,j,0,k);
+	JournalRecordPair pa(journal);
+	pa << "Recovering symmetry " << sym << endrec;
+
+	fillG<t>(i, j, k);
+
+	if (is_even(k)) {
+		_Ttensor* G_yiujsk = faaDiBrunoG<t>(sym);
+		G<t>().insert(G_yiujsk);
+
+		_Ttensor* g_yiujsk = faaDiBrunoZ<t>(sym);
+
+		{
+			_Ttensor* D_ijk = calcD_ijk<t>(i,j,k);
+			g_yiujsk->add(1.0, *D_ijk);
+			delete D_ijk;
+		}
+
+		if (k >= 3) {
+			_Ttensor* E_ijk = calcE_ijk<t>(i,j,k);
+			g_yiujsk->add(1.0, *E_ijk);
+			delete E_ijk;
+		}
+
+		g_yiujsk->mult(-1.0);
+
+		matA.multInv(*g_yiujsk);
+		insertDerivative<t>(g_yiujsk);
+
+		Gstack<t>().multAndAdd(1, gss<t>(), *G_yiujsk);
+	}
+}
+
+@ Here we solve
+$\left[F_{\sigma^i}\right]+\left[D_i\right]+\left[E_i\right]=0$ to
+recover $g_{\sigma^i}$. First we calculate the conditional $G_{\sigma^i}$
+(missing dimensions $l=1$ and $l=i$), then we calculate the conditional
+$F_{\sigma^i}$. Before we can calculate $D_i$ and $E_i$, we have to
+obtain $G_{u'^m\sigma^{i-m}}$ for $m=1,\ldots,i$. Then,
+adding $D_i$ and $E_i$, we have the right hand side. We solve by
+multiplication by $S^{-1}$ and update $G_{\sigma^i}$ by calling
+|multAndAdd| for dimensions $l=1$ and $l=i$.
+
+Recall that the solved equation here is:
+$$
+\left[f_y\right]\left[g_{\sigma^i}\right]+
+\left[f_{y^{**}_+}\right]\left[g^{**}_{y^*}\right]\left[g^*_{\sigma^i}\right]+
+\left[f_{y^{**}_+}\right]\left[g^{**}_{\sigma^i}\right]=\hbox{RHS}
+$$
+This is a sort of deficient Sylvester equation (a Sylvester equation of
+dimension zero); we solve it by multiplication by $S^{-1}$. See |@<|MatrixS| constructor
+code@>| to see what $S$ looks like.
+
+{\bf Requires:} everything at order $\leq i-1$, $g_{y^i}$ and
+$g_{y^{i-j}\sigma^j}$, then $g_{u^k}$ through $F_{u'^k}$, and
+$g_{y^mu^j\sigma^k}$ for $j=1,\ldots,i-1$ and $m+j+k=i$ through
+$F_{u'^j\sigma^{i-j}}$.
+
+{\bf Provides:} $g_{\sigma^i}$, $G_{\sigma^i}$, and
+$G_{u'^m\sigma^{i-m}}$ for $m=1,\ldots,i$
+
+@<|KOrder::recover_s| templated code@>=
+template <int t>
+void recover_s(int i)
+{
+	Symmetry sym(0,0,0,i);
+	JournalRecordPair pa(journal);
+	pa << "Recovering symmetry " << sym << endrec;
+
+	fillG<t>(0, 0, i);
+
+	if (is_even(i)) {
+		_Ttensor* G_si = faaDiBrunoG<t>(sym);
+		G<t>().insert(G_si);
+
+		_Ttensor* g_si = faaDiBrunoZ<t>(sym);
+
+		{
+			_Ttensor* D_i = calcD_k<t>(i);
+			g_si->add(1.0, *D_i);
+			delete D_i;
+		}
+
+		if (i >= 3) {
+			_Ttensor* E_i = calcE_k<t>(i);
+			g_si->add(1.0, *E_i);
+			delete E_i;
+		}
+
+		g_si->mult(-1.0);
+
+
+		matS.multInv(*g_si);
+		insertDerivative<t>(g_si);
+
+		Gstack<t>().multAndAdd(1, gss<t>(), *G_si);
+		Gstack<t>().multAndAdd(i, gss<t>(), *G_si);
+	}
+}
+
+@ Here we calculate and insert $G_{y^iu^ju'^m\sigma^{k-m}}$ for
+$m=1,\ldots, k$. The derivatives are inserted only for even $k-m$.
+
+@<|KOrder::fillG| templated code@>=
+template<int t>
+void fillG(int i, int j, int k)
+{
+	for (int m = 1; m <= k; m++) {
+		if (is_even(k-m)) {
+			_Ttensor* G_yiujupms = faaDiBrunoG<t>(Symmetry(i,j,m,k-m));
+			G<t>().insert(G_yiujupms);
+		}
+	}
+}
+
+
+@ Here we calculate
+$$\left[D_{ijk}\right]_{\alpha_1\ldots\alpha_i\beta_1\ldots\beta_j}=
+\left[F_{y^iu^ju'^k}\right]
+_{\alpha_1\ldots\alpha_i\beta_1\ldots\beta_j\gamma_1\ldots\gamma_k}
+\left[\Sigma\right]^{\gamma_1\ldots\gamma_k}$$
+Since the odd moments of the normal distribution vanish, it is nonzero
+only for even $k$.
+
+@<|KOrder::calcD_ijk| templated code@>=
+template <int t>
+_Ttensor* calcD_ijk(int i, int j, int k) const
+{
+	_Ttensor* res =	new _Ttensor(ny, TensorDimens(Symmetry(i,j,0,0), nvs));
+	res->zeros();
+	if (is_even(k)) {
+		_Ttensor* tmp = faaDiBrunoZ<t>(Symmetry(i,j,k,0));
+		tmp->contractAndAdd(2, *res, *(m<t>().get(Symmetry(k))));
+		delete tmp;
+	}
+	return res;
+}		   
+
+
+@ Here we calculate
+$$\left[E_{ijk}\right]_{\alpha_1\ldots\alpha_i\beta_1\ldots\beta_j}=
+\sum_{m=1}^{k-1}\left(\matrix{k\cr m}\right)\left[F_{y^iu^ju'^m\sigma^{k-m}}\right]
+_{\alpha_1\ldots\alpha_i\beta_1\ldots\beta_j\gamma_1\ldots\gamma_m}
+\left[\Sigma\right]^{\gamma_1\ldots\gamma_m}$$
+Only the terms with even $m$ contribute to the sum.
+
+@<|KOrder::calcE_ijk| templated code@>=
+template <int t>
+_Ttensor* calcE_ijk(int i, int j, int k) const
+{
+	_Ttensor* res = new _Ttensor(ny, TensorDimens(Symmetry(i,j,0,0), nvs));
+	res->zeros();
+	for (int n = 2; n <= k-1; n+=2) {
+		_Ttensor* tmp = faaDiBrunoZ<t>(Symmetry(i,j,n,k-n));
+		tmp->mult((double)(Tensor::noverk(k,n)));
+		tmp->contractAndAdd(2, *res, *(m<t>().get(Symmetry(n))));
+		delete tmp;
+	}
+	return res;
+}
+
+@ 
+@<|KOrder::calcD_ik| templated code@>=
+template <int t>
+_Ttensor* calcD_ik(int i, int k) const
+{
+	return calcD_ijk<t>(i, 0, k);
+}
+
+@ 
+@<|KOrder::calcD_k| templated code@>=
+template <int t>
+_Ttensor* calcD_k(int k) const
+{
+	return calcD_ijk<t>(0, 0, k);
+}
+
+@ 
+@<|KOrder::calcE_ik| templated code@>=
+template <int t>
+_Ttensor* calcE_ik(int i, int k) const
+{
+	return calcE_ijk<t>(i, 0, k);
+}
+
+@ 
+@<|KOrder::calcE_k| templated code@>=
+template <int t>
+_Ttensor* calcE_k(int k) const
+{
+	return calcE_ijk<t>(0, 0, k);
+}
+
+@ Here is the core routine. It calls the methods recovering the derivatives
+in the right order. Recall that the code, namely the Faa Di Bruno formula,
+is implemented so as to be run conditionally on the current contents of
+the containers. So, if some call of Faa Di Bruno evaluates derivatives
+and some derivative is not present in the container, it is
+considered to be zero. Thus, we have to be very careful to do
+everything in the right order. The order here can be derived from the
+dependencies, or found in the paper.
+
+The method recovers all the derivatives of the given |order|.
+
+The precondition of the method is that all non-zero tensors of order
+|order-1| exist (including $G$). The postcondition of the
+method is that the derivatives of $g$ and $G$ of order |order| are
+calculated and stored in the containers. Responsibility for the
+precondition lies with the constructor (for |order==2|), or with the
+previous call of |performStep|.
+
+From the code it is clear that all $g$ are calculated. Going through
+all the recovering methods, one can verify that all $G$ are provided
+as well.
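+
+For example, for |order==3| the symmetries are recovered in the
+following sequence: $y^3$; then $u^3$, $yu^2$, $y^2u$; then
+$y^2\sigma$; then $yu\sigma$ and $y\sigma^2$; then $u^2\sigma$ and
+$u\sigma^2$; and finally $\sigma^3$.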
+
+@<|KOrder::performStep| templated code@>=
+template <int t>
+void performStep(int order)
+{
+	KORD_RAISE_IF(order-1 != g<t>().getMaxDim(),
+				  "Wrong order for KOrder::performStep");
+	JournalRecordPair pa(journal);
+	pa << "Performing step for order = " << order << endrec;
+
+	recover_y<t>(order);
+
+	for (int i = 0; i < order; i++) {
+		recover_yu<t>(i, order-i);
+	}
+
+	for (int j = 1; j < order; j++) {
+		for (int i = j-1; i >= 1; i--) {
+			recover_yus<t>(order-j,i,j-i);
+		}
+   		recover_ys<t>(order-j, j);
+	}
+
+	for (int i = order-1; i >= 1; i--) {
+		recover_yus<t>(0, i, order-i);
+	}
+	recover_s<t>(order);
+}
+
+@ Here we check for residuals of all the solved equations at the given
+order. The method returns the largest residual size. Each check simply
+evaluates the equation.
+
+@<|KOrder::check| templated code@>=
+template <int t>
+double check(int dim) const
+{
+	KORD_RAISE_IF(dim > g<t>().getMaxDim(),
+				  "Wrong dimension for KOrder::check");
+	JournalRecordPair pa(journal);
+	pa << "Checking residuals for order = " << dim << endrec;
+
+	double maxerror = 0.0;
+
+	@<check for $F_{y^iu^j}=0$@>;
+	@<check for $F_{y^iu^ju'^k}+D_{ijk}+E_{ijk}=0$@>;@q'@>
+	@<check for $F_{\sigma^i}+D_i+E_i=0$@>;
+
+	return maxerror;
+}
+
+
+@ 
+@<check for $F_{y^iu^j}=0$@>=
+	for (int i = 0;	i <= dim; i++) {
+		Symmetry sym(dim-i, i, 0,  0);
+		_Ttensor* r = faaDiBrunoZ<t>(sym);
+		double err = r->getData().getMax();
+		JournalRecord(journal) << "\terror for symmetry " << sym << "\tis " << err << endrec;
+		if (err > maxerror)
+			maxerror = err;
+		delete r;
+	}
+
+@ 
+@<check for $F_{y^iu^ju'^k}+D_{ijk}+E_{ijk}=0$@>=
+	SymmetrySet ss(dim, 3);
+	for (symiterator si(ss); !si.isEnd(); ++si) {
+		int i = (*si)[0];
+		int j = (*si)[1];
+		int k = (*si)[2];
+		if (i+j > 0 && k > 0) {
+			Symmetry sym(i, j, 0, k);
+			_Ttensor* r = faaDiBrunoZ<t>(sym);
+			_Ttensor* D_ijk = calcD_ijk<t>(i,j,k);
+			r->add(1.0, *D_ijk);
+			delete D_ijk;
+			_Ttensor* E_ijk = calcE_ijk<t>(i,j,k);
+			r->add(1.0, *E_ijk);
+			delete E_ijk;
+			double err = r->getData().getMax();
+			JournalRecord(journal) << "\terror for symmetry " << sym << "\tis " << err << endrec;
+			if (err > maxerror)
+				maxerror = err;
+			delete r;
+		}
+	}
+
+
+@ 
+@<check for $F_{\sigma^i}+D_i+E_i=0$@>=
+	_Ttensor* r = faaDiBrunoZ<t>(Symmetry(0,0,0,dim));
+	_Ttensor* D_k = calcD_k<t>(dim);
+	r->add(1.0, *D_k);
+	delete D_k;
+	_Ttensor* E_k = calcE_k<t>(dim);
+	r->add(1.0, *E_k);
+	delete E_k;
+	double err = r->getData().getMax();
+	Symmetry sym(0,0,0,dim);
+	JournalRecord(journal) << "\terror for symmetry " << sym << "\tis " << err << endrec;
+	if (err > maxerror)
+		maxerror = err;
+	delete r;
+
+@ This evaluates the sum $\sum_{j=1}^{o}{\sigma^j\over j!}\left[D_j\right]$,
+where $o$ is the given |order|; only the even $j$ contribute, since $D_j$
+vanishes for odd $j$.
+@<|KOrder::calcStochShift| templated code@>=
+template <int t>
+Vector* calcStochShift(int order, double sigma) const
+{
+	Vector* res = new Vector(ny);
+	res->zeros();
+	int jfac = 1;
+	for (int j = 1; j <= order; j++, jfac *= j)
+		if (is_even(j)) {
+			_Ttensor* ten = calcD_k<t>(j);
+			res->add(std::pow(sigma, j)/jfac, ten->getData());
+			delete ten;
+		}
+	return res;
+}
+
+
+@ These are the containers. Their names are not important, because they
+do not appear anywhere else; we access them only through the templated
+member functions.
+
+@<|KOrder| container members@>=
+	UGSContainer _ug;
+	FGSContainer _fg;
+	UGSContainer _ugs;
+	FGSContainer _fgs;
+	UGSContainer _ugss;
+	FGSContainer _fgss;
+	UGSContainer _uG;
+	FGSContainer _fG;
+	UnfoldedZContainer _uZstack;
+	FoldedZContainer _fZstack;
+	UnfoldedGContainer _uGstack;
+	FoldedGContainer _fGstack;
+	UNormalMoments _um;
+	FNormalMoments _fm;
+	const TensorContainer<FSSparseTensor>& f;
+
+@ These are the declarations of the template functions accessing the
+containers.
+
+@<|KOrder| member access method declarations@>=
+	template<int t> _Tg& g();
+	template<int t> const _Tg& g() const;
+	template<int t> _Tgs& gs();
+	template<int t> const _Tgs& gs() const;
+	template<int t> _Tgss& gss();
+	template<int t> const _Tgss& gss() const;
+	template<int t> _TG& G();
+	template<int t> const _TG& G() const;
+	template<int t> _TZstack& Zstack();
+	template<int t> const _TZstack& Zstack() const;
+	template<int t> _TGstack& Gstack();
+	template<int t> const _TGstack& Gstack() const;
+	template<int t> _Tm& m();
+	template<int t> const _Tm& m() const;
+
+
+@ End of {\tt korder.h} file.
diff --git a/dynare++/kord/korder_stoch.cweb b/dynare++/kord/korder_stoch.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..096ff54e50c2a2da903008bb736901f5be9c7200
--- /dev/null
+++ b/dynare++/kord/korder_stoch.cweb
@@ -0,0 +1,127 @@
+@q $Id: korder_stoch.cweb 148 2005-04-19 15:12:26Z kamenik $ @>
+@q Copyright 2005, Ondra Kamenik @>
+
+@ Start of {\tt korder\_stoch.cpp} file.
+@c
+#include "korder_stoch.h"
+
+@<|MatrixAA| constructor code@>;
+@<|KOrderStoch| folded constructor code@>;
+@<|KOrderStoch| unfolded constructor code@>;
+@<|KOrderStoch| convenience method specializations@>;
+
+@ Same as |@<|MatrixA| constructor code@>|, but the submatrix |gss_ys| is passed directly.
+@<|MatrixAA| constructor code@>=
+MatrixAA::MatrixAA(const FSSparseTensor& f, const IntSequence& ss,
+				   const TwoDMatrix& gss_ys, const PartitionY& ypart)
+	: PLUMatrix(ypart.ny())
+{
+	zeros();
+
+	IntSequence c(1); c[0] = 1;
+	FGSTensor f_y(f, ss, c, TensorDimens(ss, c));
+	add(1.0, f_y);
+
+	c[0] = 0;
+	FGSTensor f_yss(f, ss, c, TensorDimens(ss, c));
+	TwoDMatrix sub(*this, ypart.nstat, ypart.nys());
+	sub.multAndAdd(f_yss, gss_ys);
+
+	calcPLU();
+}
+	
+
+@ 
+@<|KOrderStoch| folded constructor code@>=
+KOrderStoch::KOrderStoch(const PartitionY& yp, int nu,
+						 const TensorContainer<FSSparseTensor>& fcont,
+						 const FGSContainer& hh, Journal& jr)
+	: nvs(4), ypart(yp), journal(jr),@/
+	  _ug(4), _fg(4), _ugs(4), _fgs(4), _uG(4), _fG(4),@/
+	  _uh(NULL), _fh(&hh),@/
+	  _uZstack(&_uG, ypart.nyss(), &_ug, ypart.ny(), ypart.nys(), nu),@/
+	  _fZstack(&_fG, ypart.nyss(), &_fg, ypart.ny(), ypart.nys(), nu),@/
+	  _uGstack(&_ugs, ypart.nys(), nu),@/
+	  _fGstack(&_fgs, ypart.nys(), nu),@/
+	  f(fcont),@/
+	  matA(*(fcont.get(Symmetry(1))), _uZstack.getStackSizes(), *(hh.get(Symmetry(1,0,0,0))),
+		   ypart)
+{
+	nvs[0] = ypart.nys();
+	nvs[1] = nu;
+	nvs[2] = nu;
+	nvs[3] = 1;
+}
+
+@ 
+@<|KOrderStoch| unfolded constructor code@>=
+KOrderStoch::KOrderStoch(const PartitionY& yp, int nu,
+						 const TensorContainer<FSSparseTensor>& fcont,
+						 const UGSContainer& hh, Journal& jr)
+	: nvs(4), ypart(yp), journal(jr),@/
+	  _ug(4), _fg(4), _ugs(4), _fgs(4), _uG(4), _fG(4),@/
+	  _uh(&hh), _fh(NULL),@/
+	  _uZstack(&_uG, ypart.nyss(), &_ug, ypart.ny(), ypart.nys(), nu),@/
+	  _fZstack(&_fG, ypart.nyss(), &_fg, ypart.ny(), ypart.nys(), nu),@/
+	  _uGstack(&_ugs, ypart.nys(), nu),@/
+	  _fGstack(&_fgs, ypart.nys(), nu),@/
+	  f(fcont),@/
+	  matA(*(fcont.get(Symmetry(1))), _uZstack.getStackSizes(), *(hh.get(Symmetry(1,0,0,0))),
+		   ypart)
+{
+	nvs[0] = ypart.nys();
+	nvs[1] = nu;
+	nvs[2] = nu;
+	nvs[3] = 1;
+}
+
+
+@ 
+@<|KOrderStoch| convenience method specializations@>=
+	template<> ctraits<KOrder::unfold>::Tg& KOrderStoch::g<KOrder::unfold>()
+		{@+ return _ug;@+}
+	template<>@; const ctraits<KOrder::unfold>::Tg& KOrderStoch::g<KOrder::unfold>()@+const@;
+		{@+ return _ug;@+}
+	template<> ctraits<KOrder::fold>::Tg& KOrderStoch::g<KOrder::fold>()
+		{@+ return _fg;@+}
+	template<> const ctraits<KOrder::fold>::Tg& KOrderStoch::g<KOrder::fold>()@+const@;
+		{@+ return _fg;@+}
+	template<> ctraits<KOrder::unfold>::Tgs& KOrderStoch::gs<KOrder::unfold>()
+		{@+ return _ugs;@+}
+	template<> const ctraits<KOrder::unfold>::Tgs& KOrderStoch::gs<KOrder::unfold>()@+const@;
+		{@+ return _ugs;@+}
+	template<> ctraits<KOrder::fold>::Tgs& KOrderStoch::gs<KOrder::fold>()
+		{@+ return _fgs;@+}
+	template<> const ctraits<KOrder::fold>::Tgs& KOrderStoch::gs<KOrder::fold>()@+const@;
+		{@+ return _fgs;@+}
+	template<> const ctraits<KOrder::unfold>::Tgss& KOrderStoch::h<KOrder::unfold>()@+const@;
+		{@+ return *_uh;@+}
+	template<> const ctraits<KOrder::fold>::Tgss& KOrderStoch::h<KOrder::fold>()@+const@;
+		{@+ return *_fh;@+}
+	template<> ctraits<KOrder::unfold>::TG& KOrderStoch::G<KOrder::unfold>()
+		{@+ return _uG;@+}
+	template<> const ctraits<KOrder::unfold>::TG& KOrderStoch::G<KOrder::unfold>()@+const@;
+		{@+ return _uG;@+}
+	template<> ctraits<KOrder::fold>::TG& KOrderStoch::G<KOrder::fold>()
+		{@+ return _fG;@+}
+	template<> const ctraits<KOrder::fold>::TG& KOrderStoch::G<KOrder::fold>()@+const@;
+		{@+ return _fG;@+}
+	template<> ctraits<KOrder::unfold>::TZXstack& KOrderStoch::Zstack<KOrder::unfold>()
+		{@+ return _uZstack;@+}
+	template<> const ctraits<KOrder::unfold>::TZXstack& KOrderStoch::Zstack<KOrder::unfold>()@+const@;
+		{@+ return _uZstack;@+}
+	template<> ctraits<KOrder::fold>::TZXstack& KOrderStoch::Zstack<KOrder::fold>()
+		{@+ return _fZstack;@+}
+	template<> const ctraits<KOrder::fold>::TZXstack& KOrderStoch::Zstack<KOrder::fold>()@+const@;
+		{@+ return _fZstack;@+}
+	template<> ctraits<KOrder::unfold>::TGXstack& KOrderStoch::Gstack<KOrder::unfold>()
+		{@+ return _uGstack;@+}
+	template<> const ctraits<KOrder::unfold>::TGXstack& KOrderStoch::Gstack<KOrder::unfold>()@+const@;
+		{@+ return _uGstack;@+}
+	template<> ctraits<KOrder::fold>::TGXstack& KOrderStoch::Gstack<KOrder::fold>()
+		{@+ return _fGstack;@+}
+	template<> const ctraits<KOrder::fold>::TGXstack& KOrderStoch::Gstack<KOrder::fold>()@+const@;
+		{@+ return _fGstack;@+}
+
+
+@ End of {\tt korder\_stoch.cpp} file.
diff --git a/dynare++/kord/korder_stoch.hweb b/dynare++/kord/korder_stoch.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..267ab7a6a698e86dd247f1cc0cf555b72d2e1389
--- /dev/null
+++ b/dynare++/kord/korder_stoch.hweb
@@ -0,0 +1,538 @@
+@q $Id: korder_stoch.hweb 418 2005-08-16 15:10:06Z kamenik $ @>
+@q Copyright 2005, Ondra Kamenik @>
+
+@*2 Higher order at stochastic steady. Start of {\tt korder\_stoch.h} file.
+
+This file defines a number of classes, of which |KOrderStoch| is the
+main purpose. Basically, |KOrderStoch| calculates a first and higher
+order Taylor expansion of a policy rule at $\sigma>0$ with an explicit
+forward $g^{**}$. More formally, we have to solve a policy rule $g$
+from
+$$E_t\left[f(g^{**}(g^*(y^*_t,u_t,\sigma),u_{t+1},\sigma),g(y^*,u_t,\sigma),y^*,u_t)\right]=0.$$
+As the introduction in {\tt approximation.hweb} argues, $g^{**}$ at
+time $t+1$ must be given from outside. Let the explicit
+$E_t\left[g^{**}(y^*,u_{t+1},\sigma)\right]$ be equal to $h(y^*,\sigma)$. Then we
+have to solve
+$$f(h(g^*(y^*,u,\sigma),\sigma),g(y,u,\sigma),y,u)=0,$$
+which is much easier than the fully implicit system at $\sigma=0$.
+
+Besides the class |KOrderStoch|, we declare here also classes for the
+new containers corresponding to
+$f(h(g^*(y^*,u,\sigma),\sigma),g(y,u,\sigma),y,u)$. Further, we
+declare |IntegDerivs| and |StochForwardDerivs| classes which basically
+calculate $h$ as an extrapolation based on an approximation to $g$ at
+lower $\sigma$.
+
+@s IntegDerivs int
+@s StochForwardDerivs int
+@s GContainer int
+@s ZContainer int
+@s GXContainer int
+@s ZXContainer int
+@s MatrixAA int
+@s KOrderStoch int
+@s StackContainer int
+@s _Ttype int
+@s _Ctype int
+@s UnfoldedStackContainer int
+@s FoldedStackContainer int
+
+@c
+#include "korder.h"
+#include "faa_di_bruno.h"
+#include "journal.h"
+
+
+@<|IntegDerivs| class declaration@>;
+@<|StochForwardDerivs| class declaration@>;
+@<|GXContainer| class declaration@>;
+@<|ZXContainer| class declaration@>;
+@<|UnfoldedGXContainer| class declaration@>;
+@<|FoldedGXContainer| class declaration@>;
+@<|UnfoldedZXContainer| class declaration@>;
+@<|FoldedZXContainer| class declaration@>;
+@<|MatrixAA| class declaration@>;
+@<|KOrderStoch| class declaration@>;
+
+@ This class is a container which has a specialized constructor
+integrating the policy rule at a given $\sigma$.
+
+@<|IntegDerivs| class declaration@>=
+template <int t>
+class IntegDerivs : public ctraits<t>::Tgss {
+public:@;
+	@<|IntegDerivs| constructor code@>;
+};
+
+@ This constructor integrates a rule (namely its $g^{**}$ part) with
+respect to $u=\tilde\sigma\eta$, and stores in the object the
+derivatives of this integral $h$ at $(y^*,u,\sigma)=(\tilde
+y^*,0,\tilde\sigma)$. The original container of $g^{**}$, the moments of
+the stochastic shocks |mom|, and the $\tilde\sigma$ are the inputs.
+
+The code follows the following derivation
+\def\lims{\vbox{\baselineskip=0pt\lineskip=1pt
+    \setbox0=\hbox{$\scriptstyle n+k=p$}\hbox to\wd0{\hss$\scriptstyle m=0$\hss}\box0}}
+$$
+\eqalign{h(y,\sigma)&=E_t\left[g(y,u',\sigma)\right]=\cr
+&=\tilde y+\sum_{d=1}{1\over d!}\sum_{i+j+k=d}\pmatrix{d\cr i,j,k}\left[g_{y^iu^j\sigma^k}\right]
+  (y^*-\tilde y^*)^i\sigma^j\Sigma^j(\sigma-\tilde\sigma)^k\cr
+&=\tilde y+\sum_{d=1}{1\over d!}\sum_{i+m+n+k=d}\pmatrix{d\cr i,m+n,k}
+  \left[g_{y^iu^{m+n}\sigma^k}\right]
+  \hat y^{*i}\Sigma^{m+n}\pmatrix{m+n\cr m,n}{\tilde\sigma}^m\hat\sigma^{k+n}\cr
+&=\tilde y+\sum_{d=1}{1\over d!}\sum_{i+m+n+k=d}\pmatrix{d\cr i,m,n,k}
+  \left[g_{y^iu^{m+n}\sigma^k}\right]
+  \Sigma^{m+n}{\tilde\sigma}^m\hat y^{*i}\hat\sigma^{k+n}\cr
+&=\tilde y+\sum_{d=1}{1\over d!}\sum_{i+p=d}\sum_{\lims}\pmatrix{d\cr i,m,n,k}
+  \left[g_{y^iu^{m+n}\sigma^k}\right]
+  \Sigma^{m+n}{\tilde\sigma}^m\hat y^{*i}\hat\sigma^{k+n}\cr
+&=\tilde y+\sum_{d=1}{1\over d!}\sum_{i+p=d}\pmatrix{d\cr i,p}
+  \left[\sum_{\lims}\pmatrix{p\cr n,k}{1\over m!}
+  \left[g_{y^iu^{m+n}\sigma^k}\right]
+  \Sigma^{m+n}{\tilde\sigma}^m\right]\hat y^{*i}\hat\sigma^{k+n},
+}
+$$
+where $\pmatrix{a\cr b_1,\ldots, b_n}$ is a generalized combination
+number, $p=k+n$, $\hat\sigma=\sigma-\tilde\sigma$, $\hat
+y^*=y^*-\tilde y$, and we dropped writing the multidimensional indexes
+in Einstein summation.
+
+This implies that
+$$h_{y^i\sigma^p}=\sum_{\lims}\pmatrix{p\cr n,k}{1\over m!}
+  \left[g_{y^iu^{m+n}\sigma^k}\right]
+  \Sigma^{m+n}{\tilde\sigma}^m$$
+and this is exactly what the code does.
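+
+For example, for $p=0$ the formula collapses to
+$$h_{y^i}=\sum_{m}{1\over m!}
+  \left[g_{y^iu^{m}}\right]\Sigma^{m}{\tilde\sigma}^m,$$
+i.e.\ the derivative of the integrated rule is the sum of the shock
+derivatives of $g$ contracted with the corresponding moments and
+weighted by powers of $\tilde\sigma$.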
+
+@<|IntegDerivs| constructor code@>=
+IntegDerivs(int r, const IntSequence& nvs, const _Tgss& g, const _Tm& mom,
+			double at_sigma)
+	: ctraits<t>::Tgss(4)
+{
+	int maxd = g.getMaxDim();
+	for (int d = 1; d <= maxd; d++) {
+		for (int i = 0; i <= d; i++) {
+			int p = d-i;
+			Symmetry sym(i,0,0,p);
+			_Ttensor* ten = new _Ttensor(r, TensorDimens(sym, nvs));
+			@<calculate derivative $h_{y^i\sigma^p}$@>;
+			insert(ten);
+		}
+	}
+}
+
+@ This code calculates
+$$h_{y^i\sigma^p}=\sum_{\lims}\pmatrix{p\cr n,k}{1\over m!}
+  \left[g_{y^iu^{m+n}\sigma^k}\right]
+  \Sigma^{m+n}{\tilde\sigma}^m$$
+and stores it in |ten|.
+
+@<calculate derivative $h_{y^i\sigma^p}$@>=
+	ten->zeros();
+	for (int n = 0; n <= p; n++) {
+		int k = p-n;
+		int povern = Tensor::noverk(p,n);
+		int mfac = 1;
+		for (int m = 0; i+m+n+k <= maxd; m++, mfac*=m) {
+			double mult = (pow(at_sigma,m)*povern)/mfac;
+			Symmetry sym_mn(i,m+n,0,k);
+			if (m+n == 0 && g.check(sym_mn))
+				ten->add(mult, *(g.get(sym_mn)));
+			if (m+n > 0 && KOrder::is_even(m+n) && g.check(sym_mn)) {
+				_Ttensor gtmp(*(g.get(sym_mn)));
+				gtmp.mult(mult);
+				gtmp.contractAndAdd(1, *ten, *(mom.get(Symmetry(m+n))));
+			}
+		}
+	}
+
+@ This class calculates an extrapolation of the expectation of the forward
+derivatives. It is a container; all calculations are done in the
+constructor.
+
+The class calculates derivatives of $E[g(y^*,u,\sigma)]$ at $(\bar
+y^*,\bar\sigma)$. The derivatives are extrapolated based on the
+derivatives at $(\tilde y^*,\tilde\sigma)$.
+
+@<|StochForwardDerivs| class declaration@>=
+template <int t>
+class StochForwardDerivs : public ctraits<t>::Tgss {
+public:@;
+	@<|StochForwardDerivs| constructor code@>;
+};
+
+@ This is the constructor which performs the integration and the
+extrapolation. Its parameters are: |g| is the container of derivatives
+at $(\tilde y,\tilde\sigma)$; |m| are the moments of the stochastic
+shocks; |ydelta| is the difference of the steady states, $\bar y-\tilde
+y$; |sdelta| is the difference between the new and the old sigma,
+$\bar\sigma-\tilde\sigma$; and |at_sigma| is $\tilde\sigma$. There is
+no need to input $\tilde y$ itself.
+
+We do the operation in four steps:
+\orderedlist
+\li Integrate $g^{**}$, the derivatives are at $(\tilde y,\tilde\sigma)$
+\li Form the (full symmetric) polynomial from the derivatives stacking
+$\left[\matrix{y^*\cr\sigma}\right]$
+\li Centralize this polynomial about $(\bar y,\bar\sigma)$
+\li Recover general symmetry tensors from the (full symmetric) polynomial
+\endorderedlist
+
+@<|StochForwardDerivs| constructor code@>=
+StochForwardDerivs(const PartitionY& ypart, int nu,
+				   const _Tgss& g, const _Tm& m,
+				   const Vector& ydelta, double sdelta,
+				   double at_sigma)
+	: ctraits<t>::Tgss(4)
+{
+	int maxd = g.getMaxDim();
+	int r = ypart.nyss();
+
+	@<make |g_int| be integral of $g^{**}$ at $(\tilde y,\tilde\sigma)$@>;
+	@<make |g_int_sym| be full symmetric polynomial from |g_int|@>;
+	@<make |g_int_cent| the centralized polynomial about $(\bar y,\bar\sigma)$@>;
+	@<pull out general symmetry tensors from |g_int_cent|@>;
+}
+
+@ This simply constructs the |IntegDerivs| object. Note that the |nvs| of
+the tensors has zero dimensions for the shocks; this is because we need
+to easily make stacks of the form $\left[\matrix{y^*\cr\sigma}\right]$ in
+the next step.
+
+@<make |g_int| be integral of $g^{**}$ at $(\tilde y,\tilde\sigma)$@>=
+	IntSequence nvs(4);
+	nvs[0] = ypart.nys(); nvs[1] = 0; nvs[2] = 0; nvs[3] = 1;
+	IntegDerivs<t> g_int(r, nvs, g, m, at_sigma);
+
+@ Here we just form a polynomial whose only variable corresponds to the
+$\left[\matrix{y^*\cr\sigma}\right]$ stack.
+
+@<make |g_int_sym| be full symmetric polynomial from |g_int|@>=
+	_Tpol g_int_sym(r, ypart.nys()+1);
+	for (int d = 1; d <= maxd; d++) {
+		_Ttensym* ten = new _Ttensym(r, ypart.nys()+1, d);
+		ten->zeros();
+		for (int i = 0; i <= d; i++) {
+			int k = d-i;
+			if (g_int.check(Symmetry(i,0,0,k)))
+				ten->addSubTensor(*(g_int.get(Symmetry(i,0,0,k))));
+		}
+		g_int_sym.insert(ten);
+	}
+
+@ Here we centralize the polynomial about $(\bar y,\bar\sigma)$, knowing
+that the polynomial was centralized about $(\tilde
+y,\tilde\sigma)$. This is done by differentiating the polynomial and
+evaluating its derivatives at $(\bar y-\tilde
+y,\bar\sigma-\tilde\sigma)$. The stacked vector is |delta| in
+the code.
+
+@<make |g_int_cent| the centralized polynomial about $(\bar y,\bar\sigma)$@>=
+	Vector delta(ypart.nys()+1);
+	Vector dy(delta, 0, ypart.nys());
+	ConstVector dy_in(ydelta, ypart.nstat, ypart.nys());
+	dy = dy_in;
+	delta[ypart.nys()] = sdelta;
+	_Tpol g_int_cent(r, ypart.nys()+1);
+	for (int d = 1; d <= maxd; d++) {
+		g_int_sym.derivative(d-1);
+		_Ttensym* der = g_int_sym.evalPartially(d, delta);
+		g_int_cent.insert(der);
+	}
+
+@ Here we only recover the general symmetry derivatives from the full
+symmetric polynomial. Note that the derivatives get the true |nvs|.
+
+@<pull out general symmetry tensors from |g_int_cent|@>=
+	IntSequence ss(4);
+	ss[0]=ypart.nys(); ss[1]=0; ss[2]=0; ss[3]=1;
+	IntSequence pp(4);
+	pp[0]=0; pp[1]=1; pp[2]=2; pp[3]=3;
+	IntSequence true_nvs(nvs);
+	true_nvs[1]=nu; true_nvs[2]=nu;
+	for (int d = 1; d <= maxd; d++) {
+		if (g_int_cent.check(Symmetry(d))) {
+			for (int i = 0;	 i <= d; i++) {
+				Symmetry sym(i, 0, 0, d-i);
+				IntSequence coor(sym, pp);
+				_Ttensor* ten = new _Ttensor(*(g_int_cent.get(Symmetry(d))), ss, coor,
+											 TensorDimens(sym, true_nvs));
+				insert(ten);
+			}
+		}
+	}
+
+
+@ This container corresponds to $h(g^*(y,u,\sigma),\sigma)$. Note that
+in our application, the $\sigma$ passed as the second argument to $h$
+corresponds to the fourth variable of the symmetry, so we have to use a
+four-member stack with the second and third stacks being dummies.
+
+@<|GXContainer| class declaration@>=
+template <class _Ttype>
+class GXContainer : public GContainer<_Ttype> {
+public:@;
+	typedef StackContainerInterface<_Ttype> _Stype;
+	typedef typename StackContainer<_Ttype>::_Ctype _Ctype;
+	typedef typename StackContainer<_Ttype>::itype itype;
+	GXContainer(const _Ctype* gs, int ngs, int nu)
+		: GContainer<_Ttype>(gs, ngs, nu)@+ {}
+	@<|GXContainer::getType| code@>;
+};
+
+@ This routine corresponds to this stack:
+$$\left[\matrix{g^*(y,u,\sigma)\cr dummy\cr dummy\cr\sigma}\right]$$
+
+@<|GXContainer::getType| code@>=
+itype getType(int i, const Symmetry& s) const
+{
+	if (i == 0)
+		if (s[2] > 0)
+			return _Stype::zero;
+		else
+			return _Stype::matrix;
+	if (i == 1)
+		return _Stype::zero;
+	if (i == 2)
+		return _Stype::zero;
+	if (i == 3)
+		if (s == Symmetry(0,0,0,1))
+			return _Stype::unit;
+		else
+			return _Stype::zero;
+
+	KORD_RAISE("Wrong stack index in GXContainer::getType");
+}
+
+
+@ This container corresponds to $f(H(y,u,\sigma),g(y,u,\sigma),y,u)$,
+where $H$ has the same size (number of rows) as $g^{**}$. Since it is
+very similar to |ZContainer|, we inherit from it and override only the
+|getType| method.
+
+@<|ZXContainer| class declaration@>=
+template <class _Ttype>
+class ZXContainer : public ZContainer<_Ttype> {
+public:@;
+	typedef StackContainerInterface<_Ttype> _Stype;
+	typedef typename StackContainer<_Ttype>::_Ctype _Ctype;
+	typedef typename StackContainer<_Ttype>::itype itype;
+	ZXContainer(const _Ctype* gss, int ngss, const _Ctype* g, int ng, int ny, int nu)
+		: ZContainer<_Ttype>(gss, ngss, g, ng, ny, nu) @+{}
+	@<|ZXContainer::getType| code@>;
+};
+
+@ This |getType| method corresponds to this stack:
+$$\left[\matrix{H(y,u,\sigma)\cr g(y,u,\sigma)\cr y\cr u}\right]$$
+
+@<|ZXContainer::getType| code@>=
+itype getType(int i, const Symmetry& s) const
+{
+	if (i == 0)
+		if (s[2] > 0)
+			return _Stype::zero;
+		else
+			return _Stype::matrix;
+	if (i == 1)
+		if (s[2] > 0)
+			return _Stype::zero;
+		else
+			return _Stype::matrix;
+	if (i == 2)
+		if (s == Symmetry(1,0,0,0))
+			return _Stype::unit;
+		else
+			return _Stype::zero;
+	if (i == 3)
+		if (s == Symmetry(0,1,0,0))
+			return _Stype::unit;
+		else
+			return _Stype::zero;
+
+	KORD_RAISE("Wrong stack index in ZXContainer::getType");
+}
+
+@ 
+@<|UnfoldedGXContainer| class declaration@>=
+class UnfoldedGXContainer : public GXContainer<UGSTensor>, public UnfoldedStackContainer {
+public:@;
+	typedef TensorContainer<UGSTensor> _Ctype;
+	UnfoldedGXContainer(const _Ctype* gs, int ngs, int nu)
+		: GXContainer<UGSTensor>(gs, ngs, nu)@+ {}
+};
+
+@ 
+@<|FoldedGXContainer| class declaration@>=
+class FoldedGXContainer : public GXContainer<FGSTensor>, public FoldedStackContainer {
+public:@;
+	typedef TensorContainer<FGSTensor> _Ctype;
+	FoldedGXContainer(const _Ctype* gs, int ngs, int nu)
+		: GXContainer<FGSTensor>(gs, ngs, nu)@+ {}
+};
+
+@ 
+@<|UnfoldedZXContainer| class declaration@>=
+class UnfoldedZXContainer : public ZXContainer<UGSTensor>, public UnfoldedStackContainer {
+public:@;
+	typedef TensorContainer<UGSTensor> _Ctype;
+	UnfoldedZXContainer(const _Ctype* gss, int ngss, const _Ctype* g, int ng, int ny, int nu)
+		: ZXContainer<UGSTensor>(gss, ngss, g, ng, ny, nu)@+ {}
+};
+
+@ 
+@<|FoldedZXContainer| class declaration@>=
+class FoldedZXContainer : public ZXContainer<FGSTensor>, public FoldedStackContainer {
+public:@;
+	typedef TensorContainer<FGSTensor> _Ctype;
+	FoldedZXContainer(const _Ctype* gss, int ngss, const _Ctype* g, int ng, int ny, int nu)
+		: ZXContainer<FGSTensor>(gss, ngss, g, ng, ny, nu)@+ {}
+};
+
+@ This matrix corresponds to
+$$\left[f_{y}\right]+
+\left[\matrix{0&\left[f_{y^{**}_+}\right]\cdot\left[h^{**}_{y^*}\right]&0}\right].$$
+This is much the same as |MatrixA|; the only difference is that
+|MatrixA| is constructed from the whole $h_{y^*}$, not only from
+$h^{**}_{y^*}$, hence the new abstraction.
+
+@<|MatrixAA| class declaration@>=
+class MatrixAA : public PLUMatrix {
+public:@;
+	MatrixAA(const FSSparseTensor& f, const IntSequence& ss,
+			 const TwoDMatrix& gyss, const PartitionY& ypart);
+};
+
+
+@ This class calculates derivatives of $g$ given implicitly by
+$f(h(g^*(y,u,\sigma),\sigma),g(y,u,\sigma),y,u)$, where $h(y,\sigma)$
+is given from outside.
+
+Structurally, the class is very similar to |KOrder|, but calculations
+are much easier. The two constructors construct an object from sparse
+derivatives of $f$, and derivatives of $h$. The caller must ensure
+that both sets of derivatives are evaluated at the same point.
+
+The calculation for order $k$ (including $k=1$) is done by a call to
+|performStep(k)|. The derivatives can be retrieved by |getFoldDers()|
+or |getUnfoldDers()|.
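+
+A minimal driving sequence (with |fcont| standing for the sparse model
+derivatives and |hh| for the container of $h$ derivatives; the names
+are only illustrative) is
+|KOrderStoch kstoch(ypart, nu, fcont, hh, journal)| followed by
+|kstoch.performStep<KOrder::fold>(k)| for $k=1,\ldots$, and finally
+|kstoch.getFoldDers()|.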
+
+@<|KOrderStoch| class declaration@>=
+class KOrderStoch {
+protected:@;
+	IntSequence nvs;
+	PartitionY ypart;
+	Journal& journal;
+	UGSContainer _ug;
+	FGSContainer _fg;
+	UGSContainer _ugs;
+	FGSContainer _fgs;
+	UGSContainer _uG;
+	FGSContainer _fG;
+	const UGSContainer* _uh;
+	const FGSContainer* _fh;
+	UnfoldedZXContainer _uZstack;
+	FoldedZXContainer _fZstack;
+	UnfoldedGXContainer _uGstack;
+	FoldedGXContainer _fGstack;
+	const TensorContainer<FSSparseTensor>& f;
+	MatrixAA matA;
+public:@;
+	KOrderStoch(const PartitionY& ypart, int nu, const TensorContainer<FSSparseTensor>& fcont,
+				const FGSContainer& hh, Journal& jr);
+	KOrderStoch(const PartitionY& ypart, int nu, const TensorContainer<FSSparseTensor>& fcont,
+				const UGSContainer& hh, Journal& jr);
+	@<|KOrderStoch::performStep| templated code@>;
+	const FGSContainer& getFoldDers() const
+		{@+ return _fg;@+}
+	const UGSContainer& getUnfoldDers() const
+		{@+ return _ug;@+}	
+protected:@;
+	@<|KOrderStoch::faaDiBrunoZ| templated code@>;
+	@<|KOrderStoch::faaDiBrunoG| templated code@>;
+	@<|KOrderStoch| convenience access methods@>;
+};
+
+@ This calculates a derivative of $f(G(y,u,\sigma),g(y,u,\sigma),y,u)$
+of a given symmetry.
+
+@<|KOrderStoch::faaDiBrunoZ| templated code@>=
+template <int t>
+_Ttensor* faaDiBrunoZ(const Symmetry& sym) const
+{
+	JournalRecordPair pa(journal);
+	pa << "Faa Di Bruno ZX container for " << sym << endrec;
+	_Ttensor* res = new _Ttensor(ypart.ny(), TensorDimens(sym, nvs));
+	FaaDiBruno bruno(journal);
+	bruno.calculate(Zstack<t>(), f, *res);
+	return res;
+}
+
+@ This calculates a derivative of
+$G(y,u,\sigma)=h(g^*(y,u,\sigma),\sigma)$ of a given symmetry.
+
+@<|KOrderStoch::faaDiBrunoG| templated code@>=
+template <int t>
+_Ttensor* faaDiBrunoG(const Symmetry& sym) const
+{
+	JournalRecordPair pa(journal);
+	pa << "Faa Di Bruno GX container for " << sym << endrec;
+	TensorDimens tdims(sym, nvs);
+	_Ttensor* res = new _Ttensor(ypart.nyss(), tdims);
+	FaaDiBruno bruno(journal);
+	bruno.calculate(Gstack<t>(), h<t>(), *res);
+	return res;
+}
+
+@ This retrieves all $g$ derivatives of a given dimension from the implicit
+$f(h(g^*(y,u,\sigma),\sigma),g(y,u,\sigma),y,u)$. It assumes that all
+derivatives of smaller dimensions have already been retrieved.
+
+So, we go through all symmetries $s$, calculate $G_s$ conditional on
+$g_s=0$, insert the derivative into the $G$ container, and then calculate
+$F_s$ conditional on $g_s=0$. This is the right hand side. The left hand
+side is $matA\cdot g_s$. The $g_s$ is retrieved as
+$$g_s=-matA^{-1}\cdot\hbox{RHS}.$$ Finally we have to update $G_s$ by
+calling |Gstack<t>().multAndAdd(1, h<t>(), *G_sym)|.
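+
+For instance, for |order==2| the recovered symmetries are $y^2$, $yu$,
+$y\sigma$, $u^2$, $u\sigma$ and $\sigma^2$, that is, all symmetries of
+dimension two with no $u'$ dimension.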
+
+@<|KOrderStoch::performStep| templated code@>=
+template <int t>
+void performStep(int order)
+{
+	int maxd = g<t>().getMaxDim();
+	KORD_RAISE_IF(order-1 != maxd && (order != 1 || maxd != -1),
+				  "Wrong order for KOrderStoch::performStep");
+	SymmetrySet ss(order, 4);
+	for (symiterator si(ss); !si.isEnd(); ++si) {
+		if ((*si)[2] == 0) {
+			JournalRecordPair pa(journal);
+			pa << "Recovering symmetry " << *si << endrec;
+
+			_Ttensor* G_sym = faaDiBrunoG<t>(*si);
+			G<t>().insert(G_sym);
+
+			_Ttensor* g_sym = faaDiBrunoZ<t>(*si);
+			g_sym->mult(-1.0);
+			matA.multInv(*g_sym);
+			g<t>().insert(g_sym);
+			gs<t>().insert(new _Ttensor(ypart.nstat, ypart.nys(), *g_sym));
+
+			Gstack<t>().multAndAdd(1, h<t>(), *G_sym);
+		}
+	}
+}
+
+@ 
+@<|KOrderStoch| convenience access methods@>=
+	template<int t> _Tg& g();
+	template<int t> const _Tg& g() const;
+	template<int t> _Tgs& gs();
+	template<int t> const _Tgs& gs() const;
+	template<int t> const _Tgss& h() const;
+	template<int t> _TG& G();
+	template<int t> const _TG& G() const;
+	template<int t> _TZXstack& Zstack();
+	template<int t> const _TZXstack& Zstack() const;
+	template<int t> _TGXstack& Gstack();
+	template<int t> const _TGXstack& Gstack() const;
+
+
+@ End of {\tt korder\_stoch.h} file.
diff --git a/dynare++/kord/main.web b/dynare++/kord/main.web
new file mode 100644
index 0000000000000000000000000000000000000000..ef3bdb22ade2acb27f473c74cef14e04e3892955
--- /dev/null
+++ b/dynare++/kord/main.web
@@ -0,0 +1,66 @@
+@q $Id: main.web 2333 2009-01-14 10:32:55Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+\let\ifpdf\relax
+\input eplain
+\def\title{{\mainfont Dynare++}}
+
+
+@i c++lib.w
+@s const_reverse_iterator int
+@s value_type int
+
+\titletrue
+\null\vfill
+\centerline{\titlefont Dynare++ DSGE solver}
+\vskip\baselineskip
+\centerline{\vtop{\hsize=10cm\leftskip=0pt plus 1fil
+  \rightskip=0pt plus 1fil\noindent
+	solves a higher order approximation to a decision rule of a Dynamic Stochastic
+	General Equilibrium model about the deterministic and stochastic fixed points}}
+\vfill\vfill
+Copyright \copyright\ 2004, 2005, 2006, 2007, 2008, 2009 by Ondra Kamenik
+
+@*1 Utilities.
+@i kord_exception.hweb
+
+@i journal.hweb
+@i journal.cweb
+
+@i normal_conjugate.hweb
+@i normal_conjugate.cweb
+
+@i random.hweb
+@i random.cweb
+
+@i mersenne_twister.hweb
+
+@i faa_di_bruno.hweb
+@i faa_di_bruno.cweb
+
+@*1 Retrieving derivatives.
+
+@i first_order.hweb
+@i first_order.cweb
+
+@i korder.hweb
+@i korder.cweb
+
+@i korder_stoch.hweb
+@i korder_stoch.cweb
+
+@*1 Putting all together.
+
+@i dynamic_model.hweb
+@i dynamic_model.cweb
+
+@i approximation.hweb
+@i approximation.cweb
+
+@i decision_rule.hweb
+@i decision_rule.cweb
+
+@i global_check.hweb
+@i global_check.cweb
+
+@*1 Index.
\ No newline at end of file
diff --git a/dynare++/kord/mersenne_twister.hweb b/dynare++/kord/mersenne_twister.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..f7e668e05388d4d0027c9cf02147792e08dea283
--- /dev/null
+++ b/dynare++/kord/mersenne_twister.hweb
@@ -0,0 +1,141 @@
+@q $Id: mersenne_twister.hweb 1490 2007-12-19 14:29:46Z kamenik $ @>
+@q Copyright 2007, Ondra Kamenik @>
+
+@*2 Mersenne Twister PRNG. Start of {\tt mersenne\_twister.h} file.
+
+This file provides a class for generating random numbers with
+encapsulated state. It is based on the work of Makoto Matsumoto and
+Takuji Nishimura; the implementation is inspired by the code of Richard
+Wagner and Geoff Kuenning.
+
+@s uint32 int
+@s MersenneTwister int
+
+@c
+#ifndef MERSENNE_TWISTER_H
+#define MERSENNE_TWISTER_H
+
+#include "random.h"
+#include <string.h>
+
+@<|MersenneTwister| class declaration@>;
+@<|MersenneTwister| inline method definitions@>;
+
+#endif
+
+@ 
+@<|MersenneTwister| class declaration@>=
+class MersenneTwister : public RandomGenerator {
+protected:@;
+	typedef unsigned int uint32; 
+	enum {STATE_SIZE = 624};
+	enum {RECUR_OFFSET = 397};
+	uint32 statevec[STATE_SIZE];
+	int stateptr;
+public:@;
+	MersenneTwister(uint32 iseed);
+	MersenneTwister(const MersenneTwister& mt);
+	virtual ~MersenneTwister() {}
+	uint32 lrand();
+	double drand();
+	double uniform()
+		{@+return drand();@+}
+protected:@;
+	void seed(uint32 iseed);
+	void refresh();
+private:@;
+	@<|MersenneTwister| static inline methods@>;
+};
+
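+@ As a small usage sketch (the seed value being arbitrary): an object
+created as |MersenneTwister mt(1234)| yields uniform draws on $[0,1)$
+via |mt.uniform()| (or raw 32-bit integers via |mt.lrand()|); since the
+copy constructor copies the whole state, a copy continues with exactly
+the same sequence as the original would.
+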
+@ 
+@<|MersenneTwister| static inline methods@>=
+	static uint32 hibit(uint32 u)
+		{return u & 0x80000000UL;}
+	static uint32 lobit(uint32 u)
+		{return u & 0x00000001UL;}
+	static uint32 lobits(uint32 u)
+		{return u & 0x7fffffffUL;}
+	static uint32 mixbits(uint32 u, uint32 v)
+		{return hibit(u) | lobits(v);}
+	static uint32 twist(uint32 m, uint32 s0, uint32 s1)
+		{return m ^ (mixbits(s0,s1)>>1) ^ (-lobit(s1) & 0x9908b0dfUL);}
+
+
+@ 
+@<|MersenneTwister| inline method definitions@>=
+	@<|MersenneTwister| constructor code@>;
+	@<|MersenneTwister| copy constructor code@>;
+	@<|MersenneTwister::lrand| code@>;
+	@<|MersenneTwister::drand| code@>;
+	@<|MersenneTwister::seed| code@>;
+	@<|MersenneTwister::refresh| code@>;
+
+@ 
+@<|MersenneTwister| constructor code@>=
+inline MersenneTwister::MersenneTwister(uint32 iseed)
+{
+	seed(iseed);
+}
+
+@ 
+@<|MersenneTwister| copy constructor code@>=
+inline MersenneTwister::MersenneTwister(const MersenneTwister& mt)
+	: stateptr(mt.stateptr)
+{
+	memcpy(statevec, mt.statevec, sizeof(uint32)*STATE_SIZE);
+}
+
+@ 
+@<|MersenneTwister::lrand| code@>=
+inline MersenneTwister::uint32 MersenneTwister::lrand()
+{
+	if (stateptr >= STATE_SIZE)
+		refresh();
+
+	register uint32 v = statevec[stateptr++];
+	v ^= v >> 11;
+	v ^= (v << 7) & 0x9d2c5680;
+	v ^= (v << 15) & 0xefc60000;
+	return (v ^ (v >> 18));
+}
+
+@ This produces a double in $[0,1)$ with 53-bit resolution: |a| keeps 27
+bits, |b| keeps 26 bits, and the result is $(a\cdot2^{26}+b)/2^{53}$.
+@<|MersenneTwister::drand| code@>=
+inline double MersenneTwister::drand()
+{
+	uint32 a = lrand() >> 5;
+	uint32 b = lrand() >> 6;
+	return (a*67108864.0+b) * (1.0/9007199254740992.0);
+}
+
+@ The initial state is generated with the PRNG of D.~Knuth.
+@<|MersenneTwister::seed| code@>=
+inline void MersenneTwister::seed(uint32 iseed)
+{
+	statevec[0] = iseed & 0xffffffffUL;
+    for (int i = 1;  i < STATE_SIZE;  i++) {
+		register uint32 val = statevec[i-1] >> 30;
+		val ^= statevec[i-1];
+		val *= 1812433253ul;
+		val += i;
+		statevec[i] = val & 0xffffffffUL;
+	}
+
+	refresh();
+}
+
+@ 
+@<|MersenneTwister::refresh| code@>=
+inline void MersenneTwister::refresh()
+{
+	register uint32* p = statevec;
+	for (int i = STATE_SIZE-RECUR_OFFSET; i--; ++p)
+		*p = twist(p[RECUR_OFFSET], p[0], p[1]);
+	for (int i = RECUR_OFFSET; --i; ++p)
+		*p = twist(p[RECUR_OFFSET-STATE_SIZE], p[0], p[1]);
+	*p = twist(p[RECUR_OFFSET-STATE_SIZE], p[0], statevec[0]);
+
+	stateptr = 0;
+}
+
+@ End of {\tt mersenne\_twister.h} file.
diff --git a/dynare++/kord/normal_conjugate.cweb b/dynare++/kord/normal_conjugate.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..b03992a2b66aae4bb6a54ba7c4cf6d5259ceae2c
--- /dev/null
+++ b/dynare++/kord/normal_conjugate.cweb
@@ -0,0 +1,123 @@
+@q $Id$ @>
+@q Copyright 2007, Ondra Kamenik @>
+
+@ Start of {\tt normal\_conjugate.cpp} file.
+
+@c
+
+#include "normal_conjugate.h"
+#include "kord_exception.h"
+
+@<|NormalConj| diffuse prior constructor@>;
+@<|NormalConj| data update constructor@>;
+@<|NormalConj| copy constructor@>;
+@<|NormalConj::update| one observation code@>;
+@<|NormalConj::update| multiple observations code@>;
+@<|NormalConj::update| with |NormalConj| code@>;
+@<|NormalConj::getVariance| code@>;
+
+@ 
+@<|NormalConj| diffuse prior constructor@>=
+NormalConj::NormalConj(int d)
+	: mu(d), kappa(0), nu(-1), lambda(d,d)
+{
+	mu.zeros();
+	lambda.zeros();
+}
+
+@ 
+@<|NormalConj| data update constructor@>=
+NormalConj::NormalConj(const ConstTwoDMatrix& ydata)
+	: mu(ydata.numRows()), kappa(ydata.numCols()), nu(ydata.numCols()-1),
+	  lambda(ydata.numRows(), ydata.numRows())
+{
+	mu.zeros();
+	for (int i = 0; i < ydata.numCols(); i++)
+		mu.add(1.0/ydata.numCols(), ConstVector(ydata, i));
+
+	lambda.zeros();
+	for (int i = 0; i < ydata.numCols(); i++) {
+		Vector diff(ConstVector(ydata, i));
+		diff.add(-1, mu);
+		lambda.addOuter(diff);
+	}
+}
+
+@ 
+@<|NormalConj| copy constructor@>=
+NormalConj::NormalConj(const NormalConj& nc)
+	: mu(nc.mu), kappa(nc.kappa), nu(nc.nu), lambda(nc.lambda)
+{
+}
+
+@ The method performs the following:
+$$\eqalign{
+  \mu_1 = &\; {\kappa_0\over \kappa_0+1}\mu_0 + {1\over \kappa_0+1}y\cr
+  \kappa_1 = &\; \kappa_0 + 1\cr
+  \nu_1 = &\; \nu_0 + 1\cr
+  \Lambda_1 = &\; \Lambda_0 + {\kappa_0\over\kappa_0+1}(y-\mu_0)(y-\mu_0)^T,
+}$$
+
+@<|NormalConj::update| one observation code@>=
+void NormalConj::update(const ConstVector& y)
+{
+	KORD_RAISE_IF(y.length() != mu.length(),
+				  "Wrong length of a vector in NormalConj::update");
+
+	mu.mult(kappa/(1.0+kappa));
+	mu.add(1.0/(1.0+kappa), y);
+
+	Vector diff(y);
+	diff.add(-1, mu);
+	lambda.addOuter(diff, kappa/(1.0+kappa));
+
+	kappa++;
+	nu++;
+}
+
+@ The method evaluates the formula in the header file.
+
+@<|NormalConj::update| multiple observations code@>=
+void NormalConj::update(const ConstTwoDMatrix& ydata)
+{
+	NormalConj nc(ydata);
+	update(nc);
+}
+
+
+@ 
+@<|NormalConj::update| with |NormalConj| code@>=
+void NormalConj::update(const NormalConj& nc)
+{
+	double wold = ((double)kappa)/(kappa+nc.kappa);
+	double wnew = 1-wold;
+
+	mu.mult(wold);
+	mu.add(wnew, nc.mu);
+
+	Vector diff(nc.mu);
+	diff.add(-1, mu);
+	lambda.add(1.0, nc.lambda);
+	lambda.addOuter(diff);
+
+	kappa = kappa + nc.kappa;
+	nu = nu + nc.kappa;
+}
+
+
+@ This returns ${1\over \nu-d-1}\Lambda$, which is the mean of the
+variance in the posterior distribution. If the number of degrees of
+freedom $\nu$ is not greater than $d+1$, so that this mean does not
+exist, then NaNs are returned.
+
+@<|NormalConj::getVariance| code@>=
+void NormalConj::getVariance(TwoDMatrix& v) const
+{
+	if (nu > getDim()+1) {
+		v = (const TwoDMatrix&)lambda;
+		v.mult(1.0/(nu-getDim()-1));
+	} else
+		v.nans();
+}
+
+
+@ End of {\tt normal\_conjugate.cpp} file.
diff --git a/dynare++/kord/normal_conjugate.hweb b/dynare++/kord/normal_conjugate.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..03b7d2f28f2eb6a8d6a3be7bde054db8deb41215
--- /dev/null
+++ b/dynare++/kord/normal_conjugate.hweb
@@ -0,0 +1,82 @@
+@q $Id$ @>
+@q Copyright 2007, Ondra Kamenik @>
+
+@*2 Conjugate family for normal distribution. Start of {\tt
+normal\_conjugate.h} file.
+
+The main purpose here is to implement a class representing conjugate
+distributions for mean and variance of the normal distribution. The
+class has two main methods: the first one is to update itself with
+respect to one observation, and the second one is to update itself with
+respect to another object of the class. In both methods, the
+previous state of the class corresponds to the prior distribution, and
+the final state corresponds to the posterior distribution.
+
+The algebra can be found in Gelman, Carlin, Stern, Rubin (p.87). It
+goes as follows: the prior conjugate distribution takes the following form:
+$$\eqalign{
+  \Sigma \sim& {\rm InvWishart}_{\nu_0}(\Lambda_0^{-1}) \cr
+  \mu\vert\Sigma \sim& N(\mu_0,\Sigma/\kappa_0)
+}$$
+If the observations are $y_1\ldots y_n$, then the posterior distribution has the
+same form with the following parameters:
+$$\eqalign{
+  \mu_n = &\; {\kappa_0\over \kappa_0+n}\mu_0 + {n\over \kappa_0+n}\bar y\cr
+  \kappa_n = &\; \kappa_0 + n\cr
+  \nu_n = &\; \nu_0 + n\cr
+  \Lambda_n = &\; \Lambda_0 + S + {\kappa_0 n\over\kappa_0+n}(\bar y-\mu_0)(\bar y-\mu_0)^T,
+}$$
+where
+$$\eqalign{
+  \bar y = &\; {1\over n}\sum_{i=1}^ny_i\cr
+  S = &\; \sum_{i=1}^n(y_i-\bar y)(y_i-\bar y)^T
+}$$
+
+@s NormalConj int
+
+@c
+#ifndef NORMAL_CONJUGATE_H
+#define NORMAL_CONJUGATE_H
+
+#include "twod_matrix.h"
+
+@<|NormalConj| class declaration@>;
+
+#endif
+
+@ The class is described by the four parameters: $\mu$, $\kappa$, $\nu$ and $\Lambda$.
+
+@<|NormalConj| class declaration@>=
+class NormalConj {
+protected:@;
+	Vector mu;
+	int kappa;
+	int nu;
+	TwoDMatrix lambda;
+public:@;
+	@<|NormalConj| constructors@>;
+	virtual ~NormalConj() @+{}
+	void update(const ConstVector& y);
+	void update(const ConstTwoDMatrix& ydata);
+	void update(const NormalConj& nc);
+	int getDim() const
+	{@+ return mu.length();@+}
+	const Vector& getMean() const
+	{@+ return mu;@+}
+	void getVariance(TwoDMatrix& v) const;
+};
+
+@ We provide the following constructors: the first constructs the diffuse
+(Jeffreys) prior. It sets $\kappa$ and $\Lambda$ to zeros, $\nu$ to
+$-1$, and also the mean $\mu$ to zero (it should not be
+referenced). The second constructs the posterior using the diffuse
+prior and the observed data (stored columnwise). The third is a copy
+constructor.
+
+@<|NormalConj| constructors@>=
+	NormalConj(int d);
+	NormalConj(const ConstTwoDMatrix& ydata);
+	NormalConj(const NormalConj& nc);
+
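+@ As a usage sketch (with |ydata| standing for a matrix of observations
+stored columnwise; the name is only illustrative): a diffuse prior of the
+right dimension is created by |NormalConj nc(ydata.numRows())|, the
+posterior is obtained by |nc.update(ydata)|, and its moments are then
+available through |nc.getMean()| and |nc.getVariance(v)| for a
+|TwoDMatrix| |v| of size |nc.getDim()| by |nc.getDim()|.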
+
+@ End of {\tt normal\_conjugate.h} file.
diff --git a/dynare++/kord/random.cweb b/dynare++/kord/random.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..ae1a5e7b36b7484bd8d8e6b3fd10782dfb992d8d
--- /dev/null
+++ b/dynare++/kord/random.cweb
@@ -0,0 +1,63 @@
+@q $Id: random.cweb 1491 2007-12-19 14:36:53Z kamenik $ @>
+@q Copyright 2007, Ondra Kamenik @>
+
+@ Start of {\tt random.cpp} file.
+@c
+
+#include "random.h"
+
+#include <stdlib.h>
+#include <limits>
+#include <cmath>
+
+@<|RandomGenerator::int_uniform| code@>;
+@<|RandomGenerator::normal| code@>;
+SystemRandomGenerator system_random_generator;
+@<|SystemRandomGenerator::uniform| code@>;
+@<|SystemRandomGenerator::initSeed| code@>;
+
+@ 
+@<|RandomGenerator::int_uniform| code@>=
+int RandomGenerator::int_uniform()
+{
+	double s = std::numeric_limits<int>::max()*uniform();
+	return (int)s;
+}
+
+@ This implements the Marsaglia polar method: we draw $(x_1,x_2)$
+uniformly from $[-1,1]^2$ until the point falls inside the unit circle
+(rejecting also points too close to the origin), and then transform it
+into a draw from the standard normal distribution.
+@<|RandomGenerator::normal| code@>=
+double RandomGenerator::normal()
+{
+	double x1, x2;
+	double w;
+	do {
+		x1 = 2*uniform()-1;
+		x2 = 2*uniform()-1;
+		w = x1*x1 + x2*x2;
+	} while (w >= 1.0 || w < 1.0e-30);
+	return x1*std::sqrt((-2.0*std::log(w))/w);
+}
+
+@ 
+@<|SystemRandomGenerator::uniform| code@>=
+double SystemRandomGenerator::uniform()
+{
+#ifndef __MINGW32__
+	return drand48();
+#else
+	return ((double)rand())/RAND_MAX;
+#endif
+}
+
+@ 
+@<|SystemRandomGenerator::initSeed| code@>=
+void SystemRandomGenerator::initSeed(int seed)
+{
+#ifndef __MINGW32__
+	srand48(seed);
+#else
+	srand(seed);
+#endif
+}
+
+@ End of {\tt random.cpp} file.
diff --git a/dynare++/kord/random.hweb b/dynare++/kord/random.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..8b92c68860b768c4c14c44dbc19a32ea0d395e7a
--- /dev/null
+++ b/dynare++/kord/random.hweb
@@ -0,0 +1,39 @@
+@q $Id: random.hweb 2335 2009-01-14 10:35:21Z kamenik $ @>
+@q Copyright 2007, Ondra Kamenik @>
+
+@*2 Random number generation. Start of {\tt random.h} file.
+
+@s RandomGenerator int
+@s SystemRandomGenerator int
+
+@c
+#ifndef RANDOM_H
+#define RANDOM_H
+
+@<|RandomGenerator| class declaration@>;
+@<|SystemRandomGenerator| class declaration@>;
+extern SystemRandomGenerator system_random_generator;
+
+#endif
+
+@ This is a general interface to an object able to generate random
+numbers. A subclass needs to implement the |uniform| method; the others
+are, by default, implemented here.
+@<|RandomGenerator| class declaration@>=
+class RandomGenerator {
+public:@;
+	virtual double uniform() = 0;
+	int int_uniform();
+	double normal();
+};
+
+@ This implements the |RandomGenerator| interface with the system
+|drand48| or |rand|. It is not thread aware.
+@<|SystemRandomGenerator| class declaration@>=
+class SystemRandomGenerator : public RandomGenerator {
+public:@;
+	double uniform();
+	void initSeed(int seed);
+};
+
+@ End of {\tt random.h} file.
diff --git a/dynare++/kord/tests.cpp b/dynare++/kord/tests.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..75f18a6b926a9f43b2e6537cce120bd925629e2d
--- /dev/null
+++ b/dynare++/kord/tests.cpp
@@ -0,0 +1,413 @@
+/* $Id: tests.cpp 148 2005-04-19 15:12:26Z kamenik $ */
+/* Copyright 2004, Ondra Kamenik */
+
+#include "korder.h"
+#include "SylvException.h"
+
+struct Rand {
+	static void init(int n1, int n2, int n3, int n4, int n5);
+	static double get(double m);
+	static int get(int m);
+	static bool discrete(double prob); // answers true with given probability
+};
+
+void Rand::init(int n1, int n2, int n3, int n4, int n5)
+{
+	long int seed = n1;
+	seed = 256*seed+n2;
+	seed = 256*seed+n3;
+	seed = 256*seed+n4;
+	seed = 256*seed+n5;
+	srand48(seed);
+}
+
+double Rand::get(double m)
+{
+	return 2*m*(drand48()-0.5);
+}
+
+int Rand::get(int m)
+{
+	return (int)(Rand::get(0.9999*m));
+}
+
+bool Rand::discrete(double prob)
+{
+	return drand48() < prob;
+}
+
+struct SparseGenerator {
+	static FSSparseTensor* makeTensor(int dim, int nv, int r,
+									  double fill, double m);
+	static void fillContainer(TensorContainer<FSSparseTensor>& c,
+							  int maxdim, int nv, int r, double m);
+};
+
+FSSparseTensor* SparseGenerator::makeTensor(int dim, int nv, int r,
+											double fill, double m)
+{
+	FSSparseTensor* res = new FSSparseTensor(dim, nv, r);
+	FFSTensor dummy(0, nv, dim);
+	for (Tensor::index fi = dummy.begin(); fi != dummy.end(); ++fi) {
+		for (int i = 0; i < r; i++) {
+			if (Rand::discrete(fill)) {
+				double x = Rand::get(m);
+				res->insert(fi.getCoor(), i, x);
+			}
+		}
+	}
+	return res;
+}
+
+void SparseGenerator::fillContainer(TensorContainer<FSSparseTensor>& c,
+									int maxdim, int nv, int r,
+									double m)
+{
+	Rand::init(maxdim, nv, r, (int)(5*m), 0);
+	double fill = 0.5;
+	for (int d = 1; d <= maxdim; d++) {
+		c.insert(makeTensor(d,nv,r,fill,m));
+		fill *= 0.3;
+	}
+}
+
+const double vdata [] = { // 3x3
+	0.1307870268, 0.1241940078, 0.1356703123,
+	0.1241940078, 0.1986920419, 0.2010160581,
+	0.1356703123, 0.2010160581, 0.2160336975
+};
+
+
+const double gy_data [] = { // 8x4
+	0.3985178619, -0.5688233582, 0.9572900437, -0.6606847776, 0.1453004017,
+	0.3025310675, -0.8627437750, -0.6903410191, 0.4751910580, -0.7270018589,
+	-0.0939612498, -0.1463831989, 0.6742110220, 0.6046671043, 0.5215893126,
+	-1.0412969986, -0.3524898417, -1.0986703430, 0.8006531522, 0.8879776376,
+	-0.1037608317, -0.5587378073, -0.1010366945, 0.9462411248, -0.2439199881,
+	1.3420621236, -0.7820285935, 0.3205293447, 0.3606124791, 0.2975422208,
+	-0.5452861965, 1.6320340279 
+};
+
+const double gu_data [] = { // just some numbers, no structure
+	1.8415286914, -0.2638743845, 1.7690713274, 0.9668585956, 0.2303143646, 
+	-0.2229624279, -0.4381991822, 1.0082401405, -0.3186555860, -0.0624691529, 
+	-0.5189085756, 1.4269672156, 0.1163282969, 1.4020183445, -0.0952660426, 
+	0.2099097124, 0.6912400502, -0.5180935114, 0.5288316624, 0.2188053448, 
+	0.5715516767, 0.7813893410, -0.6385073106, 0.8335131513, 0.3605202168, 
+	-1.1167944865, -1.2263750934, 0.6113636081, 0.6964915482, -0.6451217688, 
+	0.4062810500, -2.0552251116, -1.6383406284, 0.0198915095, 0.0111014458, 
+	-1.2421792262, -1.0724161722, -0.4276904972, 0.1801494950, -2.0716473264
+};
+
+const double vdata2 [] = { // 10x10 positive definite
+	0.79666, -0.15536, 0.05667, -0.21026, 0.20262, 0.28505, 0.60341, -0.09703, 0.32363, 0.13299, 
+	-0.15536, 0.64380, -0.01131, 0.00980, 0.03755, 0.43791, 0.21784, -0.31755, -0.55911, -0.29655, 
+	0.05667, -0.01131, 0.56165, -0.34357, -0.40584, 0.20990, 0.28348, 0.20398, -0.19856, 0.35820, 
+	-0.21026, 0.00980, -0.34357, 0.56147, 0.10972, -0.34146, -0.49906, -0.19685, 0.21088, -0.31560, 
+	0.20262, 0.03755, -0.40584, 0.10972, 0.72278, 0.02155, 0.04089, -0.19696, 0.03446, -0.12919, 
+	0.28505, 0.43791, 0.20990, -0.34146, 0.02155, 0.75867, 0.77699, -0.31125, -0.55141, -0.02155, 
+	0.60341, 0.21784, 0.28348, -0.49906, 0.04089, 0.77699, 1.34553, -0.18613, -0.25811, -0.19016, 
+	-0.09703, -0.31755, 0.20398, -0.19685, -0.19696, -0.31125, -0.18613, 0.59470, 0.08386, 0.41750, 
+	0.32363, -0.55911, -0.19856, 0.21088, 0.03446, -0.55141, -0.25811, 0.08386, 0.98917, -0.12992, 
+	0.13299, -0.29655, 0.35820, -0.31560, -0.12919, -0.02155, -0.19016, 0.41750, -0.12992, 0.89608
+};
+
+const double gy_data2 [] = { // 600 items make gy 30x20, whose gy(6:25,:) has spectrum within the unit circle
+	0.39414, -0.29766, 0.08948, -0.19204, -0.00750, 0.21159, 0.05494, 0.06225, 0.01771, 0.21913, 
+	-0.01373, 0.20086, -0.06086, -0.10955, 0.14424, -0.08390, 0.03948, -0.14713, 0.11674, 0.05091, 
+	0.24039, 0.28307, -0.11835, 0.13030, 0.11682, -0.27444, -0.19311, -0.16654, 0.12867, 0.25116, 
+	-0.19781, 0.45242, -0.15862, 0.24428, -0.11966, 0.11483, -0.32279, 0.29727, 0.20934, -0.18190, 
+	-0.15080, -0.09477, -0.30551, -0.02672, -0.26919, 0.11165, -0.06390, 0.03449, -0.26622, 0.22197, 
+	0.45141, -0.41683, 0.09760, 0.31094, -0.01652, 0.05809, -0.04514, -0.05645, 0.00554, 0.47980, 
+	0.11726, 0.42459, -0.13136, -0.30902, -0.14648, 0.11455, 0.02947, -0.03835, -0.04044, 0.03559, 
+	-0.26575, -0.01783, 0.31243, -0.14412, -0.13218, -0.05080, 0.18576, 0.13840, -0.05560, 0.35530, 
+	-0.25573, -0.11560, 0.15187, -0.18431, 0.08193, -0.32278, 0.17560, -0.05529, -0.10020, -0.23088, 
+	-0.20979, -0.49245, 0.09915, -0.16909, -0.03443, 0.19497, 0.18473, 0.25662, 0.29605, -0.20531, 
+	-0.39244, -0.43369, 0.05588, 0.24823, -0.14236, -0.08311, 0.16371, -0.19975, 0.30605, -0.17087, 
+	-0.01270, 0.00123, -0.22426, -0.13810, 0.05079, 0.06971, 0.01922, -0.09952, -0.23177, -0.41962, 
+	-0.41991, 0.41430, -0.04247, -0.13706, -0.12048, -0.28906, -0.22813, -0.25057, -0.18579, -0.20642, 
+	-0.47976, 0.25490, -0.05138, -0.30794, 0.31651, 0.02034, 0.12954, -0.20110, 0.13336, -0.40775, 
+	-0.30195, -0.13704, 0.12396, 0.28152, 0.02986, 0.27669, 0.24623, 0.08635, -0.11956, -0.02949, 
+	0.37401, 0.20838, 0.24801, -0.26872, 0.11195, 0.00315, -0.19069, 0.12839, -0.23036, -0.48228, 
+	0.08434, -0.39872, -0.28896, -0.28754, 0.24668, 0.23285, 0.25437, 0.10456, -0.14124, 0.20483, 
+	-0.19117, -0.33836, -0.24875, 0.08207, -0.03930, 0.20364, 0.15384, -0.15270, 0.24372, -0.11199, 
+	-0.46591, 0.30319, 0.05745, 0.09084, 0.06058, 0.31884, 0.05071, -0.28899, -0.30793, -0.03566, 
+	0.02286, 0.28178, 0.00736, -0.31378, -0.18144, -0.22346, -0.27239, 0.31043, -0.26228, 0.22181, 
+	-0.15096, -0.36953, -0.06032, 0.21496, 0.29545, -0.13112, 0.16420, -0.07573, -0.43111, -0.43057, 
+	0.26716, -0.31209, -0.05866, -0.29101, -0.27437, -0.18727, 0.28732, -0.19014, 0.08837, 0.30405, 
+	0.06103, -0.35612, 0.00173, 0.25134, -0.08987, -0.22766, -0.03254, -0.18662, -0.08491, 0.49401, 
+	-0.12145, -0.02961, -0.03668, -0.30043, -0.08555, 0.01701, -0.12544, 0.10969, -0.48202, 0.07245, 
+	0.20673, 0.11408, 0.04343, -0.01815, -0.31594, -0.23632, -0.06258, -0.27474, 0.12180, 0.16613, 
+	-0.37931, 0.30219, 0.15765, 0.25489, 0.17529, -0.17020, -0.30060, 0.22058, -0.02450, -0.42143, 
+	0.49642, 0.46899, -0.28552, -0.22549, -0.01333, 0.21567, 0.22251, 0.21639, -0.19194, -0.19140, 
+	-0.24106, 0.10952, -0.11019, 0.29763, -0.02039, -0.25748, 0.23169, 0.01357, 0.09802, -0.19022, 
+	0.37604, -0.40777, 0.18131, -0.10258, 0.29573, -0.31773, 0.09069, -0.02198, -0.26594, 0.48302, 
+	-0.10041, 0.20210, -0.05609, -0.01169, -0.17339, 0.17862, -0.22502, 0.29009, -0.45160, 0.19771, 
+	0.27634, 0.31695, -0.09993, 0.17167, 0.12394, 0.28088, -0.12502, -0.16967, -0.06296, -0.17036, 
+	0.27320, 0.01595, 0.16955, 0.30146, -0.15173, -0.29807, 0.08178, -0.06811, 0.21655, 0.26348, 
+	0.06316, 0.45661, -0.29756, -0.05742, -0.14715, -0.03037, -0.16656, -0.08768, 0.38078, 0.40679, 
+	-0.32779, -0.09106, 0.16107, -0.07301, 0.07700, -0.22694, -0.15692, -0.02548, 0.38749, -0.12203, 
+	-0.02980, -0.22067, 0.00680, -0.23058, -0.29112, 0.23032, -0.16026, 0.23392, -0.09990, 0.03628, 
+	-0.42592, -0.33474, -0.09499, -0.17442, -0.20110, 0.24618, -0.06418, -0.06715, 0.40754, 0.29377, 
+	0.29543, -0.16832, -0.08468, 0.06491, -0.01410, 0.19988, 0.24950, 0.14626, -0.27851, 0.06079, 
+	0.48134, -0.13475, 0.25398, 0.11738, 0.23369, -0.00661, -0.16811, -0.04557, -0.12030, -0.39527, 
+	-0.35760, 0.01840, -0.15941, 0.03290, 0.09988, -0.08307, 0.06644, -0.24637, 0.34112, -0.08026, 
+	0.00951, 0.27656, 0.16247, 0.28217, 0.17198, -0.16389, -0.03835, -0.02675, -0.08032, -0.21045, 
+	-0.38946, 0.23207, 0.10987, -0.31674, -0.28653, -0.27430, -0.29109, -0.00648, 0.38431, -0.38478, 
+	-0.41195, -0.19364, -0.20977, -0.05524, 0.05558, -0.20109, 0.11803, -0.19884, 0.43318, -0.39255, 
+	0.26612, -0.21771, 0.12471, 0.12856, -0.15104, -0.11676, 0.17582, -0.25330, 0.00298, -0.31712, 
+	0.21532, -0.20319, 0.14507, -0.04588, -0.22995, -0.06470, 0.18849, -0.13444, 0.37107, 0.07387, 
+	-0.14008, 0.09896, 0.13727, -0.28417, -0.09461, -0.18703, 0.04080, 0.02343, -0.49988, 0.17993, 
+	0.23189, -0.30581, -0.18334, -0.09667, -0.27699, -0.05998, 0.09118, -0.32453, 0.46251, 0.41500, 
+	-0.45314, -0.00544, 0.08529, 0.29099, -0.00937, -0.31650, 0.26163, 0.14506, 0.37498, -0.16454, 
+	0.35215, 0.31642, -0.09161, -0.31452, -0.04792, -0.04677, -0.19523, 0.27998, 0.05491, 0.44461, 
+	-0.01258, -0.27887, 0.18361, -0.04539, -0.02977, 0.30821, 0.29454, -0.17932, 0.16193, 0.23934, 
+	0.47923, 0.25373, 0.23258, 0.31484, -0.17958, -0.01136, 0.17681, 0.12869, 0.03235, 0.43762, 
+	0.13734, -0.09433, -0.03735, 0.17949, 0.14122, -0.17814, 0.06359, 0.16044, 0.12249, -0.22314, 
+	0.40775, 0.05147, 0.12389, 0.04290, -0.01642, 0.00082, -0.18056, 0.02875, 0.32690, 0.17712, 
+	0.34001, -0.21581, -0.01086, -0.18180, 0.17480, -0.17774, -0.07503, 0.28438, -0.19747, 0.29595, 
+	-0.28002, -0.02073, -0.16522, -0.18234, -0.20565, 0.29620, 0.07502, 0.01429, -0.31418, 0.43693, 
+	-0.12212, 0.11178, -0.28503, 0.04683, 0.00072, 0.05566, 0.18857, 0.26101, -0.38891, -0.21216, 
+	-0.21850, -0.15147, -0.30749, -0.23762, 0.14984, 0.03535, -0.02862, -0.00105, -0.39907, -0.06909, 
+	-0.36094, 0.21717, 0.15930, -0.18924, 0.13741, 0.01039, 0.13613, 0.00659, 0.07676, -0.13711, 
+	0.24285, -0.07564, -0.28349, -0.15658, 0.03135, -0.30909, -0.22534, 0.17363, -0.19376, 0.26038, 
+	0.05546, -0.22607, 0.32420, -0.02552, -0.05400, 0.13388, 0.04643, -0.31535, -0.06181, 0.30237, 
+	-0.04680, -0.29441, 0.12231, 0.03960, -0.01188, 0.01406, 0.25402, 0.03315, 0.25026, -0.10922
+};
+
+const double gu_data2 [] = { // raw data 300 items
+	0.26599, 0.41329, 0.31846, 0.92590, 0.43050, 0.17466, 0.02322, 0.72621, 0.37921, 0.70597, 
+	0.97098, 0.14023, 0.57619, 0.09938, 0.02281, 0.92341, 0.72654, 0.71000, 0.76687, 0.70182, 
+	0.88752, 0.49524, 0.42549, 0.42806, 0.57615, 0.76051, 0.15341, 0.47457, 0.60066, 0.40880, 
+	0.20668, 0.41949, 0.97620, 0.94318, 0.71491, 0.56402, 0.23553, 0.94387, 0.78567, 0.06362, 
+	0.85252, 0.86262, 0.25190, 0.03274, 0.93216, 0.37971, 0.08797, 0.14596, 0.73871, 0.06574, 
+	0.67447, 0.28575, 0.43911, 0.92133, 0.12327, 0.87762, 0.71060, 0.07141, 0.55443, 0.53310, 
+	0.91529, 0.25121, 0.07593, 0.94490, 0.28656, 0.82174, 0.68887, 0.67337, 0.99291, 0.03316, 
+	0.02849, 0.33891, 0.25594, 0.90071, 0.01248, 0.67871, 0.65953, 0.65369, 0.97574, 0.31578, 
+	0.23678, 0.39220, 0.06706, 0.80943, 0.57694, 0.08220, 0.18151, 0.19969, 0.37096, 0.37858, 
+	0.70153, 0.46816, 0.76511, 0.02520, 0.39387, 0.25527, 0.39050, 0.60141, 0.30322, 0.46195, 
+	0.12025, 0.33616, 0.04174, 0.00196, 0.68886, 0.74445, 0.15869, 0.18994, 0.95195, 0.62874, 
+	0.82874, 0.53369, 0.34383, 0.50752, 0.97023, 0.22695, 0.62407, 0.25840, 0.71279, 0.28785, 
+	0.31611, 0.20391, 0.19702, 0.40760, 0.85158, 0.68369, 0.63760, 0.09879, 0.11924, 0.32920, 
+	0.53052, 0.15900, 0.21229, 0.84080, 0.33933, 0.93651, 0.42705, 0.06199, 0.50092, 0.47192, 
+	0.57152, 0.01818, 0.31404, 0.50173, 0.87725, 0.50530, 0.10717, 0.04035, 0.32901, 0.33538, 
+	0.04780, 0.40984, 0.78216, 0.91288, 0.11314, 0.25248, 0.23823, 0.74001, 0.48089, 0.55531, 
+	0.82486, 0.01058, 0.05409, 0.44357, 0.52641, 0.68188, 0.94629, 0.61627, 0.33037, 0.11961, 
+	0.57988, 0.19653, 0.91902, 0.59838, 0.52974, 0.28364, 0.45767, 0.65836, 0.63045, 0.76140, 
+	0.27918, 0.27256, 0.46035, 0.77418, 0.92918, 0.14095, 0.89645, 0.25146, 0.21172, 0.47910, 
+	0.95451, 0.34377, 0.29927, 0.79220, 0.97654, 0.67591, 0.44385, 0.38434, 0.44860, 0.28170, 
+	0.90712, 0.20337, 0.00292, 0.55046, 0.62255, 0.45127, 0.80896, 0.43965, 0.59145, 0.23801, 
+	0.33601, 0.30119, 0.89935, 0.40850, 0.98226, 0.75430, 0.68318, 0.65407, 0.68067, 0.32942, 
+	0.11756, 0.27626, 0.83879, 0.72174, 0.75430, 0.13702, 0.03402, 0.58781, 0.07393, 0.23067, 
+	0.92537, 0.29445, 0.43437, 0.47685, 0.54548, 0.66082, 0.23805, 0.60208, 0.94337, 0.21363, 
+	0.72637, 0.57181, 0.77679, 0.63931, 0.72860, 0.38901, 0.94920, 0.04535, 0.12863, 0.40550, 
+	0.90095, 0.21418, 0.13953, 0.99639, 0.02526, 0.70018, 0.21828, 0.20294, 0.20191, 0.30954, 
+	0.39490, 0.68955, 0.11506, 0.15748, 0.40252, 0.91680, 0.61547, 0.78443, 0.19693, 0.67630, 
+	0.56552, 0.58556, 0.53554, 0.53507, 0.09831, 0.21229, 0.83135, 0.26375, 0.89287, 0.97069, 
+	0.70615, 0.42041, 0.43117, 0.21291, 0.26086, 0.26978, 0.77340, 0.43833, 0.46179, 0.54418, 
+	0.67878, 0.42776, 0.61454, 0.55915, 0.36363, 0.31999, 0.42442, 0.86649, 0.62513, 0.02047
+};
+
+class TestRunnable {
+	char name[100];
+public:
+	int dim; // dimension of the solved problem
+	int nvar; // number of variables of the solved problem
+	TestRunnable(const char* n, int d, int nv)
+		: dim(d), nvar(nv)
+		{strncpy(name, n, 99); name[99] = '\0';}
+	bool test() const;
+	virtual bool run() const =0;
+	const char* getName() const
+		{return name;}
+protected:
+	static double korder_unfold_fold(int maxdim, int unfold_dim,
+									 int nstat, int npred, int nboth, int forw,
+									 const TwoDMatrix& gy, const TwoDMatrix& gu,
+									 const TwoDMatrix& v);
+};
+
+
+bool TestRunnable::test() const
+{
+	printf("Running test <%s>\n",name);
+	clock_t start = clock();
+	bool passed = run();
+	clock_t end = clock();
+	printf("CPU time %8.4g (CPU seconds)..................",
+		   ((double)(end-start))/CLOCKS_PER_SEC);
+	if (passed) {
+		printf("passed\n\n");
+		return passed;
+	} else {
+		printf("FAILED\n\n");
+		return passed;
+	}
+}
+
+double TestRunnable::korder_unfold_fold(int maxdim, int unfold_dim,
+										int nstat, int npred, int nboth, int nforw,
+										const TwoDMatrix& gy, const TwoDMatrix& gu,
+										const TwoDMatrix& v)
+{
+	TensorContainer<FSSparseTensor> c(1);
+	int ny = nstat+npred+nboth+nforw;
+	int nu = v.nrows();
+	int nz = nboth+nforw+ny+nboth+npred+nu;
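+	// nz counts the columns the model derivatives are taken with
+	// respect to: the forward-looking block (nboth+nforw), current
+	// variables (ny), the predetermined block (nboth+npred), and the
+	// shocks (nu)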
+	SparseGenerator::fillContainer(c, maxdim, nz, ny, 5.0);
+	for (int d = 1; d <= maxdim; d++) {
+		printf("\ttensor fill for dim=%d is:   %3.2f %%\n",
+			   d, c.get(Symmetry(d))->getFillFactor()*100.0);
+	}
+	Journal jr("out.txt");
+	KOrder kord(nstat, npred, nboth, nforw, c, gy, gu, v, jr);
+	// perform unfolded steps until unfold_dim
+	double maxerror = 0.0;
+	for (int d = 2; d <= unfold_dim; d++) {
+		clock_t pertime = clock();
+		kord.performStep<KOrder::unfold>(d);
+		pertime = clock()-pertime;
+		printf("\ttime for unfolded step dim=%d: %8.4g\n",
+			   d, ((double)(pertime))/CLOCKS_PER_SEC);
+		clock_t checktime = clock();
+		double err = kord.check<KOrder::unfold>(d);
+		checktime = clock()-checktime;
+		printf("\ttime for step check dim=%d:    %8.4g\n",
+			   d, ((double)(checktime))/CLOCKS_PER_SEC);
+		printf("\tmax error in step dim=%d:      %10.6g\n",
+			   d, err);
+		if (maxerror < err)
+			maxerror = err;
+	}
+	// perform folded steps until maxdim
+	if (unfold_dim < maxdim) {
+		clock_t swtime = clock();
+		kord.switchToFolded();
+		swtime = clock()-swtime;
+		printf("\ttime for switching dim=%d:   %8.4g\n",
+			   unfold_dim, ((double)(swtime))/CLOCKS_PER_SEC);
+		
+		for (int d = unfold_dim+1; d <= maxdim; d++) {
+			clock_t pertime = clock();
+			kord.performStep<KOrder::fold>(d);
+			pertime = clock()-pertime;
+			printf("\ttime for folded step dim=%d: %8.4g\n",
+				   d, ((double)(pertime))/CLOCKS_PER_SEC);
+			clock_t checktime = clock();
+			double err = kord.check<KOrder::fold>(d);
+			checktime = clock()-checktime;
+			printf("\ttime for step check dim=%d:    %8.4g\n",
+				   d, ((double)(checktime))/CLOCKS_PER_SEC);
+			printf("\tmax error in step dim=%d:      %10.6g\n",
+				   d, err);
+			if (maxerror < err)
+				maxerror = err;
+		}
+	}
+	return maxerror;
+}
+
+class UnfoldKOrderSmall : public TestRunnable {
+public:
+	UnfoldKOrderSmall()
+		: TestRunnable("unfold-3 fold-4 korder (stat=2,pred=3,both=1,forw=2,u=3,dim=4)",
+					   4, 18) {}
+
+	bool run() const
+		{
+			TwoDMatrix gy(8, 4, gy_data);
+			TwoDMatrix gu(8, 3, gu_data);
+			TwoDMatrix v(3, 3, vdata);
+			double err = korder_unfold_fold(4, 3, 2, 3, 1, 2,
+											gy, gu, v);
+
+			return err < 0.08;
+		}
+};
+
+// same dimension as Smets & Wouters
+class UnfoldKOrderSW : public TestRunnable {
+public:
+	UnfoldKOrderSW()
+		: TestRunnable("unfold S&W korder (stat=5,pred=12,both=8,forw=5,u=10,dim=4)",
+					   4, 73) {}
+
+	bool run() const
+		{
+			TwoDMatrix gy(30, 20, gy_data2);
+			TwoDMatrix gu(30, 10, gu_data2);
+			TwoDMatrix v(10, 10, vdata2);
+			v.mult(0.001);
+			gu.mult(.01);
+			double err = korder_unfold_fold(4, 4, 5, 12, 8, 5,
+											gy, gu, v);
+
+			return err < 0.08;
+		}
+};
+
+class UnfoldFoldKOrderSW : public TestRunnable {
+public:
+	UnfoldFoldKOrderSW()
+		: TestRunnable("unfold-2 fold-3 S&W korder (stat=5,pred=12,both=8,forw=5,u=10,dim=3)",
+					   4, 73) {}
+
+	bool run() const
+		{
+			TwoDMatrix gy(30, 20, gy_data2);
+			TwoDMatrix gu(30, 10, gu_data2);
+			TwoDMatrix v(10, 10, vdata2);
+			v.mult(0.001);
+			gu.mult(.01);
+			double err = korder_unfold_fold(4, 3, 5, 12, 8, 5,
+											gy, gu, v);
+
+			return err < 0.08;
+		}
+};
+
+int main()
+{
+	TestRunnable* all_tests[50];
+	// fill in vector of all tests
+	int num_tests = 0;
+	all_tests[num_tests++] = new UnfoldKOrderSmall();
+	all_tests[num_tests++] = new UnfoldKOrderSW();
+	all_tests[num_tests++] = new UnfoldFoldKOrderSW();
+
+	// find maximum dimension and maximum nvar
+	int dmax=0;
+	int nvmax = 0;
+	for (int i = 0; i < num_tests; i++) {
+		if (dmax < all_tests[i]->dim)
+			dmax = all_tests[i]->dim;
+		if (nvmax < all_tests[i]->nvar)
+			nvmax = all_tests[i]->nvar;
+	}
+	tls.init(dmax, nvmax); // initialize library
+
+	// launch the tests
+	int success = 0;
+	for (int i = 0; i < num_tests; i++) {
+		try {
+			if (all_tests[i]->test())
+				success++;
+		} catch (const TLException& e) {
+			printf("Caught TL exception in <%s>:\n", all_tests[i]->getName());
+			e.print();
+		} catch (SylvException& e) {
+			printf("Caught Sylv exception in <%s>:\n", all_tests[i]->getName());
+			e.printMessage();
+		}
+	}
+
+	printf("There were %d tests that failed out of %d tests run.\n",
+		   num_tests - success, num_tests);
+
+	// destroy
+	for (int i = 0; i < num_tests; i++) {
+		delete all_tests[i];
+	}
+
+	return 0;
+}
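+
+// To add another case (a sketch, not part of the original file):
+// derive a class from TestRunnable, implement run() so that it builds
+// the input matrices, calls korder_unfold_fold() and returns whether
+// the reported error is acceptable, and append an instance to
+// all_tests[] in main() above.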
diff --git a/dynare++/parser/cc/Makefile b/dynare++/parser/cc/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..294eb79dae00daeaf5310833ccaa80ef75a23bf8
--- /dev/null
+++ b/dynare++/parser/cc/Makefile
@@ -0,0 +1,113 @@
+# Copyright (C) 2005, Ondra Kamenik
+
+# $Id: Makefile 1211 2007-03-19 21:43:42Z kamenik $
+
+CC_FLAGS := -Wall -I../..
+
+ifeq ($(DEBUG),yes)
+	CC_FLAGS := $(CC_FLAGS) -g
+else
+	CC_FLAGS := $(CC_FLAGS) -O3
+endif
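+
+# Typical invocations (illustration only, not part of the original
+# file): "make parser.a" builds the optimized archive, while
+# "make DEBUG=yes parser.a" builds it with debugging symbols.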
+
+objects := $(patsubst %.cpp,%.o,$(wildcard *.cpp))
+headers := $(wildcard *.h)
+source  := $(wildcard *.cpp)
+objects := $(objects) formula_tab.o formula_ll.o matrix_tab.o matrix_ll.o \
+           assign_tab.o assign_ll.o namelist_tab.o namelist_ll.o \
+           csv_tab.o csv_ll.o
+headers := $(headers) formula_tab.hh matrix_tab.hh assign_tab.hh namelist_tab.hh csv_tab.hh
+source  := $(source) formula_tab.cc formula_ll.cc formula.y formula.lex \
+           matrix_tab.cc matrix_ll.cc matrix.y matrix.lex \
+           assign_tab.cc assign_ll.cc assign.y assign.lex \
+           namelist_tab.cc namelist_ll.cc namelist.y namelist.lex \
+           csv_tab.cc csv_ll.cc csv.y csv.lex
+
+formula_tab.cc: formula.y tree.h formula_parser.h
+	bison  -d -t --verbose -oformula_tab.cc formula.y
+
+formula_tab.hh: formula.y tree.h formula_parser.h
+	bison  -d -t --verbose -oformula_tab.cc formula.y
+
+formula_ll.cc: formula.lex formula_tab.hh
+	flex -i -oformula_ll.cc formula.lex
+
+formula_ll.o: formula_ll.cc $(headers)
+	$(CC) -O3 -c formula_ll.cc
+
+formula_tab.o: formula_tab.cc $(headers)
+	$(CC) -O3 -c formula_tab.cc
+
+matrix_tab.cc: matrix.y tree.h matrix_parser.h
+	bison  -d -t --verbose -omatrix_tab.cc matrix.y
+
+matrix_tab.hh: matrix.y tree.h matrix_parser.h
+	bison  -d -t --verbose -omatrix_tab.cc matrix.y
+
+matrix_ll.cc: matrix.lex matrix_tab.hh
+	flex -i -omatrix_ll.cc matrix.lex
+
+matrix_ll.o: matrix_ll.cc $(headers)
+	$(CC) -O3 -c matrix_ll.cc
+
+matrix_tab.o: matrix_tab.cc $(headers)
+	$(CC) -O3 -c matrix_tab.cc
+
+assign_tab.cc: assign.y static_atoms.h atom_assignings.h
+	bison -d -t --verbose -oassign_tab.cc assign.y
+
+assign_tab.hh: assign.y static_atoms.h atom_assignings.h
+	bison -d -t --verbose -oassign_tab.cc assign.y
+
+assign_ll.cc: assign.lex assign_tab.hh
+	flex -i -oassign_ll.cc assign.lex
+
+assign_ll.o: assign_ll.cc $(headers)
+	$(CC) -O3 -c assign_ll.cc
+
+assign_tab.o: assign_tab.cc $(headers)
+	$(CC) -O3 -c assign_tab.cc
+
+namelist_tab.cc: namelist.y namelist.h
+	bison -d -t --verbose -onamelist_tab.cc namelist.y
+
+namelist_tab.hh: namelist.y namelist.h
+	bison -d -t --verbose -onamelist_tab.cc namelist.y
+
+namelist_ll.cc: namelist.lex namelist_tab.hh
+	flex -i -onamelist_ll.cc namelist.lex
+
+namelist_ll.o: namelist_ll.cc $(headers)
+	$(CC) -O3 -c namelist_ll.cc
+
+namelist_tab.o: namelist_tab.cc $(headers)
+	$(CC) -O3 -c namelist_tab.cc
+
+csv_tab.cc: csv.y csv_parser.h
+	bison -d -t --verbose -ocsv_tab.cc csv.y
+
+csv_tab.hh: csv.y csv_parser.h
+	bison -d -t --verbose -ocsv_tab.cc csv.y
+
+csv_ll.cc: csv.lex csv_tab.hh
+	flex -i -ocsv_ll.cc csv.lex
+
+csv_ll.o: csv_ll.cc $(headers)
+	$(CC) -O3 -c csv_ll.cc
+
+csv_tab.o: csv_tab.cc $(headers)
+	$(CC) -O3 -c csv_tab.cc
+
+%.o: %.cpp $(headers)
+	$(CC) $(CC_FLAGS) -c $*.cpp
+
+parser.a: $(objects) $(source) $(headers)
+	ar cr parser.a $(objects) 
+
+clear:
+	rm -f *~
+	rm -f *.o
+	rm -f *.cc *.hh
+	rm -f parser.a
+	rm -rf html/
+	rm -rf latex/
diff --git a/dynare++/parser/cc/assign.lex b/dynare++/parser/cc/assign.lex
new file mode 100644
index 0000000000000000000000000000000000000000..2b74f43448d8cc974e31f7ff50359930d3774e0e
--- /dev/null
+++ b/dynare++/parser/cc/assign.lex
@@ -0,0 +1,54 @@
+%{
+#include "location.h"
+#include "assign_tab.hh"
+
+	extern YYLTYPE asgn_lloc;
+
+#define YY_USER_ACTION SET_LLOC(asgn_);
+%}
+
+%option nounput
+%option noyy_top_state
+%option stack
+%option yylineno
+%option prefix="asgn_"
+%option never-interactive
+%x CMT
+
+%%
+
+ /* comments */
+<*>"/*"            {yy_push_state(CMT);}
+<CMT>[^*\n]*
+<CMT>"*"+[^*/\n]*
+<CMT>"*"+"/"       {yy_pop_state();}
+<CMT>[\n]
+"//".*\n
+
+ /* spaces */
+[ \t\r\n]          {return BLANK;}
+
+ /* names */
+[A-Za-z_][A-Za-z0-9_]* {
+	asgn_lval.string = asgn_text;
+	return NAME;
+}
+
+;                  {return SEMICOLON;}
+=                  {return EQUAL_SIGN;}
+. {
+	asgn_lval.character = asgn_text[0];
+	return CHARACTER;
+}
+
+%%
+
+int asgn_wrap()
+{
+	return 1;
+}
+
+void asgn__destroy_buffer(void* p)
+{
+	asgn__delete_buffer((YY_BUFFER_STATE)p);
+}
diff --git a/dynare++/parser/cc/assign.y b/dynare++/parser/cc/assign.y
new file mode 100644
index 0000000000000000000000000000000000000000..f298ff150f8545d5cc90c7e33825a55a0054a30b
--- /dev/null
+++ b/dynare++/parser/cc/assign.y
@@ -0,0 +1,54 @@
+%{
+/* Copyright 2006, Ondra Kamenik */
+
+/* $Id: assign.y 1748 2008-03-28 11:52:07Z kamenik $ */
+
+#include "location.h"
+#include "atom_assignings.h"
+#include "assign_tab.hh"
+
+#include <stdio.h>
+
+	int asgn_error(char*);
+	int asgn_lex(void);
+	extern int asgn_lineno;
+	extern ogp::AtomAssignings* aparser;
+
+%}
+
+%union {
+	int integer;
+	char *string;
+	char character;
+}
+
+%token EQUAL_SIGN SEMICOLON CHARACTER BLANK
+%token <string> NAME;
+
+%name-prefix="asgn_"
+
+%locations
+%error-verbose
+
+%%
+
+root : assignments | ;
+
+assignments : assignments BLANK | assignments assignment | assignment | BLANK;
+
+assignment : NAME EQUAL_SIGN material SEMICOLON {
+	aparser->add_assignment(@1.off, $1, @1.ll, @3.off-@1.off, @3.ll + @4.ll);}
+  | NAME space EQUAL_SIGN material SEMICOLON {
+	aparser->add_assignment(@1.off, $1, @1.ll, @4.off-@1.off, @4.ll + @5.ll);}
+  ;
+
+material : material CHARACTER | material NAME | material BLANK | NAME | CHARACTER | BLANK;
+
+space : space BLANK | BLANK;
+
+%%
+
+int asgn_error(char* mes)
+{
+	aparser->error(mes);
+	return 0;
+}
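+
+/* An input example (illustration only; the names are assumed, not
+   taken from this file): the grammar accepts text such as
+
+       alpha = 0.36;
+       rho   = 0.95; sigma = 0.01;
+
+   and for each assignment calls aparser->add_assignment() with the
+   offset and length of the left-hand name and of the right-hand
+   material. */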
diff --git a/dynare++/parser/cc/atom_assignings.cpp b/dynare++/parser/cc/atom_assignings.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b7973359a1b7d7edc136d9bf1cca7bb8b33aad8b
--- /dev/null
+++ b/dynare++/parser/cc/atom_assignings.cpp
@@ -0,0 +1,215 @@
+// Copyright (C) 2006, Ondra Kamenik
+
+// $Id: atom_assignings.cpp 92 2007-04-19 11:38:21Z ondra $
+
+#include "atom_assignings.h"
+#include "location.h"
+#include "parser_exception.h"
+
+#include "utils/cc/exception.h"
+
+#include <limits>
+
+using namespace ogp;
+
+AtomAssignings::AtomAssignings(const AtomAssignings& aa, ogp::StaticAtoms& a)
+	: atoms(a), expr(aa.expr, atoms), left_names(aa.left_names),
+	  order(aa.order)
+{
+	// fill the lname2expr
+	for (Tvarintmap::const_iterator it = aa.lname2expr.begin();
+		 it != aa.lname2expr.end(); ++it)
+		lname2expr.insert(Tvarintmap::value_type(left_names.query((*it).first), (*it).second));
+}
+
+/** A global symbol for passing info to the AtomAssignings from
+ * asgn_parse(). */
+AtomAssignings* aparser;
+
+/** The declarations of functions defined in asgn_ll.cc and asgn_tab.cc
+ * generated from assign.lex and assign.y. */
+void* asgn__scan_buffer(char*, size_t);
+void asgn__destroy_buffer(void*);
+void asgn_parse();
+extern location_type asgn_lloc;
+
+void AtomAssignings::parse(int length, const char* stream)
+{
+	char* buffer = new char[length+2];
+	strncpy(buffer, stream, length);
+	buffer[length] = '\0';
+	buffer[length+1] = '\0';
+	asgn_lloc.off = 0;
+	asgn_lloc.ll = 0;
+	void* p = asgn__scan_buffer(buffer, (unsigned int)length+2);
+	aparser = this;
+	asgn_parse();
+	delete [] buffer;
+	asgn__destroy_buffer(p);
+}
+
+void AtomAssignings::error(const char* mes)
+{
+	throw ParserException(mes, asgn_lloc.off);
+}
+
+void AtomAssignings::add_assignment_to_double(const char* name, double val)
+{
+	// if left hand side is a registered atom, insert it to tree
+	int t;
+	try {
+		if (atoms.check(name))
+			t = expr.add_nulary(name);
+		else
+			t = -1;
+	} catch (const ParserException& e) {
+		t = -1;
+	}
+	// register left hand side in order
+	order.push_back(t);
+
+	// add the double to the tree
+	char tmp[100];
+	sprintf(tmp, "%30.25g", val);
+	try {
+		expr.parse(strlen(tmp), tmp);
+	} catch (const ParserException& e) {
+		// should never happen
+		throw ParserException(string("Error parsing double ")+tmp+": "+e.message(), 0);
+	}
+
+	// register name of the left hand side and put to lname2expr
+	const char* ss = left_names.insert(name);
+	lname2expr.insert(Tvarintmap::value_type(ss, order.size()-1));
+}
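+
+// Illustration (not in the original file): a call like
+// add_assignment_to_double("beta", 0.99) parses the constant 0.99 as a
+// new right hand side formula and maps the name "beta" to it; unlike
+// add_assignment(), an unknown left hand name is not registered in the
+// atoms (order then records -1 for this expression).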
+
+void AtomAssignings::add_assignment(int asgn_off, const char* str, int name_len,
+									int right_off, int right_len)
+{
+	// the order of doing things here is important: the FormulaParser
+	// requires that all references from the i-th tree refer to trees
+	// with an index less than i, so in order to capture also a nulary
+	// term for the left hand side, it must be inserted into the
+	// expression tree before the expression is parsed.
+
+	// find the name in the atoms, make copy of name to be able to put
+	// '\0' at the end
+	char* buf = new char[name_len+1];
+	strncpy(buf, str, name_len);
+	buf[name_len] = '\0';
+	// if left hand side is a registered atom, insert it to tree
+	int t;
+	try {
+		t = atoms.check(buf);
+		if (t == -1)
+			t = expr.add_nulary(buf);
+	} catch (const ParserException& e) {
+		atoms.register_name(buf);
+		t = expr.add_nulary(buf);
+	}
+	// register left hand side in order
+	order.push_back(t);
+
+	// parse expression on the right
+	try {
+		expr.parse(right_len, str+right_off);
+	} catch (const ParserException& e) {
+		throw ParserException(e, asgn_off+right_off);
+	}
+
+	// register name of the left hand side and put to lname2expr
+	const char* ss = left_names.insert(buf);
+	lname2expr[ss] = order.size()-1;
+
+	// delete name
+	delete [] buf;
+}
+
+void AtomAssignings::apply_subst(const AtomSubstitutions::Toldnamemap& mm)
+{
+	// go through all old variables and see what are their derived new
+	// variables
+	for (AtomSubstitutions::Toldnamemap::const_iterator it = mm.begin();
+		 it != mm.end(); ++it) {
+		const char* oldname = (*it).first;
+		int told = atoms.index(oldname);
+		const AtomSubstitutions::Tshiftnameset& sset = (*it).second;
+		if (told >= 0 && ! sset.empty()) {
+			// at least one substitution here, so make an expression
+			expr.add_formula(told);
+			// say that this expression is not assigned to any atom
+			order.push_back(-1);
+			// now go through all new names derived from the old name and
+			// reference to the newly added formula
+			for (AtomSubstitutions::Tshiftnameset::const_iterator itt = sset.begin();
+				 itt != sset.end(); ++itt) {
+				const char* newname = (*itt).first;
+				const char* nn = left_names.insert(newname);
+				lname2expr.insert(Tvarintmap::value_type(nn, expr.nformulas()-1));
+			}
+		}
+	}
+}
+
+void AtomAssignings::print() const
+{
+	printf("Atom Assignings\nExpressions:\n");
+	expr.print();
+	printf("Left names:\n");
+	for (Tvarintmap::const_iterator it = lname2expr.begin();
+		 it != lname2expr.end(); ++it)
+		printf("%s ==> %d (t=%d)\n", (*it).first, expr.formula((*it).second), order[(*it).second]);
+}
+
+void AtomAsgnEvaluator::setValues(EvalTree& et) const
+{
+	// set values of constants
+	aa.atoms.setValues(et);
+
+	// set values of variables to NaN or to user set values
+	double nan = std::numeric_limits<double>::quiet_NaN();
+	for (int i = 0; i < aa.atoms.nvar(); i++) {
+		const char* ss = aa.atoms.name(i);
+		int t = aa.atoms.index(ss);
+		if (t >= 0) {
+			Tusrvalmap::const_iterator it = user_values.find(t);
+			if (it == user_values.end())
+				et.set_nulary(t, nan);
+			else
+				et.set_nulary(t, (*it).second);
+		}
+	}
+}
+
+void AtomAsgnEvaluator::set_user_value(const char* name, double val)
+{
+	int t = aa.atoms.index(name);
+	if (t >= 0) {
+		Tusrvalmap::iterator it = user_values.find(t);
+		if (it == user_values.end())
+			user_values.insert(Tusrvalmap::value_type(t, val));
+		else
+			(*it).second = val;
+	}
+}
+
+void AtomAsgnEvaluator::load(int i, double res)
+{
+	// set the value
+	operator[](i) = res;
+	// if i-th expression is atom, set its value to this EvalTree
+	int t = aa.order[i];
+	if (t >= 0)
+		etree.set_nulary(t, res);
+}
+
+double AtomAsgnEvaluator::get_value(const char* name) const
+{
+	AtomAssignings::Tvarintmap::const_iterator it = aa.lname2expr.find(name);
+	if (it == aa.lname2expr.end())
+		return std::numeric_limits<double>::quiet_NaN();
+	else
+		return operator[]((*it).second);
+}
+
+
diff --git a/dynare++/parser/cc/atom_assignings.h b/dynare++/parser/cc/atom_assignings.h
new file mode 100644
index 0000000000000000000000000000000000000000..2151f9bccbf545a7dcccdf627cfd702dd26a36bc
--- /dev/null
+++ b/dynare++/parser/cc/atom_assignings.h
@@ -0,0 +1,130 @@
+// Copyright (C) 2006, Ondra Kamenik
+
+// $Id: atom_assignings.h 149 2007-04-30 02:11:46Z okamenik $
+
+#ifndef OGP_ATOM_ASSIGNINGS_H
+#define OGP_ATOM_ASSIGNINGS_H
+
+#include "static_atoms.h"
+#include "formula_parser.h"
+#include "atom_substitutions.h"
+
+#include <vector>
+#include <map>
+
+namespace ogp {
+
+	class AtomAsgnEvaluator;
+
+	/** This class represents atom assignments used in parameter
+	 * settings and initval initialization. It maintains the atoms of
+	 * all the expressions on the right hand sides, the parsed formulas
+	 * of the right hand sides, and the information about the left hand
+	 * sides. See the documentation of the order member below. */
+	class AtomAssignings {
+		friend class AtomAsgnEvaluator;
+	protected:
+		typedef std::map<const char*, int, ltstr> Tvarintmap;
+		/** All atoms, which should be sufficient for the formulas on
+		 * the right hand sides. The atoms should be filled with names
+		 * (preregistered). This is a responsibility of the caller. */
+		StaticAtoms& atoms;
+		/** The formulas of right hand sides. */
+		FormulaParser expr;
+		/** Name storage of the names from left hand sides. */
+		NameStorage left_names;
+		/** Information on left hand sides. This maps a name to the
+		 * index of its assigned expression in expr. More than one
+		 * name may reference to the same expression. */
+		 * name may refer to the same expression. */
+		/** Information on left hand sides. If order[i] >= 0, then it
+		 * says that i-th expression in expr is assigned to atom with
+		 * order[i] tree index. */
+		std::vector<int> order;
+	public:
+		/** Construct the object using the provided static atoms. */
+		AtomAssignings(StaticAtoms& a) : atoms(a), expr(atoms)
+			{}
+		/** Make a copy with a provided reference to (possibly different)
+		 * static atoms. */
+		AtomAssignings(const AtomAssignings& aa, StaticAtoms& a);
+		virtual ~AtomAssignings()
+			{}
+		/** Parse the assignments from the given string. */
+		void parse(int length, const char* stream);
+		/** Process a syntax error from bison. */
+		void error(const char* mes);
+		/** Add an assignment of the given name to the given
+		 * double. Can be called by a user, anytime. */
+		void add_assignment_to_double(const char* name, double val);
+		/** Add an assignment. Called from assign.y. */
+		void add_assignment(int asgn_off, const char* str, int name_len,
+							int right_off, int right_len);
+		/** This applies the old2new map (possibly from atom
+		 * substitutions) to this object. It registers the new
+		 * variables in the atoms, adds the expressions to expr, and
+		 * adds the left names to lname2expr. The information about the
+		 * dynamic part of the substitutions is ignored, since we are
+		 * now in the static world. */
+		void apply_subst(const AtomSubstitutions::Toldnamemap& mm);
+		/** Debug print. */
+		void print() const;
+	};
+
+	/** This class basically evaluates the atom assignments
+	 * AtomAssignings, so it inherits from ogp::FormulaEvaluator. It
+	 * also serves as storage for the evaluation results, stored as a
+	 * vector, so the class inherits from std::vector<double> and
+	 * ogp::FormulaEvalLoader. As the expressions for atoms are
+	 * evaluated, the results are values for atoms which will be
+	 * used in subsequent evaluations. For this reason, the class
+	 * inherits also from AtomValues. */
+	class AtomAsgnEvaluator : public FormulaEvalLoader,
+							  public AtomValues,
+							  protected FormulaEvaluator,
+							  public std::vector<double> {
+	protected:
+		typedef std::map<int, double> Tusrvalmap;
+		Tusrvalmap user_values;
+		const AtomAssignings& aa;
+	public:
+		AtomAsgnEvaluator(const AtomAssignings& a)
+			: FormulaEvaluator(a.expr),
+			  std::vector<double>(a.expr.nformulas()), aa(a) {}
+		virtual ~AtomAsgnEvaluator() {}
+		/** This sets the values of all constants, all values set by
+		 * the user via set_user_value(), and NaNs for the remaining
+		 * variables. It is called by the FormulaEvaluator::eval()
+		 * method, which is called by the eval() method below, passing
+		 * this object as the AtomValues argument. So the
+		 * ogp::EvalTree will always be this->etree. */
+		void setValues(EvalTree& et) const;
+		/** User setting of the values. For example in initval,
+		 * parameters are known and should be set to their values. In
+		 * contrast, endogenous variables are set initially to NaNs by
+		 * AtomValues::setValues. */
+		void set_user_value(const char* name, double val);
+		/** This sets the result of i-th expression in aa to res, and
+		 * also checks whether the i-th expression is an atom. If so,
+		 * it sets the value of the atom in ogp::EvalTree
+		 * this->etree. */
+		void load(int i, double res);
+		/** After the user values have been set, the assignments can
+		 * be evaluated; this is the purpose of the eval() method. As
+		 * a result, this object, viewed as a std::vector<double>,
+		 * will contain the values, ordered according to the formulas
+		 * in expr. */
+		void eval()
+			{FormulaEvaluator::eval(*this, *this);}
+		/** This returns a value for a given name. If the name is not
+		 * found among atoms, or there is no assignment for the atom,
+		 * NaN is returned. */
+		double get_value(const char* name) const;
+	};
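+
+	/* A minimal usage sketch (illustration only; the driver code and
+	 * the variable names are assumptions, not taken from this file,
+	 * and StaticAtoms is assumed to be default-constructible and
+	 * preregistered by the caller):
+	 *
+	 *   ogp::StaticAtoms a;
+	 *   ogp::AtomAssignings asgn(a);
+	 *   const char* txt = "alpha = 0.36; k = 10*alpha;";
+	 *   asgn.parse(strlen(txt), txt);
+	 *   ogp::AtomAsgnEvaluator ev(asgn);
+	 *   ev.eval();
+	 *   double k = ev.get_value("k");  // NaN if "k" was never assigned
+	 */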
+
+};
+
+#endif
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/parser/cc/atom_substitutions.cpp b/dynare++/parser/cc/atom_substitutions.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e6fd79ba38c34dc57514a9f4c54f34bc2e701506
--- /dev/null
+++ b/dynare++/parser/cc/atom_substitutions.cpp
@@ -0,0 +1,275 @@
+// Copyright (C) 2006, Ondra Kamenik
+
+// $Id: atom_substitutions.cpp 42 2007-01-22 21:53:24Z ondra $
+
+#include "atom_substitutions.h"
+#include "utils/cc/exception.h"
+
+using namespace ogp;
+
+AtomSubstitutions::AtomSubstitutions(const AtomSubstitutions& as, const FineAtoms& oa,
+									 FineAtoms& na)
+	: old_atoms(oa), new_atoms(na)
+{
+	const NameStorage& ns = na.get_name_storage();
+
+	// fill new2old
+	for (Tshiftmap::const_iterator it = as.new2old.begin();
+		 it != as.new2old.end(); ++it)
+		new2old.insert(Tshiftmap::value_type(ns.query((*it).first),
+											 Tshiftname(ns.query((*it).second.first),
+														(*it).second.second)));
+	// fill old2new
+	for (Toldnamemap::const_iterator it = as.old2new.begin();
+		 it != as.old2new.end(); ++it) {
+		Tshiftnameset sset;
+		for (Tshiftnameset::const_iterator itt = (*it).second.begin();
+			 itt != (*it).second.end(); ++itt)
+			sset.insert(Tshiftname(ns.query((*itt).first), (*itt).second));
+		old2new.insert(Toldnamemap::value_type(ns.query((*it).first), sset));
+	}
+}
+
+
+void AtomSubstitutions::add_substitution(const char* newname, const char* oldname, int tshift)
+{
+	// make sure the storage is from the new_atoms
+	newname = new_atoms.get_name_storage().query(newname);
+	oldname = new_atoms.get_name_storage().query(oldname);
+	if (newname == NULL || oldname == NULL)
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "Bad newname or oldname in AtomSubstitutions::add_substitution");
+
+	// insert to new2old map
+	new2old.insert(Tshiftmap::value_type(newname, Tshiftname(oldname, tshift)));
+	// insert to old2new map
+	Toldnamemap::iterator it = old2new.find(oldname);
+	if (it != old2new.end())
+		(*it).second.insert(Tshiftname(newname, -tshift));
+	else {
+		Tshiftnameset snset;
+		snset.insert(Tshiftname(newname, -tshift));
+		old2new.insert(Toldnamemap::value_type(oldname, snset));
+	}
+
+	// put to info
+	info.num_substs++;
+}
+
+void AtomSubstitutions::substitutions_finished(VarOrdering::ord_type ot)
+{
+	// create an external ordering of new_atoms from old_atoms
+	const vector<const char*>& oa_ext = old_atoms.get_allvar();
+	vector<const char*> na_ext;
+	for (unsigned int i = 0; i < oa_ext.size(); i++) {
+		const char* oname = oa_ext[i];
+		// add the old name itself
+		na_ext.push_back(oname);
+		// add all new names derived from the old name
+		Toldnamemap::const_iterator it = old2new.find(oname);
+		if (it != old2new.end())
+			for (Tshiftnameset::const_iterator itt = (*it).second.begin();
+				 itt != (*it).second.end(); ++itt)
+				na_ext.push_back((*itt).first);
+	}
+
+	// call parsing finished for the new_atoms
+	new_atoms.parsing_finished(ot, na_ext);
+}
+
+const char* AtomSubstitutions::get_new4old(const char* oldname, int tshift) const
+{
+	Toldnamemap::const_iterator it = old2new.find(oldname);
+	if (it != old2new.end()) {
+		const Tshiftnameset& sset = (*it).second;
+		for (Tshiftnameset::const_iterator itt = sset.begin();
+			 itt != sset.end(); ++itt)
+			if ((*itt).second == - tshift)
+				return (*itt).first;
+	}
+	return NULL;
+}
+
+void AtomSubstitutions::print() const
+{
+	printf("Atom Substitutions:\nOld ==> New:\n");
+	for (Toldnamemap::const_iterator it = old2new.begin(); it != old2new.end(); ++it)
+		for (Tshiftnameset::const_iterator itt = (*it).second.begin();
+			 itt != (*it).second.end(); ++itt)
+			printf("    %s ==> [%s, %d]\n", (*it).first, (*itt).first, (*itt).second);
+
+	printf("Old <== New:\n");
+	for (Tshiftmap::const_iterator it = new2old.begin(); it != new2old.end(); ++it)
+		printf("    [%s, %d] <== %s\n", (*it).second.first, (*it).second.second, (*it).first);
+}
+
+void SAtoms::substituteAllLagsAndLeads(FormulaParser& fp, AtomSubstitutions& as)
+{
+	const char* name;
+
+	int mlead, mlag;
+	endovarspan(mlead, mlag);
+
+	// substitute all endo lagged more than 1
+	while (NULL != (name = findEndoWithLeadInInterval(mlag, -2)))
+		makeAuxVariables(name, -1, -2, mlag, fp, as);
+	// substitute all endo leaded more than 1
+	while (NULL != (name = findEndoWithLeadInInterval(2, mlead)))
+		makeAuxVariables(name, 1, 2, mlead, fp, as);
+
+	exovarspan(mlead, mlag);
+	
+	// substitute all lagged exo
+	while (NULL != (name = findExoWithLeadInInterval(mlag, -1)))
+		makeAuxVariables(name, -1, -1, mlag, fp, as);
+	// substitute all leaded exo
+	while (NULL != (name = findExoWithLeadInInterval(1, mlead)))
+		makeAuxVariables(name, 1, 1, mlead, fp, as);
+
+	// notify that the substitutions have been finished
+	as.substitutions_finished(order_type);
+}
+
+void SAtoms::substituteAllLagsAndExo1Leads(FormulaParser& fp, AtomSubstitutions& as)
+{
+	const char* name;
+
+	int mlead, mlag;
+	endovarspan(mlead, mlag);
+
+	// substitute all endo lagged more than 1
+	while (NULL != (name = findEndoWithLeadInInterval(mlag, -2)))
+		makeAuxVariables(name, -1, -2, mlag, fp, as);
+
+	exovarspan(mlead, mlag);
+	
+	// substitute all lagged exo
+	while (NULL != (name = findExoWithLeadInInterval(mlag, -1)))
+		makeAuxVariables(name, -1, -1, mlag, fp, as);
+	// substitute all leaded exo by 1
+	while (NULL != (name = findExoWithLeadInInterval(1,1)))
+		makeAuxVariables(name, 1, 1, 1, fp, as);
+
+	// notify that the substitutions have been finished
+	as.substitutions_finished(order_type);
+}
+
+const char* SAtoms::findNameWithLeadInInterval(const vector<const char*>& names,
+											   int ll1, int ll2) const
+{
+	for (unsigned int i = 0; i < names.size(); i++) {
+		const char* name = names[i];
+		DynamicAtoms::Tvarmap::const_iterator it = vars.find(name);
+		if (it != vars.end()) {
+			const DynamicAtoms::Tlagmap& lmap = (*it).second;
+			for (DynamicAtoms::Tlagmap::const_iterator itt = lmap.begin();
+				 itt != lmap.end(); ++itt)
+				if ((*itt).first >= ll1 && (*itt).first <= ll2)
+					return name;
+		}
+	}
+
+	// nothing found
+	return NULL;
+}
+
+void SAtoms::attemptAuxName(const char* str, int ll, string& out) const
+{
+	char c = (ll >= 0)? ((ll == 0)? 'e' : 'p' ) : 'm';
+	char absll[100];
+	sprintf(absll, "%d", std::abs(ll));
+	int iter = 1;
+	do {
+		out = string(str) + '_';
+		for (int i = 0; i < iter; i++)
+			out += c;
+		if (ll != 0)
+			out += absll;
+		iter++;
+	} while (varnames.query(out.c_str()));
+}
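+
+// Illustration (not in the original file): attemptAuxName("a", -3, out)
+// yields "a_m3", or "a_mm3" if "a_m3" is already registered;
+// attemptAuxName("a", 0, out) yields "a_e" (the number is omitted for a
+// zero lead/lag).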
+
+void SAtoms::makeAuxVariables(const char* name, int step, int start, int limit_lead,
+							  FormulaParser& fp, AtomSubstitutions& as)
+{
+	if (! (step == 1 || step == -1))
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "Wrong value of step in SAtoms::makeAuxVariables");
+	if (step*start > step*limit_lead)
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "Wrong value of start in SAtoms::makeAuxVariables");
+
+	// make sure that we do not go further than necessary, that is,
+	// that the limit lead is not beyond the maxlead or minlag
+	int mlead, mlag;
+	varspan(name, mlead, mlag);
+	if (step == -1)
+		limit_lead = std::max(limit_lead, mlag);
+	else
+		limit_lead = std::min(limit_lead, mlead);
+
+	// A running example for the comments below: name="a"; start=-3; step=-1;
+
+	char tmp[500];
+
+	// recover tree index of a previous atom, i.e. set tprev to a tree
+	// index of atom "a(-2)"
+	int tprev = index(name, start-step);
+	if (tprev == -1) {
+		sprintf(tmp, "%s(%d)", name, start-step);
+		tprev = fp.add_nulary(tmp);
+	}
+
+	int ll = start;
+	do {
+		// either create atom "a_m2(0)" with tree index taux and add
+		// the equation "a_m2(0)=a(-2)", or check whether "a_m2(0)" has
+		// already been created (with a different step); in that case
+		// do not add the equation "a_m2(0) = a(-2)"
+		const char* newname;
+		string newname_str;
+		int taux;
+		if (NULL == (newname=as.get_new4old(name, ll-step))) {
+			attemptAuxName(name, ll-step, newname_str);
+			newname = newname_str.c_str();
+			register_uniq_endo(newname);
+			newname = varnames.query(newname);
+			sprintf(tmp, "%s(0)", newname);
+			taux = fp.add_nulary(tmp);
+			// add to substitutions
+			as.add_substitution(newname, name, ll-step);
+
+			// add equation "a_m2(0) = a(-2)", this is taux = tprev
+			fp.add_formula(fp.add_binary(MINUS, taux, tprev));
+		} else {
+			// example: an exogenous EPS occurring at both EPS(-1) and
+			// EPS(+1): the first call makeAuxVariables("EPS",1,1,...)
+			// creates the endogenous EPS_p0 = EPS; the second call
+			// makeAuxVariables("EPS",-1,-1,...) reuses this EPS_p0 to
+			// substitute for EPS(-1)
+			taux = index(newname, 0);
+			if (taux < 0)
+				throw ogu::Exception(__FILE__,__LINE__,
+									 "Couldn't find tree index of previously substituted variable");
+		}
+			
+		// create atom "a_m2(-1)", or turn "a(-3)", if present, into "a_m2(-1)"; its tree index is t
+		int t = index(name, ll);
+		if (t == -1) {
+			// no "a(-3)", make t <-> a_m2(-1)
+			sprintf(tmp, "%s(%d)", newname, step);
+			t = fp.add_nulary(tmp);
+		} else {
+			// turn a(-3) to a_m2(-1)
+			unassign_variable(name, ll, t);
+			assign_variable(newname, step, t);
+		}
+
+		// next iteration starts with tprev <-> "a_m2(-1)" (this will be made equal to "a_m3(0)")
+		tprev = t;
+		
+		ll += step;
+	} while (step*ll <= step*limit_lead);
+}
diff --git a/dynare++/parser/cc/atom_substitutions.h b/dynare++/parser/cc/atom_substitutions.h
new file mode 100644
index 0000000000000000000000000000000000000000..82c99946b08e47840b4f98995ab062fd85e472fd
--- /dev/null
+++ b/dynare++/parser/cc/atom_substitutions.h
@@ -0,0 +1,161 @@
+// Copyright (C) 2006, Ondra Kamenik
+
+// $Id: atom_substitutions.h 42 2007-01-22 21:53:24Z ondra $
+
+#ifndef OGP_ATOM_SUBSTITUTIONS_H
+#define OGP_ATOM_SUBSTITUTIONS_H
+
+#include "fine_atoms.h"
+
+#include <string>
+
+namespace ogp {
+
+	using std::string;
+	using std::map;
+	using std::pair;
+
+	/** This class tracks information about the performed
+	 * substitutions. In fact, there is only one number to keep track
+	 * of: the number of substitutions. */
+	struct SubstInfo {
+		int num_substs;
+		SubstInfo() : num_substs(0) {}
+	};
+
+	/** This class tracks all atom substitutions during the job and
+	 * then builds structures when all substitutions are finished. */
+	class AtomSubstitutions {
+	public:
+		typedef pair<const char*, int> Tshiftname;
+		typedef map<const char*, Tshiftname, ltstr> Tshiftmap;
+		typedef set<Tshiftname> Tshiftnameset;
+		typedef map<const char*, Tshiftnameset, ltstr> Toldnamemap;
+	protected:
+		/** This maps a new name to a shifted old name. That is, one
+		 * entry looks like "a_m3 ==> a(-3)", saying that the variable
+		 * "a_m3" corresponds to the variable "a" lagged by 3. */
+		Tshiftmap new2old;
+		/** This is the inverse of new2old; it is not unique. For an
+		 * old name, say "a", it records which new names are derived
+		 * from "a" and with what shifts. For example, it can map "a"
+		 * to the two element set {["a_m3", +3], ["a_p2", -2]}. This
+		 * says that leading "a_m3" by 3 gives the old "a", and lagging
+		 * "a_p2" by 2 also gives the old "a". */
+		Toldnamemap old2new;
+		/** This is a reference to the old atoms with multiple leads
+		 * and lags. They are supposed to be used after
+		 * parsing_finished() has been called, so that the external
+		 * ordering is available. */
+		const FineAtoms& old_atoms;
+		/** This is a reference to new atoms. All name pointers point
+		 * to storage of these atoms. */
+		FineAtoms& new_atoms;
+		/** Substitutions information. */
+		SubstInfo info;
+	public:
+		/** Create the object with references to the old and new
+		 * atoms. At the beginning, the old atoms are supposed to have
+		 * had parsing_finished() called, and the new atoms are a plain
+		 * copy of the old atoms. The new atoms will be an instance of
+		 * SAtoms. All the substitution work is done by a substitution
+		 * method of the new atoms. */
+		AtomSubstitutions(const FineAtoms& oa, FineAtoms& na)
+			: old_atoms(oa), new_atoms(na) {}
+		/** Construct a copy of the object using different instances
+		 * of the old atoms and new atoms, which are supposed to be
+		 * semantically the same as the atoms from as. */
+		AtomSubstitutions(const AtomSubstitutions& as, const FineAtoms& oa, FineAtoms& na);
+		virtual ~AtomSubstitutions() {}
+		/** This is called during the substitution job from the
+		 * substitution method of the new atoms. This says that the
+		 * new name, say "a_m3" is a substitution of old name "a"
+		 * shifted by -3. */
+		void add_substitution(const char* newname, const char* oldname, int tshift);
+		/** This is called when all substitutions are finished. This
+		 * forms the new external ordering of the new atoms and calls
+		 * parsing_finished() for the new atoms with the given ordering type. */
+		void substitutions_finished(VarOrdering::ord_type ot);
+		/** Returns a new name for old name and given tshift. For "a"
+		 * and tshift=-3, it returns "a_m3". If there is no such
+		 * substitution, it returns NULL. */
+		const char* get_new4old(const char* oldname, int tshift) const;
+		/** Return new2old. */
+		const Tshiftmap& get_new2old() const
+			{return new2old;}
+		/** Return old2new. */
+		const Toldnamemap& get_old2new() const
+			{return old2new;}
+		/** Return substitution info. */
+		const SubstInfo& get_info() const
+			{return info;}
+		/** Return old atoms. */
+		const FineAtoms& get_old_atoms() const
+			{return old_atoms;}
+		/** Return new atoms. */
+		const FineAtoms& get_new_atoms() const
+			{return new_atoms;}
+		/** Debug print. */
+		void print() const;
+	};
+
+	class SAtoms : public FineAtoms {
+	public:
+		SAtoms()
+			: FineAtoms() {}
+		SAtoms(const SAtoms& sa)
+			: FineAtoms(sa) {}
+		virtual ~SAtoms() {}
+		/** This substitutes all lags and leads for all exogenous and
+		 * all lags and leads greater than 1 for all endogenous
+		 * variables. This is useful for perfect foresight problems
+		 * where we can do that. */
+		void substituteAllLagsAndLeads(FormulaParser& fp, AtomSubstitutions& as);
+		/** This substitutes all lags of all endo and exo and one step
+		 * leads of all exo variables. This is useful for stochastic
+		 * models where we cannot solve leads more than 1. */
+		void substituteAllLagsAndExo1Leads(FormulaParser& fp, AtomSubstitutions& as);
+	protected:
+		/** This finds an endogenous variable name which occurs with a
+		 * lead/lag between ll1 and ll2 inclusive. */
+		const char* findEndoWithLeadInInterval(int ll1, int ll2) const
+			{return findNameWithLeadInInterval(get_endovars(), ll1, ll2);}
+		/** This finds an exogenous variable name which occurs with a
+		 * lead/lag between ll1 and ll2 inclusive. */
+		const char* findExoWithLeadInInterval(int ll1, int ll2) const
+			{return findNameWithLeadInInterval(get_exovars(), ll1, ll2);}
+
+		/** This attempts to find a non-registered name of the form
+		 * <str>_m<abs(ll)> or <str>_p<abs(ll)>. The letter 'p' is
+		 * chosen if ll is positive, 'm' if it is negative (and 'e',
+		 * with the number omitted, if ll is zero). If a name of such a
+		 * form is already registered, one more character (either 'p'
+		 * or 'm') is added and the test is performed again. The
+		 * resulting name is returned in the string out. */
+		void attemptAuxName(const char* str, int ll, string& out) const;
+
+		/** This makes auxiliary variables to eliminate all leads/lags
+		 * greater/less than or equal to start up to the limit_lead
+		 * for a variable with the given name. If the limit_lead is
+		 * greater/less than the maxlead/minlag of the variable, then
+		 * maxlead/minlag is used. This process is recorded in
+		 * AtomSubstitutions. The new auxiliary variables and their
+		 * atoms are created in this object. The auxiliary equations
+		 * are created in the given FormulaParser. The value of step
+		 * is allowed to be either -1 (lags) or +1 (leads). */
+		void makeAuxVariables(const char* name, int step, int start, int limit_lead,
+							  FormulaParser& fp, AtomSubstitutions& as);
+	private:
+		/** This is a worker routine for findEndoWithLeadInInterval
+		 * and findExoWithLeadInInterval. */
+		const char* findNameWithLeadInInterval(const vector<const char*>& names,
+											   int ll1, int ll2) const;
+
+	};
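+
+	/* A worked example (illustration only, the variable name is
+	 * assumed): if an endogenous variable c occurs as c(+2), then
+	 * substituteAllLagsAndLeads() creates an auxiliary endogenous
+	 * variable c_p1, adds the equation c_p1 - c(+1) = 0, records the
+	 * substitution in AtomSubstitutions, and rewrites the occurrence
+	 * c(+2) as c_p1(+1). Lags deeper than one are treated
+	 * symmetrically with auxiliaries c_m1, c_m2, and so on. */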
+
+};
+
+#endif
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/parser/cc/csv.lex b/dynare++/parser/cc/csv.lex
new file mode 100644
index 0000000000000000000000000000000000000000..feb563cb78f796c0d3cf92fcb043e247e6a1bbf6
--- /dev/null
+++ b/dynare++/parser/cc/csv.lex
@@ -0,0 +1,34 @@
+%{
+#include "location.h"
+#include "csv_tab.hh"
+
+	extern YYLTYPE csv_lloc;
+
+#define YY_USER_ACTION SET_LLOC(csv_);
+%}
+
+%option nounput
+%option noyy_top_state
+%option stack
+%option yylineno
+%option prefix="csv_"
+%option never-interactive
+
+%%
+
+,                     {return COMMA;}
+\n                    {return NEWLINE;}
+\r\n                  {return NEWLINE;}
+[^,\n\r]+             {return ITEM;}
+
+%%
+
+int csv_wrap()
+{
+	return 1;
+}
+
+void csv__destroy_buffer(void* p)
+{
+	csv__delete_buffer((YY_BUFFER_STATE)p);
+}
diff --git a/dynare++/parser/cc/csv.y b/dynare++/parser/cc/csv.y
new file mode 100644
index 0000000000000000000000000000000000000000..c0dcfeba5994232bd0f37adae140bfed54d07ba5
--- /dev/null
+++ b/dynare++/parser/cc/csv.y
@@ -0,0 +1,46 @@
+%{
+#include "location.h"
+#include "csv_parser.h"
+#include "csv_tab.hh"
+
+	void csv_error(const char*);
+	int csv_lex(void);
+	extern int csv_lineno;
+	extern ogp::CSVParser* csv_parser;
+	extern YYLTYPE csv_lloc;
+%}
+
+%union {
+	char* string;
+	int integer;
+}
+
+%token COMMA NEWLINE BOGUS
+%token <string> ITEM
+
+%name-prefix="csv_";
+
+%locations
+%error-verbose
+
+%%
+
+csv_file : line_list | line_list line;
+
+line_list : line_list line newline | line newline | line_list newline | newline;
+
+line : line comma | line item | item | comma;
+
+comma : COMMA {csv_parser->nextcol();};
+
+newline : NEWLINE {csv_parser->nextrow();};
+
+item : ITEM {csv_parser->item(@1.off, @1.ll);};
+
+
+%%
+
+void csv_error(const char* mes)
+{
+	csv_parser->csv_error(mes);
+}
diff --git a/dynare++/parser/cc/csv_parser.cpp b/dynare++/parser/cc/csv_parser.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..833a452214fb1cd33126e9300bfcbe461a5e1880
--- /dev/null
+++ b/dynare++/parser/cc/csv_parser.cpp
@@ -0,0 +1,42 @@
+#include "csv_parser.h"
+#include "parser_exception.h"
+#include "location.h"
+#include "csv_tab.hh"
+#include <cstring>
+
+using namespace ogp;
+
+/** A global symbol for passing info to the CSVParser from
+ * csv_parse(). */
+CSVParser* csv_parser;
+
+/** The declaration of functions defined in csv_ll.cc and
+ * csv_tab.cc generated from csv.lex and csv.y. */
+void* csv__scan_buffer(char*, unsigned int);
+void csv__destroy_buffer(void*);
+void csv_parse();
+
+extern ogp::location_type csv_lloc;
+
+void CSVParser::csv_error(const char* mes)
+{
+	throw ParserException(mes, csv_lloc.off);
+}
+
+void CSVParser::csv_parse(int length, const char* str)
+{
+	// allocate temporary buffer and parse
+	char* buffer = new char[length+2];
+	strncpy(buffer, str, length);
+	buffer[length] = '\0';
+	buffer[length+1] = '\0';
+	csv_lloc.off = 0;
+	csv_lloc.ll = 0;
+	parsed_string = buffer;
+	void* p = csv__scan_buffer(buffer, (unsigned int)length+2);
+	csv_parser = this;
+	::csv_parse();
+	delete [] buffer;
+	csv__destroy_buffer(p);
+	parsed_string = NULL;
+}
diff --git a/dynare++/parser/cc/csv_parser.h b/dynare++/parser/cc/csv_parser.h
new file mode 100644
index 0000000000000000000000000000000000000000..3edc52a4e92f6d6d0f0ce830943ae330da09d942
--- /dev/null
+++ b/dynare++/parser/cc/csv_parser.h
@@ -0,0 +1,46 @@
+// Copyright (C) 2007, Ondra Kamenik
+
+// $Id$
+
+#ifndef OGP_CSV_PARSER
+#define OGP_CSV_PARSER
+
+namespace ogp {
+
+	class CSVParserPeer {
+	public:
+		virtual ~CSVParserPeer() {}
+		virtual void item(int irow, int icol, const char* str, int length) = 0;
+	};
+
+	class CSVParser {
+	private:
+		CSVParserPeer& peer;
+		int row;
+		int col;
+		const char* parsed_string;
+	public:
+		CSVParser(CSVParserPeer& p)
+			: peer(p), row(0), col(0), parsed_string(0) {}
+		CSVParser(const CSVParser& csvp)
+			: peer(csvp.peer), row(csvp.row),
+			  col(csvp.col), parsed_string(csvp.parsed_string) {}
+		virtual ~CSVParser() {}
+
+		void csv_error(const char* mes);
+		void csv_parse(int length, const char* str);
+
+		void nextrow()
+			{row++; col = 0;}
+		void nextcol()
+			{col++;}
+		void item(int off, int length)
+			{peer.item(row, col, parsed_string+off, length);}
+	};
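+
+	/* A minimal usage sketch (illustration only; the peer class below
+	 * is an assumed example, not part of this header):
+	 *
+	 *   class PrintPeer : public ogp::CSVParserPeer {
+	 *   public:
+	 *   	void item(int irow, int icol, const char* str, int length)
+	 *   		{printf("(%d,%d) %.*s\n", irow, icol, length, str);}
+	 *   };
+	 *
+	 *   PrintPeer peer;
+	 *   ogp::CSVParser p(peer);
+	 *   const char* data = "1,2,3\n4,5,6\n";
+	 *   p.csv_parse(strlen(data), data);
+	 */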
+};
+
+#endif
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/parser/cc/dynamic_atoms.cpp b/dynare++/parser/cc/dynamic_atoms.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..8976c740f7f04a3b61a7130c0a23feae147097fc
--- /dev/null
+++ b/dynare++/parser/cc/dynamic_atoms.cpp
@@ -0,0 +1,609 @@
+// Copyright (C) 2005, Ondra Kamenik
+
+// $Id: dynamic_atoms.cpp 1362 2007-07-10 11:50:18Z kamenik $
+
+#include "utils/cc/exception.h"
+#include "dynamic_atoms.h"
+
+#include <string.h>
+#include <limits.h>
+
+using namespace ogp;
+
+NameStorage::NameStorage(const NameStorage& stor)
+{
+	for (unsigned int i = 0; i < stor.name_store.size(); i++) {
+		char* str = new char[strlen(stor.name_store[i])+1];
+		strcpy(str, stor.name_store[i]);
+		name_store.push_back(str);
+		name_set.insert(str);
+	}
+}
+
+NameStorage::~NameStorage()
+{
+	while (name_store.size() > 0) {
+		delete [] name_store.back();
+		name_store.pop_back();
+	}
+}
+
+const char* NameStorage::query(const char* name) const
+{
+	set<const char*, ltstr>::const_iterator it = name_set.find(name);
+	if (it == name_set.end())
+		return NULL;
+	else
+		return (*it);
+}
+
+const char* NameStorage::insert(const char* name)
+{
+	set<const char*, ltstr>::const_iterator it = name_set.find(name);
+	if (it == name_set.end()) {
+		char* str = new char[strlen(name)+1];
+		strcpy(str, name);
+		name_store.push_back(str);
+		name_set.insert(str);
+		return str;
+	} else {
+		return (*it);
+	}
+}
+
+void NameStorage::print() const
+{
+	for (unsigned int i = 0; i < name_store.size(); i++)
+		printf("%s\n", name_store[i]);
+}
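+
+// Illustration (not in the original file): names are interned, so
+// repeated insertions of equal strings return the same pointer and the
+// pointers themselves can safely serve as map keys:
+//
+//   NameStorage ns;
+//   const char* p1 = ns.insert("alpha");
+//   const char* p2 = ns.insert("alpha");
+//   // p1 == p2 holds here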
+
+void Constants::import_constants(const Constants& c, OperationTree& otree, Tintintmap& tmap)
+{
+	for (Tconstantmap::const_iterator it = c.cmap.begin();
+		 it != c.cmap.end(); ++it) {
+		int told = (*it).first;
+		int tnew = otree.add_nulary();
+		tmap.insert(Tintintmap::value_type(told, tnew));
+		add_constant(tnew, (*it).second);
+	}
+}
+
+void Constants::setValues(EvalTree& et) const
+{
+	Tconstantmap::const_iterator it;
+	for (it = cmap.begin(); it != cmap.end(); ++it)
+		et.set_nulary((*it).first, (*it).second);
+}
+
+void Constants::add_constant(int t, double val)
+{
+	cmap.insert(Tconstantmap::value_type(t, val));
+	cinvmap.insert(Tconstantinvmap::value_type(val, t));
+}
+
+bool Constants::is_constant(int t) const
+{
+	if (t < OperationTree::num_constants)
+		return true;
+	Tconstantmap::const_iterator it = cmap.find(t);
+	return (it != cmap.end());
+}
+
+double Constants::get_constant_value(int t) const
+{
+	Tconstantmap::const_iterator it = cmap.find(t);
+	if (it != cmap.end())
+		return (*it).second;
+	else {
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "Tree index is not constant in Constants::get_constant_value");
+		return 0;
+	}
+}
+
+int Constants::check(const char* str) const
+{
+	double d;
+	sscanf(str, "%lf", &d);
+ 	Tconstantinvmap::const_iterator it = cinvmap.find(d);
+ 	if (it != cinvmap.end())
+ 		return (*it).second;
+ 	else
+ 		return -1;
+}
+
+void Constants::print() const
+{
+	Tconstantmap::const_iterator it;
+	for (it = cmap.begin(); it != cmap.end(); ++it)
+		printf("$%d:  %8.4g\n", (*it).first, (*it).second);
+}
+
+
+DynamicAtoms::DynamicAtoms()
+	: nv(0), minlag(INT_MAX), maxlead(INT_MIN)
+{
+}
+
+DynamicAtoms::DynamicAtoms(const DynamicAtoms& da)
+	: Constants(da),
+	  varnames(da.varnames), vars(), indices(),
+	  nv(da.nv), minlag(da.minlag), maxlead(da.maxlead)
+{
+	// copy vars
+	for (Tvarmap::const_iterator it = da.vars.begin();
+		 it != da.vars.end(); ++it)
+		vars.insert(Tvarmap::value_type(varnames.query((*it).first),
+										(*it).second));
+	// copy indices
+	for (Tindexmap::const_iterator it = da.indices.begin();
+		 it != da.indices.end(); ++it)
+		indices.insert(Tindexmap::value_type((*it).first,
+											 varnames.query((*it).second)));
+}
+
+
+int DynamicAtoms::check(const char* name) const
+{
+	if (is_string_constant(name))
+		return Constants::check(name);
+
+	return check_variable(name);
+}
+
+int DynamicAtoms::check_variable(const char* name) const
+{
+	string str;
+	int ll;
+	parse_variable(name, str, ll);
+	Tvarmap::const_iterator it = vars.find(str.c_str());
+
+	if (it != vars.end()) {
+		const Tlagmap& lmap = (*it).second;
+		Tlagmap::const_iterator itt = lmap.find(ll);
+		if (itt != lmap.end())
+			return (*itt).second;
+	}
+	return -1;
+}
+
+
+void DynamicAtoms::assign(const char* name, int t)
+{
+	if (is_string_constant(name))
+		assign_constant(name, t);
+	else
+		assign_variable(name, t);
+}
+
+void DynamicAtoms::assign_constant(const char* name, int t)
+{
+	double val;
+	sscanf(name, "%lf", &val);
+	add_constant(t, val);
+}
+
+// parse the name and then call assign_variable(varname, ll, t)
+
+void DynamicAtoms::assign_variable(const char* name, int t)
+{
+	int ll;
+	string str;
+	parse_variable(name, str, ll);
+	// here str is just name without lead/lag
+	const char* ss = varnames.insert(str.c_str());
+
+	assign_variable(ss, ll, t);
+}
+
+void DynamicAtoms::assign_variable(const char* varname, int ll, int t)
+{
+	if (indices.end() != indices.find(t))
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "Attempt to assign already allocated tree index");
+
+	Tvarmap::iterator it = vars.find(varname);
+	if (it != vars.end()) {
+		Tlagmap& lmap = (*it).second;
+		if (lmap.end() != lmap.find(ll))
+			throw ogu::Exception(__FILE__,__LINE__,
+								 "Attempt to assign already allocated variable");
+		lmap.insert(Tlagmap::value_type(ll, t));
+	} else {
+		Tlagmap lmap;
+		lmap.insert(Tlagmap::value_type(ll, t));
+		vars.insert(Tvarmap::value_type(varname, lmap));
+	}
+	indices.insert(Tindexmap::value_type(t, varname));
+
+	nv++;
+	if (ll < minlag)
+		minlag = ll;
+	if (ll > maxlead)
+		maxlead = ll;	
+}
+
+void DynamicAtoms::unassign_variable(const char* varname, int ll, int t)
+{
+	Tvarmap::iterator it = vars.find(varname);
+	if (it != vars.end()) {
+		Tlagmap& lmap = (*it).second;
+		Tlagmap::iterator itt = lmap.find(ll);
+		if (itt != lmap.end()) {
+			if ((*itt).second == t) {
+				// erase it from the lagmap; if it becomes empty,
+				// erase the lagmap from varmap
+				lmap.erase(itt);
+				if (lmap.size() == 0)
+					vars.erase(it);
+				// erase it from the indices
+				Tindexmap::iterator ittt = indices.find(t);
+				if (ittt != indices.end())
+					indices.erase(ittt);
+
+				nv--;
+				if (ll == minlag || ll == maxlead)
+					update_minmaxll();
+			} else
+				throw ogu::Exception(__FILE__,__LINE__,
+									 "Tree index inconsistent in DynamicAtoms::unassign_variable");
+		} else
+			throw ogu::Exception(__FILE__,__LINE__,
+								 "Lead/lag of the variable not found in DynamicAtoms::unassign_variable");
+	} else
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "Variable not found in DynamicAtoms::unassign_variable");
+}
+
+void DynamicAtoms::update_minmaxll()
+{
+	minlag = INT_MAX;
+	maxlead = INT_MIN;
+	for (Tvarmap::const_iterator it = vars.begin(); it != vars.end(); ++it) {
+		const Tlagmap& lmap = (*it).second;
+		for (Tlagmap::const_iterator itt = lmap.begin(); itt != lmap.end(); ++itt) {
+			int ll = (*itt).first;
+			if (ll < minlag)
+				minlag = ll;
+			if (ll > maxlead)
+				maxlead = ll;
+		}
+	}	
+}
+
+vector<int> DynamicAtoms::variables() const
+{
+	vector<int> res;
+	for (Tvarmap::const_iterator it = vars.begin();
+		 it != vars.end(); ++it) {
+		const Tlagmap& lmap = (*it).second;
+		for (Tlagmap::const_iterator itt = lmap.begin();
+			 itt != lmap.end(); ++itt)
+			res.push_back((*itt).second);
+	}
+	return res;
+}
+
+void DynamicAtoms::varspan(int t, int& mlead, int& mlag) const
+{
+	Tindexmap::const_iterator it = indices.find(t);
+	if (indices.end() == it) {
+		mlead = INT_MIN;
+		mlag = INT_MAX;
+		return;
+	}
+	varspan((*it).second, mlead, mlag);
+}
+
+void DynamicAtoms::varspan(const char* name, int& mlead, int& mlag) const
+{
+	Tvarmap::const_iterator it = vars.find(name);
+	if (vars.end() == it) {
+		mlead = INT_MIN;
+		mlag = INT_MAX;
+		return;
+	}
+	const Tlagmap& lmap = (*it).second;
+	Tlagmap::const_iterator beg = lmap.begin();
+	Tlagmap::const_reverse_iterator end = lmap.rbegin();
+	mlag = (*beg).first;
+	mlead = (*end).first;
+}
+
+void DynamicAtoms::varspan(const vector<const char*>& names, int& mlead, int& mlag) const
+{
+	mlead = INT_MIN;
+	mlag = INT_MAX;
+	for (unsigned int i = 0; i < names.size(); i++) {
+		int lag, lead;
+		varspan(names[i], lead, lag);
+		if (lead > mlead)
+			mlead = lead;
+		if (lag < mlag)
+			mlag = lag;
+	}
+}
+
+bool DynamicAtoms::is_named_atom(int t) const
+{
+	return (indices.end() != indices.find(t));
+}
+
+int DynamicAtoms::index(const char* name, int ll) const
+{
+	Tvarmap::const_iterator it = vars.find(name);
+	if (vars.end() != it) {
+		const Tlagmap& lmap = (*it).second;
+		Tlagmap::const_iterator itt = lmap.find(ll);
+		if (lmap.end() != itt)
+			return (*itt).second;
+	}
+	return -1;
+}
+
+const DynamicAtoms::Tlagmap& DynamicAtoms::lagmap(const char* name) const
+{
+	Tvarmap::const_iterator it = vars.find(name);
+	if (vars.end() == it)
+		throw ogu::Exception(__FILE__,__LINE__,
+							 std::string("Couldn't find the name ")
+							 + name + " in DynamicAtoms::lagmap");
+	return (*it).second;
+}
+
+const char* DynamicAtoms::name(int t) const
+{
+	Tindexmap::const_iterator it = indices.find(t);
+	if (indices.end() == it)
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "Couldn't find tree index in DynamicAtoms::name");
+	return (*it).second;
+}
+
+int DynamicAtoms::lead(int t) const
+{
+	const char* nam = name(t);
+	const Tlagmap& lmap = lagmap(nam);
+	Tlagmap::const_iterator it = lmap.begin();
+	while (it != lmap.end() && (*it).second != t)
+		++it;
+	if (lmap.end() == it)
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "Couldn't find the tree index in DynamicAtoms::lead");
+	return (*it).first;
+}
+
+void DynamicAtoms::print() const
+{
+	printf("names:\n");
+	varnames.print();
+	printf("constants:\n");
+	Constants::print();
+	printf("variables:\n");
+	for (Tvarmap::const_iterator it = vars.begin();
+		 it != vars.end(); ++it) {
+		const Tlagmap& lmap = (*it).second;
+		for (Tlagmap::const_iterator itt = lmap.begin();
+			 itt != lmap.end(); ++itt)
+			printf("$%d: %s(%d)\n", (*itt).second, (*it).first, (*itt).first);
+	}
+	printf("indices:\n");
+	for (Tindexmap::const_iterator it = indices.begin();
+		 it != indices.end(); ++it)
+		printf("t=%d ==> %s\n", (*it).first, (*it).second);
+}
+
+/** Note that the str has already been parsed by the lexical
+ * analyzer. It can be either a variable or a double, so it is easy to
+ * recognize which by the first character. */
+bool DynamicAtoms::is_string_constant(const char* str)
+{
+	return str[0] == '.' || str[0] == '-' || (str[0] >= '0' && str[0] <= '9');
+}
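+
+// For illustration (following the rule above): "3.14", ".5" and "-2" are
+// treated as string constants, whereas "alpha" or "x(-1)" are not, since
+// they do not start with a digit, '.' or '-'.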
+
+VarOrdering::VarOrdering(const VarOrdering& vo, const vector<const char*>& vnames,
+						 const DynamicAtoms& a)
+	: n_stat(vo.n_stat), n_pred(vo.n_pred), n_both(vo.n_both), n_forw(vo.n_forw),
+	  der_atoms(vo.der_atoms), positions(vo.positions),
+	  outer2y(vo.outer2y), y2outer(vo.y2outer), varnames(vnames), atoms(a)
+{
+}
+
+bool VarOrdering::check(int t) const
+{
+	map<int,int>::const_iterator it = positions.find(t);
+	return it != positions.end();
+}
+
+int VarOrdering::get_pos_of(int t) const
+{
+	map<int,int>::const_iterator it = positions.find(t);
+	if (it != positions.end()) {
+		return (*it).second;
+	} else {
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "Couldn't find the tree index in VarOrdering::get_pos_of");
+		return -1;
+	}
+}
+
+void VarOrdering::do_general(ord_type ordering)
+{
+	// auxiliary vectors for setting der_atoms and map
+	vector<int> pred_minus;
+	vector<int> both_minus;
+	vector<int> stat;
+	vector<int> pred_pad;
+	vector<int> both_pad;
+	vector<int> forw_pad;
+	vector<int> both_plus;
+	vector<int> forw_plus;
+
+	// auxiliary vectors for setting y2outer and outer2y 
+	vector<int> y2o_stat;
+	vector<int> y2o_pred;
+	vector<int> y2o_both;
+	vector<int> y2o_forw;
+
+	for (unsigned int i = 0; i < varnames.size(); i++) {
+		const char* ss = varnames[i];
+		int lead;
+		int lag;
+		atoms.varspan(ss, lead, lag);
+		if (lag == 0 && lead == 0) {
+			stat.push_back(atoms.index(ss, 0));
+			y2o_stat.push_back(i);
+		} else if (lag == -1 && lead < 1) {
+			pred_minus.push_back(atoms.index(ss, -1));
+			pred_pad.push_back(atoms.index(ss, 0));
+			y2o_pred.push_back(i);
+		} else if (lag > -1 && lead == 1) {
+			forw_pad.push_back(atoms.index(ss, 0));
+			forw_plus.push_back(atoms.index(ss, 1));
+			y2o_forw.push_back(i);
+		} else if (lag == -1 && lead == 1) {
+			both_minus.push_back(atoms.index(ss, -1));
+			both_pad.push_back(atoms.index(ss, 0));
+			both_plus.push_back(atoms.index(ss, 1));
+			y2o_both.push_back(i);
+		} else {
+			throw ogu::Exception(__FILE__,__LINE__,
+								 "A wrong lag/lead of a variable in VarOrdering::do_general");
+		}			
+	}
+
+	// here we fill ords according to ordering
+	vector<int>* ords[8];
+	if (ordering == pbspbfbf) {
+		ords[0] = &pred_minus;
+		ords[1] = &both_minus;
+		ords[2] = &stat;
+		ords[3] = &pred_pad;
+		ords[4] = &both_pad;
+		ords[5] = &forw_pad;
+		ords[6] = &both_plus;
+		ords[7] = &forw_plus;
+	} else if (ordering == bfspbfpb) {
+		ords[0] = &both_plus;
+		ords[1] = &forw_plus;
+		ords[2] = &stat;
+		ords[3] = &pred_pad;
+		ords[4] = &both_pad;
+		ords[5] = &forw_pad;
+		ords[6] = &pred_minus;
+		ords[7] = &both_minus;
+	} else { // BEWARE: when implementing a new ordering, check also the
+			 // code below setting y2outer
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "Ordering not implemented in VarOrdering::do_general");		
+	}
+
+	// make der_atoms and positions
+	int off = 0;
+	for (unsigned int i = 0; i < 8; i++)
+		for (unsigned int j = 0; j < (ords[i])->size(); j++, off++)
+			if ((*(ords[i]))[j] != -1) {
+				der_atoms.push_back((*(ords[i]))[j]);
+				positions.insert(std::pair<int,int>((*(ords[i]))[j], off));
+			}
+
+	// set integer constants
+	n_stat = stat.size();
+	n_pred = pred_pad.size();
+	n_both = both_pad.size();
+	n_forw = forw_pad.size();
+
+	// make y2outer mapping
+	y2outer.insert(y2outer.end(), y2o_stat.begin(), y2o_stat.end());
+	y2outer.insert(y2outer.end(), y2o_pred.begin(), y2o_pred.end());
+	y2outer.insert(y2outer.end(), y2o_both.begin(), y2o_both.end());
+	y2outer.insert(y2outer.end(), y2o_forw.begin(), y2o_forw.end());
+	// make outer2y mapping
+	outer2y.resize(y2outer.size(), -1);
+	for (unsigned int i = 0; i < y2outer.size(); i++)
+		outer2y[y2outer[i]] = i;
+}
+
+void VarOrdering::do_increasing_time()
+{
+	// get maxlead and minlag of the variables
+	int mlag, mlead;
+	atoms.varspan(varnames, mlead, mlag);
+	// setup the matrix of tree indices, if there is no occurrence,
+	// the index is set to -1
+	vector<int> ll_init(varnames.size(), -1);
+	vector<vector<int> > tree_ind(mlead-mlag+1, ll_init);
+	for (unsigned int iv = 0; iv < varnames.size(); iv++) {
+		try {
+			const DynamicAtoms::Tlagmap& lmap = atoms.lagmap(varnames[iv]);
+			for (DynamicAtoms::Tlagmap::const_iterator it = lmap.begin();
+				 it != lmap.end(); ++it) {
+				int ll = (*it).first;
+				int t = (*it).second;
+				tree_ind[ll-mlag][iv] = t;
+			}
+		} catch (const ogu::Exception& e) {
+			// ignore the error of not found variable in the tree
+		}
+	}
+
+	// setup der_atoms and positions
+	for (int ll = mlag; ll <= mlead; ll++)
+		for (unsigned int iv = 0; iv < varnames.size(); iv++) {
+			int t = tree_ind[ll-mlag][iv];
+			if (t != -1) {
+				der_atoms.push_back(t);
+				int pos = (ll-mlag)*varnames.size() + iv;
+				positions.insert(map<int,int>::value_type(t, pos));
+			}
+		}
+
+	// set outer2y and y2outer to identities
+	for (unsigned int iv = 0; iv < varnames.size(); iv++) {
+		outer2y.push_back(iv);
+		y2outer.push_back(iv);
+	}
+
+	// set n_stat, n_pred, n_both, and n_forw
+	for (unsigned int iv = 0; iv < varnames.size(); iv++) {
+		int mmlag, mmlead;
+		atoms.varspan(varnames[iv], mmlead, mmlag);
+		if (mmlead == 0 && mmlag == 0) {
+			n_stat++;
+		} else if (mmlead <= 0 && mmlag < 0) {
+			n_pred++;
+		} else if (mmlead > 0 && mmlag >=0) {
+			n_forw++;
+		} else if (mmlead > 0 && mmlag < 0) {
+			n_both++;
+		} else if (mmlead < mmlag) {
+			// variable does not occur in the tree, counted as static
+			n_stat++;
+		} else {
+			throw ogu::Exception(__FILE__,__LINE__,
+								 "A wrong lag/lead of a variable in VarOrdering::do_increasing_time");
+		}
+	}
+}
+
+void VarOrdering::print() const
+{
+	printf("nstat=%d, npred=%d, nboth=%d, nforw=%d\n", n_stat, n_pred, n_both, n_forw);
+	printf("der_atoms:\n");
+	for (unsigned int i = 0; i < der_atoms.size(); i++)
+		printf(" %d", der_atoms[i]);
+	printf("\nmap:\n");
+	for (map<int,int>::const_iterator it = positions.begin(); it != positions.end(); ++it)
+		printf(" [%d->%d]", (*it).first, (*it).second);
+	printf("\ny2outer:\n");
+	for (unsigned int i = 0; i < y2outer.size(); i++)
+		printf(" %d", y2outer[i]);
+	printf("\nouter2y:\n");
+	for (unsigned int i = 0; i < outer2y.size(); i++)
+		printf(" %d", outer2y[i]);
+	printf("\n");
+}
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/parser/cc/dynamic_atoms.h b/dynare++/parser/cc/dynamic_atoms.h
new file mode 100644
index 0000000000000000000000000000000000000000..03cb0411fdc1ff8f54b4e4bc07ae6621324362af
--- /dev/null
+++ b/dynare++/parser/cc/dynamic_atoms.h
@@ -0,0 +1,403 @@
+// Copyright (C) 2005, Ondra Kamenik
+
+// $Id: dynamic_atoms.h 2269 2008-11-23 14:33:22Z michel $
+
+#ifndef OGP_DYNAMIC_ATOMS_H
+#define OGP_DYNAMIC_ATOMS_H
+
+#include "formula_parser.h"
+
+#include <vector>
+#include <map>
+#include <set>
+#include <string>
+#include <cstring>
+
+namespace ogp {
+	using std::vector;
+	using std::map;
+	using std::set;
+	using std::string;
+
+	struct ltstr {
+		bool operator()(const char* a1, const char* a2) const
+			{ return strcmp(a1, a2) < 0; }
+	};
+
+	/** Class storing names. We will keep names of variables in
+	 * various places, and all these pointers will point to one
+	 * storage, which will be responsible for allocation and
+	 * deallocation. The main function of the class is to allocate
+	 * space for names, and return a pointer of the stored name if
+	 * required. */
+	class NameStorage {
+	protected:
+		/** Vector of names allocated, this is the storage. */
+		vector<char*> name_store;
+		/** Map useful to quickly decide if the name is already
+		 * allocated or not. */
+		set<const char*, ltstr> name_set;
+	public:
+		NameStorage() {}
+		NameStorage(const NameStorage& stor);
+		virtual ~NameStorage();
+		/** Query for the name. If the name has been stored, it
+		 * returns its address, otherwise 0. */
+		const char* query(const char* name) const;
+		/** Insert the name if it has not been inserted yet, and
+		 * return its new or old allocation. */
+		const char* insert(const char* name);
+		int num() const
+			{return (int)name_store.size();}
+		const char* get_name(int i) const
+			{return name_store[i];}
+		/** Debug print. */
+		void print() const;
+	};
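+
+	/* A small sketch of the intended use (names are only illustrative):
+	 * repeated inserts of an equal string return the same pointer, so
+	 * the pointer itself can safely be used as a key elsewhere.
+	 *
+	 *   NameStorage names;
+	 *   const char* a = names.insert("alpha");
+	 *   const char* b = names.insert("alpha");
+	 *   // here a == b, and names.query("alpha") returns the same address
+	 */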
+
+	class Constants : public AtomValues {
+	public:
+		/** Type for a map mapping tree indices to double values. */
+		typedef map<int,double> Tconstantmap;
+		typedef map<int,int> Tintintmap;
+	protected:
+		/** Map mapping a tree index of a constant to its double value. */ 
+		Tconstantmap cmap;
+	public:
+		Constants() {}
+		/** Copy constructor. */
+		Constants(const Constants& c)
+			: cmap(c.cmap), cinvmap(c.cinvmap) {}
+		/** Copy constructor registering the constants in the given
+		 * tree. The mapping from old tree indices to new ones is
+		 * traced in tmap. */
+		Constants(const Constants& c, OperationTree& otree, Tintintmap& tmap)
+			{import_constants(c, otree, tmap);}
+		/** Import constants registering their tree indices in the
+		 * given tree. The mapping from old tree indices to new ones
+		 * is traced in tmap. */
+		void import_constants(const Constants& c, OperationTree& otree, Tintintmap& tmap);
+		/** Implements AtomValues interface. This sets the values to
+		 * the evaluation tree EvalTree. */
+		void setValues(EvalTree& et) const;
+		/** This adds a constant with the given tree index. The
+		 * constant must be checked previously and asserted that it
+		 * does not exist. */
+		void add_constant(int t, double val);
+		/** Returns true if the tree index is either a hardwired
+		 * constant (one of the first OperationTree::num_constants
+		 * indices in OperationTree) or a constant registered via the
+		 * add_constant method. */
+		bool is_constant(int t) const;
+		double get_constant_value(int t) const;
+		/** Return -1 if the given string representation of a constant
+		 * is not among the constants (double representations). If it
+		 * is, its tree index is returned. */ 
+		int check(const char* str) const;
+		/** Debug print. */
+		void print() const;
+		const Tconstantmap& get_constantmap() const
+			{return cmap;}
+	private:
+		/** Inverse map to Tconstantmap. */
+		typedef map<double,int> Tconstantinvmap;
+		/** This is an inverse map to cmap. This is only used for fast
+		 * queries for the existing double constants in check
+		 * method and add_constant. */
+		Tconstantinvmap cinvmap;
+	};
+
+    /** This class is a parent to Atoms classes which distinguish between
+	 * constants (numerical literals), and variables with lags and
+	 * leads. This abstraction does not distinguish between a parameter
+	 * and a variable without lag or lead. In this sense, everything is a
+	 * variable.*/
+	class DynamicAtoms : public Atoms, public Constants {
+	public:
+		/** Definition of a type mapping lags to the indices of the variables. */
+		typedef map<int,int> Tlagmap;
+	protected:
+		/** Definition of a type mapping names of the atoms to Tlagmap. */
+		typedef map<const char*, Tlagmap, ltstr> Tvarmap;
+		/** Definition of a type mapping indices of variables to the variable names. */
+		typedef map<int, const char*> Tindexmap;
+		/** This is just a storage for variable names, since all other
+		 * instances of a variable name just point to the memory
+		 * allocated by this object. */
+		NameStorage varnames;
+		/** This is the map for variables. Each variable name is
+		 * mapped to the Tlagmap, which maps lags/leads to the nulary
+		 * term indices in the tree. */
+		Tvarmap vars;
+		/** This is almost inverse map to the vars. It maps variable
+		 * indices to the names. A returned name can be in turn used
+		 * as a key in vars. */
+		Tindexmap indices;
+
+		/** Number of variables. */
+		int nv;
+		/** Minimum lag; if there is at least one lag, then this is a negative number. */
+		int minlag;
+		/** Maximum lead; if there is at least one lead, then this is a positive number. */
+		int maxlead;
+	public:
+		/** Construct empty DynamicAtoms. */
+		DynamicAtoms();
+		DynamicAtoms(const DynamicAtoms& da);
+		virtual ~DynamicAtoms() {}
+		/** Check the nulary term identified by its string
+		 * representation. The nulary term can be either a constant or
+		 * a variable. If it is a constant, the tree index of an
+		 * already registered constant with the same value is returned,
+		 * otherwise -1. If it is a variable, -1 is returned only if
+		 * the variable has not been assigned an index, otherwise the
+		 * assigned index is returned. */
+		int check(const char* name) const;
+		/** Assign the nulary term identified by its string
+		 * representation. This method should be called when check()
+		 * returns -1. */
+		void assign(const char* name, int t);
+		/** Return a number of all variables. */
+		int nvar() const
+			{ return nv; }
+		/** Return the vector of variable indices. */
+		vector<int> variables() const;
+		/** Return max lead and min lag for a variable given by the
+		 * index. If a variable cannot be found, the method returns
+		 * the smallest integer as maxlead and the largest integer as
+		 * minlag. */
+		void varspan(int t, int& mlead, int& mlag) const;
+		/** Return max lead and min lag for a variable given by the
+		 * name (without lead, lag). The same is valid if the variable
+		 * name cannot be found. */
+		void varspan(const char* name, int& mlead, int& mlag) const;
+		/** Return max lead and min lag for a vector of variables given by the names. */
+		void varspan(const vector<const char*>& names, int& mlead, int& mlag) const;
+		/** Return true for all tree indices corresponding to a
+		 * variable in the sense of this class. (These are parameters,
+		 * exo and endo.) Since the semantics of 'variable' will be
+		 * changed in subclasses, we use the name 'named atom'. These are
+		 * all atoms but constants. */
+		bool is_named_atom(int t) const;
+		/** Return index of the variable described by the variable
+		 * name and lag/lead. If it doesn't exist, return -1. */
+		int index(const char* name, int ll) const;
+		/** Return the lag map for the variable name. */
+		const Tlagmap& lagmap(const char* name) const;
+		/** Return the variable name for the tree index. It throws an
+		 * exception if the tree index t is not a named atom. */
+		const char* name(int t) const;
+		/** Return the lead/lag for the tree index. It throws an
+		 * exception if the tree index t is not a named atom. */
+		int lead(int t) const;
+		/** Return maximum lead. */
+		int get_maxlead() const
+			{return maxlead;}
+		/** Return minimum lag. */
+		int get_minlag() const
+			{return minlag;}
+		/** Return the name storage to allow querying to other
+		 * classes. */
+		const NameStorage& get_name_storage() const
+			{return varnames;}
+		/** Assign the variable with a given lead/lag. The varname must
+		 * be from the varnames storage. The method checks that the
+		 * variable with the given lead/lag has not been assigned yet;
+		 * if it has, an exception is thrown. */
+		void assign_variable(const char* varname, int ll, int t);
+		/** Unassign the variable with a given lead and given tree
+		 * index. The tree index is only provided as a check. An
+		 * exception is thrown if the name, ll, and the tree index t
+		 * are not consistent. The method also updates nv, indices,
+		 * maxlead and minlag. The varname must be from the varnames
+		 * storage. */
+		void unassign_variable(const char* varname, int ll, int t);
+		/** Debug print. */
+		void print() const;
+	protected:
+		/** Do the check for the variable. A subclass may need to
+		 * reimplement this so that it could raise an error if the
+		 * variable is not among a given list. */
+		virtual int check_variable(const char* name) const;
+		/** Assign the constant. */
+		void assign_constant(const char* name, int t);
+		/** Assign the variable. */
+		void assign_variable(const char* name, int t);
+		/** The method just updates minlag and/or maxlead. Note that
+		 * when assigning variables the update is done while inserting
+		 * into the maps; when removing a variable, however, this
+		 * method needs to be called explicitly. */
+		void update_minmaxll();
+		/** The method parses the string to recover a variable name
+		 * and lag/lead ll. The variable name doesn't contain a lead/lag. */
+		virtual void parse_variable(const char* in, string& out, int& ll) const = 0;
+	public:
+		/** Return true if the str represents a double.*/ 
+		static bool is_string_constant(const char* str);
+	};
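+
+	/* A sketch of the check()/assign() protocol described above, where
+	 * `atoms' stands for an instance of a concrete subclass implementing
+	 * parse_variable() and `otree' for an OperationTree (the names and
+	 * the "x(-1)" string format are only illustrative):
+	 *
+	 *   int t = atoms.check("x(-1)");
+	 *   if (t == -1) {
+	 *       t = otree.add_nulary();    // allocate a fresh nulary term
+	 *       atoms.assign("x(-1)", t);  // register the atom under it
+	 *   }
+	 *   // t now identifies the atom x at lag -1 in the tree
+	 */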
+
+
+	/** This class is a parent of all orderings of the dynamic atoms
+	 * of variables which can appear before t, at t, or after t. It
+	 * encapsulates the ordering, and the information about the number
+	 * of static (appearing only at time t) predetermined (appearing
+	 * before t and possibly at t), both (appearing before t and after
+	 * t and possibly at t) and forward looking (appearing after t and
+	 * possibly at t).
+	 *
+	 * The constructor takes a list of variable names. The class also
+	 * provides mapping from the ordering of the variables in the list
+	 * (outer) to the new ordering (at time t) and back.
+	 *
+	 * The user of the subclass must call do_ordering() after
+	 * initialization.
+	 *
+	 * The class contains a few preimplemented methods for
+	 * ordering. The class is used in this way: Make a subclass, and
+	 * implement pure virtual do_ordering() by just plugging a
+	 * preimplemented method, or plugging your own implementation. The
+	 * method do_ordering() is called by the user after the constructor.
+	 */
+	class VarOrdering {
+	protected:
+		/** Number of static variables. */
+		int n_stat;
+		/** Number of predetermined variables. */
+		int n_pred;
+		/** Number of both variables. */
+		int n_both;
+		/** Number of forward looking variables. */
+		int n_forw;
+		/** This is a set of tree indices corresponding to the
+		 * variables at all times as they occur in the formulas. In
+		 * fact, since this is used only for derivatives, the ordering
+		 * of this vector is only important for ordering of the
+		 * derivatives, in other contexts the ordering is not
+		 * important, so it is rather a set of indices.*/
+		vector<int> der_atoms;
+		/** This maps tree index of the variable to the position in
+		 * the row of the ordering. One should be careful with making
+		 * space in the positions for variables not appearing at time
+		 * t. For instance in the pred(t-1), both(t-1), stat(t),
+		 * pred(t), both(t), forw(t), both(t+1), forw(t+1) ordering,
+		 * the variables x(t-1), y(t-1), x(t+1), z(t-1), z(t), and
+		 * z(t+1) having tree indices 6,5,4,3,2,1 will be ordered as
+		 * follows: y(t-1), x(t-1), z(t-1), [y(t)], [x(t)], z(t),
+		 * follows: y(t-1), x(t-1), z(t-1), [y(t)], [x(t)], z(t),
+		 * x(t+1), z(t+1), where a bracketed expression means a
+		 * non-existent variable occupying a space. The map thus will
+		 * look as follows: {5->0, 6->1, 3->2, 2->5, 4->6, 1->7}. Note
+		 * that nothing is mapped to positions 3 and 4. */
+		/** This maps an ordering of the list of variables in
+		 * constructor to the new ordering (at time t). The length is
+		 * the number of variables. */
+		vector<int> outer2y;
+		/** This maps a new ordering to the ordering of the list of
+		 * variables in constructor (at time t). The length is the
+		 * number of variables. */
+		vector<int> y2outer;
+		/** This is just a reference for variable names to keep it
+		 * from constructor to do_ordering() implementations. */
+		const vector<const char*>& varnames;
+		/** This is just a reference to atoms to keep it from
+		 * constructor to do_ordering() implementations. */
+		const DynamicAtoms& atoms;
+	public:
+		/** This is an enum type for an ordering type implemented by
+		 * do_general. */
+		enum ord_type {pbspbfbf, bfspbfpb};
+		/** Construct the ordering of the variables given by the names
+		 * with their dynamic occurrences defined by the atoms. It
+		 * calls the virtual method do_ordering which can be
+		 * reimplemented. */
+		VarOrdering(const vector<const char*>& vnames, const DynamicAtoms& a)
+			: n_stat(0), n_pred(0), n_both(0), n_forw(0), varnames(vnames), atoms(a)
+			{}
+		VarOrdering(const VarOrdering& vo, const vector<const char*>& vnames,
+					const DynamicAtoms& a);
+		virtual VarOrdering* clone(const vector<const char*>& vnames,
+								   const DynamicAtoms& a) const = 0;
+		/** Destructor does nothing here. */
+		virtual ~VarOrdering() {}
+		/** This is the method setting the ordering and the map. A
+		 * subclass must reimplement it, possibly using a
+		 * preimplemented ordering. This method must be called by the
+		 * user after the class has been created. */
+		virtual void do_ordering() = 0;
+		/** Return number of static. */
+		int nstat() const
+			{return n_stat;}
+		/** Return number of predetermined. */
+		int npred() const
+			{return n_pred;}
+		/** Return number of both. */
+		int nboth() const
+			{return n_both;}
+		/** Return number of forward looking. */
+		int nforw() const
+			{return n_forw;}
+		/** Return the set of tree indices for derivatives. */
+		const vector<int>& get_der_atoms() const
+			{return der_atoms;}
+		/** Return the y2outer. */
+		const vector<int>& get_y2outer() const
+			{return y2outer;}
+		/** Return the outer2y. */
+		const vector<int>& get_outer2y() const
+			{return outer2y;}
+		/** Query the atom given by the tree index. True is returned
+		 * if the atom is one of the variables in the object. */
+		bool check(int t) const;
+		/** Return the position of the atom (nulary term) given by a
+		 * tree index. It is a lookup to the map. If the atom cannot
+		 * be found, the exception is raised. */
+		int get_pos_of(int t) const;
+		/** This returns a length of ordered row of atoms. In all
+		 * cases so far, it does not depend on the ordering and it is
+		 * as follows. */
+		int length() const
+			{return n_stat+2*n_pred+3*n_both+2*n_forw;}
+		/** Debug print. */
+		void print() const;
+	protected:
+		/** This is a general ordering method which orders the
+		 * variables by the given ordering ord_type. See documentation
+		 * for respective do_ methods. */
+		void do_general(ord_type ordering);
+		/** This is a preimplemented ordering for do_ordering()
+		 * method. It assumes that the variables appear only at time
+		 * t-1, t, t+1. It orders the atoms as pred(t-1), both(t-1),
+		 * stat(t), pred(t), both(t), forw(t), both(t+1),
+		 * forw(t+1). It builds the der_atoms, the map of positions,
+		 * as well as y2outer and outer2y. */
+		void do_pbspbfbf()
+			{do_general(pbspbfbf);}
+		/** This is a preimplemented ordering for do_ordering()
+		 * method. It assumes that the variables appear only at time
+		 * t-1, t, t+1. It orders the atoms as both(t+1), forw(t+1),
+		 * stat(t), pred(t), both(t), forw(t), pred(t-1),
+		 * both(t-1). It builds the der_atoms, the map of positions,
+		 * as well as y2outer and outer2y. */
+		void do_bfspbfpb()
+			{do_general(bfspbfpb);}
+		/** This is a preimplemented ordering for do_ordering()
+		 * method. It makes no assumptions about occurrences of
+		 * variables at different times. It orders the atoms with
+		 * increasing time keeping the given ordering within one
+		 * time. This implies that y2outer and outer2y will be
+		 * identities. The der_atoms will be just a sequence of atoms
+		 * from the least to the most time preserving the order of atoms
+		 * within one time. */
+		void do_increasing_time();
+	private:
+		/** Declare this copy constructor as private to hide it. */
+		VarOrdering(const VarOrdering& vo);
+	};
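+
+	/* A usage sketch (endo_names, atoms and t are illustrative; see
+	 * EndoVarOrdering1 in fine_atoms.h for a concrete subclass plugging
+	 * do_pbspbfbf into do_ordering()):
+	 *
+	 *   EndoVarOrdering1 ord(endo_names, atoms);
+	 *   ord.do_ordering();
+	 *   int np  = ord.npred();        // counts are available now
+	 *   int pos = ord.get_pos_of(t);  // position of tree index t
+	 */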
+
+};
+
+#endif
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/parser/cc/fine_atoms.cpp b/dynare++/parser/cc/fine_atoms.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..17920b8ad5e1b6c456dcae40ac9044da5fb43a96
--- /dev/null
+++ b/dynare++/parser/cc/fine_atoms.cpp
@@ -0,0 +1,482 @@
+// Copyright (C) 2005, Ondra Kamenik
+
+// $Id: fine_atoms.cpp 1759 2008-03-31 14:25:20Z kamenik $
+
+#include "utils/cc/exception.h"
+
+#include "parser_exception.h"
+#include "fine_atoms.h"
+
+using namespace ogp;
+
+AllvarOuterOrdering::AllvarOuterOrdering(const vector<const char*>& allvar_outer,
+										 const FineAtoms& a)
+	: atoms(a), allvar(),
+	  endo2all(a.get_endovars().size(), -1),
+	  exo2all(a.get_exovars().size(), -1)
+{
+	// fill in the allvar from allvar_outer
+	for (unsigned int i = 0; i < allvar_outer.size(); i++) {
+		const char* s = atoms.varnames.query(allvar_outer[i]);
+		if (s)
+			allvar.push_back(s);
+		else
+			throw ogu::Exception(__FILE__, __LINE__,
+								 string("Variable ") + allvar_outer[i] + " is not a declared symbol in AllvarOuterOrdering constructor");
+	}
+
+	// fill in endo2all and exo2all
+	for (unsigned int i = 0; i < allvar.size(); i++) {
+		Tvarintmap::const_iterator it = atoms.endo_outer_map.find(allvar[i]);
+		if (it != atoms.endo_outer_map.end())
+			endo2all[(*it).second] = i;
+		else {
+			it = atoms.exo_outer_map.find(allvar[i]);
+			if (it != atoms.exo_outer_map.end())
+				exo2all[(*it).second] = i;
+			else
+				throw ogu::Exception(__FILE__, __LINE__,
+									 string("Name ") + allvar[i] + " is neither endogenous nor exogenous variable in AllvarOuterOrdering constructor");
+		}
+	}
+
+	// check whether everything has been filled
+	unsigned int iendo = 0;
+	while (iendo < endo2all.size() && endo2all[iendo] != -1) iendo++;
+	unsigned int iexo = 0;
+	while (iexo < exo2all.size() && exo2all[iexo] != -1) iexo++;
+	if (iendo < endo2all.size())
+		throw ogu::Exception(__FILE__, __LINE__,
+							 string("Endogenous variable ") + atoms.get_endovars()[iendo] +
+							 " not found in outer all ordering in AllvarOuterOrdering constructor");
+	if (iexo < exo2all.size())
+		throw ogu::Exception(__FILE__, __LINE__,
+							 string("Exogenous variable ") + atoms.get_exovars()[iexo] +
+							 " not found in outer all ordering in AllvarOuterOrdering constructor");
+}
+
+AllvarOuterOrdering::AllvarOuterOrdering(const AllvarOuterOrdering& avo,
+										 const FineAtoms& a)
+	: atoms(a), allvar(),
+	  endo2all(avo.endo2all),
+	  exo2all(avo.exo2all)
+{
+	// fill in the allvar from avo.allvar
+	for (unsigned int i = 0; i < avo.allvar.size(); i++) {
+		const char* s = atoms.varnames.query(avo.allvar[i]);
+		allvar.push_back(s);
+	}
+}
+
+
+FineAtoms::FineAtoms(const FineAtoms& fa)
+	: DynamicAtoms(fa), params(), endovars(), exovars(),
+	  endo_order(NULL), exo_order(NULL), allvar_order(NULL),
+	  der_atoms(fa.der_atoms),
+	  endo_atoms_map(fa.endo_atoms_map),
+	  exo_atoms_map(fa.exo_atoms_map)
+{
+	// fill in params
+	for (unsigned int i = 0; i < fa.params.size(); i++) {
+		const char* s = varnames.query(fa.params[i]);
+		if (! s)
+			throw ogu::Exception(__FILE__, __LINE__,
+								 string("Parameter ") + fa.params[i] + " does not exist in FineAtoms copy constructor");
+		params.push_back(s);
+		param_outer_map.insert(Tvarintmap::value_type(s, params.size()-1));
+	}
+	// fill in endovars
+	for (unsigned int i = 0; i < fa.endovars.size(); i++) {
+		const char* s = varnames.query(fa.endovars[i]);
+		if (! s)
+			throw ogu::Exception(__FILE__, __LINE__,
+								 string("Endo variable ") + fa.endovars[i] + " does not exist in FineAtoms copy constructor");
+		endovars.push_back(s);
+		endo_outer_map.insert(Tvarintmap::value_type(s, endovars.size()-1));
+	}
+	// fill in exovars
+	for (unsigned int i = 0; i < fa.exovars.size(); i++) {
+		const char* s = varnames.query(fa.exovars[i]);
+		if (! s)
+			throw ogu::Exception(__FILE__, __LINE__,
+								 string("Exo variable ") + fa.exovars[i] + " does not exist in FineAtoms copy constructor");
+		exovars.push_back(s);
+		exo_outer_map.insert(Tvarintmap::value_type(s, exovars.size()-1));
+	}
+
+	if (fa.endo_order)
+		endo_order = fa.endo_order->clone(endovars, *this);
+
+	if (fa.exo_order)
+		exo_order = fa.exo_order->clone(exovars, *this);
+
+	if (fa.allvar_order)
+		allvar_order = new AllvarOuterOrdering(*(fa.allvar_order), *this);
+}
+
+int FineAtoms::check_variable(const char* name) const
+{
+	string str;
+	int ll;
+	parse_variable(name, str, ll);
+	if (varnames.query(str.c_str()))
+		return DynamicAtoms::check_variable(name);
+	else {
+		throw ParserException(string("Variable <")+str+"> not declared.",0);
+		return -1;
+	}
+}
+
+int FineAtoms::num_exo_periods() const
+{
+	int mlead, mlag;
+	exovarspan(mlead, mlag);
+	return mlead-mlag+1;
+}
+
+void FineAtoms::parsing_finished(VarOrdering::ord_type ot)
+{
+	make_internal_orderings(ot);
+
+	// by default, concatenate outer endo and outer exo and make it as
+	// allvar outer:
+	vector<const char*> allvar_tmp;
+	allvar_tmp.insert(allvar_tmp.end(), endovars.begin(), endovars.end());
+	allvar_tmp.insert(allvar_tmp.end(), exovars.begin(), exovars.end());
+
+	if (allvar_order)
+		delete allvar_order;
+	allvar_order = new AllvarOuterOrdering(allvar_tmp, *this);
+}
+
+void FineAtoms::parsing_finished(VarOrdering::ord_type ot,
+								 const vector<const char*> allvar)
+{
+	make_internal_orderings(ot);
+	if (allvar_order)
+		delete allvar_order;
+	allvar_order = new AllvarOuterOrdering(allvar, *this);
+}
+
+const vector<const char*>& FineAtoms::get_allvar() const
+{
+	if (! allvar_order)
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "FineAtoms::get_allvar called before parsing_finished");
+
+	return allvar_order->get_allvar();
+}
+
+const vector<int>& FineAtoms::outer_endo2all() const
+{
+	if (! allvar_order)
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "FineAtoms::outer_endo2all called before parsing_finished");
+
+	return allvar_order->get_endo2all();
+}
+
+const vector<int>& FineAtoms::outer_exo2all() const
+{
+	if (! allvar_order)
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "FineAtoms::outer_exo2all called before parsing_finished");
+
+	return allvar_order->get_exo2all();
+}
+
+
+vector<int> FineAtoms::variables() const
+{
+	if (endo_order) {
+		return der_atoms;
+	} else {
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "FineAtoms::variables called before parsing_finished");
+		return vector<int>();
+	}
+}
+
+int FineAtoms::nstat() const
+{
+	if (endo_order) {
+		return endo_order->nstat();
+	} else {
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "FineAtoms::nstat called before parsing_finished");
+		return -1;
+	}
+}
+
+int FineAtoms::npred() const
+{
+	if (endo_order) {
+		return endo_order->npred();
+	} else {
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "FineAtoms::npred called before parsing_finished");
+		return -1;
+	}
+}
+
+int FineAtoms::nboth() const
+{
+	if (endo_order) {
+		return endo_order->nboth();
+	} else {
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "FineAtoms::nboth called before parsing_finished");
+		return -1;
+	}
+}
+
+int FineAtoms::nforw() const
+{
+	if (endo_order) {
+		return endo_order->nforw();
+	} else {
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "FineAtoms::nforw called before parsing_finished");
+		return -1;
+	}
+}
+
+int FineAtoms::get_pos_of_endo(int t) const
+{
+	if (endo_order) {
+		return endo_order->get_pos_of(t);
+	} else {
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "FineAtoms::get_pos_of_endo called before parsing_finished");
+		return -1;
+	}
+}
+
+int FineAtoms::get_pos_of_exo(int t) const
+{
+	if (exo_order) {
+		return exo_order->get_pos_of(t);
+	} else {
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "FineAtoms::get_pos_of_exo called before parsing_finished");
+		return -1;
+	}
+}
+
+int FineAtoms::get_pos_of_all(int t) const
+{
+	if (endo_order && exo_order) {
+		if (endo_order->check(t))
+			return endo_order->get_pos_of(t);
+		else if (exo_order->check(t))
+			return endo_order->length() + exo_order->get_pos_of(t);
+		else {
+			throw ogu::Exception(__FILE__,__LINE__,
+								 "Atom is not endo nor exo in FineAtoms::get_pos_of_all");
+			return -1;
+		}
+	} else {
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "FineAtoms::get_pos_of_all called before parsing_finished");
+		return -1;
+	}
+}
+
+const vector<int>& FineAtoms::y2outer_endo() const
+{
+	if (! endo_order)
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "FineAtoms::y2outer_endo called before parsing_finished");
+	return endo_order->get_y2outer();
+}
+
+const vector<int>& FineAtoms::outer2y_endo() const
+{
+	if (! endo_order)
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "FineAtoms::outer2y_endo called before parsing_finished");
+	return endo_order->get_outer2y();
+}
+
+const vector<int>& FineAtoms::y2outer_exo() const
+{
+	if (! exo_order)
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "FineAtoms::y2outer_exo called before parsing_finished");
+	return exo_order->get_y2outer();
+}
+
+const vector<int>& FineAtoms::outer2y_exo() const
+{
+	if (! exo_order)
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "FineAtoms::outer2y_exo called before parsing_finished");
+	return exo_order->get_outer2y();
+}
+
+const vector<int>& FineAtoms::get_endo_atoms_map() const
+{
+	if (! endo_order)
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "FineAtoms::get_endo_atoms_map called before parsing_finished");
+	return endo_atoms_map;
+}
+
+const vector<int>& FineAtoms::get_exo_atoms_map() const
+{
+	if (! exo_order)
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "FineAtoms::get_exo_atoms_map called before parsing_finished");
+	return exo_atoms_map;
+}
+
+int FineAtoms::name2outer_param(const char* name) const
+{
+	Tvarintmap::const_iterator it = param_outer_map.find(name);
+	if (it == param_outer_map.end())
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "Name is not a parameter in FineAtoms::name2outer_param");
+	return (*it).second;
+}
+
+int FineAtoms::name2outer_endo(const char* name) const
+{
+	Tvarintmap::const_iterator it = endo_outer_map.find(name);
+	if (it == endo_outer_map.end())
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "Name is not an endogenous variable in FineAtoms::name2outer_endo");
+	return (*it).second;
+}
+
+int FineAtoms::name2outer_exo(const char* name) const
+{
+	Tvarintmap::const_iterator it = exo_outer_map.find(name);
+	if (it == exo_outer_map.end())
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "Name is not an exogenous variable in FineAtoms::name2outer_exo");
+	return (*it).second;
+}
+
+int FineAtoms::name2outer_allvar(const char* name) const
+{
+	if (! allvar_order)
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "FineAtoms::name2outer_allvar called before parsing_finished");
+
+	Tvarintmap::const_iterator it = endo_outer_map.find(name);
+	if (it != endo_outer_map.end())
+		return allvar_order->get_endo2all()[(*it).second];
+	else {
+		it = exo_outer_map.find(name);
+		if (it != exo_outer_map.end())
+			return allvar_order->get_exo2all()[(*it).second];
+	}
+
+	throw ogu::Exception(__FILE__,__LINE__,
+						 string("Name ") + name + " is neither endo nor exo variable in FineAtoms::name2outer_allvar");
+	return -1;
+}
+
+void FineAtoms::register_uniq_endo(const char* name)
+{
+	if (varnames.query(name))
+		throw ogp::ParserException(string("Endogenous variable <")+name+"> is not unique.",0);
+	const char* ss = varnames.insert(name);
+	endovars.push_back(ss);
+	endo_outer_map.insert(Tvarintmap::value_type(ss, endovars.size()-1));
+}
+
+void FineAtoms::register_uniq_exo(const char* name)
+{
+	if (varnames.query(name))
+		throw ogp::ParserException(string("Exogenous variable <")+name+"> is not unique.",0);
+	const char* ss = varnames.insert(name);
+	exovars.push_back(ss);
+	exo_outer_map.insert(Tvarintmap::value_type(ss, exovars.size()-1));
+}
+
+void FineAtoms::register_uniq_param(const char* name)
+{
+	if (varnames.query(name))
+		throw ogp::ParserException(string("Parameter <")+name+"> is not unique.",0);
+	const char* ss = varnames.insert(name);
+	params.push_back(ss);
+	param_outer_map.insert(Tvarintmap::value_type(ss, params.size()-1));
+}
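+
+// A sketch of the registration workflow as driven by a parser: declare
+// all symbols first, then freeze the orderings with parsing_finished().
+// The names are purely illustrative and `atoms' stands for an instance
+// of a concrete subclass of FineAtoms (one implementing parse_variable).
+//
+//   atoms.register_uniq_param("beta");
+//   atoms.register_uniq_endo("c");
+//   atoms.register_uniq_exo("eps");
+//   ...                           // parse formulas, assigning atoms
+//   atoms.parsing_finished(VarOrdering::pbspbfbf);
+//   int npred = atoms.npred();    // internal orderings now available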
+
+void FineAtoms::make_internal_orderings(VarOrdering::ord_type ot)
+{
+	bool endo_ordering_done = false;
+	bool exo_ordering_done = false;
+
+	order_type = ot;
+
+	int mlead, mlag;
+	endovarspan(mlead, mlag);
+	if (mlag >= -1 && mlead <= 1) {
+		// make endo ordering
+		if (endo_order)
+			delete endo_order;
+		if (ot == VarOrdering::pbspbfbf)
+			endo_order = new EndoVarOrdering1(endovars, *this);
+		else
+			endo_order = new EndoVarOrdering2(endovars, *this);
+		endo_order->do_ordering();
+		endo_ordering_done = true;
+	}
+
+	exovarspan(mlead, mlag);
+	if (mlag == 0 && mlead == 0) {
+		// make exo ordering
+		if (exo_order)
+			delete exo_order;
+		exo_order = new ExoVarOrdering(exovars, *this);
+		exo_order->do_ordering();
+		exo_ordering_done = true;
+	}
+
+	if (endo_ordering_done && exo_ordering_done) {
+		// concatenate der atoms from endo_order and exo_order
+		der_atoms.clear();
+		der_atoms.insert(der_atoms.end(), 
+						 endo_order->get_der_atoms().begin(),
+						 endo_order->get_der_atoms().end());
+		der_atoms.insert(der_atoms.end(), 
+						 exo_order->get_der_atoms().begin(),
+						 exo_order->get_der_atoms().end());
+		
+		// create endo_atoms_map; der_atoms is a concatenation, so it is easy
+		int endo_atoms = endo_order->get_der_atoms().size();
+		endo_atoms_map.clear();
+		for (int i = 0; i < endo_atoms; i++)
+			endo_atoms_map.push_back(i);
+		// create exo_atoms_map
+		int exo_atoms = exo_order->get_der_atoms().size();
+		exo_atoms_map.clear();
+		for (int i = 0; i < exo_atoms; i++)
+			exo_atoms_map.push_back(endo_atoms + i);
+	}
+}
+
+void FineAtoms::print() const
+{
+	DynamicAtoms::print();
+	if (endo_order) {
+		printf("Endo ordering:\n");
+		endo_order->print();
+	} else {
+		printf("Endo ordering not created.\n");
+	}
+	if (exo_order) {
+		printf("Exo ordering:\n");
+		exo_order->print();
+	} else {
+		printf("Exo ordering not created.\n");
+	}
+	printf("endo atoms map:\n");
+	for (unsigned int i = 0; i < endo_atoms_map.size(); i++)
+		printf("%d --> %d\n", i, endo_atoms_map[i]);
+	printf("exo atoms map:\n");
+	for (unsigned int i = 0; i < exo_atoms_map.size(); i++)
+		printf("%d --> %d\n", i, exo_atoms_map[i]);
+}
diff --git a/dynare++/parser/cc/fine_atoms.h b/dynare++/parser/cc/fine_atoms.h
new file mode 100644
index 0000000000000000000000000000000000000000..7cc82560864d4771823370e38895363ab9a3a5b3
--- /dev/null
+++ b/dynare++/parser/cc/fine_atoms.h
@@ -0,0 +1,350 @@
+// Copyright (C) 2005, Ondra Kamenik
+
+// $Id: fine_atoms.h 1759 2008-03-31 14:25:20Z kamenik $
+
+#ifndef OGP_FINE_ATOMS_H
+#define OGP_FINE_ATOMS_H
+
+#include "dynamic_atoms.h"
+
+#include <vector>
+#include <string>
+
+namespace ogp {
+
+	using std::vector;
+	using std::string;
+
+	/** This is just ordering used for endogenous variables. It
+	 * assumes that we have only time t-1, t, and t+1, orders them as
+	 * pred(t-1), both(t-1), stat(t), pred(t), both(t), forw(t),
+	 * both(t+1), forw(t+1). */
+	class EndoVarOrdering1 : public VarOrdering {
+	public:
+		EndoVarOrdering1(const vector<const char*>& vnames, const DynamicAtoms& a)
+			: VarOrdering(vnames, a) {}
+		EndoVarOrdering1(const EndoVarOrdering1& vo, const vector<const char*>& vnames,
+						 const DynamicAtoms& a)
+			: VarOrdering(vo, vnames, a) {}
+		VarOrdering* clone(const vector<const char*>& vnames, const DynamicAtoms& a) const
+			{return new EndoVarOrdering1(*this, vnames, a);}
+		void do_ordering()
+			{do_pbspbfbf();}
+	};
+
+	/** This is just another ordering used for endogenous
+	 * variables. It assumes that we have only time t-1, t, and t+1,
+	 * orders them as both(t+1), forw(t+1), pred(t-1), both(t-1),
+	 * stat(t), pred(t), both(t), forw(t). */
+	class EndoVarOrdering2 : public VarOrdering {
+	public:
+		EndoVarOrdering2(const vector<const char*>& vnames, const DynamicAtoms& a)
+			: VarOrdering(vnames, a) {}
+		EndoVarOrdering2(const EndoVarOrdering2& vo, const vector<const char*>& vnames,
+						 const DynamicAtoms& a)
+			: VarOrdering(vo, vnames, a) {}
+		VarOrdering* clone(const vector<const char*>& vnames, const DynamicAtoms& a) const
+			{return new EndoVarOrdering2(*this, vnames, a);}
+		void do_ordering()
+			{do_bfspbfpb();}
+	};
+
+	/** This is just ordering used for exogenous variables. It makes
+	 * no assumptions about their timing. It orders them from the
+	 * least time to the latest time. */
+	class ExoVarOrdering : public VarOrdering {
+	public:
+		ExoVarOrdering(const vector<const char*>& vnames, const DynamicAtoms& a)
+			: VarOrdering(vnames, a) {}
+		ExoVarOrdering(const ExoVarOrdering& vo, const vector<const char*>& vnames,
+					   const DynamicAtoms& a)
+			: VarOrdering(vo, vnames, a) {}
+		VarOrdering* clone(const vector<const char*>& vnames, const DynamicAtoms& a) const
+			{return new ExoVarOrdering(*this, vnames, a);}
+		void do_ordering()
+			{do_increasing_time();}
+	};
+
+	class FineAtoms;
+
+	/** This class provides an outer ordering of all variables (endo
+	 * and exo). It maps the ordering to the particular outer
+	 * orderings of endo and exo. It works tightly with the FineAtoms
+	 * class. */
+	class AllvarOuterOrdering {
+	protected:
+		/** Type for a map mapping a variable name to an integer. */
+		typedef map<const char*, int, ltstr> Tvarintmap;
+		/** Reference to atoms. */
+		const FineAtoms& atoms;
+		/** The vector of all endo and exo variables in outer
+		 * ordering. The pointers point to storage in atoms. */
+		vector<const char*> allvar;
+		/** The mapping from outer endogenous to outer all. For
+		 * example endo2all[0] is the order of the first outer
+		 * endogenous variable in the allvar ordering. */
+		vector<int> endo2all;
+		/** The mapping from outer exogenous to outer all. For example
+		 * exo2all[0] is the order of the first outer exogenous
+		 * variables in the allvar ordering. */
+		vector<int> exo2all;
+	public:
+		/** Construct the allvar outer ordering from the provided
+		 * sequence of endo and exo names. The names can have an
+		 * arbitrary storage, the storage is transformed to the atoms
+		 * storage. An exception is thrown if either the list is not
+		 * exhaustive, or some string is not a variable. */
+		AllvarOuterOrdering(const vector<const char*>& allvar_outer, const FineAtoms& a);
+		/** Copy constructor using the storage of provided atoms. */
+		AllvarOuterOrdering(const AllvarOuterOrdering& allvar_outer, const FineAtoms& a);
+		/** Return endo2all mapping. */
+		const vector<int>& get_endo2all() const
+			{return endo2all;}
+		/** Return exo2all mapping. */
+		const vector<int>& get_exo2all() const
+			{return exo2all;}
+		/** Return the allvar ordering. */
+		const vector<const char*>& get_allvar() const
+			{return allvar;}
+	};
+
+	/** This class refines the DynamicAtoms by distinguishing among
+	 * parameters (no lag and leads) and endogenous and exogenous
+	 * variables (with lags and leads). For parameters, endogenous and
+	 * exogenous, it defines outer orderings and internal
+	 * orderings. The internal orderings are created by
+	 * parsing_finished() method when it is sure that no new variables
+	 * would be registered. The outer orderings are given by the order
+	 * of calls of registering methods.
+     * 
+     * In addition, the class also defines outer ordering of
+     * endogenous and exogenous variables. This is input as a
+     * parameter to parsing_finished(). By default, this whole outer
+     * ordering is just a concatenation of outer ordering of
+     * endogenous and exogenous variables.
+	 *
+	 * The internal ordering of all endo and exo variables is just a
+	 * concatenation of endo and exo variables in their internal
+	 * orderings. This is the ordering with respect to which all
+	 * derivatives are taken. */
+	class FineAtoms : public DynamicAtoms {
+		friend class AllvarOuterOrdering;
+	protected:
+		typedef map<const char*, int, ltstr> Tvarintmap;
+	private:
+		/** The vector of parameters names. The order gives the order
+		 * the data is communicated with outside world. */
+		vector<const char*> params;
+		/** A map mapping a name of a parameter to an index in the outer
+		 * ordering. */
+		Tvarintmap param_outer_map;
+		/** The vector of endogenous variables. This defines the order
+		 * like parameters. */
+		vector<const char*> endovars;
+		/** A map mapping a name of an endogenous variable to an index
+		 * in the outer ordering. */
+		Tvarintmap endo_outer_map;
+		/** The vector of exogenous variables. Also defines the order
+		 * like parameters and endovars. */
+		vector<const char*> exovars;
+		/** A map mapping a name of an exogenous variable to an index
+		 * in the outer ordering. */
+		Tvarintmap exo_outer_map;
+
+	protected:
+		/** This is the internal ordering of all atoms corresponding
+		 * to endogenous variables. It is constructed by
+		 * parsing_finished() method, which should be called after all
+		 * parsing jobs have been finished. */ 
+		VarOrdering* endo_order;
+		/** This is the internal ordering of all atoms corresponding
+		 * to exogenous variables. It has the same handling as
+		 * endo_order. */
+		VarOrdering* exo_order;
+		/** This is the all variables outer ordering. It is
+		 * constructed by parsing finished. */
+		AllvarOuterOrdering* allvar_order;
+		/** This vector defines a set of atoms as tree indices used
+		 * for differentiation. The order of the atoms in this vector
+		 * defines ordering of the derivative tensors. The ordering is
+		 * a concatenation of atoms from endo_order and then
+		 * exo_order. This vector is setup by parsing_finished() and
+		 * is returned by variables(). */
+		vector<int> der_atoms;
+		/** This is a mapping from endogenous atoms to all atoms in
+		 * der_atoms member. The mapping maps index in endogenous atom
+		 * ordering to index (not value) in der_atoms. It is useful if
+		 * one wants to evaluate derivatives wrt only endogenous
+		 * variables. It is set by parsing_finished(). By definition,
+		 * it is monotone. */
+		vector<int> endo_atoms_map;
+		/** This is a mapping from exogenous atoms to all atoms in
+		 * der_atoms member. It is the same as endo_atoms_map for
+		 * atoms of exogenous variables. */
+		vector<int> exo_atoms_map;
+	public:
+		FineAtoms()
+			: endo_order(NULL), exo_order(NULL), allvar_order(NULL) {}
+		FineAtoms(const FineAtoms& fa);
+		/** Deletes endo_order and exo_order. */
+		virtual ~FineAtoms()
+			{
+				if (endo_order) delete endo_order;
+				if (exo_order) delete exo_order;
+				if (allvar_order) delete allvar_order;
+			}
+		/** Overrides DynamicAtoms::check_variable so that the error
+		 * would be raised if the variable name is not declared. A
+		 * variable is declared by inserting it to
+		 * DynamicAtoms::varnames. This is a responsibility of a
+		 * subclass. */
+		int check_variable(const char* name) const;
+		/** This calculates min lag and max lead of endogenous variables. */
+		void endovarspan(int& mlead, int& mlag) const
+			{varspan(endovars, mlead, mlag);}
+		/** This calculates min lag and max lead of exogenous variables. */
+		void exovarspan(int& mlead, int& mlag) const
+			{varspan(exovars, mlead, mlag);}
+		/** This calculates the number of periods in which at least
+		 * one exogenous variable occurs. */
+		int num_exo_periods() const;
+		/** Return an (external) ordering of parameters. */
+		const vector<const char*>& get_params() const
+			{return params;}
+		/** Return an external ordering of endogenous variables. */
+		const vector<const char*>& get_endovars() const
+			{return endovars;}
+		/** Return an external ordering of exogenous variables. */
+		const vector<const char*>& get_exovars() const
+			{return exovars;}
+		/** This constructs internal orderings and makes the indices
+		 * returned by variables method available. Further it
+		 * constructs outer ordering of all variables by a simple
+		 * concatenation of outer endogenous and outer exogenous. In
+		 * addition, it makes nstat, npred, nboth, nforw available. */
+		void parsing_finished(VarOrdering::ord_type ot);
+		/** This does the same thing as
+		 * parsing_finished(VarOrdering::ord_type) plus it allows for
+		 * inputing a different outer ordering of all variables. The
+		 * ordering is input as a list of strings, their storage can
+		 * be arbitrary. */
+		void parsing_finished(VarOrdering::ord_type ot, const vector<const char*> avo);
+		/** Return the external ordering of all variables (endo and
+		 * exo). This is either the second argument to
+		 * parsing_finished or the default external ordering. This
+		 * must be called only after parsing_finished. */
+		const vector<const char*>& get_allvar() const;
+		/** Return the map from outer ordering of endo variables to
+		 * the allvar ordering. This must be called only after
+		 * parsing_finished. */
+		const vector<int>& outer_endo2all() const;
+		/** Return the map from outer ordering of exo variables to
+		 * the allvar ordering. This must be called only after
+		 * parsing_finished. */
+		const vector<int>& outer_exo2all() const;
+		/** Return the atoms with respect to which we are going to
+		 * differentiate. This must be called after
+		 * parsing_finished. */
+		vector<int> variables() const;
+		/** Return the number of static variables. */
+		int nstat() const;
+		/** Return the number of predetermined variables. */
+		int npred() const;
+		/** Return the number of variables that are both predetermined and forward looking. */
+		int nboth() const;
+		/** Return the number of forward looking variables. */
+		int nforw() const;
+		/** Return the index of an endogenous atom given by tree index in
+		 * the endo ordering. This must also be called only after
+		 * parsing_finished(). */
+		int get_pos_of_endo(int t) const;
+		/** Return the index of an exogenous atom given by tree index in
+		 * the exo ordering. This must also be called only after
+		 * parsing_finished(). */
+		int get_pos_of_exo(int t) const;
+		/** Return the index of either an endogenous or exogenous atom
+		 * given by tree index in the concatenated ordering of
+		 * endogenous and exogenous atoms. This must also be called
+		 * only after parsing_finished(). */
+		int get_pos_of_all(int t) const;
+		/** Return the mapping from endogenous at time t to outer
+		 * ordering of endogenous. */
+		const vector<int>& y2outer_endo() const;
+		/** Return the mapping from the outer ordering of endogenous to endogenous
+		 * at time t. */
+		const vector<int>& outer2y_endo() const;
+		/** Return the mapping from exogenous at time t to outer
+		 * ordering of exogenous. */
+		const vector<int>& y2outer_exo() const;
+		/** Return the mapping from the outer ordering of exogenous to exogenous
+		 * at time t. */
+		const vector<int>& outer2y_exo() const;
+		/** Return the endo_atoms_map. */
+		const vector<int>& get_endo_atoms_map() const;
+		/** Return the exo_atoms_map. */
+		const vector<int>& get_exo_atoms_map() const;
+		/** Return an index in the outer ordering of a given
+		 * parameter. An exception is thrown if the name is not a
+		 * parameter. */
+		int name2outer_param(const char* name) const;
+		/** Return an index in the outer ordering of a given
+		 * endogenous variable. An exception is thrown if the name is
+		 * not an endogenous variable. */
+		int name2outer_endo(const char* name) const;
+		/** Return an index in the outer ordering of a given
+		 * exogenous variable. An exception is thrown if the name is
+		 * not an exogenous variable. */
+		int name2outer_exo(const char* name) const;
+		/** Return an index in the outer ordering of all variables
+		 * (endo and exo) for a given name. An exception is thrown if
+		 * the name is not a variable. This must be called only after
+		 * parsing_finished(). */
+		int name2outer_allvar(const char* name) const;
+		/** Return the number of endogenous variables at time t-1; these
+		 * are the state variables. */
+		int nys() const
+			{return npred()+nboth();}
+		/** Return the number of endogenous variables at time t+1. */
+		int nyss() const
+			{return nboth()+nforw();}
+		/** Return the number of endogenous variables. */
+		int ny() const
+			{return endovars.size();}
+		/** Return the number of exogenous variables. */
+		int nexo() const
+			{return (int)exovars.size();}
+		/** Return the number of parameters. */
+		int np() const
+			{return (int)(params.size());}
+		/** Register unique endogenous variable name. The order of
+		 * calls defines the endo outer ordering. The method is
+		 * virtual, since a subclass may want to do some additional
+		 * action. */
+		virtual void register_uniq_endo(const char* name);
+		/** Register unique exogenous variable name. The order of
+		 * calls defines the exo outer ordering. The method is
+		 * virtual, since a subclass may want to do some additional
+		 * action. */
+		virtual void register_uniq_exo(const char* name);
+		/** Register unique parameter name. The order of calls defines
+		 * the param outer ordering. The method is
+		 * virtual, since a subclass may want to do some additional
+		 * action. */
+		virtual void register_uniq_param(const char* name);
+		/** Debug print. */
+		void print() const;
+	private:
+		/** This performs the common part of parsing_finished(), which
+		 * is a construction of internal orderings. */
+		void make_internal_orderings(VarOrdering::ord_type ot);
+	protected:
+		/** This remembers the ordering type of the last call to make_internal_orderings(). */
+		VarOrdering::ord_type order_type;
+	};
+};
+
+#endif
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/parser/cc/formula.lex b/dynare++/parser/cc/formula.lex
new file mode 100644
index 0000000000000000000000000000000000000000..15254afd2fc5f6701b60d626832ec10188fb6ee7
--- /dev/null
+++ b/dynare++/parser/cc/formula.lex
@@ -0,0 +1,72 @@
+%{
+#include "location.h"
+#include "formula_tab.hh"
+
+	extern YYLTYPE fmla_lloc;
+
+#define YY_USER_ACTION SET_LLOC(fmla_);
+%}
+
+%option nounput
+%option noyy_top_state
+%option stack
+%option yylineno
+%option prefix="fmla_"
+%option never-interactive
+%x CMT
+
+%%
+
+ /* comments */
+<*>"/*"              {yy_push_state(CMT);}
+<CMT>[^*\n]*
+<CMT>"*"+[^*/\n]*
+<CMT>"*"+"/"         {yy_pop_state();}
+<CMT>[\n]
+"//".*\n
+
+ /* whitespace is ignored */
+
+[ \t\r\n]
+[+]                  {return YPLUS;}
+[-]                  {return YMINUS;}
+[*]                  {return YTIMES;}
+[/]                  {return YDIVIDE;}
+[\^]                 {return YPOWER;}
+exp                  {return YEXP;}
+log                  {return YLOG;}
+sin                  {return YSIN;}
+cos                  {return YCOS;}
+tan                  {return YTAN;}
+sqrt                 {return YSQRT;}
+erf                  {return YERF;}
+erfc                 {return YERFC;}
+diff                 {return YDIFF;}
+
+ /* names: parameters, variables (lagged/leaded) */
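+ /* e.g. "alpha", "y(-1)" or "c{+2}" all match the pattern below (illustrative examples only) */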
+[A-Za-z_][A-Za-z0-9_]*([\(\{][+-]?[0-9]+[\)\}])? {
+	fmla_lval.string=fmla_text;
+	return NAME;
+}
+
+ /* floating point numbers */
+(([0-9]*\.?[0-9]+)|([0-9]+\.))([edED][-+]?[0-9]+)? {
+	fmla_lval.string=fmla_text;
+	return DNUMBER;
+}
+
+=                    {return EQUAL_SIGN;}
+
+.                    {return fmla_text[0];}
+
+%%
+
+int fmla_wrap()
+{
+	return 1;
+}
+
+void fmla__destroy_buffer(void* p)
+{
+	fmla__delete_buffer((YY_BUFFER_STATE)p);
+}
diff --git a/dynare++/parser/cc/formula.y b/dynare++/parser/cc/formula.y
new file mode 100644
index 0000000000000000000000000000000000000000..d8d53405e23a79d88d5a590704f1512e78d3def1
--- /dev/null
+++ b/dynare++/parser/cc/formula.y
@@ -0,0 +1,87 @@
+%{
+/* Copyright 2006, Ondra Kamenik */
+
+/* $Id: formula.y 1749 2008-03-28 11:59:29Z kamenik $ */
+
+#include "location.h"
+#include "formula_parser.h" 
+#include "formula_tab.hh"
+
+	int fmla_error(char*);
+	int fmla_lex(void);
+	extern int fmla_lineno;
+	extern ogp::FormulaParser* fparser;
+	extern YYLTYPE fmla_lloc;
+
+	static void print_token_value (FILE *, int, YYSTYPE);
+#define YYPRINT(file, type, value) print_token_value (file, type, value)
+
+%}
+
+%union {
+	char* string;
+	double dvalue;
+	int integer;
+}
+
+%token EQUAL_SIGN
+%left YPLUS YMINUS
+%left YTIMES YDIVIDE
+%left YUMINUS YUPLUS
+%right YPOWER
+%token YEXP YLOG YSIN YCOS YTAN YSQRT YERF YERFC YDIFF
+%token <string> DNUMBER NAME
+%type <integer> expression
+
+%name-prefix="fmla_"
+
+%locations
+%error-verbose
+
+%%
+ root : equation_list
+      | expression
+                                  {fparser->add_formula($1);}
+      ; 
+
+ equation_list : equation_list equation | equation ;
+
+ equation : expression EQUAL_SIGN expression ';' 
+                                  {fparser->add_formula(fparser->add_binary(ogp::MINUS,$1,$3));}
+      | expression ';'
+                                  {fparser->add_formula($1);}
+      ;
+
+  expression : '(' expression ')' { $$ = $2;}
+      | expression YPLUS expression {$$=fparser->add_binary(ogp::PLUS,$1,$3);}
+      | expression YMINUS expression {$$=fparser->add_binary(ogp::MINUS,$1,$3);}
+      | expression YTIMES expression {$$=fparser->add_binary(ogp::TIMES,$1,$3);}
+      | expression YDIVIDE expression {$$=fparser->add_binary(ogp::DIVIDE,$1,$3);}
+      | expression YPOWER expression {$$=fparser->add_binary(ogp::POWER,$1,$3);}
+      | YMINUS expression %prec YUMINUS {$$=fparser->add_unary(ogp::UMINUS,$2);}
+      | YPLUS expression %prec YUPLUS {$$ = $2;}
+      | YSIN '(' expression ')' {$$=fparser->add_unary(ogp::SIN,$3);}
+      | YCOS '(' expression ')' {$$=fparser->add_unary(ogp::COS,$3);}
+      | YTAN '(' expression ')' {$$=fparser->add_unary(ogp::TAN,$3);}
+      | YEXP '(' expression ')' {$$=fparser->add_unary(ogp::EXP,$3);}
+      | YLOG '(' expression ')' {$$=fparser->add_unary(ogp::LOG,$3);}
+      | YSQRT '(' expression ')' {$$=fparser->add_unary(ogp::SQRT,$3);}
+      | YERF '(' expression ')' {$$=fparser->add_unary(ogp::ERF,$3);}
+      | YERFC '(' expression ')' {$$=fparser->add_unary(ogp::ERFC,$3);}
+      | YDIFF '(' expression ',' NAME ')' {$$=fparser->add_derivative($3, fparser->add_nulary($5));}
+      | NAME {$$=fparser->add_nulary($1);}
+      | DNUMBER {$$=fparser->add_nulary($1);}
+      ;
+
+%%
+
+int fmla_error(char* s)
+{
+	fparser->error(s);
+	return 0; // not reached, error() throws
+}
+
+static void print_token_value(FILE* file, int type, YYSTYPE value)
+{
+	if (type == NAME)
+		fprintf(file, "%s", value.string);
+}
diff --git a/dynare++/parser/cc/formula_parser.cpp b/dynare++/parser/cc/formula_parser.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b2b65de2f56bb7222b7ce7fe239340e6e119cc6d
--- /dev/null
+++ b/dynare++/parser/cc/formula_parser.cpp
@@ -0,0 +1,517 @@
+// Copyright (C) 2005, Ondra Kamenik
+
+// $Id: formula_parser.cpp 2268 2008-11-22 10:38:03Z michel $
+
+#include "utils/cc/pascal_triangle.h"
+#include "utils/cc/exception.h"
+
+#include "parser_exception.h"
+#include "location.h"
+#include "formula_parser.h"
+#include "formula_tab.hh"
+
+#include <cmath>
+#include <cstring>
+
+using namespace ogp;
+
+extern location_type fmla_lloc;
+
+FormulaParser::FormulaParser(const FormulaParser& fp, Atoms& a)
+	: otree(fp.otree), atoms(a), formulas(fp.formulas), ders()
+{
+	// create derivatives
+	for (unsigned int i = 0; i < fp.ders.size(); i++)
+		ders.push_back(new FormulaDerivatives(*(fp.ders[i])));
+}
+
+FormulaParser::~FormulaParser()
+{
+	destroy_derivatives();
+}
+
+void FormulaParser::differentiate(int max_order)
+{
+	destroy_derivatives();
+	vector<int> vars;
+	vars = atoms.variables();
+	for (unsigned int i = 0; i < formulas.size(); i++)
+		ders.push_back(new FormulaDerivatives(otree, vars, formulas[i], max_order));
+}
+
+const FormulaDerivatives& FormulaParser::derivatives(int i) const
+{
+	if (i < (int)ders.size())
+		return *(ders[i]);
+	else
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "Wrong formula index in FormulaParser::derivatives");
+	return *(ders[0]); // just because of compiler
+}
+
+
+
+void FormulaParser::add_formula(int t)
+{
+	formulas.push_back(t);
+}
+
+int FormulaParser::add_binary(code_t code, int t1, int t2)
+{
+	return otree.add_binary(code, t1, t2);
+}
+
+int FormulaParser::add_unary(code_t code, int t)
+{
+	return otree.add_unary(code, t);
+}
+
+int FormulaParser::add_nulary(const char* str)
+{
+	int t = -1;
+	try {
+		t = atoms.check(str);
+	} catch (const ParserException& e) {
+		throw ParserException(e, fmla_lloc.off);
+	}
+	if (t == -1) {
+		t = otree.add_nulary();
+		atoms.assign(str, t);
+	}
+	return t;
+}
+
+void FormulaParser::add_subst_formulas(const map<int,int>& subst, const FormulaParser& fp)
+{
+	for (int i = 0; i < fp.nformulas(); i++) {
+		int f = add_substitution(fp.formula(i), subst, fp);
+		add_formula(f);
+	}
+}
+
+void FormulaParser::substitute_formulas(const map<int,int>& smap)
+{
+	for (int i = 0; i < nformulas(); i++) {
+		// make substitution and replace the formula for it
+		int f = add_substitution(formulas[i], smap);
+		formulas[i] = f;
+		// update the derivatives if any
+		if (i < (int)ders.size() && ders[i]) {
+			int order = ders[i]->get_order();
+			delete ders[i];
+			ders[i] = new FormulaDerivatives(otree, atoms.variables(), formulas[i], order);
+		}
+	}
+}
+
+/** Global symbols for passing info to parser. */
+FormulaParser* fparser;
+
+/** The declarations of functions defined in formula_ll.cc and
+ * formula_tab.cc generated from formula.lex and formula.y */
+void* fmla__scan_buffer(char*, size_t);
+void fmla__destroy_buffer(void*);
+void fmla_parse();
+extern location_type fmla_lloc; 
+
+/** This makes its own copy of the provided data, sets the buffer for
+ * the parser with fmla__scan_buffer, and launches fmla_parse(). Note
+ * that the pointer returned from fmla__scan_buffer must be freed at
+ * the end. */
+void FormulaParser::parse(int length, const char* stream)
+{
+	char* buffer = new char[length+2];
+	strncpy(buffer, stream, length);
+	buffer[length] = '\0';
+	buffer[length+1] = '\0';
+	fmla_lloc.off = 0;
+	fmla_lloc.ll = 0;
+	void* p = fmla__scan_buffer(buffer, (unsigned int)length+2);
+	fparser = this;
+	fmla_parse();
+	delete [] buffer;
+	fmla__destroy_buffer(p);
+}
+
+void FormulaParser::error(const char* mes) const
+{
+	throw ParserException(mes, fmla_lloc.off);
+}
+
+int FormulaParser::last_formula() const
+{
+	int res = -1;
+	for (unsigned int i = 0; i < formulas.size(); i++)
+		if (res < formulas[i])
+			res = formulas[i];
+	return std::max(res, otree.get_last_nulary());
+}
+
+int FormulaParser::pop_last_formula()
+{
+	if (formulas.size() == 0)
+		return -1;
+	int t = formulas.back();
+	if (formulas.size() == ders.size()) {
+		delete ders.back();
+		ders.pop_back();
+	}
+	formulas.pop_back();
+	return t;
+}
+
+void FormulaParser::print() const
+{
+	atoms.print();
+	for (unsigned int i = 0; i < formulas.size(); i++) {
+		printf("formula %d:\n", formulas[i]);
+		otree.print_operation(formulas[i]);
+	}
+	for (unsigned int i = 0; i < ders.size(); i++) {
+		printf("derivatives for the formula %d:\n", formulas[i]);
+		ders[i]->print(otree);
+	}
+}
+
+void FormulaParser::destroy_derivatives()
+{
+	while (ders.size() > 0) {
+		delete ders.back();
+		ders.pop_back();
+	}
+}
+
+/** This constructor makes a vector of indices for formulas
+ * corresponding to derivatives of the given formula. The formula is
+ * supposed to belong to the provided tree, the created derivatives
+ * are added to the tree. 
+ *
+ * The algorithm is as follows. todo: update description of the
+ * algorithm
+*/
+FormulaDerivatives::FormulaDerivatives(OperationTree& otree,
+									   const vector<int>& vars, int f, int max_order)
+	: nvar(vars.size()), order(max_order)
+{
+	FoldMultiIndex fmi_zero(nvar);
+	tder.push_back(f);
+	indices.push_back(fmi_zero);
+	unsigned int last_order_beg = 0;
+	unsigned int last_order_end = tder.size();
+
+	for (int k = 1; k <= order; k++) {
+		// interval <last_order_beg,last_order_end) is guaranteed
+		// here to contain at least one item
+		for (unsigned int run = last_order_beg; run < last_order_end; run++) {
+			// shift one order from the run
+			FoldMultiIndex fmi(indices[run], 1);
+			// set starting variable from the run, note that if k=1,
+			// the shift order ctor of fmi will set it to zero
+			int ivar_start = fmi[k-1];
+			for (int ivar = ivar_start; ivar < nvar; ivar++, fmi.increment()) {
+				int der = otree.add_derivative(tder[run], vars[ivar]);
+				if (der != OperationTree::zero) {
+					tder.push_back(der);
+					indices.push_back(fmi);
+				}
+			}
+		}
+
+		// set new last_order_beg and last_order_end
+		last_order_beg = last_order_end;
+		last_order_end = tder.size();
+		// if there was no new derivative, break out from the loop
+		if (last_order_beg >= last_order_end)
+			break;
+	}
+
+	// build ind2der map
+	for (unsigned int i = 0; i < indices.size(); i++)
+		ind2der.insert(Tfmiintmap::value_type(indices[i], i));
+
+}
+
+FormulaDerivatives::FormulaDerivatives(const FormulaDerivatives& fd)
+	: tder(fd.tder), indices(fd.indices), ind2der(fd.ind2der),
+	  nvar(fd.nvar), order(fd.order)
+{
+}
+
+int FormulaDerivatives::derivative(const FoldMultiIndex& mi) const
+{
+	if (mi.order() > order)
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "Wrong order of multi-index in FormulaDerivatives::derivative");
+	if (mi.nv() != nvar)
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "Wrong multi-index variables in FormulaDerivatives::derivative");
+
+	Tfmiintmap::const_iterator it = ind2der.find(mi);
+	if (it == ind2der.end())
+		return OperationTree::zero;
+	else
+		return tder[(*it).second];
+}
+
+void FormulaDerivatives::print(const OperationTree& otree) const
+{
+	for (Tfmiintmap::const_iterator it = ind2der.begin();
+		 it != ind2der.end(); ++it) {
+		printf("derivative ");
+		(*it).first.print();
+		printf(" is formula %d\n", tder[(*it).second]);
+		otree.print_operation(tder[(*it).second]);
+	}
+}
+
+void FormulaCustomEvaluator::eval(const AtomValues& av, FormulaEvalLoader& loader)
+{
+	etree.reset_all();
+	av.setValues(etree);
+	for (unsigned int i = 0; i < terms.size(); i++) {
+		double res = etree.eval(terms[i]);
+		loader.load((int)i, res);
+	}
+}
+
+FoldMultiIndex::FoldMultiIndex(int nv)
+	: nvar(nv), ord(0), data(new int[ord])
+{
+}
+
+FoldMultiIndex::FoldMultiIndex(int nv, int ordd, int ii)
+	: nvar(nv), ord(ordd), data(new int[ord])
+{
+	for (int i = 0; i < ord; i++)
+		data[i] = ii;
+}
+
+/** Note that a monotone sequence mapped by monotone mapping yields a
+ * monotone sequence. */
+FoldMultiIndex::FoldMultiIndex(int nv, const FoldMultiIndex& mi, const vector<int>& mp)
+	: nvar(nv), ord(mi.ord), data(new int[ord])
+{
+	for (int i = 0; i < ord; i++) {
+		if (i < ord-1 && mp[i+1] < mp[i])
+			throw ogu::Exception(__FILE__,__LINE__,
+								 "Mapping not monotone in FoldMultiIndex constructor");
+		if (mp[mi[i]] >= nv || mp[mi[i]] < 0)
+			throw ogu::Exception(__FILE__,__LINE__,
+								 "Mapping out of bounds in FoldMultiIndex constructor");
+		data[i] = mp[mi[i]];
+	}
+}
+
+FoldMultiIndex::FoldMultiIndex(const FoldMultiIndex& fmi, int new_orders)
+	: nvar(fmi.nvar),
+	  ord(fmi.ord+new_orders),
+	  data(new int[ord])
+{
+	memcpy(data, fmi.data, fmi.ord*sizeof(int));
+	int new_item = (fmi.ord > 0)? fmi.data[fmi.ord-1] : 0;
+	for (int i = fmi.ord; i < ord; i++) {
+		data[i] = new_item;
+	}
+}
+
+FoldMultiIndex::FoldMultiIndex(const FoldMultiIndex& fmi)
+	: nvar(fmi.nvar),
+	  ord(fmi.ord),
+	  data(new int[fmi.ord])
+{
+	memcpy(data, fmi.data, ord*sizeof(int));
+}
+
+const FoldMultiIndex& FoldMultiIndex::operator=(const FoldMultiIndex& fmi)
+{
+	if (ord != fmi.ord) {
+		delete [] data;
+		data = new int[fmi.ord];
+	}
+
+	ord = fmi.ord;
+	nvar = fmi.nvar;
+	memcpy(data, fmi.data, ord*sizeof(int));
+
+	return *this;
+}
+
+bool FoldMultiIndex::operator<(const FoldMultiIndex& fmi) const
+{
+	if (nvar != fmi.nvar)
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "Different nvar in FoldMultiIndex::operator<");
+
+	if (ord < fmi.ord)
+		return true;
+	if (ord > fmi.ord)
+		return false;
+
+	int i = 0;
+	while (i < ord && data[i] == fmi.data[i])
+		i++;
+	if (i == ord)
+		return false;
+	else
+		return data[i] < fmi.data[i];
+}
+
+bool FoldMultiIndex::operator==(const FoldMultiIndex& fmi) const
+{
+	bool res = true;
+	res = res && (nvar == fmi.nvar) && (ord == fmi.ord);
+	if (res)
+		for (int i = 0; i < ord; i++)
+			if (data[i] != fmi.data[i])
+				return false;
+	return res;
+}
+
+void FoldMultiIndex::increment()
+{
+	if (ord == 0)
+		return;
+
+	int k = ord-1;
+	data[k]++;
+	while (k > 0 && data[k] == nvar) {
+		data[k] = 0;
+		data[--k]++;
+	}
+	for (int kk = 1; kk < ord; kk++)
+		if (data[kk-1] > data[kk])
+			data[kk] = data[kk-1];
+}
+
+
+// For description of an algorithm for calculation of folded offset,
+// see Tensor Library Documentation, Ondra Kamenik, 2005, description
+// of FTensor::getOffsetRecurse().
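+//
+// As an illustration (not from that documentation): with nvar=3 and
+// order 2, the folded indices are enumerated [0,0], [0,1], [0,2],
+// [1,1], [1,2], [2,2], so the offset of [1,2] is 4.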
+int FoldMultiIndex::offset() const
+{
+	// make copy for the recursions
+	int* tmp = new int[ord];
+	for (int i = 0; i < ord; i++)
+		tmp[i] = data[i];
+	// call the recursive algorithm
+	int res = offset_recurse(tmp, ord, nvar);
+
+	delete [] tmp;
+	return res;
+}
+
+void FoldMultiIndex::print() const
+{
+	printf("[");
+	for (int i = 0; i < ord; i++)
+		printf("%d ", data[i]);
+	printf("]");
+}
+
+int FoldMultiIndex::offset_recurse(int* data, int len, int nv)
+{
+	if (len == 0)
+		return 0;
+	// calculate length of initial constant indices
+	int prefix = 1;
+	while (prefix < len && data[0] == data[prefix])
+		prefix++;
+
+	int m = data[0];
+	int s1 = ptriang.noverk(nv+len-1, len) - ptriang.noverk(nv-m+len-1,len);
+
+	// cancel m from the rest of the sequence
+	for (int i = prefix; i < len; i++)
+		data[i] -= m;
+
+	// calculate offset of the remaining sequence
+	int s2 = offset_recurse(data+prefix, len-prefix, nv-m);
+	// return the sum
+	return s1+s2;
+}
+
+
+bool ltfmi::operator()(const FoldMultiIndex& i1, const FoldMultiIndex& i2) const
+{
+	return i1 < i2;
+}
+
+
+FormulaDerEvaluator::FormulaDerEvaluator(const FormulaParser& fp)
+	: etree(fp.otree, -1)
+{
+	for (unsigned int i = 0; i < fp.ders.size(); i++)
+		ders.push_back((const FormulaDerivatives*)(fp.ders[i]));
+
+	der_atoms = fp.atoms.variables();
+}
+
+void FormulaDerEvaluator::eval(const AtomValues& av, FormulaDerEvalLoader& loader, int order)
+{
+	if (ders.size() == 0)
+		return;
+	int maxorder = ders[0]->order;
+
+	if (order > maxorder)
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "Wrong order in FormulaDerEvaluator::eval");
+
+	etree.reset_all();
+	av.setValues(etree);
+
+	int* vars = new int[order];
+
+	for (unsigned int i = 0; i < ders.size(); i++) {
+		for (FormulaDerivatives::Tfmiintmap::const_iterator it = ders[i]->ind2der.begin();
+			 it != ders[i]->ind2der.end(); ++it) {
+			const FoldMultiIndex& mi = (*it).first;
+			if (mi.order() == order) {
+				// set vars from multiindex mi and variables
+				for (int k = 0; k < order; k++)
+					vars[k] = der_atoms[mi[k]];
+				// evaluate
+				double res = etree.eval(ders[i]->tder[(*it).second]);
+				// load
+				loader.load(i, order, vars, res);
+			}
+		}
+	}
+
+	delete [] vars;
+}
+
+void FormulaDerEvaluator::eval(const vector<int>& mp, const AtomValues& av,
+							   FormulaDerEvalLoader& loader, int order)
+{
+	etree.reset_all();
+	av.setValues(etree);
+
+	int nvar_glob = der_atoms.size();
+	int nvar = mp.size();
+	int* vars = new int[order];
+
+	for (unsigned int i = 0; i < ders.size(); i++) {
+		FoldMultiIndex mi(nvar, order);
+		do {
+			// find index of the derivative in the tensor
+			FoldMultiIndex mi_glob(nvar_glob, mi, mp);
+			int der = ders[i]->derivative(mi_glob);
+			if (der != OperationTree::zero) {
+				// set vars from the global multiindex
+				for (int k = 0; k < order; k++)
+					vars[k] = der_atoms[mi_glob[k]];
+				// evaluate derivative
+				double res = etree.eval(der);
+				// load
+				loader.load(i, order, vars, res);
+			}
+			mi.increment();
+		} while (! mi.past_the_end());
+	}
+
+	delete [] vars;
+}
+
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/parser/cc/formula_parser.h b/dynare++/parser/cc/formula_parser.h
new file mode 100644
index 0000000000000000000000000000000000000000..691b2d4e0043f8fb4f934015a004908f3d3b4fd6
--- /dev/null
+++ b/dynare++/parser/cc/formula_parser.h
@@ -0,0 +1,418 @@
+// Copyright (C) 2005, Ondra Kamenik
+
+// $Id: formula_parser.h 1760 2008-03-31 14:26:35Z kamenik $
+
+#ifndef OGP_FORMULA_PARSER_H
+#define OGP_FORMULA_PARSER_H
+
+#include "tree.h"
+
+namespace ogp {
+	using std::vector;
+
+	/** Pure virtual class defining a minimal interface for
+	 * representation of nulary terms within FormulaParser. */
+	class Atoms {
+	public:
+		Atoms() {}
+		virtual ~Atoms() {}
+		/** This returns previously assigned internal index to the
+		 * given atom, or returns -1 if the atom has not been assigned
+		 * yet. The method can raise an exception, if the Atoms
+		 * implementation is strict and the name is not among
+		 * prescribed possible values. */
+		virtual int check(const char* name) const = 0;
+		/** This method assigns an internal index to the nulary term
+		 * described by the name. The internal index is allocated by
+		 * OperationTree class. */
+		virtual void assign(const char* name, int t) = 0;
+		/** Returns a number of variables which will be used for
+		 * differentiations. */
+		virtual int nvar() const = 0;
+		/** Returns a vector of variable's internal indices which will
+		 * be used for differentiations. */
+		virtual vector<int> variables() const = 0;
+		/** Debug print. */
+		virtual void print() const = 0;
+	};
+
+	/** Pure virtual class defining interface for all classes able to
+	 * set nulary terms to evaluation tree EvalTree. The
+	 * implementations of this class will have to be connected with
+	 * Atoms to have knowledge about the atoms and their indices in
+	 * the tree, and will call EvalTree::set_nulary. */
+	class AtomValues {
+	public:
+		virtual ~AtomValues() {}
+		virtual void setValues(EvalTree& et) const = 0;
+	};
+
+	class FormulaDerEvaluator;
+	class FoldMultiIndex;
+	/** For ordering FoldMultiIndex in the std::map. */
+	struct ltfmi {
+		bool operator()(const FoldMultiIndex& i1, const FoldMultiIndex& i2) const;
+	};
+
+	/** This class stores derivatives (tree indices) of one formula
+	 * for all orders upto a given one. It stores the derivatives as a
+	 * sequence (vector) of these tree indices and sequence of the
+	 * multidimensional indices of variables wrt which the derivatives
+	 * were taken. In order to speed up querying for a derivative
+	 * given the variables, we have a map mapping the multidimensional
+	 * index to the order of the derivative in the sequence.
+	 * 
+	 * The only reason we do not keep only this map is that the
+	 * iterators of the map do not survive insertions to the map,
+	 * which would make the implementation of the constructor very
+	 * difficult.
+	 */
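+	/** As a hypothetical illustration: for two variables x, y and
+	 * max_order 2, tder would hold the tree indices of f, f_x, f_y,
+	 * f_xx, f_xy and f_yy (derivatives which simplify to zero are
+	 * omitted), and ind2der would map the multiindex [0,1] to the
+	 * position of f_xy within that sequence. */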
+	class FormulaDerivatives {
+		friend class FormulaDerEvaluator;
+	protected:
+		/** Vector of derivatives. This is a list of derivatives (tree
+		 * indices), the ordering is given by the algorithm used to
+		 * create it. Currently, it starts with zero-th derivative,
+		 * the formula itself and carries with first order, second,
+		 * etc. */
+		vector<int> tder;
+		/** Vector of multiindices corresponding to the vector of
+		 * derivatives. */
+		vector<FoldMultiIndex> indices;
+		/** For retrieving derivatives via a multiindex, we have a map
+		 * mapping a multiindex to a derivative in the tder
+		 * ordering. This means that indices[ind2der[index]] == index. */
+		typedef map<FoldMultiIndex, int, ltfmi> Tfmiintmap;
+		Tfmiintmap ind2der;
+		/** The number of variables. */
+		int nvar;
+		/** The maximum order of derivatives. */
+		int order;
+	public:
+		/** The constructor allocates and fills the sequence of the
+		 * indices of derivatives for a formula.
+		 * @param otree the OperationTree for which all work is done
+		 * and to which the derivatives are added.
+		 * @param vars the vector of nulary terms in the tree; the
+		 * derivatives are taken with respect to these variables in
+		 * the ordering given by the vector.
+		 * @param f the index of the formula being differentiated. The
+		 * zero derivative is set to f.
+		 * @param max_order the maximum order of differentiation.
+		 */ 
+		FormulaDerivatives(OperationTree& otree, const vector<int>& vars, int f, int max_order);
+		/** Copy constructor. */
+		FormulaDerivatives(const FormulaDerivatives& fd);
+		virtual ~FormulaDerivatives(){}
+		/** Random access to the derivatives via multiindex. */
+		int derivative(const FoldMultiIndex& mi) const;
+		/** Return the order. */
+		int get_order() const
+			{return order;}
+		/** Debug print. */
+		void print(const OperationTree& otree) const;
+	};
+
+	class FormulaEvaluator;
+
+	/** This class is able to parse a number of formulas and
+	 * differentiate them. The life cycle of the object is as follows:
+	 * after it is created, a few calls to parse() add formulas
+	 * (zero derivatives) to the object. Then the method differentiate()
+	 * can be called and a vector of pointers to derivatives for each
+	 * formula is created. After this, neither parse() nor
+	 * differentiate() should be called again. A const reference to the
+	 * object can be used in the constructors of FormulaEvaluator and
+	 * FormulaDerEvaluator in order to evaluate formulas (zero
+	 * derivatives) and higher derivatives respectively. */
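+	/** A minimal usage sketch (illustrative only; MyAtoms stands for
+	 * some concrete Atoms implementation and is not defined here):
+	 *
+	 *   MyAtoms atoms;
+	 *   FormulaParser fp(atoms);
+	 *   const char* txt = "a*x + y;";
+	 *   fp.parse(strlen(txt), txt);   // adds one formula
+	 *   fp.differentiate(2);          // derivatives up to order 2
+	 *   const FormulaDerivatives& fd = fp.derivatives(0);
+	 */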
+	class FormulaParser {
+		friend class FormulaCustomEvaluator;
+		friend class FormulaDerEvaluator;
+	protected:
+		/** The OperationTree of all formulas, including derivatives. */
+		OperationTree otree;
+		/** Reference to Atoms. The Atoms are filled with nulary terms
+		 * during execution of parse(). */
+		Atoms& atoms;
+		/** Vector of formulas (zero derivatives) in the order as they
+		 * have been parsed. */
+		vector<int> formulas;
+		/** The vector of derivatives; each element corresponds to a
+		 * formula in the formulas vector. */
+		vector<FormulaDerivatives*> ders;
+	public:
+		/** Construct an empty formula parser. */
+		FormulaParser(Atoms& a)
+			: atoms(a) {}
+		/** Copy constructor using a different instance of Atoms. */
+		FormulaParser(const FormulaParser& fp, Atoms& a);
+		virtual ~FormulaParser();
+
+		/** Requires an addition of the formula; called from the
+		 * parser. */
+		void add_formula(int t);
+		/** Requires an addition of the binary operation; called from
+		 * the parser. */
+		int add_binary(code_t code, int t1, int t2);
+		/** Requires an addition of the unary operation; called from
+		 * the parser. */
+		int add_unary(code_t code, int t);
+		/** Requires an addition of the nulary operation given by the
+		 * string. The Atoms are consulted for uniqueness and are given
+		 * an internal index generated by the OperationTree. This is
+		 * the channel through which the Atoms are filled. */
+		int add_nulary(const char* str);
+
+		/** Adds a derivative to the tree. This just calls
+		 * OperationTree::add_derivative. */
+		int add_derivative(int t, int v)
+			{return otree.add_derivative(t, v);}
+		/** Adds a substitution. This just calls
+		 * OperationTree::add_substitution. */
+		int add_substitution(int t, const map<int,int>& subst)
+			{return otree.add_substitution(t, subst);}
+		/** Add the substitution given by the map where left sides of
+		 * substitutions come from another parser. The right sides are
+		 * from this object. The given t is from the given parser fp. */
+		int add_substitution(int t, const map<int,int>& subst,
+							 const FormulaParser& fp)
+			{return otree.add_substitution(t, subst, fp.otree);}
+		/** This adds formulas from the given parser with (possibly)
+		 * different atoms applying substitutions from the given map
+		 * mapping atoms from fp to atoms of the object. */
+		void add_subst_formulas(const map<int,int>& subst, const FormulaParser& fp);
+		/** Substitute formulas. For each i from 1 through all
+		 * formulas, it adds a substitution of the i-th formula and
+		 * makes it the i-th formula. */
+		void substitute_formulas(const std::map<int,int>& subst);
+		/** This method turns the given term into a nulary
+		 * operation. It should be used with caution, since this
+		 * method does not do anything with atoms, but usually some
+		 * action is also needed (at least to assign the tree index t
+		 * to some atom). */
+		void nularify(int t)
+			{otree.nularify(t);}
+		/** Returns a set of nulary terms of the given term. Just
+		 * calls OperationTree::nulary_of_term. */
+		const hash_set<int>& nulary_of_term(int t) const
+			{return otree.nulary_of_term(t);}
+
+		/** Parse a given string containing one or more formulas. The
+		 * formulas are parsed and added to the OperationTree and to
+		 * the formulas vector. */
+		void parse(int length, const char* stream);
+		/** Processes a syntax error from bison. */
+		void error(const char* mes) const;
+		/** Differentiate all the formulas up to the given order. The
+		 * variables with respect to which the derivatives are taken
+		 * are obtained by Atoms::variables(). If the derivatives exist,
+		 * they are destroyed and created again (with possibly
+		 * different order). */
+		void differentiate(int max_order);
+		/** Return i-th formula derivatives. */
+		const FormulaDerivatives& derivatives(int i) const;
+
+		/** This returns a maximum index of zero derivative formulas
+		 * including all nulary terms. This is a minimum length of the
+		 * tree for which it is safe to evaluate zero derivatives of
+		 * the formulas. */
+		int last_formula() const;
+		/** This returns a tree index of the i-th formula in the
+		 * vector. */
+		int formula(int i) const
+			{return formulas[i];}
+
+
+		/** This returns a tree index of the last formula and pops its
+		 * item from the formulas vector. The number of formulas is
+		 * then less by one. Returns -1 if there is no formula. If
+		 * there are derivatives of the last formula, they are
+		 * destroyed and the vector ders is popped from the back. */
+		int pop_last_formula();
+
+		/** This returns a number of formulas. */
+		int nformulas() const
+			{return (int)(formulas.size());}
+
+		/** This returns a reference to atoms. */
+		const Atoms& getAtoms() const
+			{return atoms;}
+		Atoms& getAtoms()
+			{return atoms;}
+		/** This returns the tree. */
+		const OperationTree& getTree() const
+			{return otree;}
+		OperationTree& getTree()
+			{return otree;}
+
+		/** Debug print. */
+		void print() const;
+	private:
+		/** Hide this copy constructor declaration by declaring it as
+		 * private. */
+		FormulaParser(const FormulaParser& fp);
+		/** Destroy all derivatives. */
+		void destroy_derivatives();
+	};
+
+	/** This is a pure virtual class defining an interface for all
+	 * classes which will load the results of formula (zero
+	 * derivative) evaluations. A primitive implementation of this
+	 * class can be a vector of doubles. */
+	class FormulaEvalLoader {
+	public:
+		virtual ~FormulaEvalLoader() {}
+		/** Set the value res for the given formula. The formula is
+		 * identified by an index corresponding to the ordering in
+		 * which the formulas have been parsed (starting from
+		 * zero). */
+		virtual void load(int i, double res) = 0;
+	};
+
+	/** This class evaluates a selected subset of terms of the
+	 * tree. In the protected constructor, one can constrain the
+	 * initialization of the evaluation tree to a given number of
+	 * terms in the beginning. Using this constructor, one has to make
+	 * sure that the terms in the beginning do not refer to terms
+	 * behind the initial part. */
+	class FormulaCustomEvaluator {
+	protected:
+		/** The evaluation tree. */
+		EvalTree etree;
+		/** The custom tree indices to be evaluated. */
+		vector<int> terms;
+	public:
+		/** Construct from FormulaParser and given list of terms. */
+		FormulaCustomEvaluator(const FormulaParser& fp, const vector<int>& ts)
+			: etree(fp.otree), terms(ts)
+			{}
+		/** Construct from OperationTree and given list of terms. */
+		FormulaCustomEvaluator(const OperationTree& ot, const vector<int>& ts)
+			: etree(ot), terms(ts)
+			{}
+		/** Evaluate the terms using the given AtomValues and load the
+		 * results using the given loader. The loader is called for
+		 * each term in the order of the terms. */
+		void eval(const AtomValues& av, FormulaEvalLoader& loader);
+	protected:
+		FormulaCustomEvaluator(const FormulaParser& fp)
+			: etree(fp.otree, fp.last_formula()), terms(fp.formulas)
+			{}
+	};
+
+	/** This class evaluates zero derivatives of the FormulaParser. */
+	class FormulaEvaluator : public FormulaCustomEvaluator {
+	public:
+		/** Construct from FormulaParser. */
+		FormulaEvaluator(const FormulaParser& fp)
+			: FormulaCustomEvaluator(fp) {}
+	};
+
+	/** This is a pure virtual class defining an interface for all
+	 * classes which will load the results of formula derivative
+	 * evaluations. */
+	class FormulaDerEvalLoader {
+	public:
+		virtual ~FormulaDerEvalLoader() {}
+		/** This loads the result of the derivative of the given
+		 * order. The semantics of i is the same as in
+		 * FormulaEvalLoader::load. The indices of variables with
+		 * respect to which the derivative was taken are stored in
+		 * the memory pointed to by vars. These are the tree indices of the
+		 * variables. */
+		virtual void load(int i, int order, const int* vars, double res) = 0;
+	};
+
+	/** This class is a utility class representing the tensor
+	 * multiindex. It can basically increment itself, and calculate
+	 * its offset in the folded tensor. */
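+	/** For instance (illustration only), with nv=3 and order 2,
+	 * repeated calls to increment() starting from [0,0] visit [0,1],
+	 * [0,2], [1,1], [1,2], [2,2] and then past_the_end() becomes
+	 * true; offset() of each index equals its position in this
+	 * enumeration. */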
+	class FoldMultiIndex {
+		/** Number of variables. */
+		int nvar;
+		/** Dimension. */
+		int ord;
+		/** The multiindex. */
+		int* data;
+	public:
+		/** Initializes to the zero derivative. Order is 0, data is
+		 * empty. */
+		FoldMultiIndex(int nv);
+		/** Initializes the multiindex to zeros or given i. */
+		FoldMultiIndex(int nv, int order, int i = 0);
+		/** Makes a new multiindex of the same order applying a given
+		 * mapping to the indices. The mapping is supposed to be monotone. */
+		FoldMultiIndex(int nv, const FoldMultiIndex& mi, const vector<int>& mp);
+		/** Shifting constructor. This adds a given number of orders
+		 * to the end, copying the last item to the newly added items,
+		 * keeping the index ordered. If the index was empty (zero-th
+		 * dimension), then zeros are added. */
+		FoldMultiIndex(const FoldMultiIndex& fmi, int new_orders);
+		/** Copy constructor. */
+		FoldMultiIndex(const FoldMultiIndex& fmi);
+		/** Destructor. */
+		virtual ~FoldMultiIndex()
+			{delete [] data;}
+		/** Assignment operator. */
+		const FoldMultiIndex& operator=(const FoldMultiIndex& fmi);
+		/** Operator < implementing lexicographic ordering within one
+		 * order, increasing order across orders. */
+		bool operator<(const FoldMultiIndex& fmi) const;
+		bool operator==(const FoldMultiIndex& fmi) const;
+		/** Increment the multiindex. */
+		void increment();
+		/** Return offset of the multiindex in the folded tensor. */ 
+		int offset() const;
+		const int& operator[](int i) const
+			{return data[i];}
+		/** Return order of the multiindex, i.e. dimension of the
+		 * tensor. */ 
+		int order() const
+			{return ord;}
+		/** Return the number of variables. */
+		int nv() const
+			{return nvar;}
+		/** Return the data. */
+		const int* ind() const
+			{return data;}
+		/** Return true if the end of the tensor is reached. The
+		 * result of a subsequent increment should be considered
+		 * unpredictable. */
+		bool past_the_end() const
+			{return (ord == 0) || (data[0] == nvar);}
+		/** Prints the multiindex in the brackets. */
+		void print() const;
+	private:
+		static int offset_recurse(int* data, int len, int nv);
+	};
+
+	/** This class evaluates derivatives of the FormulaParser. */
+	class FormulaDerEvaluator {
+		/** Its own instance of EvalTree. */
+		EvalTree etree;
+		/** The indices of derivatives for each formula. This is a
+		 * const copy of FormulaParser::ders. We do not allocate nor
+		 * deallocate anything here. */
+		vector<const FormulaDerivatives*> ders;
+		/** A copy of the tree indices corresponding to the atoms with
+		 * respect to which the derivatives were taken. */
+		vector<int> der_atoms;
+	public:
+		/** Construct the object from FormulaParser. */
+		FormulaDerEvaluator(const FormulaParser& fp);
+		/** Evaluate the derivatives from the FormulaParser wrt all
+		 * atoms in the variables vector at the given AtomValues. The
+		 * given loader is used for output. */
+		void eval(const AtomValues& av, FormulaDerEvalLoader& loader, int order);
+		/** Evaluate the derivatives from the FormulaParser wrt a
+		 * selection of the atoms in the der_atoms vector at the
+		 * given AtomValues. The selection is given by a monotone
+		 * mapping to the indices (not values) of the der_atoms. */
+		void eval(const vector<int>& mp, const AtomValues& av, FormulaDerEvalLoader& loader,
+				  int order);
+	};
+};
+
+#endif
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/parser/cc/location.h b/dynare++/parser/cc/location.h
new file mode 100644
index 0000000000000000000000000000000000000000..55182942bc337483f273ce1e2840656ce87f06cd
--- /dev/null
+++ b/dynare++/parser/cc/location.h
@@ -0,0 +1,46 @@
+// Copyright (C) 2006, Ondra Kamenik
+
+// $Id: location.h 762 2006-05-22 13:00:07Z kamenik $
+
+// Purpose: This file defines macros for lex and bison so that very
+// primitive location tracking is enabled. The location of a token is
+// given by the offset of its first character. The offset is relative
+// to the number which is (and must be) initialized before
+// parsing. This file is to be included at the top of bison and lex
+// sources.
+
+// How to use: in the preamble of bison and flex, you must include
+// this file and declare extern YYLTYPE prefix##lloc. In addition, in
+// flex, you must define int prefix##ll = 0; and use the macro
+// SET_LLOC(prefix) in EVERY action consuming material (this can be
+// done with #define YY_USER_ACTION), and in bison you must use the
+// option %locations.
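+//
+// For example, a lexer generated with prefix "foo_" would put into
+// its preamble (this mirrors what formula.lex does for "fmla_"):
+//
+//   extern YYLTYPE foo_lloc;
+//   #define YY_USER_ACTION SET_LLOC(foo_);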
+
+
+#ifndef OG_LOCATION_H
+#define OG_LOCATION_H
+
+namespace ogp {
+
+	struct location_type {
+		int off; // offset of the token
+		int ll; // length of the token
+		location_type() : off(0), ll(0) {}
+	};
+
+};
+
+#define YYLTYPE ogp::location_type
+
+// set current off to the first off and add all lengths
+#define YYLLOC_DEFAULT(Current, Rhs, N) \
+  {(Current).off    =  (Rhs)[1].off;    \
+   (Current).ll     =  0;               \
+   for (int i = 1; i <= N; i++) (Current).ll += (Rhs)[i].ll;}
+
+#define SET_LLOC(prefix) (prefix##lloc.off += prefix##lloc.ll, prefix##lloc.ll = prefix##leng)
+
+#endif
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/parser/cc/matrix.lex b/dynare++/parser/cc/matrix.lex
new file mode 100644
index 0000000000000000000000000000000000000000..637b553ce1f19fc5a68654962129660d8273035c
--- /dev/null
+++ b/dynare++/parser/cc/matrix.lex
@@ -0,0 +1,59 @@
+%{
+#include "location.h"
+#include "matrix_tab.hh"
+
+	extern YYLTYPE matrix_lloc;
+	extern void matrix_error(char*);
+
+#define YY_USER_ACTION SET_LLOC(matrix_);
+%}
+
+%option nounput
+%option noyy_top_state
+%option stack
+%option yylineno
+%option prefix="matrix_"
+%option never-interactive
+%x CMT
+
+%%
+
+ /* comments */
+<*>"/*"              {yy_push_state(CMT);}
+<CMT>[^*\n]*
+<CMT>"*"+[^*/\n]*
+<CMT>"*"+"/"         {yy_pop_state();}
+<CMT>[\n]
+"//".*\n
+
+ /* ignore spaces and commas */
+[ \t,]
+ /* new row */
+\r\n                 {return NEW_ROW;}
+\n                   {return NEW_ROW;}
+;[ \t]*\n            {return NEW_ROW;}
+;[ \t]*\r\n          {return NEW_ROW;}
+;                    {return NEW_ROW;}
+
+[+-]?(([0-9]*\.?[0-9]+)|([0-9]+\.))([edED][-+]?[0-9]+)? {
+	matrix_lval.val = strtod(matrix_text, NULL);
+	return DNUMBER;
+}
+
+. {
+	char mes[300];
+	sprintf(mes, "Unrecognized character %s", matrix_text);
+	matrix_error(mes); 
+}
+
+%%
+
+int matrix_wrap()
+{
+	return 1;
+}
+
+void matrix__destroy_buffer(void* p)
+{
+	matrix__delete_buffer((YY_BUFFER_STATE)p);
+}
diff --git a/dynare++/parser/cc/matrix.y b/dynare++/parser/cc/matrix.y
new file mode 100644
index 0000000000000000000000000000000000000000..d82bdc450e3b36f6939f011e52fdda40b0752d68
--- /dev/null
+++ b/dynare++/parser/cc/matrix.y
@@ -0,0 +1,66 @@
+%{
+#include "location.h"
+#include "matrix_parser.h" 
+#include "matrix_tab.hh"
+
+	void matrix_error(char*);
+	int matrix_lex(void);
+	extern int matrix_lineno;
+	extern ogp::MatrixParser* mparser;
+	extern YYLTYPE matrix_lloc;
+
+//	static void print_token_value (FILE *, int, YYSTYPE);
+//#define YYPRINT(file, type, value) print_token_value (file, type, value)
+
+%}
+
+%union {
+	double val;
+	int integer;
+}
+
+%token NEW_ROW
+%token <val> DNUMBER
+
+%name-prefix="matrix_"
+
+%locations
+%error-verbose
+
+%%
+
+matrix : first_row other_rows
+    | first_row other_rows empty_rows
+    | first_row empty_rows other_rows empty_rows
+    | first_row empty_rows other_rows
+    | empty_rows first_row other_rows
+    | empty_rows first_row other_rows empty_rows
+    | empty_rows first_row empty_rows other_rows empty_rows
+    | empty_rows first_row empty_rows
+    | first_row empty_rows
+    | empty_rows first_row
+    | first_row
+    | empty_rows
+    ;
+
+empty_rows : empty_rows NEW_ROW | NEW_ROW;
+
+lod : DNUMBER {mparser->add_item($1);}
+    | lod DNUMBER {mparser->add_item($2);}
+    ;
+
+first_row : lod;
+
+other_rows : other_rows one_row | other_rows empty_rows one_row | one_row ;
+
+one_row : NEW_ROW {mparser->start_row();} lod;
+
+
+%%
+
+void matrix_error(char* s)
+{
+	mparser->error(s);
+}
+
+
diff --git a/dynare++/parser/cc/matrix_parser.cpp b/dynare++/parser/cc/matrix_parser.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..4e37516d361fabe317fd4936260a5370a041131f
--- /dev/null
+++ b/dynare++/parser/cc/matrix_parser.cpp
@@ -0,0 +1,101 @@
+// Copyright (C) 2006, Ondra Kamenik
+
+// $Id: matrix_parser.cpp 2269 2008-11-23 14:33:22Z michel $
+
+#include "parser_exception.h"
+#include "matrix_parser.h"
+#include "location.h"
+#include "matrix_tab.hh"
+#include <cstring>
+
+using namespace ogp;
+
+/** A global symbol for passing info to the MatrixParser from
+ * matrix_parse(). */
+MatrixParser* mparser;
+
+/** The declaration of functions defined in matrix_ll.cc and
+ * matrix_tab.cc generated from matrix.lex and matrix.y. */
+void* matrix__scan_buffer(char*, size_t);
+void matrix__destroy_buffer(void*);
+void matrix_parse();
+extern ogp::location_type matrix_lloc;
+
+void MatrixParser::parse(int length, const char* stream)
+{
+	// reinitialize the object
+	data.clear();
+	row_lengths.clear();
+	nc = 0;
+	// allocate temporary buffer and parse
+	char* buffer = new char[length+2];
+	strncpy(buffer, stream, length);
+	buffer[length] = '\0';
+	buffer[length+1] = '\0';
+	matrix_lloc.off = 0;
+	matrix_lloc.ll = 0;
+	void* p = matrix__scan_buffer(buffer, (unsigned int)length+2);
+	mparser = this;
+	matrix_parse();
+	delete [] buffer;
+	matrix__destroy_buffer(p);
+}
+
+void MatrixParser::add_item(double v)
+{
+	data.push_back(v);
+	if (row_lengths.size() == 0)
+		row_lengths.push_back(0);
+	(row_lengths.back())++;
+	if (row_lengths.back() > nc)
+		nc = row_lengths.back();
+}
+
+void MatrixParser::start_row()
+{
+	row_lengths.push_back(0);
+}
+
+void MatrixParser::error(const char* mes) const
+{
+	throw ParserException(mes, matrix_lloc.off);
+}
+
+int MatrixParser::find_first_non_empty_row(int start) const
+{
+	int r = start;
+	while (r < (int)row_lengths.size() && row_lengths[r] == 0)
+		r++;
+	return r;
+}
+
+MPIterator MatrixParser::begin() const
+{
+	MPIterator it(*this);
+	return it;
+}
+
+MPIterator MatrixParser::end() const
+{
+	MPIterator it(*this, "end");
+	return it;
+}
+
+MPIterator::MPIterator(const MatrixParser& mp)
+	: p(&mp), i(0), c(0), r(mp.find_first_non_empty_row())
+{}
+
+MPIterator::MPIterator(const MatrixParser& mp, const char* dummy)
+	: p(&mp), i(mp.data.size()), c(0), r(mp.row_lengths.size())
+{}
+
+MPIterator& MPIterator::operator++()
+{
+	i++;
+	c++;
+	if (p->row_lengths[r] <= c) {
+		c = 0;
+		r = p->find_first_non_empty_row(r+1);
+	}
+	return *this;
+}
diff --git a/dynare++/parser/cc/matrix_parser.h b/dynare++/parser/cc/matrix_parser.h
new file mode 100644
index 0000000000000000000000000000000000000000..f253a2f3751ef60be38b3ebe2e6c622babb7e21d
--- /dev/null
+++ b/dynare++/parser/cc/matrix_parser.h
@@ -0,0 +1,118 @@
+// Copyright (C) 2006, Ondra Kamenik
+
+// $Id: matrix_parser.h 762 2006-05-22 13:00:07Z kamenik $
+
+#ifndef OGP_MATRIX_PARSER
+#define OGP_MATRIX_PARSER
+
+#include <vector>
+
+namespace ogp {
+	using std::vector;
+
+	/** This class reads the given string and parses it as a
+	 * matrix. The matrix is read row by row. The row delimiter is
+	 * either a newline character or a semicolon (the first newline
+	 * character after the semicolon is ignored), the column delimiter
+	 * is either a blank character or a comma. Differing numbers of
+	 * items in the rows are not reconciled; we do not construct a
+	 * matrix here. The class provides only an iterator to go through
+	 * all read items; the iterator provides information on the row
+	 * and column numbers of each item. */
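+	/** A short usage sketch (illustrative only):
+	 *
+	 *   MatrixParser mp;
+	 *   const char* txt = "1 2 3; 4 5 6";
+	 *   mp.parse(strlen(txt), txt);
+	 *   for (MPIterator it = mp.begin(); it != mp.end(); ++it)
+	 *       printf("(%d,%d)=%g\n", it.row(), it.col(), *it);
+	 */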
+	class MPIterator;
+	class MatrixParser {
+		friend class MPIterator;
+	protected:
+		/** Raw data as they were read. */
+		vector<double> data;
+		/** Number of items in each row. */
+		vector<int> row_lengths;
+		/** Maximum row length, i.e. the number of columns. */
+		int nc;
+	public:
+		MatrixParser()
+			: nc(0) {}
+		MatrixParser(const MatrixParser& mp)
+			: data(mp.data), row_lengths(mp.row_lengths), nc(mp.nc) {}
+		virtual ~MatrixParser() {}
+		/** Return a number of read rows. */
+		int nrows() const
+			{return (int) row_lengths.size();}
+		/** Return a maximum number of items in the rows. */
+		int ncols() const
+			{return nc;}
+		/** Parses the given data. This reinitializes the object's data. */
+		void parse(int length, const char* stream);
+		/** Adds newly read item. This should be called from bison
+		 * parser. */
+		void add_item(double v);
+		/** Starts a new row. This should be called from bison
+		 * parser. */
+		void start_row();
+		/** Process a parse error from the parser. */
+		void error(const char* mes) const;
+		/** Return begin iterator. */
+		MPIterator begin() const;
+		/** Return end iterator. */
+		MPIterator end() const;
+	protected:
+		/** Returns an index of the first non-empty row starting at
+		 * start. If the start row is non-empty, returns the start. If
+		 * there is no other non-empty row, returns
+		 * row_lengths.size(). */
+		int find_first_non_empty_row(int start = 0) const;
+	};
+
+	/** This is an iterator intended to iterate through a matrix parsed
+	 * by MatrixParser. The iterator provides only read-only access. */
+	class MPIterator {
+		friend class MatrixParser;
+	protected:
+		/** Reference to the matrix parser. */
+		const MatrixParser* p;
+		/** The index of the pointed item in the matrix parser. */
+		unsigned int i;
+		/** The column number of the pointed item starting from zero. */
+		int c;
+		/** The row number of the pointed item starting from zero. */
+		int r;
+
+	public:
+		MPIterator() : p(NULL), i(0), c(0), r(0) {}
+		/** Constructs an iterator pointing to the beginning of the
+		 * parsed matrix. */
+		MPIterator(const MatrixParser& mp);
+		/** Constructs an iterator pointing to the past-the-end of the
+		 * parsed matrix. */
+		MPIterator(const MatrixParser& mp, const char* dummy);
+		/** Return read-only reference to the pointed item. */
+		const double& operator*() const
+			{return p->data[i];}
+		/** Return a row index of the pointed item. */
+		int row() const
+			{return r;}
+		/** Return a column index of the pointed item. */
+		int col() const
+			{return c;}
+		/** Assignment operator. */
+		const MPIterator& operator=(const MPIterator& it)
+			{p = it.p; i = it.i; c = it.c; r = it.r; return *this;}
+		/** Return true if the iterators are the same, that is, if they
+		 * have the same underlying object and the same item index. */ 
+		bool operator==(const MPIterator& it) const
+			{return it.p == p && it.i == i;}
+		/** Negative of the operator==. */
+		bool operator!=(const MPIterator& it) const
+			{return ! (it == *this);} 
+		/** Increment operator. */
+		MPIterator& operator++();
+	};
+};
+
+
+
+#endif
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/parser/cc/namelist.cpp b/dynare++/parser/cc/namelist.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1b304def55f61d4245ed2e829c19470fce38e1e0
--- /dev/null
+++ b/dynare++/parser/cc/namelist.cpp
@@ -0,0 +1,30 @@
+// Copyright (C) 2006, Ondra Kamenik
+
+// $Id: namelist.cpp 42 2007-01-22 21:53:24Z ondra $
+
+#include "namelist.h"
+
+#include <string.h>
+
+using namespace ogp;
+
+/** A global symbol for passing info to NameListParser from its
+ * parser. */
+NameListParser* name_list_parser;
+
+void* namelist__scan_buffer(char*, unsigned int);
+void namelist__destroy_buffer(void*);
+void namelist_parse();
+
+void NameListParser::namelist_parse(int length, const char* stream)
+{
+	char* buffer = new char[length+2];
+	strncpy(buffer, stream, length);
+	buffer[length] = '\0';
+	buffer[length+1] = '\0';
+	void* p = namelist__scan_buffer(buffer, (unsigned int)length+2);
+	name_list_parser = this;
+	::namelist_parse();
+	delete [] buffer;
+	namelist__destroy_buffer(p);
+}
diff --git a/dynare++/parser/cc/namelist.h b/dynare++/parser/cc/namelist.h
new file mode 100644
index 0000000000000000000000000000000000000000..0bde1232582221c4477ddcdb0073f35569d6c486
--- /dev/null
+++ b/dynare++/parser/cc/namelist.h
@@ -0,0 +1,32 @@
+// Copyright (C) 2007, Ondra Kamenik
+
+// $Id: namelist.h 107 2007-05-10 22:35:04Z ondra $
+
+#ifndef OGP_NAMELIST
+#define OGP_NAMELIST
+
+namespace ogp {
+
+	/** Parent class of all parsers parsing a namelist. They must
+	 * implement the add_name() method and the namelist_error()
+	 * method, which is called when a parse error occurs.
+	 *
+	 * Parsing a name list is done as follows: implement the
+	 * NameListParser interface, create the object, and call
+	 * NameListParser::namelist_parse(int length, const char*
+	 * text). When implementing namelist_error(), one may consult the
+	 * global location_type namelist_lloc. */
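+	/** A minimal sketch of such an implementation (illustrative only):
+	 *
+	 *   class MyNames : public ogp::NameListParser {
+	 *       std::vector<std::string> names;
+	 *   public:
+	 *       void add_name(const char* name)
+	 *           {names.push_back(name);}
+	 *       void namelist_error(const char* mes)
+	 *           {throw std::runtime_error(mes);}
+	 *   };
+	 */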
+	class NameListParser {
+	public:
+		virtual ~NameListParser() {}
+		virtual void add_name(const char* name) = 0;
+		virtual void namelist_error(const char* mes) = 0;
+		void namelist_parse(int length, const char* text);
+	};
+};
+
+#endif
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/parser/cc/namelist.lex b/dynare++/parser/cc/namelist.lex
new file mode 100644
index 0000000000000000000000000000000000000000..0a14f904f97dc0068f678ff562f283c4cb8782f0
--- /dev/null
+++ b/dynare++/parser/cc/namelist.lex
@@ -0,0 +1,52 @@
+%{
+#include "location.h"
+#include "namelist_tab.hh"
+
+	extern YYLTYPE namelist_lloc;
+
+#define YY_USER_ACTION SET_LLOC(namelist_);
+%}
+
+%option nounput
+%option noyy_top_state
+%option stack
+%option prefix="namelist_"
+%option never-interactive
+%x CMT
+
+%%
+
+ /* comments */
+<*>"/*"            {yy_push_state(CMT);}
+<CMT>[^*\n]*
+<CMT>"*"+[^*/\n]*
+<CMT>"*"+"/"       {yy_pop_state();}
+<CMT>[\n]
+"//".*\n
+
+ /* whitespace and stray null characters are ignored */
+[ \t\r\n\0]
+
+ /* names */
+[A-Za-z_][A-Za-z0-9_]* {
+	namelist_lval.string = namelist_text;
+	return NAME;
+}
+
+,                  {return COMMA;}
+. {
+	namelist_lval.character = namelist_text[0];
+	return CHARACTER;
+}
+
+%%
+
+int namelist_wrap()
+{
+	return 1;
+}
+
+void namelist__destroy_buffer(void* p)
+{
+	namelist__delete_buffer((YY_BUFFER_STATE)p);
+}
diff --git a/dynare++/parser/cc/namelist.y b/dynare++/parser/cc/namelist.y
new file mode 100644
index 0000000000000000000000000000000000000000..629eb26ab09780f2f162593cf0afff474e7f2896
--- /dev/null
+++ b/dynare++/parser/cc/namelist.y
@@ -0,0 +1,38 @@
+%{
+#include "location.h"
+#include "namelist.h"
+#include "namelist_tab.hh"
+
+	int namelist_error(char*);
+	int namelist_lex(void);
+	extern ogp::NameListParser* name_list_parser;
+
+%}
+
+%union {
+	int integer;
+	char *string;
+	char character;
+}
+
+%token COMMA CHARACTER
+%token <string> NAME;
+
+%name-prefix="namelist_"
+
+%locations
+%error-verbose
+
+%%
+
+namelist : namelist NAME       {name_list_parser->add_name($2);}
+         | namelist COMMA NAME {name_list_parser->add_name($3);}
+         | NAME                {name_list_parser->add_name($1);}
+         ;
+
+%%
+
+int namelist_error(char* mes)
+{
+	name_list_parser->namelist_error(mes);
+	return 0;
+}
diff --git a/dynare++/parser/cc/parser_exception.cpp b/dynare++/parser/cc/parser_exception.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..f6ee32ccc1f5297ce6ff4ef36f8d0d62d0fab69a
--- /dev/null
+++ b/dynare++/parser/cc/parser_exception.cpp
@@ -0,0 +1,116 @@
+// Copyright (C) 2006, Ondra Kamenik
+
+// $Id: parser_exception.cpp 2269 2008-11-23 14:33:22Z michel $
+
+#include "parser_exception.h"
+#include <cstring>
+
+using namespace ogp;
+
+ParserException::ParserException(const char* m, int offset)
+	: mes(new char[strlen(m)+1]), off(offset),
+	  aux_i1(-1), aux_i2(-1), aux_i3(-1)
+{
+	strcpy(mes, m);
+}
+
+ParserException::ParserException(const string& m, int offset)
+	: mes(new char[m.size()+1]), off(offset),
+	  aux_i1(-1), aux_i2(-1), aux_i3(-1)
+{
+	strncpy(mes, m.c_str(), m.size());
+	mes[m.size()] = '\0';
+}
+
+ParserException::ParserException(const string& m, const char* dum, int i1)
+	: mes(new char[m.size()+1]), off(0),
+	  aux_i1(i1), aux_i2(-1), aux_i3(-1)
+{
+	strncpy(mes, m.c_str(), m.size());
+	mes[m.size()] = '\0';
+}
+
+ParserException::ParserException(const string& m, const char* dum, int i1, int i2) 
+	: mes(new char[m.size()+1]), off(0),
+	  aux_i1(i1), aux_i2(i2), aux_i3(-1)
+{
+	strncpy(mes, m.c_str(), m.size());
+	mes[m.size()] = '\0';
+}
+
+ParserException::ParserException(const string& m, const char* dum, int i1, int i2, int i3) 
+	: mes(new char[m.size()+1]), off(0),
+	  aux_i1(i1), aux_i2(i2), aux_i3(i3)
+{
+	strncpy(mes, m.c_str(), m.size());
+	mes[m.size()] = '\0';
+}
+
+ParserException::ParserException(const ParserException& m, int plus_offset)
+	: mes(NULL),
+	  aux_i1(-1), aux_i2(-1), aux_i3(-1)
+{
+	copy(m);
+	off += plus_offset;
+}
+
+ParserException::ParserException(const ParserException& m, const char* dum, int i)
+	: mes(NULL),
+	  aux_i1(-1), aux_i2(-1), aux_i3(-1)
+{
+	copy(m);
+	aux_i3 = m.aux_i2;
+	aux_i2 = m.aux_i1;
+	aux_i1 = i;
+}
+
+ParserException::ParserException(const ParserException& m, const char* dum, int i1, int i2)
+	: mes(NULL),
+	  aux_i1(-1), aux_i2(-1), aux_i3(-1)
+{
+	copy(m);
+	aux_i3 = m.aux_i1;
+	aux_i2 = i2;
+	aux_i1 = i1;
+}
+
+ParserException::ParserException(const ParserException& m, const char* dum, int i1, int i2, int i3)
+	: mes(NULL),
+	  aux_i1(-1), aux_i2(-1), aux_i3(-1)
+{
+	copy(m);
+	aux_i3 = i3;
+	aux_i2 = i2;
+	aux_i1 = i1;
+}
+
+
+ParserException::ParserException(const ParserException& e)
+	: mes(NULL),
+	  aux_i1(-1), aux_i2(-1), aux_i3(-1)
+{
+	copy(e);
+} 
+
+ParserException::~ParserException()
+{
+	delete [] mes;
+}
+
+void ParserException::copy(const ParserException& e)
+{
+	if (mes)
+		delete [] mes;
+	mes = new char[strlen(e.mes)+1];
+	strcpy(mes, e.mes);
+	off = e.off;
+	aux_i1 = e.aux_i1;
+	aux_i2 = e.aux_i2;
+	aux_i3 = e.aux_i3;
+}
+
+void ParserException::print(FILE* fd) const
+{
+	// todo: to be refined
+	fprintf(fd, "%s: offset %d\n", mes, off);
+}
diff --git a/dynare++/parser/cc/parser_exception.h b/dynare++/parser/cc/parser_exception.h
new file mode 100644
index 0000000000000000000000000000000000000000..2a8668a93037fae03dab9acbb1d69fa4772eaf5c
--- /dev/null
+++ b/dynare++/parser/cc/parser_exception.h
@@ -0,0 +1,71 @@
+// Copyright (C) 2006, Ondra Kamenik
+
+// $Id: parser_exception.h 1761 2008-03-31 14:27:13Z kamenik $
+
+#ifndef OG_FORMULA_PARSER_H
+#define OG_FORMULA_PARSER_H
+
+#include <string>
+#include <cstdio>
+
+namespace ogp {
+	using std::string;
+
+	/** This is a simple exception which, besides the message, also
+	 * stores an offset of the parse error. Since we might need to
+	 * track the argument number and, for example, the field in the
+	 * argument which caused the error, we add three integers which
+	 * have no semantics here. They should be documented in the
+	 * function which throws the exception and sets them. Their
+	 * default value is -1, which means they have not been set. */
+	class ParserException {
+	protected:
+		char* mes;
+		int off;
+		int aux_i1;
+		int aux_i2;
+		int aux_i3;
+	public:
+		ParserException(const char* m, int offset);
+		ParserException(const string& m, int offset);
+		ParserException(const string& m, const char* dum, int i1);
+		ParserException(const string& m, const char* dum, int i1, int i2);
+		ParserException(const string& m, const char* dum, int i1, int i2, int i3);
+		ParserException(const ParserException& e, int plus_offset);
+		/** Makes a copy and pushes the given integer to aux_i1,
+		 * shuffling the others and forgetting the last. */
+		ParserException(const ParserException& e, const char* dum, int i);
+		/** Makes a copy and pushes the given two integers to aux_i1 and
+		 * aux_i2, shuffling the others and forgetting the last two. */
+		ParserException(const ParserException& e, const char* dum, int i1, int i2);
+		/** Makes a copy and pushes the given three integers to aux_i1,
+		 * aux_i2 and aux_i3, shuffling the others and forgetting the last three. */
+		ParserException(const ParserException& e, const char* dum, int i1, int i2, int i3);
+		ParserException(const ParserException& e);
+		virtual ~ParserException();
+		void print(FILE* fd) const;
+		const char* message() const
+			{return mes;}
+		int offset() const
+			{return off;}
+		const int& i1() const
+			{return aux_i1;}
+		int& i1()
+			{return aux_i1;}
+		const int& i2() const
+			{return aux_i2;}
+		int& i2()
+			{return aux_i2;}
+		const int& i3() const
+			{return aux_i3;}
+		int& i3()
+			{return aux_i3;}
+	protected:
+		void copy(const ParserException& e);
+	};
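+
+	/* A usage sketch (the surrounding function, formula_index and f
+	 * are hypothetical): the shuffling constructors are meant for
+	 * adding context while rethrowing, e.g.
+	 *
+	 *   try {
+	 *       parse_formula(f);
+	 *   } catch (const ParserException& e) {
+	 *       // push the formula number to aux_i1, keep the message
+	 *       throw ParserException(e, "", formula_index);
+	 *   }
+	 */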
+};
+
+#endif
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/parser/cc/static_atoms.cpp b/dynare++/parser/cc/static_atoms.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0151cade68f2c66c2c7a83e6f3f39bc634b3e799
--- /dev/null
+++ b/dynare++/parser/cc/static_atoms.cpp
@@ -0,0 +1,122 @@
+// Copyright (C) 2006, Ondra Kamenik
+
+// $Id: static_atoms.cpp 1360 2007-07-10 11:44:20Z kamenik $
+
+#include "static_atoms.h"
+#include "utils/cc/exception.h"
+
+using namespace ogp;
+
+StaticAtoms::StaticAtoms(const StaticAtoms& a)
+	: Atoms(), Constants(a), varnames(a.varnames),
+	  varorder(), vars(), indices()
+{
+	// fill varorder
+	for (unsigned int i = 0; i < a.varorder.size(); i++) {
+		const char* s = varnames.query(a.varorder[i]);
+		varorder.push_back(s);
+	}
+
+	// fill vars
+	for (Tvarmap::const_iterator it = a.vars.begin();
+		 it != a.vars.end(); ++it) {
+		const char* s = varnames.query((*it).first);
+		vars.insert(Tvarmap::value_type(s, (*it).second));
+	}
+
+	// fill indices
+	for (Tinvmap::const_iterator it = a.indices.begin();
+		 it != a.indices.end(); ++it) {
+		const char* s = varnames.query((*it).second);
+		indices.insert(Tinvmap::value_type((*it).first, s));
+	}
+}
+
+void StaticAtoms::import_atoms(const DynamicAtoms& da, OperationTree& otree, Tintintmap& tmap)
+{
+	Constants::import_constants(da, otree, tmap);
+
+	for (int i = 0; i < da.get_name_storage().num(); i++) {
+		const char* name = da.get_name_storage().get_name(i);
+		register_name(name);
+		int tnew = otree.add_nulary();
+		assign(name, tnew);
+		try {
+			const DynamicAtoms::Tlagmap& lmap = da.lagmap(name);
+			for (DynamicAtoms::Tlagmap::const_iterator it = lmap.begin();
+				 it != lmap.end(); ++it) {
+				int told = (*it).second;
+				tmap.insert(Tintintmap::value_type(told, tnew));
+			}
+		} catch (const ogu::Exception& e) {
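+			// presumably the name has no lag map in the dynamic
+			// atoms (it never occurs there), so there is nothing
+			// to add to tmap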
+		}
+	}
+}
+
+
+int StaticAtoms::check(const char* name) const
+{
+	if (DynamicAtoms::is_string_constant(name)) {
+		return Constants::check(name);
+	} else {
+		return check_variable(name);
+	}
+}
+
+int StaticAtoms::index(const char* name) const
+{
+	Tvarmap::const_iterator it = vars.find(name);
+	if (it == vars.end())
+		return -1;
+	else
+		return (*it).second;
+}
+
+const char* StaticAtoms::inv_index(int t) const
+{
+	Tinvmap::const_iterator it = indices.find(t);
+	if (it == indices.end())
+		return NULL;
+	else
+		return (*it).second;
+}
+
+void StaticAtoms::assign(const char* name, int t)
+{
+	if (DynamicAtoms::is_string_constant(name)) {
+		double val;
+		sscanf(name, "%lf", &val);
+		add_constant(t, val);
+	} else {
+		const char* ss = varnames.insert(name);
+		vars.insert(Tvarmap::value_type(ss, t));
+		indices.insert(Tinvmap::value_type(t, ss));
+	}
+}
+
+vector<int> StaticAtoms::variables() const
+{
+	vector<int> res;
+	for (Tvarmap::const_iterator it = vars.begin();
+		 it != vars.end(); ++it) {
+		res.push_back((*it).second);
+	}
+	return res;
+}
+
+void StaticAtoms::register_name(const char* name)
+{
+	const char* ss = varnames.insert(name);
+	varorder.push_back(ss);
+}
+
+void StaticAtoms::print() const
+{
+	printf("constants:\n");
+	Constants::print();
+	printf("variable names:\n");
+	varnames.print();
+	printf("map to tree indices:\n");
+	for (Tvarmap::const_iterator it = vars.begin(); it != vars.end(); ++it)
+		printf("%s\t->\t%d\n", (*it).first, (*it).second);
+}
diff --git a/dynare++/parser/cc/static_atoms.h b/dynare++/parser/cc/static_atoms.h
new file mode 100644
index 0000000000000000000000000000000000000000..dbe58017a3d2c735658d1aecaeb4ca5d4014fb4a
--- /dev/null
+++ b/dynare++/parser/cc/static_atoms.h
@@ -0,0 +1,89 @@
+// Copyright (C) 2006, Ondra Kamenik
+
+// $Id: static_atoms.h 1218 2007-03-19 21:52:49Z kamenik $
+
+#ifndef OGP_STATIC_ATOMS
+#define OGP_STATIC_ATOMS
+
+#include "dynamic_atoms.h"
+
+namespace ogp {
+
+	class StaticAtoms : public Atoms, public Constants {
+	protected:
+		typedef map<const char*, int, ltstr> Tvarmap;
+		typedef map<int, const char*> Tinvmap;
+		/** Storage for names. */
+		NameStorage varnames;
+		/** Outer order of variables. */
+		vector<const char*> varorder;
+		/** This is the map mapping a variable name to the tree
+		 * index. */
+		Tvarmap vars;
+		/** This is the inverse mapping. It maps a tree index to the
+		 * variable name. */
+		Tinvmap indices;
+	public:
+		StaticAtoms() : Atoms(), Constants(), varnames(), varorder(), vars()
+			{}
+		/* Copy constructor. */
+		StaticAtoms(const StaticAtoms& a);
+		/** Conversion from DynamicAtoms. This takes all atoms from
+		 * the DynamicAtoms and adds their static versions. The new
+		 * tree indices are allocated in the passed OperationTree. The
+		 * whole process is traced in the map mapping old tree indices
+		 * to new tree indices. */
+		StaticAtoms(const DynamicAtoms& da, OperationTree& otree, Tintintmap& tmap)
+			: Atoms(), Constants(), varnames(), varorder(), vars()
+			{import_atoms(da, otree, tmap);}
+		/* Destructor. */
+		virtual ~StaticAtoms() {}
+		/** This imports atoms from dynamic atoms inserting the new
+		 * tree indices to the given tree (including constants). The
+		 * mapping from old atoms to new atoms is traced in tmap. */
+		void import_atoms(const DynamicAtoms& da, OperationTree& otree,
+						  Tintintmap& tmap);
+		/** If the name is a constant, it returns its tree index if the
+		 * constant is registered in Constants, and -1 otherwise. If
+		 * the name is not a constant, it returns the result of
+		 * check_variable, which is implemented by a subclass. */
+		int check(const char* name) const;
+		/** This assigns a given tree index to the variable name. The
+		 * name should have been checked before the call. */
+		void assign(const char* name, int t);
+		int nvar() const
+			{return varnames.num();}
+		/** This returns a vector of all variables. */
+		vector<int> variables() const;
+		/** This returns a tree index of the given variable. */
+		int index(const char* name) const;
+		/** This returns a name from the given tree index. NULL is
+		 * returned if the tree index doesn't exist. */
+		const char* inv_index(int t) const;
+		/** This returns a name in the outer ordering. (There is no other ordering.) */
+		const char* name(int i) const
+			{return varorder[i];}
+		/** Debug print. */
+		void print() const;
+		/** This registers a variable. A subclass can reimplement
+		 * this, for example, to ensure uniqueness of the
+		 * name. However, this method should always be called in
+		 * overriding methods to do the registering job. */
+		virtual void register_name(const char* name);
+		/** Return the name storage to allow querying by other
+		 * classes. */
+		const NameStorage& get_name_storage() const
+			{return varnames;}
+	protected:
+		/** This checks the variable. The implementing subclass might
+		 * want to throw an exception if the variable has not been
+		 * registered. */
+		virtual int check_variable(const char* name) const = 0;
+	};
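+
+	/* A minimal sketch of a concrete subclass (the class name is
+	 * hypothetical): only check_variable has to be supplied, here
+	 * simply returning the stored tree index or -1.
+	 *
+	 *   class SimpleStaticAtoms : public ogp::StaticAtoms {
+	 *   protected:
+	 *       int check_variable(const char* name) const
+	 *           {return index(name);}
+	 *   };
+	 */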
+};
+
+#endif
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/parser/cc/static_fine_atoms.cpp b/dynare++/parser/cc/static_fine_atoms.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9c5f90a14ea441229ba8e7402a871c643b359697
--- /dev/null
+++ b/dynare++/parser/cc/static_fine_atoms.cpp
@@ -0,0 +1,217 @@
+// Copyright (C) 2006, Ondra Kamenik
+
+// $Id: static_fine_atoms.cpp 82 2007-04-19 11:33:30Z ondra $
+
+#include "utils/cc/exception.h"
+
+#include "static_fine_atoms.h"
+#include "parser_exception.h"
+
+using namespace ogp;
+
+StaticFineAtoms::StaticFineAtoms(const StaticFineAtoms& sfa)
+	: StaticAtoms(sfa),
+	  params(), param_outer_map(),
+	  endovars(), endo_outer_map(),
+	  exovars(), exo_outer_map(),
+	  der_atoms(sfa.der_atoms),
+	  endo_atoms_map(sfa.endo_atoms_map),
+	  exo_atoms_map(sfa.exo_atoms_map)
+{
+	for (unsigned int i = 0; i < sfa.params.size(); i++) {
+		const char* name = varnames.query(sfa.params[i]);
+		params.push_back(name);
+		param_outer_map.insert(Tvarintmap::value_type(name, i));
+	}
+
+	for (unsigned int i = 0; i < sfa.endovars.size(); i++) {
+		const char* name = varnames.query(sfa.endovars[i]);
+		endovars.push_back(name);
+		endo_outer_map.insert(Tvarintmap::value_type(name, i));
+	}
+
+	for (unsigned int i = 0; i < sfa.exovars.size(); i++) {
+		const char* name = varnames.query(sfa.exovars[i]);
+		exovars.push_back(name);
+		exo_outer_map.insert(Tvarintmap::value_type(name, i));
+	}
+}
+
+void StaticFineAtoms::import_atoms(const FineAtoms& fa, OperationTree& otree, Tintintmap& tmap)
+{
+	StaticAtoms::import_atoms(fa, otree, tmap);
+
+	// we just need to put parameters, endovars and exovars into the
+	// respective vectors; the names are already in the storage
+
+	// parameters
+	const vector<const char*>& fa_params = fa.get_params();
+	for (unsigned int i = 0; i < fa_params.size(); i++)
+		register_param(fa_params[i]);
+
+	// endogenous
+	const vector<const char*>& fa_endovars = fa.get_endovars();
+	for (unsigned int i = 0; i < fa_endovars.size(); i++)
+		register_endo(fa_endovars[i]);
+
+	// exogenous
+	const vector<const char*>& fa_exovars = fa.get_exovars();
+	for (unsigned int i = 0; i < fa_exovars.size(); i++)
+		register_exo(fa_exovars[i]);
+
+	parsing_finished();
+}
+
+void StaticFineAtoms::import_atoms(const FineAtoms& fa, OperationTree& otree, Tintintmap& tmap,
+								   const char* dummy)
+{
+	StaticAtoms::import_atoms(fa, otree, tmap);
+
+	// we just need to put parameters, endovars and exovars into the
+	// respective vectors; the names are already in the storage
+
+	// parameters
+	const vector<const char*>& fa_params = fa.get_params();
+	for (unsigned int i = 0; i < fa_params.size(); i++)
+		register_param(fa_params[i]);
+
+	// endogenous
+	const vector<const char*>& fa_endovars = fa.get_endovars();
+	for (unsigned int i = 0; i < fa_endovars.size(); i++)
+		register_endo(fa_endovars[fa.y2outer_endo()[i]]);
+
+	// exogenous
+	const vector<const char*>& fa_exovars = fa.get_exovars();
+	for (unsigned int i = 0; i < fa_exovars.size(); i++)
+		register_exo(fa_exovars[fa.y2outer_exo()[i]]);
+
+	parsing_finished();
+}
+
+int StaticFineAtoms::check_variable(const char* name) const
+{
+	const char* ss = varnames.query(name);
+	if (ss == NULL)
+		throw ParserException(string("Variable <")+name+"> not declared.",0);
+	return index(name);
+}
+
+void StaticFineAtoms::parsing_finished()
+{
+	// build der_atoms, and endo_atoms_map and exo_atoms_map
+	der_atoms.clear();
+	endo_atoms_map.clear();
+	exo_atoms_map.clear();
+
+	// go through all endo and exo variables and insert their tree
+	// indices; ignore names whose tree index is -1 (those which are
+	// not referenced)
+	for (unsigned int i = 0; i < endovars.size(); i++) {
+		int t = index(endovars[i]);
+		if (t != -1) {
+			endo_atoms_map.push_back(der_atoms.size());
+			der_atoms.push_back(t);
+		}
+	}
+	for (unsigned int i = 0; i < exovars.size(); i++) {
+		int t = index(exovars[i]);
+		if (t != -1) {
+			exo_atoms_map.push_back(der_atoms.size());
+			der_atoms.push_back(t);
+		}
+	}
+}
+
+int StaticFineAtoms::name2outer_param(const char* name) const
+{
+	Tvarintmap::const_iterator it = param_outer_map.find(name);
+	if (it == param_outer_map.end())
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "Name is not a parameter in StaticFineAtoms::name2outer_param");
+	return (*it).second;
+}
+
+int StaticFineAtoms::name2outer_endo(const char* name) const
+{
+	Tvarintmap::const_iterator it = endo_outer_map.find(name);
+	if (it == endo_outer_map.end())
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "Name is not an endogenous variable in StaticFineAtoms::name2outer_endo");
+	return (*it).second;
+}
+
+int StaticFineAtoms::name2outer_exo(const char* name) const
+{
+	Tvarintmap::const_iterator it = exo_outer_map.find(name);
+	if (it == exo_outer_map.end())
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "Name is not an exogenous variable in StaticFineAtoms::name2outer_exo");
+	return (*it).second;
+}
+
+void StaticFineAtoms::register_uniq_endo(const char* name)
+{
+	if (varnames.query(name))
+		throw ogp::ParserException(string("Endogenous variable <")+name+"> is not unique.",0);
+	const char* ss = varnames.insert(name);
+	register_endo(ss);
+}
+
+void StaticFineAtoms::register_uniq_exo(const char* name)
+{
+	if (varnames.query(name))
+		throw ogp::ParserException(string("Exogenous variable <")+name+"> is not unique.",0);
+	const char* ss = varnames.insert(name);
+	register_exo(ss);
+}
+
+void StaticFineAtoms::register_uniq_param(const char* name)
+{
+	if (varnames.query(name))
+		throw ogp::ParserException(string("Parameter <")+name+"> is not unique.",0);
+	const char* ss = varnames.insert(name);
+	register_param(ss);
+}
+
+void StaticFineAtoms::print() const
+{
+	StaticAtoms::print();
+	printf("endo atoms map:\n");
+	for (unsigned int i = 0; i < endo_atoms_map.size(); i++)
+		printf("%d --> %d\n", i, endo_atoms_map[i]);
+	printf("exo atoms map:\n");
+	for (unsigned int i = 0; i < exo_atoms_map.size(); i++)
+		printf("%d --> %d\n", i, exo_atoms_map[i]);	
+	printf("der atoms:\n");
+	for (unsigned int i = 0; i < der_atoms.size(); i++)
+		printf("%d\t%d\n",i, der_atoms[i]);
+}
+
+void StaticFineAtoms::register_endo(const char* name)
+{
+	const char* ss = varnames.query(name);
+	if (ss == NULL)
+		throw ogp::ParserException(string("Endogenous variable <")
+								   +name+"> not found in storage.",0);
+	endovars.push_back(ss);
+	endo_outer_map.insert(Tvarintmap::value_type(ss, endovars.size()-1));
+}
+
+void StaticFineAtoms::register_exo(const char* name)
+{
+	const char* ss = varnames.query(name);
+	if (ss == NULL)
+		throw ogp::ParserException(string("Exogenous variable <")
+								   +name+"> not found in storage.",0);
+	exovars.push_back(ss);
+	exo_outer_map.insert(Tvarintmap::value_type(ss, exovars.size()-1));
+}
+
+void StaticFineAtoms::register_param(const char* name)
+{
+	const char* ss = varnames.query(name);
+	if (ss == NULL)
+		throw ogp::ParserException(string("Parameter <")+name+"> not found in storage.",0);
+	params.push_back(ss);
+	param_outer_map.insert(Tvarintmap::value_type(ss, params.size()-1));
+}
+
diff --git a/dynare++/parser/cc/static_fine_atoms.h b/dynare++/parser/cc/static_fine_atoms.h
new file mode 100644
index 0000000000000000000000000000000000000000..e80af1eaed9becec0ff9ab0e45ef6d9f38190a7a
--- /dev/null
+++ b/dynare++/parser/cc/static_fine_atoms.h
@@ -0,0 +1,177 @@
+// Copyright (C) 2006, Ondra Kamenik
+
+// $Id: static_fine_atoms.h 42 2007-01-22 21:53:24Z ondra $
+
+#ifndef OGP_STATIC_FINE_ATOMS_H
+#define OGP_STATIC_FINE_ATOMS_H
+
+#include "static_atoms.h"
+#include "fine_atoms.h"
+
+namespace ogp {
+
+	/** This class represents static atoms distinguishing between
+	 * parameters, endogenous and exogenous variables. The class
+	 * maintains also ordering of all three categories (referenced as
+	 * outer or inner, since there is only one ordering). It can be
+	 * constructed either from scratch, or from fine dynamic atoms. In
+	 * the latter case, one can decide if the ordering of this static
+	 * atoms should be internal or external ordering of the original
+	 * dynamic fine atoms. */
+	class StaticFineAtoms : public StaticAtoms {
+	public:
+		typedef map<int,int> Tintintmap;
+	protected:
+		typedef map<const char*, int, ltstr> Tvarintmap;
+	private:
+		/** The vector of parameter names, gives the parameter
+		 * ordering. */
+		vector<const char*> params;
+		/** A map mapping a parameter name to an index in the ordering. */
+		Tvarintmap param_outer_map;
+		/** The vector of endogenous variables. This defines the order
+		 * like parameters. */
+		vector<const char*> endovars;
+		/** A map mapping a name of an endogenous variable to an index
+		 * in the ordering. */
+		Tvarintmap endo_outer_map;
+		/** The vector of exogenous variables. Also defines the order
+		 * like parameters and endovars. */
+		vector<const char*> exovars;
+		/** A map mapping a name of an exogenous variable to an index
+		 * in the outer ordering. */
+		Tvarintmap exo_outer_map;
+		/** This vector defines a set of atoms as tree indices used
+		 * for differentiation. The order of the atoms is the
+		 * concatenation of the outer orderings of endogenous and
+		 * exogenous variables. This vector is set up by
+		 * parsing_finished() and is returned by variables(). */
+		vector<int> der_atoms;
+		/** This is a mapping from endogenous atoms to all atoms in
+		 * der_atoms member. The mapping maps index in endogenous atom
+		 * ordering to index (not value) in der_atoms. It is useful if
+		 * one wants to evaluate derivatives wrt only endogenous
+		 * variables. It is set by parsing_finished(). By definition,
+		 * it is monotone. */
+		vector<int> endo_atoms_map;
+		/** This is a mapping from exogenous atoms to all atoms in
+		 * der_atoms member. It is the same as endo_atoms_map for
+		 * atoms of exogenous variables. */
+		vector<int> exo_atoms_map;		
+	public:
+		StaticFineAtoms() {}
+		/** Copy constructor making a new storage for atom names. */
+		StaticFineAtoms(const StaticFineAtoms& sfa);
+		/** Conversion from dynamic FineAtoms taking its outer
+		 * ordering as the ordering of parameters, endogenous and
+		 * exogenous variables. A byproduct is an integer-to-integer
+		 * map mapping tree indices of the dynamic atoms to tree
+		 * indices of the static atoms. */
+		StaticFineAtoms(const FineAtoms& fa, OperationTree& otree, Tintintmap& tmap)
+			{StaticFineAtoms::import_atoms(fa, otree, tmap);}
+		/** Conversion from dynamic FineAtoms taking its internal
+		 * ordering as the ordering of parameters, endogenous and
+		 * exogenous variables. A byproduct is an integer-to-integer
+		 * map mapping tree indices of the dynamic atoms to tree
+		 * indices of the static atoms. */
+		StaticFineAtoms(const FineAtoms& fa, OperationTree& otree, Tintintmap& tmap,
+						const char* dummy)
+			{StaticFineAtoms::import_atoms(fa, otree, tmap, dummy);}
+		virtual ~StaticFineAtoms() {}
+		/** This adds atoms from dynamic atoms inserting new tree
+		 * indices to the given tree and tracing the mapping from old
+		 * atoms to new atoms in tmap. The ordering of the static
+		 * atoms is the same as outer ordering of dynamic atoms. */
+		void import_atoms(const FineAtoms& fa, OperationTree& otree, Tintintmap& tmap);
+		/** This adds atoms from dynamic atoms inserting new tree
+		 * indices to the given tree and tracing the mapping from old
+		 * atoms to new atoms in tmap. The ordering of the static
+		 * atoms is the same as internal ordering of dynamic atoms. */
+		void import_atoms(const FineAtoms& fa, OperationTree& otree, Tintintmap& tmap,
+						  const char* dummy);
+		/** Overrides StaticAtoms::check_variable so that an error is
+		 * raised if the variable name is not declared. A variable is
+		 * declared by inserting it into StaticAtoms::varnames, which
+		 * is done with the registering methods. This is a
+		 * responsibility of a subclass. */
+		int check_variable(const char* name) const;
+		/** Return an (external) ordering of parameters. */
+		const vector<const char*>& get_params() const
+			{return params;}
+		/** Return an external ordering of endogenous variables. */
+		const vector<const char*>& get_endovars() const
+			{return endovars;}
+		/** Return an external ordering of exogenous variables. */
+		const vector<const char*>& get_exovars() const
+			{return exovars;}
+		/** This constructs der_atoms, endo_atoms_map and
+		 * exo_atoms_map, which can be created only after the parsing
+		 * is finished. */
+		void parsing_finished();
+		/** Return the atoms with respect to which we are going to
+		 * differentiate. */
+		vector<int> variables() const
+			{return der_atoms;}
+		/** Return the endo_atoms_map. */
+		const vector<int>& get_endo_atoms_map() const
+			{return endo_atoms_map;}
+		/** Return the exo_atoms_map. */
+		const vector<int>& get_exo_atoms_map() const
+			{return exo_atoms_map;}
+		/** Return an index in the outer ordering of a given
+		 * parameter. An exception is thrown if the name is not a
+		 * parameter. */
+		int name2outer_param(const char* name) const;
+		/** Return an index in the outer ordering of a given
+		 * endogenous variable. An exception is thrown if the name is
+		 * not an endogenous variable. */
+		int name2outer_endo(const char* name) const;
+		/** Return an index in the outer ordering of a given
+		 * exogenous variable. An exception is thrown if the name is
+		 * not an exogenous variable. */
+		int name2outer_exo(const char* name) const;
+		/** Return the number of endogenous variables. */
+		int ny() const
+			{return endovars.size();}
+		/** Return the number of exogenous variables. */
+		int nexo() const
+			{return (int)exovars.size();}
+		/** Return the number of parameters. */
+		int np() const
+			{return (int)(params.size());}
+		/** Register a unique endogenous variable name. The order of
+		 * calls defines the endo outer ordering. The method is
+		 * virtual, since a subclass may want to do some additional
+		 * action. */
+		virtual void register_uniq_endo(const char* name);
+		/** Register a unique exogenous variable name. The order of
+		 * calls defines the exo outer ordering. The method is
+		 * virtual, since a subclass may want to do some additional
+		 * action. */
+		virtual void register_uniq_exo(const char* name);
+		/** Register a unique parameter name. The order of calls
+		 * defines the param outer ordering. The method is virtual,
+		 * since a subclass may want to do some additional action. */
+		virtual void register_uniq_param(const char* name);
+		/** Debug print. */
+		void print() const;
+	private:
+		/** Add endogenous variable name, which is already in the name
+		 * storage. */
+		void register_endo(const char* name);
+		/** Add exogenous variable name, which is already in the name
+		 * storage. */
+		void register_exo(const char* name);
+		/** Add parameter name, which is already in the name
+		 * storage. */
+		void register_param(const char* name);
+	};
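+
+	/* A sketch of the intended conversion (fa and otree are assumed
+	 * to be an existing ogp::FineAtoms and ogp::OperationTree): the
+	 * three-argument constructor keeps the outer ordering of the
+	 * dynamic atoms, the overload with the dummy argument keeps
+	 * their internal ordering.
+	 *
+	 *   ogp::StaticFineAtoms::Tintintmap tmap;
+	 *   ogp::StaticFineAtoms sa(fa, otree, tmap);
+	 *   // tmap now maps dynamic tree indices to static tree indices
+	 */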
+
+};
+
+#endif
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/parser/cc/tree.cpp b/dynare++/parser/cc/tree.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..62157d3d27aa9c35e7caa40bdea75dd7fd2a4acc
--- /dev/null
+++ b/dynare++/parser/cc/tree.cpp
@@ -0,0 +1,912 @@
+// Copyright (C) 2005, Ondra Kamenik
+
+// $Id: tree.cpp 1762 2008-03-31 14:28:54Z kamenik $ 
+
+#include "utils/cc/exception.h"
+
+#include "tree.h"
+
+#include <stdlib.h>
+#include <math.h>
+
+#include <cmath>
+#include <limits>
+
+using namespace ogp;
+
+
+/** Here we just implement the complementary error function without
+ * declaring it for use from outside this unit. The implementation is
+ * taken from "Numerical Recipes in C", 2nd ed., 1992, p. 221. */
+double erffc(double x)
+{
+	double z = std::abs(x);
+	double t = 1/(1+0.5*z);
+	double r = t*exp(-z*z-1.26551223+t*(1.00002368+t*(0.37409196+t*(0.09678418+t*(-0.18628806+t*(0.27886807+t*(-1.13520398+t*(1.48851587+t*(-0.82215223+t*0.17087277)))))))));
+	return x >= 0 ? r : 2-r;
+}
+
+/** Here we initialize OperationTree to contain only zero, one, nan
+ * and two_over_pi terms. */
+OperationTree::OperationTree()
+{
+	last_nulary = -1;
+	// allocate space for the constants
+	for (int i = 0; i < num_constants; i++)
+		add_nulary();
+}
+
+int OperationTree::add_nulary()
+{
+	int op = terms.size();
+	Operation nulary;
+	terms.push_back(nulary);
+	_Tintset s;
+	s.insert(op);
+	nul_incidence.push_back(s);
+	_Tderivmap empty;
+	derivatives.push_back(empty);
+	last_nulary = op;
+	return op;
+}
+
+int OperationTree::add_unary(code_t code, int op)
+{
+	if (op == zero &&
+		(code == UMINUS ||
+		 code == SIN ||
+		 code == TAN ||
+		 code == SQRT ||
+		 code == ERF))
+		return zero;
+	if ((op == zero && code == LOG) || op == nan)
+		return nan;
+	if (op == zero && (code == EXP ||
+					   code == COS ||
+					   code == ERFC))
+		return one;
+
+	Operation unary(code, op);
+	_Topmap::const_iterator i = ((const _Topmap&)opmap).find(unary);
+	if (i == opmap.end()) {
+		int newop = terms.size();
+		// add to the terms
+		terms.push_back(unary);
+		// copy incidence of the operand
+		nul_incidence.push_back(nul_incidence[op]);
+		// insert it to opmap
+		opmap.insert(_Topval(unary, newop));
+		// add empty map of derivatives
+		_Tderivmap empty;
+		derivatives.push_back(empty);
+		return newop;
+	}
+	return (*i).second;
+}
+
+int OperationTree::add_binary(code_t code, int op1, int op2)
+{
+	// quick exits for special values
+	if (op1 == nan || op2 == nan)
+		return nan;
+	// for plus
+	if (code == PLUS)
+		if (op1 == zero && op2 == zero)
+			return zero;
+		else if (op1 == zero)
+			return op2;
+		else if (op2 == zero)
+			return op1;
+	// for minus
+	if (code == MINUS)
+		if (op1 == zero && op2 == zero)
+			return zero;
+		else if (op1 == zero)
+			return add_unary(UMINUS, op2);
+		else if (op2 == zero)
+			return op1;
+	// for times
+	if (code == TIMES)
+		if (op1 == zero || op2 == zero)
+			return zero;
+		else if (op1 == one)
+			return op2;
+		else if (op2 == one)
+			return op1;
+	// for divide
+	if (code == DIVIDE)
+		if (op1 == op2)
+			return one;
+		else if (op1 == zero)
+			return zero;
+		else if (op2 == zero)
+			return nan;
+	// for power
+	if (code == POWER)
+		if (op1 == zero && op2 == zero)
+			return nan;
+		else if (op1 == zero)
+			return zero;
+		else if (op2 == zero)
+			return one;
+		else if (op1 == one)
+			return one;
+		else if (op2 == one)
+			return op1;
+
+	// order operands of commutative operations
+	if (code == TIMES || code == PLUS)
+		if (op1 > op2) {
+			int tmp = op1;
+			op1 = op2;
+			op2 = tmp;
+		}
+
+	// construct operation and check/add it
+	Operation binary(code, op1, op2);
+	_Topmap::const_iterator i = ((const _Topmap&)opmap).find(binary);
+	if (i == opmap.end()) {
+		int newop = terms.size();
+		terms.push_back(binary);
+		// merge both sets of incident nulary operations
+		nul_incidence.push_back(nul_incidence[op1]);
+		nul_incidence.back().insert(nul_incidence[op2].begin(), nul_incidence[op2].end());
+		// add to opmap
+		opmap.insert(_Topval(binary, newop));
+		// add empty map of derivatives
+		_Tderivmap empty;
+		derivatives.push_back(empty);
+		return newop;
+	}
+	return (*i).second;
+}
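+
+// For illustration, the simplifications above imply identities such
+// as (t being any previously built term distinct from zero)
+//   add_binary(PLUS, OperationTree::zero, t) == t
+//   add_binary(TIMES, t, OperationTree::one) == t
+//   add_binary(DIVIDE, t, OperationTree::zero) == OperationTree::nan
+// so no new term is allocated in these cases.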
+
+int OperationTree::add_derivative(int t, int v)
+{
+	if (t < 0 || t >= (int) terms.size())
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "Wrong value for tree index in OperationTree::add_derivative");
+
+	// quick returns for nulary terms or empty incidence
+	if (terms[t].nary() == 0 && t != v) {
+		return zero;
+	}
+	if (terms[t].nary() == 0 && t == v) {
+		return one;
+	}
+	if (nul_incidence[t].end() == nul_incidence[t].find(v)) {
+		return zero;
+	}
+
+	// quick return if the derivative has been registered
+	_Tderivmap::const_iterator i = derivatives[t].find(v);
+	if (i != derivatives[t].end())
+		return (*i).second;
+
+	int res = -1;
+	switch (terms[t].getCode()) {
+
+	case UMINUS:
+	{
+		int tmp = add_derivative(terms[t].getOp1(), v);
+		res = add_unary(UMINUS, tmp);
+		break;
+	}
+	case LOG:
+	{
+		int tmp = add_derivative(terms[t].getOp1(), v);
+		res = add_binary(DIVIDE, tmp, terms[t].getOp1());
+		break;
+	}
+	case EXP:
+	{
+		int tmp = add_derivative(terms[t].getOp1(), v);
+		res = add_binary(TIMES, t, tmp);
+		break;
+	}
+	case SIN:
+	{
+		int tmp = add_derivative(terms[t].getOp1(), v);
+		res = add_binary(TIMES, add_unary(COS, terms[t].getOp1()), tmp);
+		break;
+	}
+	case COS:
+	{
+		int tmp = add_derivative(terms[t].getOp1(), v);
+		res = add_unary(UMINUS, add_binary(TIMES, add_unary(SIN, terms[t].getOp1()), tmp));
+		break;
+	}
+	case TAN:
+	{
+		int tmp = add_derivative(terms[t].getOp1(), v);
+		int tmp2 = add_unary(COS, terms[t].getOp1());
+		res = add_binary(DIVIDE, tmp, add_binary(TIMES, tmp2, tmp2));
+		break;
+	}
+	case SQRT:
+	{
+		int tmp = add_derivative(terms[t].getOp1(), v);
+		res = add_binary(DIVIDE, tmp,
+						 add_binary(PLUS, t, t));
+		break;
+	}
+	case ERF:
+	{
+		int tmp = add_binary(TIMES, terms[t].getOp1(), terms[t].getOp1());
+		tmp = add_unary(UMINUS, tmp);
+		tmp = add_unary(EXP, tmp);
+		int der = add_derivative(terms[t].getOp1(), v);
+		tmp = add_binary(TIMES, tmp, der);
+		res = add_binary(TIMES, two_over_pi, tmp);
+		break;
+	}
+	case ERFC:
+	{
+		int tmp = add_binary(TIMES, terms[t].getOp1(), terms[t].getOp1());
+		tmp = add_unary(UMINUS, tmp);
+		tmp = add_unary(EXP, tmp);
+		int der = add_derivative(terms[t].getOp1(), v);
+		tmp = add_binary(TIMES, tmp, der);
+		tmp = add_binary(TIMES, two_over_pi, tmp);
+		res = add_unary(UMINUS, tmp);
+		break;
+	}
+	case PLUS:
+	{
+		int tmp1 = add_derivative(terms[t].getOp1(), v);
+		int tmp2 = add_derivative(terms[t].getOp2(), v);
+		res = add_binary(PLUS, tmp1, tmp2);
+		break;
+	}
+	case MINUS:
+	{
+		int tmp1 = add_derivative(terms[t].getOp1(), v);
+		int tmp2 = add_derivative(terms[t].getOp2(), v);
+		res = add_binary(MINUS, tmp1, tmp2);
+		break;
+	}
+	case TIMES:
+	{
+		int tmp1 = add_derivative(terms[t].getOp1(), v);
+		int tmp2 = add_derivative(terms[t].getOp2(), v);
+		int res1 = add_binary(TIMES, terms[t].getOp1(), tmp2);
+		int	res2 = add_binary(TIMES, tmp1, terms[t].getOp2());
+		res = add_binary(PLUS, res1, res2);
+		break;
+	}
+	case DIVIDE:
+	{
+		int tmp1 = add_derivative(terms[t].getOp1(), v);
+		int tmp2 = add_derivative(terms[t].getOp2(), v);
+		if (tmp2 == zero)
+			res = add_binary(DIVIDE, tmp1, terms[t].getOp2());
+		else {
+			int nom = add_binary(MINUS,
+								 add_binary(TIMES, tmp1, terms[t].getOp2()),
+								 add_binary(TIMES, tmp2, terms[t].getOp1()));
+			int den = add_binary(TIMES, terms[t].getOp2(), terms[t].getOp2());
+			res = add_binary(DIVIDE, nom, den);
+		}
+		break;
+	}
+	case POWER:
+	{
+		int tmp1 = add_derivative(terms[t].getOp1(), v);
+		int tmp2 = add_derivative(terms[t].getOp2(), v);
+		int s1 = add_binary(TIMES, tmp2,
+							add_binary(TIMES, t,
+									   add_unary(LOG, terms[t].getOp1())));
+		int s2 = add_binary(TIMES, tmp1,
+							add_binary(TIMES, terms[t].getOp2(),
+									   add_binary(POWER, terms[t].getOp1(),
+												  add_binary(MINUS, terms[t].getOp2(), one))));
+		res = add_binary(PLUS, s1, s2);
+		break;
+	}
+	case NONE:
+		break;
+	}
+
+	if (res == -1)
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "Unknown operation code.");
+
+	register_derivative(t, v, res);
+
+	return res;
+}
+
+int OperationTree::add_substitution(int t, const map<int,int>& subst)
+{
+	return add_substitution(t, subst, *this); 
+}
+
+int OperationTree::add_substitution(int t, const map<int,int>& subst,
+									const OperationTree& otree)
+{
+	// return substitution of t if it is in the map
+	map<int,int>::const_iterator it = subst.find(t);
+	if (subst.end() != it)
+		return (*it).second;
+
+	int nary = otree.terms[t].nary();
+	if (nary == 2) {
+		// return the binary operation of the substituted terms
+		int t1 = add_substitution(otree.terms[t].getOp1(), subst, otree);
+		int t2 = add_substitution(otree.terms[t].getOp2(), subst, otree);
+		return add_binary(otree.terms[t].getCode(), t1, t2);
+	} else if (nary == 1) {
+		// return the unary operation of the substituted term
+		int t1 = add_substitution(otree.terms[t].getOp1(), subst, otree);
+		return add_unary(otree.terms[t].getCode(), t1);
+	} else {
+		// if t is not one of the first num_constants terms and otree
+		// is not this tree, then raise an exception. Otherwise return
+		// t, since it is either a special term (having the same
+		// semantics in both trees), or the trees are the same, hence
+		// t has the same semantics
+		if (t < num_constants || this == &otree)
+			return t;
+		else {
+			throw ogu::Exception(__FILE__,__LINE__,
+								 "Incomplete substitution map in OperationTree::add_substitution");
+			return -1;
+		}
+	}
+}
+
+
+void OperationTree::nularify(int t)
+{
+	// remove the original operation from opmap
+	_Topmap::iterator it = opmap.find(terms[t]);
+	if (it != opmap.end())
+		opmap.erase(it);
+	// turn the operation to nulary
+	Operation nulary_op;
+	terms[t] = nulary_op;
+	// update last nulary
+	if (last_nulary < t)
+		last_nulary = t;
+	// update nul_incidence information for all terms including t
+	update_nul_incidence_after_nularify(t);
+}
+
+void OperationTree::register_derivative(int t, int v, int tder)
+{
+	// todo: might check that the insert inserts a new pair
+	derivatives[t].insert(_Tderivmap::value_type(v, tder));
+}
+
+hash_set<int> OperationTree::select_terms(int t, const opselector& sel) const
+{
+	hash_set<int> subterms;
+	select_terms(t, sel, subterms);
+	return subterms;
+}
+
+void OperationTree::select_terms(int t, const opselector& sel, hash_set<int>& subterms) const
+{
+	const Operation& op = terms[t];
+
+	if (sel(t))
+		subterms.insert(t);
+	else
+		if (op.nary() == 2) {
+			select_terms(op.getOp1(), sel, subterms);
+			select_terms(op.getOp2(), sel, subterms);
+		} else if (op.nary() == 1) {
+			select_terms(op.getOp1(), sel, subterms);
+		}
+}
+
+hash_set<int> OperationTree::select_terms_inv(int t, const opselector& sel) const
+{
+	hash_set<int> subterms;
+	select_terms_inv(t, sel, subterms);
+	return subterms;
+}
+
+bool OperationTree::select_terms_inv(int t, const opselector& sel, hash_set<int>& subterms) const
+{
+	const Operation& op = terms[t];
+
+	if (op.nary() == 2) {
+		bool a1 = select_terms_inv(op.getOp1(), sel, subterms);
+		bool a2 = select_terms_inv(op.getOp2(), sel, subterms);
+		if (a1 && a2 && sel(t)) {
+			subterms.insert(t);
+			return true;
+		}
+	} else if (op.nary() == 1) {
+		bool a1 = select_terms_inv(op.getOp1(), sel, subterms);
+		if (a1 && sel(t)) {
+			subterms.insert(t);
+			return true;
+		}
+	} else {
+		if (sel(t)) {
+			subterms.insert(t);
+			return true;
+		}
+	}
+
+	return false;
+}
+
+void OperationTree::forget_derivative_maps()
+{
+	for (unsigned int i = 0; i < derivatives.size(); i++)
+		derivatives[i].clear();
+}
+
+
+void OperationTree::print_operation_tree(int t, FILE* fd, OperationFormatter& f) const
+{
+	f.format(terms[t], t, fd);
+}
+
+void OperationTree::print_operation(int t) const
+{
+	DefaultOperationFormatter dof(*this);
+	print_operation_tree(t, stdout, dof);
+}
+
+void OperationTree::update_nul_incidence_after_nularify(int t)
+{
+	hash_set<int> updated;
+	for (int tnode = num_constants; tnode < (int)terms.size(); tnode++) {
+		const Operation& op = terms[tnode];
+		if (op.nary() == 2) {
+			int op1 = op.getOp1();
+			int op2 = op.getOp2();
+			if (op1 >= tnode || op2 >= tnode)
+				throw ogu::Exception(__FILE__,__LINE__,
+									 "Tree disorder asserted");
+			bool updated1 = (updated.end() != updated.find(op1));
+			bool updated2 = (updated.end() != updated.find(op2));
+			if (updated1 || updated2) {
+				nul_incidence[tnode] = nul_incidence[op1];
+				nul_incidence[tnode].insert(nul_incidence[op2].begin(), nul_incidence[op2].end());
+				updated.insert(tnode);
+			}
+		} else if (op.nary() == 1) {
+			int op1 = op.getOp1();
+			if (op1 >= tnode)
+				throw ogu::Exception(__FILE__,__LINE__,
+									 "Tree disorder asserted");
+			bool updated1 = (updated.end() != updated.find(op1));
+			if (updated1) {
+				nul_incidence[tnode] = nul_incidence[op1];
+				updated.insert(tnode);
+			}
+		} else if (op.nary() == 0) {
+			if (tnode == t) {
+				nul_incidence[tnode].clear();
+				nul_incidence[tnode].insert(tnode);
+				updated.insert(tnode);
+			}
+		}
+	}
+}
+
+
+EvalTree::EvalTree(const OperationTree& ot, int last)
+	: otree(ot),
+	  values(new double[(last==-1)? ot.terms.size() : last+1]),
+	  flags(new bool[(last==-1)? ot.terms.size() : last+1]),
+	  last_operation((last==-1)? ot.terms.size()-1 : last)
+{
+	if (last_operation < OperationTree::num_constants-1 ||
+		last_operation > (int)ot.terms.size()-1)
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "Wrong last in EvalTree constructor.");
+
+	values[0] = 0.0;
+	flags[0] = true;
+	values[1] = 1.0;
+	flags[1] = true;
+	values[2] = std::numeric_limits<double>::quiet_NaN();
+	flags[2] = true;
+	values[3] = 2.0/sqrt(M_PI);
+	flags[3] = true;
+	// this sets from num_constants on
+	reset_all();
+}
+
+void EvalTree::reset_all()
+{
+	for (int i = OperationTree::num_constants; i <= last_operation; i++)
+		flags[i] = false;
+}
+
+void EvalTree::set_nulary(int t, double val)
+{
+	if (t < 0 || t > last_operation)
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "The tree index out of bounds in EvalTree::set_nulary");
+	if (t < OperationTree::num_constants || otree.terms[t].nary() != 0)
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "The term is not nulary assignable in EvalTree::set_nulary");
+
+	values[t] = val;
+	flags[t] = true;
+}
+
+double EvalTree::eval(int t)
+{
+	if (t < 0 || t > last_operation)
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "The tree index out of bounds in EvalTree::eval");
+	if (otree.terms[t].nary() == 0 && flags[t] == false)
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "Nulary term has not been assigned a value in EvalTree::eval");
+
+	if (! flags[t]) {
+		const Operation& op = otree.terms[t];
+		if (op.nary() == 1) {
+			double r1 = eval(op.getOp1());
+			double res;
+			if (op.getCode() == UMINUS)
+				res = -r1;
+			else if (op.getCode() == LOG)
+				res = log(r1);
+			else if (op.getCode() == EXP)
+				res = exp(r1);
+			else if (op.getCode() == SIN)
+				res = sin(r1);
+			else if (op.getCode() == COS)
+				res = cos(r1);
+			else if (op.getCode() == TAN)
+				res = tan(r1);
+			else if (op.getCode() == SQRT)
+				res = sqrt(r1);
+			else if (op.getCode() == ERF)
+				res = 1-erffc(r1);
+			else if (op.getCode() == ERFC)
+				res = erffc(r1);
+			else {
+				throw ogu::Exception(__FILE__,__LINE__,
+									 "Unknown unary operation code in EvalTree::eval");
+				res = 0.0;
+			}
+			values[t] = res;
+			flags[t] = true;
+		} else if (op.nary() == 2) {
+			double res;
+			if (op.getCode() == PLUS) {
+				double r1 = eval(op.getOp1());
+				double r2 = eval(op.getOp2());
+				res = r1 + r2;
+			} else if (op.getCode() == MINUS) {
+				double r1 = eval(op.getOp1());
+				double r2 = eval(op.getOp2());
+				res = r1 - r2;
+			} else if (op.getCode() == TIMES) {
+				// pick up the less complex operand first
+				unsigned int nul1 = otree.nulary_of_term(op.getOp1()).size();
+				unsigned int nul2 = otree.nulary_of_term(op.getOp2()).size();
+				if (nul1 < nul2) {
+					double r1 = eval(op.getOp1());
+					if (r1 == 0.0)
+						res = 0.0;
+					else {
+						double r2 = eval(op.getOp2());
+						res = r1 * r2;
+					}
+				} else {
+					double r2 = eval(op.getOp2());
+					if (r2 == 0)
+						res = 0.0;
+					else {
+						double r1 = eval(op.getOp1());
+						res = r1*r2;
+					}
+				}
+			} else if (op.getCode() == DIVIDE) {
+				double r1 = eval(op.getOp1());
+				if (r1 == 0)
+					res = 0.0;
+				else {
+					double r2 = eval(op.getOp2());
+					res = r1 / r2;
+				}
+			} else if (op.getCode() == POWER) {
+				// assume the first operand is, on average, the more complex one
+				double r2 = eval(op.getOp2());
+				if (r2 == 0.0)
+					res = 1.0;
+				else {
+					double r1 = eval(op.getOp1());
+					res = pow(r1, r2);
+				}
+			} else {
+				throw ogu::Exception(__FILE__,__LINE__,
+									 "Unknown binary operation code in EvalTree::eval");
+				res = 0.0;
+			}
+			values[t] = res;
+			flags[t] = true;
+		}
+		return values[t];
+	}
+
+	// if (! std::isfinite(values[t]))
+	//	printf("Tree value t=%d is not finite = %f\n", t, values[t]);
+
+	return values[t];
+}
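+
+// A typical evaluation pass (a sketch; otree, the variable index x
+// and the term index expr are assumed to come from earlier
+// add_nulary/add_unary/add_binary calls):
+//
+//   EvalTree et(otree, -1);
+//   et.set_nulary(x, 1.5);      // assign values to all nulary terms used
+//   double val = et.eval(expr); // evaluates and caches subterms
+//
+// Before re-evaluating with new nulary values, call reset_all() and
+// then set_nulary again, otherwise the cached flags keep old results.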
+
+void EvalTree::print() const
+{
+	printf("last_op=%d\n", last_operation);
+	printf("         0     1     2     3     4     5     6     7     8     9\n");
+	printf("----------------------------------------------------------------\n");
+	for (int i = 0; i <= (last_operation+1)/10; i++) {
+		printf("%-3d|", i);
+		int j = 0;
+		while (j < 10 && 10*i+j < last_operation+1) {
+			int k = 10*i+j;
+			if (flags[k])
+				printf(" %5.1g", values[k]);
+			else
+				printf(" -----");
+			j++;
+		}
+		printf("\n");
+	}
+}
+
+void DefaultOperationFormatter::format(const Operation& op, int t, FILE* fd)
+{
+	// add to the stop_set
+	if (stop_set.end() == stop_set.find(t))
+		stop_set.insert(t);
+	else
+		return;
+
+	// call recursively non-nulary terms of the operation
+	if (op.nary() == 2) {
+		int t1 = op.getOp1();
+		const Operation& op1 = otree.terms[t1];
+		int t2 = op.getOp2();
+		const Operation& op2 = otree.terms[t2];
+		if (op1.nary() > 0)
+			format(op1, t1, fd);
+		if (op2.nary() > 0)
+			format(op2, t2, fd);
+	} 
+	if (op.nary() == 1) {
+		int t1 = op.getOp1();
+		const Operation& op1 = otree.terms[t1];
+		if (op1.nary() > 0)
+			format(op1, t1, fd);
+	}
+
+	// print 'term ='
+	format_term(t, fd);
+	fprintf(fd, " = ");
+	if (op.nary() == 0) {
+		format_nulary(t, fd);
+	} else if (op.nary() == 1) {
+		int t1 = op.getOp1();
+		const Operation& op1 = otree.terms[t1];
+		const char* opname = "unknown";
+		switch (op.getCode()) {
+		case UMINUS:
+			opname = "-";
+			break;
+		case LOG:
+			opname = "log";
+			break;
+		case EXP:
+			opname = "exp";
+			break;
+		case SIN:
+			opname = "sin";
+			break;
+		case COS:
+			opname = "cos";
+			break;
+		case TAN:
+			opname = "tan";
+			break;
+		case SQRT:
+			opname = "sqrt";
+			break;
+		case ERF:
+			opname = "erf";
+			break;
+		case ERFC:
+			opname = "erfc";
+			break;
+		default:
+			break;
+		}
+		fprintf(fd, "%s(", opname);
+		if (op1.nary() == 0)
+			format_nulary(t1, fd);
+		else
+			format_term(t1, fd);
+		fprintf(fd, ")");
+	} else {
+		int t1 = op.getOp1();
+		const Operation& op1 = otree.terms[t1];
+		int t2 = op.getOp2();
+		const Operation& op2 = otree.terms[t2];
+		const char* opname = "unknown";
+		switch (op.getCode()) {
+		case PLUS:
+			opname = "+";
+			break;
+		case MINUS:
+			opname = "-";
+			break;
+		case TIMES:
+			opname = "*";
+			break;
+		case DIVIDE:
+			opname = "/";
+			break;
+		case POWER:
+			opname = "^";
+			break;
+		default:
+			break;
+		}
+		if (op1.nary() == 0)
+			format_nulary(t1, fd);
+		else
+			format_term(t1, fd);
+		fprintf(fd, " %s ", opname);
+		if (op2.nary() == 0)
+			format_nulary(t2, fd);
+		else
+			format_term(t2, fd);
+	}
+
+	print_delim(fd);
+
+}
+
+void DefaultOperationFormatter::format_term(int t, FILE* fd) const
+{
+	fprintf(fd, "$%d", t);
+}
+
+void DefaultOperationFormatter::format_nulary(int t, FILE* fd) const
+{
+	if (t == OperationTree::zero)
+		fprintf(fd, "0");
+	else if (t == OperationTree::one)
+		fprintf(fd, "1");
+	else if (t == OperationTree::nan)
+		fprintf(fd, "NaN");
+	else
+		fprintf(fd, "$%d", t);
+}
+
+void DefaultOperationFormatter::print_delim(FILE* fd) const
+{
+	fprintf(fd, ";\n");
+}
+
+std::string OperationStringConvertor::convert(const Operation& op, int t) const
+{
+	if (op.nary() == 0) {
+		if (t < OperationTree::num_constants)
+			if (t == OperationTree::zero)
+				return std::string("0");
+			else if (t == OperationTree::one)
+				return std::string("1");
+			else if (t == OperationTree::nan)
+				return std::string("NaN");
+			else if (t == OperationTree::two_over_pi) {
+				char buf[100];
+				sprintf(buf, "%20.16g", 2.0/std::sqrt(M_PI));
+				return std::string(buf);
+			} else {
+				return std::string("error!error");
+			}
+		else
+			return nulsc.convert(t);
+	} else if (op.nary() == 1) {
+		int t1 = op.getOp1();
+		const Operation& op1 = otree.operation(t1);
+		const char* opname = "unknown";
+		switch (op.getCode()) {
+		case UMINUS:
+			opname = "-";
+			break;
+		case LOG:
+			opname = "log";
+			break;
+		case EXP:
+			opname = "exp";
+			break;
+		case SIN:
+			opname = "sin";
+			break;
+		case COS:
+			opname = "cos";
+			break;
+		case TAN:
+			opname = "tan";
+			break;
+		case SQRT:
+			opname = "sqrt";
+			break;
+		case ERF:
+			opname = "erf";
+			break;
+		case ERFC:
+			opname = "erfc";
+			break;
+		default:
+			break;
+		}
+		std::string s1 = convert(op1, t1);
+		return std::string(opname) + "(" + s1 + ")";
+	} else {
+		int t1 = op.getOp1();
+		const Operation& op1 = otree.operation(t1);
+		int t2 = op.getOp2();
+		const Operation& op2 = otree.operation(t2);
+		const char* opname = "unknown";
+		switch (op.getCode()) {
+		case PLUS:
+			opname = "+";
+			break;
+		case MINUS:
+			opname = "-";
+			break;
+		case TIMES:
+			opname = "*";
+			break;
+		case DIVIDE:
+			opname = "/";
+			break;
+		case POWER:
+			opname = "^";
+			break;
+		default:
+			break;
+		}
+		// decide about parenthesis
+		bool op1_par = true;
+		bool op2_par = true;
+		if (op.getCode() == PLUS) {
+			op1_par = false;
+			op2_par = false;
+		} else if (op.getCode() == MINUS) {
+			op1_par = false;
+			if (op2.getCode() != MINUS && op2.getCode() != PLUS)
+				op2_par = false;
+		} else {
+			if (op1.nary() < 2)
+				op1_par = false;
+			if (op2.nary() < 2)
+				op2_par = false;
+		}
+
+		std::string res;
+		if (op1_par)
+			res += "(";
+		res += convert(op1, t1);
+		if (op1_par)
+			res += ")";
+		res += " ";
+		res += opname;
+		res += " ";
+		if (op2_par)
+			res += "(";
+		res += convert(op2, t2);
+		if (op2_par)
+			res += ")";
+
+		return res;
+	}
+}
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/parser/cc/tree.h b/dynare++/parser/cc/tree.h
new file mode 100644
index 0000000000000000000000000000000000000000..a21fb7773f433d0c212bf7e3f95bb768e8cd4082
--- /dev/null
+++ b/dynare++/parser/cc/tree.h
@@ -0,0 +1,457 @@
+// Copyright (C) 2005, Ondra Kamenik
+
+// $Id: tree.h 1762 2008-03-31 14:28:54Z kamenik $
+
+#ifndef OGP_TREE_H
+#define OGP_TREE_H
+
+#include <vector>
+#include <set>
+#include <map>
+#include <ext/hash_map>
+#include <ext/hash_set>
+
+namespace ogp {
+
+	using __gnu_cxx::hash_set;
+	using __gnu_cxx::hash_map;
+	using __gnu_cxx::hash;
+	using std::vector;
+	using std::set;
+	using std::map;
+
+	/** Enumerator representing nulary, unary and binary operation
+	 * codes. For nulary, 'none' is used. When adding new codes, one
+	 * should update the code of #OperationTree::add_unary,
+	 * #OperationTree::add_binary, and of course
+	 * #OperationTree::add_derivative. */
+	enum code_t {NONE, UMINUS, LOG, EXP, SIN, COS, TAN, SQRT, ERF,
+				 ERFC, PLUS, MINUS, TIMES, DIVIDE, POWER};
+
+	/** Class representing a nulary, unary, or binary operation. */
+	class Operation {
+	protected:
+		/** Code of the operation. */
+		code_t code;
+		/** First operand. If none, then it is -1. */
+		int op1;
+		/** Second operand. If none, then it is -1. */
+		int op2;
+
+	public:
+		/** Constructs a binary operation. */
+		Operation(code_t cd, int oper1, int oper2)
+			: code(cd), op1(oper1), op2(oper2) {}
+		/** Constructs a unary operation. */
+		Operation(code_t cd, int oper1)
+			: code(cd), op1(oper1), op2(-1) {}
+		/** Constructs a nulary operation. */
+		Operation()
+			: code(NONE), op1(-1), op2(-1) {}
+		/** A copy constructor. */
+		Operation(const Operation& op)
+			: code(op.code), op1(op.op1), op2(op.op2) {}
+
+		/** Operator =. */
+		const Operation& operator=(const Operation& op)
+			{
+				code = op.code;
+				op1 = op.op1;
+				op2 = op.op2;
+				return *this;
+			}
+		/** Operator ==. */
+		bool operator==(const Operation& op) const
+			{
+				return code == op.code && op1 == op.op1 && op2 == op.op2;
+			}
+		/** Operator < implementing lexicographic ordering. */
+		bool operator<(const Operation& op) const
+			{
+				return (code < op.code ||
+						(code == op.code &&
+						 (op1 < op.op1 || (op1 == op.op1 && op2 < op.op2))));
+			}
+		/** Returns a number of operands. */
+		int nary() const
+			{
+				return (op2 == -1)? ((op1 == -1) ? 0 : 1) : 2;
+			}
+		/** Returns a hash value of the operation. */
+		size_t hashval() const
+			{
+				return op2+1 + (op1+1)^15 + code^30;
+			}
+
+		code_t getCode() const
+			{ return code; }
+		int getOp1() const
+			{ return op1; }
+		int getOp2() const
+			{ return op2; }
+
+	};
+
+	/** This struct is a predicate for ordering of the operations in
+	 * the OperationTree class. It is now obsolete. */
+	struct ltoper {
+		bool operator()(const Operation& oper1, const Operation& oper2) const
+			{return oper1 < oper2;}
+	};
+
+	/** Hash function object for Operation. */
+	struct ophash {
+		size_t operator()(const Operation& op) const
+			{ return op.hashval(); }
+	};
+
+	/** This struct is a function object selecting some
+	 * operations. The operation is given by a tree index. */
+	struct opselector {
+		virtual bool operator()(int t) const = 0;
+		virtual ~opselector() {}
+	};
+
+	/** Forward declaration of OperationFormatter. */
+	class OperationFormatter;
+	class DefaultOperationFormatter;
+
+	/** Forward declaration of EvalTree to make it friend of OperationTree. */
+	class EvalTree;
+
+	/** Class representing a set of trees for terms. Each term is
+	 * given a unique non-negative integer. The terms are basically
+	 * operations whose (integer) operands point to another terms in
+	 * operations whose (integer) operands point to other terms in
+	 * and binary terms are stored only once. This class guarantees
+	 * the uniqueness. The uniqueness of nulary terms is guaranteed by
+	 * the caller, since at this level of Operation abstraction, one
+	 * cannot discriminate between different nulary operations
+	 * (constants, variables). The uniqueness is enforced by the
+	 * hash_map whose keys are operations and values are integers
+	 * (indices of the terms).
+	 *
+	 * This class can also make derivatives of a given term with
+	 * respect to a given nulary term. In order to be able to quickly
+	 * recognize zero derivatives, we maintain a list of the nulary
+	 * terms contained in each term. A possible zero derivative is
+	 * then quickly recognized by looking at the list. The list is
+	 * implemented as a hash_set of integers.
+	 *
+	 * In addition, a term may be differentiated multiple times wrt
+	 * one variable since it can be referenced multiple times. To
+	 * avoid this, for each term we maintain a map mapping variables
+	 * to the derivatives of the term. As the caller will
+	 * differentiate wrt more and more variables, these maps will
+	 * become richer and richer.
+	 */
+	class OperationTree {
+		friend class EvalTree;
+		friend class DefaultOperationFormatter;
+	protected:
+		/** This is the vector of the terms. An index to this vector
+		 * uniquely determines the term. */
+		vector<Operation> terms;
+
+		/** This defines a type for a map mapping the unary and binary
+		 * operations to their indices. */
+		typedef hash_map<Operation, int, ophash> _Topmap;
+		typedef _Topmap::value_type _Topval;
+
+		/** This is the map mapping the unary and binary operations to
+		 * the indices of the terms.*/
+		_Topmap opmap;
+
+		/** This is a type for a set of integers. */
+		typedef hash_set<int> _Tintset;
+		/** This is a vector of integer sets corresponding to the
+		 * nulary terms contained in the term. */
+		vector<_Tintset> nul_incidence;
+
+		/** This is a type of the map from variables (nulary terms) to
+		 * the terms. */
+		typedef hash_map<int, int> _Tderivmap;
+		/** This is a vector of derivative mappings. For each term, it
+		 * maps variables to the derivatives of the term with respect
+		 * to the variables. */
+		vector<_Tderivmap> derivatives;
+
+		/** The tree index of the last nulary term. */
+		int last_nulary;
+	public:
+		/** This is a number of constants set in the following
+		 * enum. This number reserves space in a vector of terms for
+		 * the constants. */
+		static const int num_constants = 4;
+		/** Enumeration for special terms. We need zero, one, nan and
+		 * 2/sqrt(pi) (named two_over_pi here). These will always be
+		 * the first four terms, having indices zero, one, two and
+		 * three. If adding anything to this enumeration, make sure
+		 * you have updated num_constants above. */
+		enum {zero=0, one=1, nan=2, two_over_pi=3};
+
+		/** The unique constructor which initializes the object to
+		 * contain only the zero, one, nan and two_over_pi terms. */
+		OperationTree();
+
+		/** Copy constructor. */
+		OperationTree(const OperationTree& ot)
+			: terms(ot.terms), opmap(ot.opmap), nul_incidence(ot.nul_incidence),
+			  derivatives(ot.derivatives),
+			  last_nulary(ot.last_nulary)
+			{}
+
+		/** Add a nulary operation. The caller is responsible for not
+		 * inserting two semantically equivalent nulary operations.
+		 * @return newly allocated index
+		 */
+		int add_nulary();
+
+		/** Add a unary operation. Uniqueness is checked; if the
+		 * operation already exists, it is not added.
+		 * @param code the code of the unary operation
+		 * @param op the index of the operand
+		 * @return the index of the operation
+		*/
+		int add_unary(code_t code, int op);
+
+		/** Add a binary operation. The uniqueness is checked; if the
+		 * operation already exists, it is not added again.
+		 * @param code the code of the binary operation
+		 * @param op1 the index of the first operand
+		 * @param op2 the index of the second operand
+		 * @return the index of the operation
+		 */
+		int add_binary(code_t code, int op1, int op2);
+
+		/** Add the derivative of the given term with respect to the
+		 * given nulary operation.
+		 * @param t the index of the operation being differentiated
+		 * @param v the index of the nulary operation
+		 * @return the index of the derivative
+		 */
+		int add_derivative(int t, int v);
+
+		/** Add the substitution given by the map. This adds a new
+		 * term which is equal to the given term with the
+		 * substitutions from the map applied, replacing each term on
+		 * the left by the corresponding term on the right. We do not
+		 * check that the terms on the left are not subterms of the
+		 * terms on the right; if they are, the substituted terms are
+		 * not subject to further substitution. */
+		int add_substitution(int t, const map<int,int>& subst);
+
+		/** Add the substitution given by the map where left sides of
+		 * substitutions come from another tree. The right sides are
+		 * from this tree. The given t is from the given otree. */
+		int add_substitution(int t, const map<int,int>& subst,
+							 const OperationTree& otree);
+
+		/** This method turns the given term into a nulary
+		 * operation. It is the only method which changes an already
+		 * existing term (all other methods add something new). The
+		 * user should use this with caution and must make sure that
+		 * something similar has happened for atoms. In addition, it
+		 * does nothing with derivatives, so it should not be used
+		 * after derivatives have been created; derivatives already
+		 * created and saved in the derivative mappings should be
+		 * forgotten with forget_derivative_maps. */
+		void nularify(int t);
+
+		/** Return the set of nulary terms of the given term. */
+		const hash_set<int>& nulary_of_term(int t) const
+			{return nul_incidence[t];}
+
+		/** Select subterms of the given term according to a given
+		 * operation selector and return the set of terms that
+		 * correspond to the compounded operations. The given term is
+		 * a compound function of the returned subterms and the
+		 * function consists only of operations which yield false in
+		 * the selector. */
+		hash_set<int> select_terms(int t, const opselector& sel) const;
+
+		/** Select subterms of the given term according to a given
+		 * operation selector and return the set of terms that
+		 * correspond to the compounded operations. The given term is
+		 * a compound function of the returned subterms and the
+		 * subterms are maximal subterms consisting of operations
+		 * yielding true in the selector. */
+		hash_set<int> select_terms_inv(int t, const opselector& sel) const;
+
+		/** This forgets all the derivative mappings. It is used after
+		 * a term has been nularified, since then the derivative
+		 * mappings carry wrong information. Note that the derivative
+		 * mappings serve only as a tool for quick returns in
+		 * add_derivative. Resetting the mappings is harmless; all the
+		 * information is rebuilt in add_derivative without any
+		 * additional nodes (trees). */
+		void forget_derivative_maps();
+
+		/** This returns an operation of a given term. */
+		const Operation& operation(int t) const
+			{return terms[t];}
+
+		/** This outputs the operation to the given file descriptor
+		 * using the given OperationFormatter. */
+		void print_operation_tree(int t, FILE* fd, OperationFormatter& f) const;
+
+		/** Debug print of a given operation: */
+		void print_operation(int t) const;
+
+		/** Return the last tree index of a nulary term. */
+		int get_last_nulary() const
+			{return last_nulary;}
+
+		/** Get the number of all operations. */
+		int get_num_op() const
+			{return (int)(terms.size());}
+	private:
+		/** This registers a calculated derivative of the term in the
+		 * #derivatives vector.
+		 * @param t the index of the term for which we register the derivative
+		 * @param v the index of the nulary term (variable) to which
+		 * respect the derivative was taken
+		 * @param tder the index of the resulting derivative
+		 */
+		void register_derivative(int t, int v, int tder);
+		/** This does the same job as select_terms with the only
+		 * difference that it adds the terms to the given set and
+		 * hence can be used recursively. */
+		void select_terms(int t, const opselector& sel, hash_set<int>& subterms) const; 
+		/** This does the same job as select_terms_inv with the only
+		 * difference that it adds the terms to the given set and
+		 * hence can be used recursively. It returns true if the term
+		 * was selected. */
+		bool select_terms_inv(int t, const opselector& sel, hash_set<int>& subterms) const; 
+		/** This updates nul_incidence information after the term t
+		 * was turned into a nulary term, in all terms. It goes through
+		 * the tree from the simplest terms to the more complex ones and
+		 * changes the nul_incidence information where necessary. It
+		 * maintains a set of the terms where changes have been made. */
+		void update_nul_incidence_after_nularify(int t);
+	};
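+
+	/** A minimal usage sketch of OperationTree (not part of the
+	 * original sources). It assumes that PLUS and TIMES are members
+	 * of the code_t enumeration defined earlier in this file; the
+	 * exact spelling of the codes may differ.
+	 *
+	 *   OperationTree tree;
+	 *   int x = tree.add_nulary();               // register variable x
+	 *   int y = tree.add_nulary();               // register variable y
+	 *   int xy = tree.add_binary(TIMES, x, y);   // x*y
+	 *   int f  = tree.add_binary(PLUS, xy, x);   // f = x*y + x
+	 *   int dfdx = tree.add_derivative(f, x);    // term for y + 1
+	 *   // uniqueness: adding x*y again returns the same index
+	 *   int xy2 = tree.add_binary(TIMES, x, y);  // xy2 == xy
+	 */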
+
+	/** The EvalTree class allows for an evaluation of the given tree
+	 * for given values of nulary terms. For each term in the
+	 * OperationTree the class maintains a resulting value and a flag
+	 * saying whether the value has been calculated or set. The life
+	 * cycle of the class is the following: after it is initialized,
+	 * the user must set values for the necessary nulary terms. Then
+	 * the object can be requested to evaluate particular terms.
+	 * During this process, the number of evaluated terms grows. Then
+	 * the user can request an overall reset of the evaluation flags,
+	 * set the nulary terms to new values and evaluate a number of
+	 * terms again.
+	 *
+	 * Note that currently the user cannot request a reset of
+	 * evaluation flags only for those terms depending on a given
+	 * nulary term. This might be added in the future and handled by
+	 * subclasses of OperationTree and EvalTree, since we need
+	 * support for this in OperationTree.
+	 */
+	class EvalTree {
+	protected:
+		/** Reference to the OperationTree over which all evaluations
+		 * are done. */
+		const OperationTree& otree;
+		/** The array of values. */
+		double* const values;
+		/** The array of evaluation flags. */
+		bool* const flags;
+		/** The index of the last operation in the EvalTree. The length
+		 * of values and flags is then last_operation+1. */
+		int last_operation;
+	public:
+		/** Initializes the evaluation tree for the given operation
+		 * tree. If last is greater than -1, then the evaluation tree
+		 * will contain only formulas up to the given last index
+		 * (inclusive). */
+		EvalTree(const OperationTree& otree, int last = -1);
+		virtual ~EvalTree()
+			{ delete [] values; delete [] flags; }
+		/** Set evaluation flag to all terms (besides the first
+		 * special terms) to false. */
+		void reset_all();
+		/** Set value for a given nulary term. */
+		void set_nulary(int t, double val);
+		/** Evaluate the given term with nulary terms set so far. */
+		double eval(int t);
+		/** Debug print. */
+		void print() const;
+		/* Return the operation tree. */
+		const OperationTree& getOperationTree() const
+			{return otree;}
+	private:
+		EvalTree(const EvalTree&);
+	};
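+
+	/** A hedged sketch of the EvalTree life cycle described above;
+	 * tree, x, y and f refer to the OperationTree sketch above.
+	 *
+	 *   EvalTree et(tree);
+	 *   et.set_nulary(x, 2.0);
+	 *   et.set_nulary(y, 3.0);
+	 *   double fval = et.eval(f);   // 2*3 + 2 = 8
+	 *   et.reset_all();             // clear evaluation flags
+	 *   et.set_nulary(x, 1.0);      // set new values
+	 *   et.set_nulary(y, 5.0);
+	 *   fval = et.eval(f);          // 1*5 + 1 = 6
+	 */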
+
+	/** This is an interface describing how a given operation is
+	 * formatted for output. */
+	class OperationFormatter {
+	public:
+		/** Empty virtual destructor. */
+		virtual ~OperationFormatter() {}
+		/** Print the formatted operation op with a given tree index t
+		 * to a given descriptor. (See class OperationTree to learn
+		 * what a tree index is.) This prints the whole tree. It
+		 * always writes an equation: the left hand side is a string
+		 * representation (a variable, temporary, whatever) of the
+		 * term, the right hand side is a string representation of the
+		 * operation (which refers to the string representations
+		 * of subterms). */
+		virtual void format(const Operation& op, int t, FILE* fd)=0;
+	};
+
+	/** The default formatter formats the formulas with a usual syntax
+	 * (for example Matlab). The formatting of atoms and terms might be
+	 * reimplemented by a subclass. In addition, during its life, the
+	 * object maintains a set of tree indices which have already been
+	 * output, so that they are not output again. */
+	class DefaultOperationFormatter : public OperationFormatter {
+	protected:
+		const OperationTree& otree;
+		set<int> stop_set;
+	public:
+		DefaultOperationFormatter(const OperationTree& ot)
+			: otree(ot) {}
+		/** Format the operation with the default syntax. */
+		void format(const Operation& op, int t, FILE* fd);
+		/** This prints a string representation of the given term, for
+		 * example 'tmp10' for term 10. In this implementation it
+		 * prints $10. */
+		virtual void format_term(int t, FILE* fd) const;
+		/** Print a string representation of the nulary term. */
+		virtual void format_nulary(int t, FILE* fd) const;
+		/** Print a delimiter between two statements. By default it is
+		 * "\n". */
+		virtual void print_delim(FILE* fd) const;
+	};
+
+	class NularyStringConvertor {
+	public:
+		virtual ~NularyStringConvertor() {}
+		/** Return the string representation of the atom with the tree
+		 * index t. */
+		virtual std::string convert(int t) const = 0;
+	};
+
+	/** This class converts the given term to its mathematical string representation. */
+	class OperationStringConvertor {
+	protected:
+		const NularyStringConvertor& nulsc;
+		const OperationTree& otree;
+	public:
+		OperationStringConvertor(const NularyStringConvertor& nsc, const OperationTree& ot)
+			: nulsc(nsc), otree(ot) {}
+		/** Empty virtual destructor. */
+		virtual ~OperationStringConvertor() {}
+		/** Convert the operation to the string mathematical
+		 * representation. This does not write any equation, just
+		 * returns a string representation of the formula. */
+		std::string convert(const Operation& op, int t) const;
+	};
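+
+	/** A hedged usage sketch: given an implementation of
+	 * NularyStringConvertor (for instance DynareDynamicAtoms in
+	 * src/dynare_atoms.h implements it) referenced as nulary_conv,
+	 * a term f of tree can be turned into its formula string by
+	 *
+	 *   OperationStringConvertor osc(nulary_conv, tree);
+	 *   std::string formula = osc.convert(tree.operation(f), f);
+	 */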
+};
+
+#endif
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/src/Makefile b/dynare++/src/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..c647d40555a379de724d6d9520daeea89a110b93
--- /dev/null
+++ b/dynare++/src/Makefile
@@ -0,0 +1,220 @@
+ifeq ($(USE_ATLAS),yes)
+	LD_LIBS := -llapack -lcblas -lf77blas -latlas -lg2c -lstdc++
+else ifeq ($(USE_MKL),yes)
+	LD_LIBS := -L /opt//intel/Compiler/11.0/074/mkl/lib/em64t -lmkl_intel_thread -lmkl_lapack -lmkl -lmkl_em64t -L /opt//intel/Compiler/11.0/074/lib/intel64 -lguide -lstdc++
+else
+	LD_LIBS := -llapack -lblas -lg2c -lstdc++
+endif
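+
+# Typical invocations (a sketch; the MKL/ATLAS paths above may need to
+# be adjusted to the local installation):
+#   make                 # optimized build with reference BLAS/LAPACK
+#   make USE_MKL=yes     # link against Intel MKL
+#   make USE_ATLAS=yes   # link against ATLAS
+#   make DEBUG=yes       # debugging build (-g, TL_DEBUG)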
+
+CC_FLAGS = -Wall -I../sylv/cc -I../tl/cc -I../kord -I../integ/cc -I..
+
+DYNVERSION = 1.3.7
+tmpdir = /tmp/aaabbb123321
+pthreadGC2 = `which pthreadGC2.dll`
+mingwm10 = `which mingwm10.dll`
+
+ifeq ($(CC),)
+	CC := gcc
+endif
+
+ifneq ($(LD_LIBRARY_PATH),)	# use LD_LIBRARY_PATH from environment
+	LDFLAGS := -Wl,--library-path $(LD_LIBRARY_PATH) $(LDFLAGS)
+endif
+
+ifeq ($(DEBUG),yes)
+	CC_FLAGS := $(CC_FLAGS) -g -DTL_DEBUG=2
+else
+	CC_FLAGS := $(CC_FLAGS) -O3 -DPOSIX_THREADS
+endif
+
+ifeq ($(OS),Windows_NT)
+	CC_FLAGS := -mno-cygwin -mthreads $(CC_FLAGS)
+	LDFLAGS := -mno-cygwin -mthreads $(LDFLAGS) $(LD_LIBS) -lpthreadGC2
+	ARCH := w32
+	MEX_SUFFIX = dll
+else
+	LDFLAGS := $(LDFLAGS) $(LD_LIBS) -lpthread
+	ARCH := linux
+	MEX_SUFFIX = mexglx
+endif
+
+sylvcppsource := $(wildcard ../sylv/cc/*.cpp)
+sylvhsource := $(wildcard ../sylv/cc/*.h)
+sylvobjects := $(patsubst %.cpp, %.o, $(sylvcppsource))
+
+tlcwebsource := $(wildcard ../tl/cc/*.cweb)
+tlcppsource := $(patsubst %.cweb,%.cpp,$(tlcwebsource))
+tlhwebsource := $(wildcard ../tl/cc/*.hweb)
+tlhsource := $(patsubst %.hweb,%.h,$(tlhwebsource))
+tlobjects := $(patsubst %.cweb,%.o,$(tlcwebsource))
+
+kordcwebsource := $(wildcard ../kord/*.cweb)
+kordcppsource := $(patsubst %.cweb,%.cpp,$(kordcwebsource))
+kordhwebsource := $(wildcard ../kord/*.hweb)
+kordhsource := $(patsubst %.hweb,%.h,$(kordhwebsource))
+kordobjects := $(patsubst %.cweb,%.o,$(kordcwebsource))
+
+integcwebsource := $(wildcard ../integ/cc/*.cweb)
+integcppsource := $(patsubst %.cweb,%.cpp,$(integcwebsource))
+integhwebsource := $(wildcard ../integ/cc/*.hweb)
+integhsource := $(patsubst %.hweb,%.h,$(integhwebsource))
+integobjects := $(patsubst %.cweb,%.o,$(integcwebsource))
+
+parserhsource := $(wildcard ../parser/cc/*.h)
+parsercppsource := $(wildcard ../parser/cc/*.cpp)
+
+utilshsource := $(wildcard ../utils/cc/*.h)
+utilscppsource := $(wildcard ../utils/cc/*.cpp)
+
+cppsource := $(wildcard *.cpp) $(patsubst %.y,%_ll.cc,$(wildcard *.y)) $(patsubst %.lex,%_tab.cc,$(wildcard *.lex))
+hsource := $(wildcard *.h) $(patsubst %.lex,%_tab.hh,$(wildcard *.lex))
+objects := $(patsubst %.cpp,%.o,$(wildcard *.cpp)) $(patsubst %.y,%_ll.o,$(wildcard *.y)) $(patsubst %.lex,%_tab.o,$(wildcard *.lex))
+
+all: dynare++
+
+../tl/cc/dummy.ch:
+	make -C ../tl/cc dummy.ch
+
+../tl/cc/%.cpp: ../tl/cc/%.cweb ../tl/cc/dummy.ch
+	make -C ../tl/cc $*.cpp
+
+../tl/cc/%.h: ../tl/cc/%.hweb ../tl/cc/dummy.ch
+	make -C ../tl/cc $*.h
+
+../tl/cc/%.o: ../tl/cc/%.cpp $(tlhsource)
+	make -C ../tl/cc $*.o
+
+../integ/cc/dummy.ch:
+	make -C ../integ/cc dummy.ch
+
+../integ/cc/%.cpp: ../integ/cc/%.cweb ../integ/cc/dummy.ch
+	make -C ../integ/cc $*.cpp
+
+../integ/cc/%.h: ../integ/cc/%.hweb ../integ/cc/dummy.ch
+	make -C ../integ/cc $*.h
+
+../integ/cc/%.o: ../integ/cc/%.cpp $(integhsource) $(tlhsource)
+	make -C ../integ/cc $*.o
+
+
+../sylv/cc/%.o: ../sylv/cc/%.cpp $(sylvhsource)
+	make -C ../sylv/cc $*.o
+
+../kord/dummy.ch:
+	make -C ../kord dummy.ch
+
+../kord/%.cpp: ../kord/%.cweb ../kord/dummy.ch
+	make -C ../kord $*.cpp
+
+../kord/%.h: ../kord/%.hweb ../kord/dummy.ch
+	make -C ../kord $*.h
+
+../kord/%.o: ../kord/%.cpp $(tlhsource) $(kordhsource) $(integhsource)
+	make -C ../kord $*.o
+
+# for some reason, the pattern rules didn't work here, so the rules
+# are expanded:
+dynglob_tab.cc: dynglob.y dynare_atoms.h dynare_model.h
+	bison -d -t --verbose -odynglob_tab.cc dynglob.y
+
+dynglob_tab.hh: dynglob.y dynare_atoms.h dynare_model.h
+	bison -d -t --verbose -odynglob_tab.cc dynglob.y
+
+dynglob_ll.cc: dynglob.lex dynglob_tab.hh
+	flex -i -odynglob_ll.cc dynglob.lex
+
+dynglob_ll.o: dynglob_ll.cc $(hsource)
+	$(CC) -I.. -I../sylv/cc -I../tl/cc -O3 -c dynglob_ll.cc
+
+dynglob_tab.o: dynglob_tab.cc $(hsource)
+	$(CC) -I.. -I../sylv/cc -I../tl/cc -O3 -c dynglob_tab.cc
+
+%.o: %.cpp $(hsource) $(kordhsource) $(tlhsource) $(sylvhsource) $(parserhsource)
+	$(CC) $(CC_FLAGS) -DDYNVERSION=\"$(DYNVERSION)\" -o $*.o -c $*.cpp
+
+../parser/cc/parser.a: $(parserhsource) $(parsercppsource)
+	make -C ../parser/cc parser.a
+
+../utils/cc/utils.a: $(utilshsource) $(utilscppsource)
+	make -C ../utils/cc utils.a
+
+dynare++: $(tlhwebsource) $(tlcwebsource) $(tlhsource) $(tlcppsource) \
+         $(integhwebsource) $(integcwebsource) $(integhsource) $(integcppsource) \
+         $(kordhwebsource) $(kordcwebsource) $(kordhsource) $(kordcppsource) \
+         $(sylvhsource) $(sylvcppsource) \
+         $(kordobjects) $(tlobjects) $(integobjects) $(sylvobjects) $(objects) \
+         ../parser/cc/parser.a ../utils/cc/utils.a
+	$(CC) -g -o dynare++ $(objects) $(kordobjects) $(integobjects) $(tlobjects) ../parser/cc/parser.a ../utils/cc/utils.a $(sylvobjects) $(LDFLAGS)
+
+../extern/matlab/dynare_simul_.$(MEX_SUFFIX):
+	make -C ../extern/matlab all
+
+srcball:
+	rm -rf $(tmpdir)
+	mkdir -p $(tmpdir)/dynare++-$(DYNVERSION)/tests
+	cp ../tests/*.mod ../tests/*.dyn $(tmpdir)/dynare++-$(DYNVERSION)/tests
+	mkdir -p $(tmpdir)/dynare++-$(DYNVERSION)/src
+	cp *.h *.cpp *.y *.lex Makefile $(tmpdir)/dynare++-$(DYNVERSION)/src
+	mkdir -p $(tmpdir)/dynare++-$(DYNVERSION)/tl/cc
+	cp ../tl/cc/*web ../tl/cc/Makefile $(tmpdir)/dynare++-$(DYNVERSION)/tl/cc
+	mkdir -p $(tmpdir)/dynare++-$(DYNVERSION)/tl/testing
+	cp ../tl/testing/*.cpp ../tl/testing/*.h ../tl/testing/Makefile $(tmpdir)/dynare++-$(DYNVERSION)/tl/testing
+	mkdir -p $(tmpdir)/dynare++-$(DYNVERSION)/integ/cc
+	cp ../integ/cc/*web ../integ/cc/Makefile ../integ/cc/*.dat $(tmpdir)/dynare++-$(DYNVERSION)/integ/cc
+	mkdir -p $(tmpdir)/dynare++-$(DYNVERSION)/integ/testing
+	cp ../integ/testing/*.cpp ../integ/testing/Makefile $(tmpdir)/dynare++-$(DYNVERSION)/integ/testing
+	mkdir -p $(tmpdir)/dynare++-$(DYNVERSION)/sylv/cc
+	cp ../sylv/cc/*.cpp ../sylv/cc/*.h ../sylv/cc/Makefile $(tmpdir)/dynare++-$(DYNVERSION)/sylv/cc
+	mkdir -p $(tmpdir)/dynare++-$(DYNVERSION)/sylv/testing
+	cp ../sylv/testing/*.cpp ../sylv/testing/*.h ../sylv/testing/Makefile $(tmpdir)/dynare++-$(DYNVERSION)/sylv/testing
+	mkdir -p $(tmpdir)/dynare++-$(DYNVERSION)/kord
+	cp ../kord/*web ../kord/Makefile ../kord/tests.cpp $(tmpdir)/dynare++-$(DYNVERSION)/kord
+	mkdir -p $(tmpdir)/dynare++-$(DYNVERSION)/parser/cc
+	cp ../parser/cc/*.cpp ../parser/cc/*.h ../parser/cc/*.lex ../parser/cc/*.y ../parser/cc/Makefile $(tmpdir)/dynare++-$(DYNVERSION)/parser/cc
+	mkdir -p $(tmpdir)/dynare++-$(DYNVERSION)/utils/cc
+	cp ../utils/cc/*.cpp ../utils/cc/*.h ../utils/cc/Makefile $(tmpdir)/dynare++-$(DYNVERSION)/utils/cc
+
+	mkdir -p $(tmpdir)/dynare++-$(DYNVERSION)/extern/matlab
+	cp ../extern/matlab/*.cpp ../extern/matlab/*.m ../extern/matlab/Makefile ../extern/matlab/*.bat $(tmpdir)/dynare++-$(DYNVERSION)/extern/matlab
+	mkdir -p $(tmpdir)/dynare++-$(DYNVERSION)/doc
+	cp ../doc/*.tex $(tmpdir)/dynare++-$(DYNVERSION)/doc
+	cp ../change_log.html $(tmpdir)/dynare++-$(DYNVERSION)/
+	cd $(tmpdir); tar czf dynare++-$(DYNVERSION).src.tgz dynare++-$(DYNVERSION)/*
+	mv $(tmpdir)/dynare++-$(DYNVERSION).src.tgz ..
+	rm -rf $(tmpdir)
+
+binball: dynare++ ../extern/matlab/dynare_simul_.$(MEX_SUFFIX)
+	rm -rf $(tmpdir)
+	mkdir -p $(tmpdir)/dynare++-$(DYNVERSION)/tests
+	cp ../tests/*.mod ../tests/*.dyn $(tmpdir)/dynare++-$(DYNVERSION)/tests
+	mkdir -p $(tmpdir)/dynare++-$(DYNVERSION)/doc
+	cp ../doc/*.pdf $(tmpdir)/dynare++-$(DYNVERSION)/doc
+	cp dynare++* $(tmpdir)/dynare++-$(DYNVERSION)
+	cp ../extern/matlab/*.m $(tmpdir)/dynare++-$(DYNVERSION)
+	cp ../extern/matlab/dynare_simul_.$(MEX_SUFFIX) $(tmpdir)/dynare++-$(DYNVERSION)
+	cp ../change_log.html $(tmpdir)/dynare++-$(DYNVERSION)
+ifeq ($(OS),Windows_NT)
+	cp $(pthreadGC2) $(tmpdir)/dynare++-$(DYNVERSION)
+	cp $(mingwm10) $(tmpdir)/dynare++-$(DYNVERSION)
+	cd $(tmpdir); zip -r dynare++-$(DYNVERSION)-$(ARCH).ix86.zip dynare++-$(DYNVERSION)/*
+	mv $(tmpdir)/dynare++-$(DYNVERSION)-$(ARCH).ix86.zip ..
+else
+	cd $(tmpdir); tar czf dynare++-$(DYNVERSION)-$(ARCH).ix86.tgz dynare++-$(DYNVERSION)/*
+	mv $(tmpdir)/dynare++-$(DYNVERSION)-$(ARCH).ix86.tgz ..
+endif
+	rm -rf $(tmpdir)
+
+
+clear:
+	rm -f *.o
+	rm -f dynare++*
+	rm -f *_ll.cc *_tab.hh *_tab.cc *.output
+	make -C ../tl/testing clear
+	make -C ../tl/cc clear
+	make -C ../integ/testing clear
+	make -C ../integ/cc clear
+	make -C ../sylv/testing clear
+	make -C ../sylv/cc clear
+	make -C ../kord clear
+	make -C ../parser/cc clear
+	make -C ../utils/cc clear
diff --git a/dynare++/src/dynare3.cpp b/dynare++/src/dynare3.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3eea8ae3fb4368fd4f57c89e078bdbee9cc72ce4
--- /dev/null
+++ b/dynare++/src/dynare3.cpp
@@ -0,0 +1,364 @@
+
+#include "dynare3.h"
+#include "dynare_exception.h"
+#include "planner_builder.h"
+#include "forw_subst_builder.h"
+
+#include "utils/cc/memory_file.h"
+#include "utils/cc/exception.h"
+#include "parser/cc/parser_exception.h"
+#include "parser/cc/atom_substitutions.h"
+#include "../tl/cc/tl_exception.h"
+#include "../kord/kord_exception.h"
+
+#ifndef DYNVERSION
+#define DYNVERSION "unknown"
+#endif
+
+
+/**************************************************************************************/
+/*       DynareNameList class                                                         */
+/**************************************************************************************/
+vector<int> DynareNameList::selectIndices(const vector<const char*>& ns) const
+{
+	vector<int> res;
+	for (unsigned int i = 0; i < ns.size(); i++) {
+		int j = 0;
+		while (j < getNum() && strcmp(getName(j), ns[i]) != 0)
+			j++;
+		if (j == getNum())
+			throw DynareException(__FILE__, __LINE__,
+								  string("Couldn't find name for ") + ns[i] +
+								  " in DynareNameList::selectIndices");
+		res.push_back(j);
+	}
+	return res;
+}
+
+/**************************************************************************************/
+/*       Dynare class                                                                 */
+/**************************************************************************************/
+
+Dynare::Dynare(const char* modname, int ord, double sstol, Journal& jr)
+	: journal(jr), model(NULL), ysteady(NULL), md(1), dnl(NULL), denl(NULL), dsnl(NULL),
+	  fe(NULL), fde(NULL), ss_tol(sstol)
+{
+	// make memory file
+	ogu::MemoryFile mf(modname);
+	if (mf.exists()) {
+		try {
+			model = new ogdyn::DynareParser(mf.base(), mf.length(), ord);
+		} catch (const ogp::ParserException& pe) {
+			int line;
+			int col;
+			mf.line_and_col(pe.offset(), line, col);
+			throw DynareException(pe.message(), modname, line, col);
+		}
+		ysteady = new Vector(model->getAtoms().ny());
+		dnl = new DynareNameList(*this);
+		denl = new DynareExogNameList(*this);
+		dsnl = new DynareStateNameList(*this, *dnl, *denl);
+		fe = new ogp::FormulaEvaluator(model->getParser());
+		fde = new ogp::FormulaDerEvaluator(model->getParser());
+		writeModelInfo(journal);
+	} else {
+		throw DynareException(__FILE__, __LINE__, string("Could not open model file ")+modname);
+	}
+}
+
+Dynare::Dynare(const char** endo, int num_endo,
+			   const char** exo, int num_exo,
+			   const char** par, int num_par,
+			   const char* equations, int len, int ord,
+			   double sstol, Journal& jr)
+	: journal(jr), model(NULL), ysteady(NULL), md(1), dnl(NULL), denl(NULL), dsnl(NULL),
+	  fe(NULL), fde(NULL), ss_tol(sstol)
+{
+	try {
+		model = new ogdyn::DynareSPModel(endo, num_endo, exo, num_exo, par, num_par,
+										 equations, len, ord);
+	} catch (const ogp::ParserException& pe) {
+		throw DynareException(pe.message(), pe.offset());
+	}
+	ysteady = new Vector(model->getAtoms().ny());
+	dnl = new DynareNameList(*this);
+	denl = new DynareExogNameList(*this);
+	dsnl = new DynareStateNameList(*this, *dnl, *denl);
+	fe = new ogp::FormulaEvaluator(model->getParser());
+	fde = new ogp::FormulaDerEvaluator(model->getParser());
+	writeModelInfo(journal);
+}
+
+Dynare::Dynare(const Dynare& dynare)
+	: journal(dynare.journal), model(NULL),
+	  ysteady(NULL), md(dynare.md),
+	  dnl(NULL), denl(NULL), dsnl(NULL), fe(NULL), fde(NULL),
+	  ss_tol(dynare.ss_tol)
+{
+	model = dynare.model->clone();
+	ysteady = new Vector(*(dynare.ysteady));
+	dnl = new DynareNameList(*this);
+	denl = new DynareExogNameList(*this);
+	dsnl = new DynareStateNameList(*this, *dnl, *denl);
+	fe = new ogp::FormulaEvaluator(model->getParser());
+	fde = new ogp::FormulaDerEvaluator(model->getParser());
+}
+
+Dynare::~Dynare()
+{
+	if (model)
+		delete model;
+	if (ysteady)
+		delete ysteady;
+	if (dnl)
+		delete dnl;
+	if (dsnl)
+		delete dsnl;
+	if (denl)
+		delete denl;
+	if (fe)
+		delete fe;
+	if (fde)
+		delete fde;
+}
+
+void Dynare::writeMat4(FILE* fd, const char* prefix) const
+{
+	char tmp[100];
+	sprintf(tmp, "%s_vars", prefix);
+	getAllEndoNames().writeMat4(fd, tmp);
+	getAllEndoNames().writeMat4Indices(fd, prefix);
+	sprintf(tmp, "%s_state_vars", prefix);
+	getStateNames().writeMat4(fd, tmp);
+	sprintf(tmp, "%s_shocks", prefix);
+	getExogNames().writeMat4(fd, tmp);
+	getExogNames().writeMat4Indices(fd, prefix);
+	sprintf(tmp, "%s_vcov_exo", prefix);
+	model->getVcov().writeMat4(fd, tmp);
+	TwoDMatrix aux(1,1);
+	sprintf(tmp, "%s_nstat", prefix);
+	aux.get(0,0) = nstat();
+	aux.writeMat4(fd, tmp);
+	sprintf(tmp, "%s_npred", prefix);
+	aux.get(0,0) = npred();
+	aux.writeMat4(fd, tmp);
+	sprintf(tmp, "%s_nboth", prefix);
+	aux.get(0,0) = nboth();
+	aux.writeMat4(fd, tmp);
+	sprintf(tmp, "%s_nforw", prefix);
+	aux.get(0,0) = nforw();
+	aux.writeMat4(fd, tmp);
+}
+
+void Dynare::writeDump(const std::string&  basename) const
+{
+	std::string fname(basename);
+	fname += ".dump";
+	std::ofstream out(fname.c_str());
+	model->dump_model(out);
+	out.close();
+}
+
+void Dynare::solveDeterministicSteady(Vector& steady)
+{
+	JournalRecordPair pa(journal);
+	pa << "Non-linear solver for deterministic steady state" << endrec;
+	steady = (const Vector&) model->getInit();
+	DynareVectorFunction dvf(*this);
+	DynareJacobian dj(*this);
+	ogu::NLSolver nls(dvf, dj, 500, ss_tol, journal);
+	int iter;
+	if (! nls.solve(steady, iter))
+		throw DynareException(__FILE__, __LINE__,
+							  "Could not obtain convergence in non-linear solver");
+}
+
+// evaluate system at given y_t=y_{t+1}=y_{t-1}, and given shocks x_t
+void Dynare::evaluateSystem(Vector& out, const Vector& yy, const Vector& xx)
+{
+	ConstVector yym(yy, nstat(), nys());
+	ConstVector yyp(yy, nstat()+npred(), nyss());
+	evaluateSystem(out, yym, yy, yyp, xx);
+}
+
+// evaluate system at given y^*_{t-1}, y_t, y^{**}_{t+1} and at
+// exogenous x_t, all three vectors yym, yy, and yyp have the
+// respective lengths of y^*_{t-1}, y_t, y^{**}_{t+1}
+void Dynare::evaluateSystem(Vector& out, const Vector& yym, const Vector& yy,
+							const Vector& yyp, const Vector& xx)
+{
+	ogdyn::DynareAtomValues dav(model->getAtoms(), model->getParams(), yym, yy, yyp, xx);
+	DynareEvalLoader del(model->getAtoms(), out);
+	fe->eval(dav, del);
+}
+
+void Dynare::calcDerivatives(const Vector& yy, const Vector& xx)
+{
+	ConstVector yym(yy, nstat(), nys());
+	ConstVector yyp(yy, nstat()+npred(), nyss());
+	ogdyn::DynareAtomValues dav(model->getAtoms(), model->getParams(), yym, yy, yyp, xx);
+	DynareDerEvalLoader ddel(model->getAtoms(), md, model->getOrder());
+	for (int iord = 1; iord <= model->getOrder(); iord++)
+		fde->eval(dav, ddel, iord);
+}
+
+void Dynare::calcDerivativesAtSteady()
+{
+	Vector xx(nexog());
+	xx.zeros();
+	calcDerivatives(*ysteady, xx);
+}
+
+void Dynare::writeModelInfo(Journal& jr) const
+{
+	// write info on variables
+	{
+		JournalRecordPair rp(journal);
+		rp << "Information on variables" << endrec;
+		JournalRecord rec1(journal);
+		rec1 << "Number of endogenous:            " << ny() << endrec;
+		JournalRecord rec2(journal);
+		rec2 << "Number of exogenous:             " << nexog() << endrec;
+		JournalRecord rec3(journal);
+		rec3 << "Number of static:                " << nstat() << endrec;
+		JournalRecord rec4(journal);
+		rec4 << "Number of predetermined:         " << npred()+nboth() << endrec;
+		JournalRecord rec5(journal);
+		rec5 << "Number of forward looking:       " << nforw()+nboth() << endrec;
+		JournalRecord rec6(journal);
+		rec6 << "Number of both:                  " << nboth() << endrec;
+	}
+
+	// write info on planner variables
+	const ogdyn::PlannerInfo* pinfo = model->get_planner_info();
+	if (pinfo) {
+		JournalRecordPair rp(journal);
+		rp << "Information on planner variables" << endrec;
+		JournalRecord rec1(journal);
+		rec1 << "Number of Lagrange multipliers:  " << pinfo->num_lagrange_mults << endrec;
+		JournalRecord rec2(journal);
+		rec2 << "Number of auxiliary variables:   " << pinfo->num_aux_variables << endrec;
+		JournalRecord rec3(journal);
+		rec3 << "Number of new terms in the tree: " << pinfo->num_new_terms << endrec;
+	}
+
+	// write info on forward substitutions
+	const ogdyn::ForwSubstInfo* finfo = model->get_forw_subst_info();
+	if (finfo) {
+		JournalRecordPair rp(journal);
+		rp << "Information on forward substitutions" << endrec;
+		JournalRecord rec1(journal);
+		rec1 << "Number of affected equations:    " << finfo->num_affected_equations << endrec;
+		JournalRecord rec2(journal);
+		rec2 << "Number of substituted terms:     " << finfo->num_subst_terms << endrec;
+		JournalRecord rec3(journal);
+		rec3 << "Number of auxiliary variables:   " << finfo->num_aux_variables << endrec;
+		JournalRecord rec4(journal);
+		rec4 << "Number of new terms in the tree: " << finfo->num_new_terms << endrec;
+	}
+
+	// write info on substitutions
+	const ogp::SubstInfo* sinfo = model->get_subst_info();
+	if (sinfo) {
+		JournalRecordPair rp(journal);
+		rp << "Information on substitutions" << endrec;
+		JournalRecord rec1(journal);
+		rec1 << "Number of substitutions:         " << sinfo->num_substs << endrec;
+	}
+}
+
+DynareNameList::DynareNameList(const Dynare& dynare)
+{
+	for (int i = 0; i < dynare.ny(); i++) {
+		int j = dynare.model->getAtoms().y2outer_endo()[i];
+		const char* name = dynare.model->getAtoms().get_endovars()[j];
+		names.push_back(name);
+	}
+}
+
+DynareStateNameList::DynareStateNameList(const Dynare& dynare, const DynareNameList& dnl,
+										 const DynareExogNameList& denl)
+{
+	for (int i = 0; i < dynare.nys(); i++)
+		names.push_back(dnl.getName(i+dynare.nstat()));
+	for (int i = 0; i < dynare.nexog(); i++)
+		names.push_back(denl.getName(i));
+}
+
+DynareExogNameList::DynareExogNameList(const Dynare& dynare)
+{
+	for (int i = 0; i < dynare.nexog(); i++) {
+		int j = dynare.model->getAtoms().y2outer_exo()[i];
+		const char* name = dynare.model->getAtoms().get_exovars()[j];
+		names.push_back(name);
+	}
+}
+
+DynareEvalLoader::DynareEvalLoader(const ogp::FineAtoms& a, Vector& out)
+	: Vector(out)
+{
+	if (a.ny() != out.length())
+		throw DynareException(__FILE__, __LINE__, "Wrong length of out vector in DynareEvalLoader constructor");
+}
+
+/** This clears the container of model derivatives and initializes it
+ * by inserting empty sparse tensors up to the given order. */
+DynareDerEvalLoader::DynareDerEvalLoader(const ogp::FineAtoms& a,
+										 TensorContainer<FSSparseTensor>& mod_ders,
+										 int order)
+	: atoms(a), md(mod_ders)
+{
+	md.clear();
+	for (int iord = 1; iord <= order; iord++) {
+		FSSparseTensor* t = new FSSparseTensor(iord, atoms.ny()+atoms.nys()+atoms.nyss()+atoms.nexo(), atoms.ny());
+		md.insert(t);
+	}
+}
+
+void DynareDerEvalLoader::load(int i, int iord, const int* vars, double res)
+{
+	FSSparseTensor* t = md.get(Symmetry(iord));
+	IntSequence s(iord, 0);
+	for (int j = 0; j < iord; j++)
+		s[j] = atoms.get_pos_of_all(vars[j]);
+	t->insert(s, i, res);
+}
+
+DynareJacobian::DynareJacobian(Dynare& dyn)
+	: Jacobian(dyn.ny()), d(dyn)
+{
+	zeros();
+}
+
+void DynareJacobian::eval(const Vector& yy)
+{
+	ogdyn::DynareSteadyAtomValues
+		dav(d.getModel().getAtoms(), d.getModel().getParams(), yy);
+	zeros();
+	d.fde->eval(dav, *this, 1);
+}
+
+void DynareJacobian::load(int i, int iord, const int* vars, double res)
+{
+	if (iord != 1)
+		throw DynareException(__FILE__, __LINE__,
+							  "Derivative order different from order=1 in DynareJacobian::load");
+
+	int t = vars[0];
+	int j = d.getModel().getAtoms().get_pos_of_all(t);
+	if (j < d.nyss())
+		get(i, j+d.nstat()+d.npred()) += res;
+	else if (j < d.nyss()+d.ny())
+		get(i, j-d.nyss()) += res;
+	else if (j < d.nyss()+d.ny()+d.nys())
+		get(i, j-d.nyss()-d.ny()+d.nstat()) += res;
+}
+
+void DynareVectorFunction::eval(const ConstVector& in, Vector& out)
+{
+	check_for_eval(in, out);
+	Vector xx(d.nexog());
+	xx.zeros();
+	d.evaluateSystem(out, in, xx);
+}
+
diff --git a/dynare++/src/dynare3.h b/dynare++/src/dynare3.h
new file mode 100644
index 0000000000000000000000000000000000000000..fbb836ec19468e1a82376fa514c4c110808a4337
--- /dev/null
+++ b/dynare++/src/dynare3.h
@@ -0,0 +1,194 @@
+// $Id: dynare3.h 1764 2008-03-31 14:30:55Z kamenik $
+// Copyright 2005, Ondra Kamenik
+
+#ifndef DYNARE3_H
+#define DYNARE3_H
+
+#include "../tl/cc/t_container.h"
+#include "../tl/cc/sparse_tensor.h"
+#include "../kord/decision_rule.h"
+#include "../kord/dynamic_model.h"
+
+#include "dynare_model.h"
+#include "nlsolve.h"
+
+#include <vector>
+
+class Dynare;
+
+class DynareNameList : public NameList {
+	vector<const char*> names;
+public:
+	DynareNameList(const Dynare& dynare);
+	int getNum() const
+		{return (int)names.size();}
+	const char* getName(int i) const
+		{return names[i];}
+	/** For each string in the input vector this calculates its index
+	 * in names and returns the resulting vector of indices. If a
+	 * name cannot be found, an exception is raised. */
+	vector<int> selectIndices(const vector<const char*>& ns) const;
+};
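+
+/* A hedged usage sketch: given a Dynare object dyn (declared below),
+ * indices of selected endogenous variables can be obtained as
+ *
+ *   DynareNameList dnl(dyn);
+ *   vector<const char*> ns;
+ *   ns.push_back("C");                      // hypothetical variable
+ *   ns.push_back("K");                      // names from the model
+ *   vector<int> idx = dnl.selectIndices(ns);
+ *
+ * An unknown name makes selectIndices throw a DynareException. */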
+
+class DynareExogNameList : public NameList {
+	vector<const char*> names;
+public:
+	DynareExogNameList(const Dynare& dynare);
+	int getNum() const
+		{return (int)names.size();}
+	const char* getName(int i) const
+		{return names[i];}
+};
+
+class DynareStateNameList : public NameList {
+	vector<const char*> names;
+public:
+	DynareStateNameList(const Dynare& dynare, const DynareNameList& dnl,
+						const DynareExogNameList& denl);
+	int getNum() const
+		{return (int)names.size();}
+	const char* getName(int i) const
+		{return names[i];}
+};
+
+// The following only implements DynamicModel with the help of ogdyn::DynareModel
+
+class DynareJacobian;
+class Dynare : public DynamicModel {
+	friend class DynareNameList;
+	friend class DynareExogNameList;
+	friend class DynareStateNameList;
+	friend class DynareJacobian;
+	Journal& journal;
+	ogdyn::DynareModel* model;
+	Vector* ysteady;
+	TensorContainer<FSSparseTensor> md;
+	DynareNameList* dnl;
+	DynareExogNameList* denl;
+	DynareStateNameList* dsnl;
+	ogp::FormulaEvaluator* fe;
+	ogp::FormulaDerEvaluator* fde;
+	const double ss_tol;
+public:
+	/** Parses the given model file and uses the given order to
+	 * override order from the model file (if it is != -1). */
+	Dynare(const char* modname, int ord, double sstol, Journal& jr);
+	/** Parses the given equations with explicitly given names. */
+	Dynare(const char** endo, int num_endo,
+		   const char** exo, int num_exo,
+		   const char** par, int num_par,
+		   const char* equations, int len, int ord,
+		   double sstol, Journal& jr);
+	/** Makes a deep copy of the object. */
+	Dynare(const Dynare& dyn);
+	DynamicModel* clone() const
+		{return new Dynare(*this);}
+	virtual ~Dynare();
+	int nstat() const
+		{return model->getAtoms().nstat();}
+	int nboth() const
+		{return model->getAtoms().nboth();}
+	int npred() const
+		{return model->getAtoms().npred();}
+	int nforw() const
+		{return model->getAtoms().nforw();}
+	int nexog() const
+		{return model->getAtoms().nexo();}
+	int nys() const
+		{return model->getAtoms().nys();}
+	int nyss() const
+		{return model->getAtoms().nyss();}
+	int ny() const
+		{return model->getAtoms().ny();}
+	int order() const
+		{return model->getOrder();}
+
+	const NameList& getAllEndoNames() const
+		{return *dnl;}
+	const NameList& getStateNames() const
+		{return *dsnl;}
+	const NameList& getExogNames() const
+		{return *denl;}
+
+	TwoDMatrix& getVcov()
+		{return model->getVcov();}
+	const TwoDMatrix& getVcov() const
+		{return model->getVcov();}
+	Vector& getParams()
+		{return model->getParams();}
+	const Vector& getParams() const
+		{return model->getParams();}
+	void setInitOuter(const Vector& x)
+		{model->setInitOuter(x);}
+
+	const TensorContainer<FSSparseTensor>& getModelDerivatives() const
+		{return md;}
+	const Vector& getSteady() const
+		{return *ysteady;}
+	Vector& getSteady()
+		{return *ysteady;}
+	const ogdyn::DynareModel& getModel() const
+		{return *model;}
+
+	// here is true public interface
+	void solveDeterministicSteady(Vector& steady);
+	void solveDeterministicSteady()
+		{solveDeterministicSteady(*ysteady);}
+	void evaluateSystem(Vector& out, const Vector& yy, const Vector& xx);
+	void evaluateSystem(Vector& out, const Vector& yym, const Vector& yy,
+						const Vector& yyp, const Vector& xx);
+	void calcDerivatives(const Vector& yy, const Vector& xx);
+	void calcDerivativesAtSteady();
+
+	void writeMat4(FILE* fd, const char* prefix) const;
+	void writeDump(const std::string& basename) const;
+private:
+	void writeModelInfo(Journal& jr) const;
+};
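+
+/* A hedged sketch of the typical call sequence (the file names, the
+ * order, the steady-state tolerance and the Journal construction are
+ * illustrative placeholders):
+ *
+ *   Journal jr("example.jnl");
+ *   Dynare dyn("example.mod", 2, 1e-13, jr);  // parse at order 2
+ *   dyn.solveDeterministicSteady();           // fills dyn.getSteady()
+ *   dyn.calcDerivativesAtSteady();            // fills model derivatives
+ *   const TensorContainer<FSSparseTensor>& md = dyn.getModelDerivatives();
+ */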
+
+class DynareEvalLoader : public ogp::FormulaEvalLoader, public Vector {
+public:
+	DynareEvalLoader(const ogp::FineAtoms& a, Vector& out);
+	void load(int i, double res)
+		{operator[](i) = res;}
+};
+
+class DynareDerEvalLoader : public ogp::FormulaDerEvalLoader {
+protected:
+	const ogp::FineAtoms& atoms;
+	TensorContainer<FSSparseTensor>& md;
+public:
+	DynareDerEvalLoader(const ogp::FineAtoms& a, TensorContainer<FSSparseTensor>& mod_ders,
+						int order);
+	void load(int i, int iord, const int* vars, double res);
+};
+
+class DynareJacobian : public ogu::Jacobian, public ogp::FormulaDerEvalLoader {
+protected:
+	Dynare& d;
+public:
+	DynareJacobian(Dynare& dyn);
+	virtual ~DynareJacobian() {}
+	void load(int i, int iord, const int* vars, double res);
+	void eval(const Vector& in);
+};
+
+class DynareVectorFunction : public ogu::VectorFunction {
+protected:
+	Dynare& d;
+public:
+	DynareVectorFunction(Dynare& dyn)
+		: d(dyn) {}
+	virtual ~DynareVectorFunction() {}
+	int inDim() const
+		{return d.ny();}
+	int outDim() const
+		{return d.ny();}
+	void eval(const ConstVector& in, Vector& out);
+};
+
+#endif
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/src/dynare_atoms.cpp b/dynare++/src/dynare_atoms.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e0121d5306c910dd5ea1723f43673deeae5036ff
--- /dev/null
+++ b/dynare++/src/dynare_atoms.cpp
@@ -0,0 +1,282 @@
+// Copyright (C) 2006, Ondra Kamenik
+
+// $Id: dynare_atoms.cpp 1765 2008-03-31 14:32:08Z kamenik $
+
+#include "parser/cc/parser_exception.h"
+#include "utils/cc/exception.h"
+
+#include "dynare_atoms.h"
+
+#include <string>
+#include <cmath>
+
+using namespace ogdyn;
+using std::string;
+
+void DynareStaticAtoms::register_name(const char* name)
+{
+	if (varnames.query(name))
+		throw ogp::ParserException(string("The name ")+name+" is not unique.", 0);
+	StaticAtoms::register_name(name);
+}
+
+int DynareStaticAtoms::check_variable(const char* name) const
+{
+	if (0 == varnames.query(name))
+		throw ogp::ParserException(std::string("Unknown name <")+name+">", 0);
+	Tvarmap::const_iterator it = vars.find(name);
+	if (it == vars.end())
+		return -1;
+	else
+		return (*it).second;
+}
+
+DynareDynamicAtoms::DynareDynamicAtoms(const DynareDynamicAtoms& dda)
+	: SAtoms(dda)
+{
+	// fill atom_type
+	for (Tatypemap::const_iterator it = dda.atom_type.begin();
+		 it != dda.atom_type.end(); ++it)
+		atom_type.insert(Tatypemap::value_type(varnames.query((*it).first), (*it).second));
+}
+
+void DynareDynamicAtoms::parse_variable(const char* in, std::string& out, int& ll) const
+{
+	ll = 0;
+	std::string str = in;
+	int left = str.find_first_of("({");
+	if (left != -1) {
+		out = str.substr(0, left);
+		left++;
+		int right = str.find_first_of(")}", left);
+		if ((int)string::npos == right)
+			throw ogp::ParserException(
+				string("Syntax error when parsing Dynare atom <")+in+">.", 0);
+		std::string tmp(str, left, right-left);
+		sscanf(tmp.c_str(), "%d", &ll);
+	} else {
+		out = in;
+	}
+}
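+
+// A hedged illustration of parse_variable (variable names are made up):
+//
+//   DynareDynamicAtoms dda;
+//   std::string name;
+//   int ll;
+//   dda.parse_variable("C(+1)", name, ll);   // name == "C", ll == 1
+//   dda.parse_variable("K(-1)", name, ll);   // name == "K", ll == -1
+//   dda.parse_variable("C",     name, ll);   // name == "C", ll == 0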
+
+void DynareDynamicAtoms::register_uniq_endo(const char* name)
+{
+	FineAtoms::register_uniq_endo(name);
+	atom_type.insert(Tatypemap::value_type(varnames.query(name), endovar));
+}
+
+void DynareDynamicAtoms::register_uniq_exo(const char* name)
+{
+	FineAtoms::register_uniq_exo(name);
+	atom_type.insert(Tatypemap::value_type(varnames.query(name), exovar));
+}
+
+void DynareDynamicAtoms::register_uniq_param(const char* name)
+{
+	FineAtoms::register_uniq_param(name);
+	atom_type.insert(Tatypemap::value_type(varnames.query(name), param));
+}
+
+bool DynareDynamicAtoms::is_type(const char* name, atype tp) const
+{
+	Tatypemap::const_iterator it = atom_type.find(name);
+	if (it != atom_type.end() && (*it).second == tp)
+		return true;
+	else
+		return false;
+}
+
+void DynareDynamicAtoms::print() const
+{
+	SAtoms::print();
+	printf("Name types:\n");
+	for (Tatypemap::const_iterator it = atom_type.begin();
+		 it != atom_type.end(); ++it)
+		printf("name=%s type=%s\n", (*it).first,
+			   ((*it).second == endovar) ? "endovar" : (((*it).second == exovar)? "exovar" : "param"));
+}
+
+std::string DynareDynamicAtoms::convert(int t) const
+{
+	if (t < ogp::OperationTree::num_constants) {
+		throw ogu::Exception(__FILE__,__LINE__,
+							 "Tree index is a built-in constant in DynareDynamicAtoms::convert");
+		return std::string();
+	}
+	if (is_constant(t)) {
+		double v = get_constant_value(t);
+		char buf[100];
+		sprintf(buf, "%20.16g", v);
+		const char* s = buf;
+		while (*s == ' ')
+			++s;
+		return std::string(s);
+	}
+	
+	const char* s = name(t);
+	if (is_type(s, endovar)) {
+		int ll = lead(t);
+		char buf[100];
+		if (ll)
+			sprintf(buf, "%s(%d)", s, ll);
+		else
+			sprintf(buf, "%s", s);
+		return std::string(buf);
+	}
+
+	return std::string(s);
+}
+
+
+void DynareAtomValues::setValues(ogp::EvalTree& et) const
+{
+	// set constants
+	atoms.setValues(et);
+
+	// set parameters
+	for (unsigned int i = 0; i < atoms.get_params().size(); i++) {
+		try {
+			const ogp::DynamicAtoms::Tlagmap& lmap = atoms.lagmap(atoms.get_params()[i]);
+			for (ogp::DynamicAtoms::Tlagmap::const_iterator it = lmap.begin();
+				 it != lmap.end(); ++it) {
+				int t = (*it).second;
+				et.set_nulary(t, paramvals[i]);
+			}
+		} catch (const ogu::Exception& e) {
+			// ignore non-referenced parameters; there is no
+			// lagmap for them
+		}
+	}
+
+	// set endogenous
+	for (unsigned int outer_i = 0; outer_i < atoms.get_endovars().size(); outer_i++) {
+		try {
+			const ogp::DynamicAtoms::Tlagmap& lmap = atoms.lagmap(atoms.get_endovars()[outer_i]);
+			for (ogp::DynamicAtoms::Tlagmap::const_iterator it = lmap.begin();
+				 it != lmap.end(); ++it) {
+				int ll = (*it).first;
+				int t = (*it).second;
+				int i = atoms.outer2y_endo()[outer_i];
+				if (ll == -1) {
+					et.set_nulary(t, yym[i-atoms.nstat()]);
+				}
+				else if (ll == 0)
+					et.set_nulary(t, yy[i]);
+				else
+					et.set_nulary(t, yyp[i-atoms.nstat()-atoms.npred()]);
+			}
+		} catch (const ogu::Exception& e) {
+			// ignore non-referenced endogenous variables; there is no
+			// lagmap for them
+		}
+	}
+
+	// set exogenous
+	for (unsigned int outer_i = 0; outer_i < atoms.get_exovars().size(); outer_i++) {
+		try {
+			const ogp::DynamicAtoms::Tlagmap& lmap = atoms.lagmap(atoms.get_exovars()[outer_i]);
+			for (ogp::DynamicAtoms::Tlagmap::const_iterator it = lmap.begin();
+				 it != lmap.end(); ++it) {
+				int ll = (*it).first;
+				if (ll == 0) { // this is always true because of checks
+					int t = (*it).second;
+					int i = atoms.outer2y_exo()[outer_i];			
+					et.set_nulary(t, xx[i]);
+				}
+			}
+		} catch (const ogu::Exception& e) {
+			// ignore non-referenced variables
+		}
+	}
+}
+
+void DynareStaticSteadyAtomValues::setValues(ogp::EvalTree& et) const
+{
+	// set constants
+	atoms_static.setValues(et);
+
+	// set parameters
+	for (unsigned int i = 0; i < atoms_static.get_params().size(); i++) {
+		const char* name = atoms_static.get_params()[i];
+		int t = atoms_static.index(name);
+		if (t != -1) {
+			int idyn = atoms.name2outer_param(name);
+			et.set_nulary(t, paramvals[idyn]);
+		}
+	}
+
+	// set endogenous
+	for (unsigned int i = 0; i < atoms_static.get_endovars().size(); i++) {
+		const char* name = atoms_static.get_endovars()[i];
+		int t = atoms_static.index(name);
+		if (t != -1) {
+			int idyn = atoms.outer2y_endo()[atoms.name2outer_endo(name)];
+			et.set_nulary(t, yy[idyn]);
+		}
+	}
+
+	// set exogenous
+	for (unsigned int i = 0; i < atoms_static.get_exovars().size(); i++) {
+		const char* name = atoms_static.get_exovars()[i];
+		int t = atoms_static.index(name);
+		if (t != -1)
+			et.set_nulary(t, 0.0);
+	}
+}
+
+DynareSteadySubstitutions::DynareSteadySubstitutions(const ogp::FineAtoms& a,
+													 const ogp::OperationTree& tree,
+													 const Tsubstmap& subst,
+													 const Vector& pvals, Vector& yy)
+	: atoms(a), y(yy)
+{
+	// fill the vector of left and right hand sides
+	for (Tsubstmap::const_iterator it = subst.begin();
+		 it != subst.end(); ++it) {
+		left_hand_sides.push_back((*it).first);
+		right_hand_sides.push_back((*it).second);
+	}
+
+	// evaluate right hand sides
+	DynareSteadyAtomValues dsav(atoms, pvals, y);
+	ogp::FormulaCustomEvaluator fe(tree, right_hand_sides);
+	fe.eval(dsav, *this);
+}
+
+void DynareSteadySubstitutions::load(int i, double res)
+{
+	const char* name = left_hand_sides[i];
+	int iouter = atoms.name2outer_endo(name);
+	int iy = atoms.outer2y_endo()[iouter];
+	if (! std::isfinite(y[iy]))
+		y[iy] = res;
+}
+
+DynareStaticSteadySubstitutions::
+DynareStaticSteadySubstitutions(const ogp::FineAtoms& a, const ogp::StaticFineAtoms& sa,
+								const ogp::OperationTree& tree,
+								const Tsubstmap& subst,
+								const Vector& pvals, Vector& yy)
+	: atoms(a), atoms_static(sa), y(yy)
+{
+	// fill the vector of left and right hand sides
+	for (Tsubstmap::const_iterator it = subst.begin();
+		 it != subst.end(); ++it) {
+		left_hand_sides.push_back((*it).first);
+		right_hand_sides.push_back((*it).second);
+	}
+
+	// evaluate right hand sides
+	DynareStaticSteadyAtomValues dsav(atoms, atoms_static, pvals, y);
+	ogp::FormulaCustomEvaluator fe(tree, right_hand_sides);
+	fe.eval(dsav, *this);
+}
+
+void DynareStaticSteadySubstitutions::load(int i, double res)
+{
+	const char* name = left_hand_sides[i];
+	int iouter = atoms.name2outer_endo(name);
+	int iy = atoms.outer2y_endo()[iouter];
+	if (! std::isfinite(y[iy]))
+		y[iy] = res;
+}
diff --git a/dynare++/src/dynare_atoms.h b/dynare++/src/dynare_atoms.h
new file mode 100644
index 0000000000000000000000000000000000000000..7cdcce6c249392b49a97096c8e1f0458e1c0ef3b
--- /dev/null
+++ b/dynare++/src/dynare_atoms.h
@@ -0,0 +1,212 @@
+// Copyright (C) 2006, Ondra Kamenik
+
+// $Id: dynare_atoms.h 1765 2008-03-31 14:32:08Z kamenik $
+
+#ifndef OGDYN_DYNARE_ATOMS_H
+#define OGDYN_DYNARE_ATOMS_H
+
+#include "sylv/cc/Vector.h"
+
+#include "parser/cc/static_atoms.h"
+#include "parser/cc/static_fine_atoms.h"
+#include "parser/cc/atom_substitutions.h"
+#include "parser/cc/tree.h"
+
+#include <map>
+#include <vector>
+
+namespace ogdyn {
+
+	using std::map;
+	using std::vector;
+
+	/** A definition of a type mapping a string to an integer. Used as
+	 * a substitution map, saying what names are substituted for what
+	 * expressions represented by tree indices. */
+	typedef map<const char*, int, ogp::ltstr> Tsubstmap;
+
+	class DynareStaticAtoms : public ogp::StaticAtoms {
+	public:
+		DynareStaticAtoms()
+			: StaticAtoms() {}
+		DynareStaticAtoms(const DynareStaticAtoms& a)
+			: StaticAtoms(a) {}
+		virtual ~DynareStaticAtoms() {}
+		/** This registers a unique varname identifier. It throws an
+		 * exception if the variable name is a duplicate. It checks the
+		 * uniqueness and then calls StaticAtoms::register_name. */
+		void register_name(const char* name);
+	protected:
+		/** This returns a tree index of the given variable, and if
+		 * the variable has not been registered, it throws an
+		 * exception. */
+		int check_variable(const char* name) const;
+	};
+
+
+	class DynareDynamicAtoms : public ogp::SAtoms, public ogp::NularyStringConvertor {
+	public:
+		enum atype {endovar, exovar, param};
+	protected:
+		typedef map<const char*, atype, ogp::ltstr> Tatypemap;
+		/** The map assigning a type to each name. */
+		Tatypemap atom_type;
+	public:
+		DynareDynamicAtoms()
+			: ogp::SAtoms() {}
+		DynareDynamicAtoms(const DynareDynamicAtoms& dda);
+		virtual ~DynareDynamicAtoms() {}
+		/** This parses a variable of the forms: varname(+3),
+		 * varname(3), varname, varname(-3), varname(0), varname(+0),
+		 * varname(-0). */
+		virtual void parse_variable(const char* in, std::string& out, int& ll) const;
+		/** Registers unique name of endogenous variable. */
+		void register_uniq_endo(const char* name);
+		/** Registers unique name of exogenous variable. */
+		void register_uniq_exo(const char* name);
+		/** Registers unique name of parameter. */
+		void register_uniq_param(const char* name);
+		/** Return true if the name is of the given type. */
+		bool is_type(const char* name, atype tp) const;
+		/** Debug print. */
+		void print() const;
+		/** Implement NularyStringConvertor::convert. */
+		std::string convert(int t) const;
+	};
+
+
+	/** This class represents the atom values for dynare, where
+	 * exogenous variables can occur only at time t, and endogenous at
+	 * times t-1, t, and t+1. */
+	class DynareAtomValues : public ogp::AtomValues {
+	protected:
+		/** Reference to the atoms (we suppose that they occur only at
+		 * t-1, t, t+1). */
+		const ogp::FineAtoms& atoms;
+		/** De facto reference to the values of parameters. */
+		const ConstVector paramvals;
+		/** De facto reference to the values of endogenous at time t-1.
+		 * Only the predetermined and both parts. */
+		const ConstVector yym;
+		/** De facto reference to the values of endogenous at time t. Ordering
+		 * given by the atoms. */
+		const ConstVector yy;
+		/** De facto reference to the values of endogenous at time t+1.
+		 * Only the both and forward looking parts. */
+		const ConstVector yyp;
+		/** De facto reference to the values of exogenous at time t. */
+		const ConstVector xx;
+	public:
+		DynareAtomValues(const ogp::FineAtoms& a, const Vector& pvals, const Vector& ym,
+						 const Vector& y, const Vector& yp, const Vector& x)
+			: atoms(a), paramvals(pvals), yym(ym), yy(y), yyp(yp), xx(x) {}
+		DynareAtomValues(const ogp::FineAtoms& a, const Vector& pvals, const ConstVector& ym,
+						 const Vector& y, const ConstVector& yp, const Vector& x)
+			: atoms(a), paramvals(pvals), yym(ym), yy(y), yyp(yp), xx(x) {}
+		void setValues(ogp::EvalTree& et) const;
+	};
+
+	/** This class represents the atom values at the steady state. It
+	 * only makes the appropriate subvectors yym and yyp of the y
+	 * vector, makes a vector of zero exogenous variables and uses
+	 * DynareAtomValues with its more general interface. */
+	class DynareSteadyAtomValues : public ogp::AtomValues {
+	protected:
+		/** Subvector of yy. */
+		const ConstVector yym;
+		/** Subvector of yy. */
+		const ConstVector yyp;
+		/** Vector of zeros for exogenous variables. */
+		Vector xx;
+		/** Atom values using this yym, yyp and xx. */
+		DynareAtomValues av;
+	public:
+		DynareSteadyAtomValues(const ogp::FineAtoms& a, const Vector& pvals, const Vector& y)
+			: yym(y, a.nstat(), a.nys()),
+			  yyp(y, a.nstat()+a.npred(), a.nyss()),
+			  xx(a.nexo()),
+			  av(a, pvals, yym, y, yyp, xx)
+			{xx.zeros();}
+		void setValues(ogp::EvalTree& et) const
+			{av.setValues(et);}
+	};
+
+	class DynareStaticSteadyAtomValues : public ogp::AtomValues {
+	protected:
+		/** Reference to static atoms over which the tree, where the
+		 * values go, is defined. */
+		const ogp::StaticFineAtoms& atoms_static;
+		/** Reference to dynamic atoms for which the class gets input
+		 * data. */
+		const ogp::FineAtoms& atoms;
+		/** De facto reference to input data, this is a vector of
+		 * endogenous variables in internal ordering of the dynamic
+		 * atoms. */
+		ConstVector yy;
+		/** De facto reference to input parameters corresponding to
+		 * ordering defined by the dynamic atoms. */
+		ConstVector paramvals;
+	public:
+		/** Construct the object. */
+		DynareStaticSteadyAtomValues(const ogp::FineAtoms& a, const ogp::StaticFineAtoms& sa,
+									 const Vector& pvals, const Vector& yyy)
+			: atoms_static(sa),
+			  atoms(a),
+			  yy(yyy),
+			  paramvals(pvals) {}
+		/** Set the values to the tree defined over the static atoms. */
+		void setValues(ogp::EvalTree& et) const;
+	};
+
+	/** This class takes a vector of endogenous variables and a
+	 * substitution map. It supposes that variables at the right hand
+	 * sides of the substitutions are set in the endogenous vector. It
+	 * evaluates the substitutions and if the variables corresponding
+	 * to left hand sides are not set in the endogenous vector it sets
+	 * them to calculated values. If a variable is already set, it
+	 * does not override its value. It has no methods, everything is
+	 * done in the constructor. */ 
+	class DynareSteadySubstitutions : public ogp::FormulaEvalLoader {
+	protected:
+		const ogp::FineAtoms& atoms;
+	public:
+		DynareSteadySubstitutions(const ogp::FineAtoms& a, const ogp::OperationTree& tree,
+								  const Tsubstmap& subst,
+								  const Vector& pvals, Vector& yy);
+		void load(int i, double res);
+	protected:
+		Vector& y;
+		vector<const char*> left_hand_sides;
+		vector<int> right_hand_sides;
+	};
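+
+	/** A hedged usage sketch: the work is done in the constructor, so
+	 * it is enough to build a temporary (atoms, tree, subst, params
+	 * and y are assumed to be prepared by the caller):
+	 *
+	 *   DynareSteadySubstitutions dss(atoms, tree, subst, params, y);
+	 *
+	 * Afterwards every endogenous entry of y which was not finite and
+	 * has a substitution in subst holds the evaluated right hand
+	 * side. */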
+
+	/** This class is a static version of DynareSteadySubstitutions. It
+	 * works with static atoms, a static tree and a substitution map
+	 * over the static tree. It also needs the dynamic version of the
+	 * atoms, since that defines the ordering of the vectors pvals and
+	 * yy. */
+	class DynareStaticSteadySubstitutions : public ogp::FormulaEvalLoader {
+	protected:
+		const ogp::FineAtoms& atoms;
+		const ogp::StaticFineAtoms& atoms_static;
+	public:
+		DynareStaticSteadySubstitutions(const ogp::FineAtoms& a,
+										const ogp::StaticFineAtoms& sa,
+										const ogp::OperationTree& tree,
+										const Tsubstmap& subst,
+										const Vector& pvals, Vector& yy);
+		void load(int i, double res);
+	protected:
+		Vector& y;
+		vector<const char*> left_hand_sides;
+		vector<int> right_hand_sides;
+	};
+
+};
+
+
+#endif
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/src/dynare_exception.h b/dynare++/src/dynare_exception.h
new file mode 100644
index 0000000000000000000000000000000000000000..c91048a3a8a528656a5da1a44eaa460e864ce181
--- /dev/null
+++ b/dynare++/src/dynare_exception.h
@@ -0,0 +1,41 @@
+// Copyright (C) 2006, Ondra Kamenik
+
+// $Id: dynare_exception.h 853 2006-08-01 08:42:42Z kamenik $
+
+#ifndef DYNARE_EXCEPTION_H
+#define DYNARE_EXCEPTION_H
+
+#include <string>
+#include <string.h>
+#include <stdio.h>
+
+class DynareException {
+	char* mes;
+public:
+	DynareException(const char* m, const char* fname, int line, int col)
+		{
+			mes = new char[strlen(m) + strlen(fname) + 100];
+			sprintf(mes, "Parse error at %s, line %d, column %d: %s", fname, line, col, m);
+		}
+	DynareException(const char* fname, int line, const std::string& m)
+		{
+			mes = new char[m.size() + strlen(fname) + 50];
+			sprintf(mes, "%s:%d: %s", fname, line, m.c_str());
+		}
+	DynareException(const char* m, int offset)
+		{
+			mes = new char[strlen(m) + 100];
+			sprintf(mes, "Parse error in provided string at offset %d: %s", offset, m);
+		}
+	DynareException(const DynareException& e)
+		: mes(new char[strlen(e.mes)+1])
+		{strcpy(mes, e.mes);}
+	virtual ~DynareException()
+		{delete [] mes;}
+	const char* message() const
+		{return mes;}
+};
+
+#endif
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/src/dynare_model.cpp b/dynare++/src/dynare_model.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..570bb2a65df68557bc016d3c0e696e57ef96aa43
--- /dev/null
+++ b/dynare++/src/dynare_model.cpp
@@ -0,0 +1,959 @@
+// Copyright (C) 2006, Ondra Kamenik
+
+// $Id: dynare_model.cpp 2269 2008-11-23 14:33:22Z michel $
+
+#include "parser/cc/parser_exception.h"
+#include "parser/cc/location.h"
+#include "utils/cc/exception.h"
+#include "dynare_model.h"
+#include "dynare_exception.h"
+#include "planner_builder.h"
+#include "forw_subst_builder.h"
+
+#include <stdlib.h>
+
+#include <string>
+#include <cmath>
+#include <climits>
+
+using namespace ogdyn;
+
+ParsedMatrix::ParsedMatrix(const ogp::MatrixParser& mp)
+	: TwoDMatrix(mp.nrows(), mp.ncols())
+{
+	zeros();
+	for (ogp::MPIterator it = mp.begin(); it != mp.end(); ++it)
+		get(it.row(), it.col()) = *it;
+}
+
+DynareModel::DynareModel()
+	: atoms(), eqs(atoms), order(-1),
+	  param_vals(0), init_vals(0), vcov_mat(0),
+	  t_plobjective(-1), t_pldiscount(-1),
+	  pbuilder(NULL), fbuilder(NULL),
+	  atom_substs(NULL), old_atoms(NULL)
+{}
+
+DynareModel::DynareModel(const DynareModel& dm)
+	: atoms(dm.atoms), eqs(dm.eqs, atoms), order(dm.order),
+	  param_vals(0), init_vals(0), vcov_mat(0),
+	  t_plobjective(dm.t_plobjective),
+	  t_pldiscount(dm.t_pldiscount),
+	  pbuilder(NULL), fbuilder(NULL),
+	  atom_substs(NULL), old_atoms(NULL)
+{
+	if (dm.param_vals)
+		param_vals = new Vector((const Vector&)*(dm.param_vals));
+	if (dm.init_vals)
+		init_vals = new Vector((const Vector&)*(dm.init_vals));
+	if (dm.vcov_mat)
+		vcov_mat = new TwoDMatrix((const TwoDMatrix&)*(dm.vcov_mat));
+	if (dm.old_atoms)
+		old_atoms = new DynareDynamicAtoms((const DynareDynamicAtoms&)*(dm.old_atoms));
+	if (dm.atom_substs)
+		atom_substs = new ogp::AtomSubstitutions((*dm.atom_substs), *old_atoms, atoms);
+	if (dm.pbuilder)
+		pbuilder = new PlannerBuilder(*(dm.pbuilder), *this);
+	if (dm.fbuilder)
+		fbuilder = new ForwSubstBuilder(*(dm.fbuilder), *this);
+}
+
+DynareModel::~DynareModel()
+{
+	if (param_vals)
+		delete param_vals;
+	if (init_vals)
+		delete init_vals;
+	if (vcov_mat)
+		delete vcov_mat;
+	if (old_atoms)
+		delete old_atoms;
+	if (atom_substs)
+		delete atom_substs;
+	if (pbuilder)
+		delete pbuilder;
+	if (fbuilder)
+		delete fbuilder;
+}
+
+const PlannerInfo* DynareModel::get_planner_info() const
+{
+	if (pbuilder)
+		return &(pbuilder->get_info());
+	return NULL;
+}
+
+const ForwSubstInfo* DynareModel::get_forw_subst_info() const
+{
+	if (fbuilder)
+		return &(fbuilder->get_info());
+	return NULL;
+}
+
+const ogp::SubstInfo* DynareModel::get_subst_info() const
+{
+	if (atom_substs)
+		return &(atom_substs->get_info());
+	return NULL;
+}
+
+void DynareModel::setInitOuter(const Vector& x)
+{
+	if (x.length() != atoms.ny())
+		throw DynareException(__FILE__, __LINE__,
+							  "Wrong length of vector in DynareModel::setInitOuter");
+	for (int i = 0; i < atoms.ny(); i++)
+		(*init_vals)[i] = x[atoms.y2outer_endo()[i]];
+}
+
+void DynareModel::print() const
+{
+	printf("all atoms:\n");
+	atoms.print();
+	printf("formulas:\n");
+	DebugOperationFormatter dof(*this);
+	for (int i = 0; i < eqs.nformulas(); i++) {
+		int tf = eqs.formula(i);
+		printf("formula %d:\n", tf);
+		eqs.getTree().print_operation_tree(tf, stdout, dof);
+	}
+}
+
+void DynareModel::dump_model(std::ostream& os) const
+{
+	// endogenous variable declaration
+	os << "var";
+	for (int i = 0; i < (int)atoms.get_endovars().size(); i++)
+		os << " " << atoms.get_endovars()[i];
+	os << ";\n\n";
+
+	// exogenous variables
+	os << "varexo";
+	for (int i = 0; i < (int)atoms.get_exovars().size(); i++)
+		os << " " << atoms.get_exovars()[i];
+	os << ";\n\n";
+
+	// parameters
+	os << "parameters";
+	for (int i = 0; i < (int)atoms.get_params().size(); i++)
+		os << " " << atoms.get_params()[i];
+	os << ";\n\n";
+
+	// parameter values
+	os.precision(16);
+	for (int i = 0; i < (int)atoms.get_params().size(); i++)
+		os << atoms.get_params()[i] << "=" << getParams()[i] << ";\n";
+	os << "\n\n";
+
+	// model section
+	ogp::OperationStringConvertor osc(atoms, getParser().getTree());
+	os << "model;\n";
+	for (int i = 0; i < getParser().nformulas(); i++) {
+		os << "// Equation " << i << "\n0 = ";
+		int t = getParser().formula(i);
+		os << osc.convert(getParser().getTree().operation(t), t);
+		os << ";\n";
+	}
+	os << "end;\n";
+
+	// initval as steady state
+	os << "initval;\n";
+	for (int i = 0; i < (int)atoms.get_endovars().size(); i++)
+		os << atoms.get_endovars()[atoms.y2outer_endo()[i]] << "=" << getInit()[i] << ";\n";
+	os << "end;\n";
+}
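+
+// The dump written above has the shape of a Dynare++ model file, roughly
+// (variable names illustrative):
+//   var c k;
+//   varexo e;
+//   parameters beta;
+//   beta=0.99;
+//   model;
+//   // Equation 0
+//   0 = <expression>;
+//   end;
+//   initval;
+//   c=<steady state value>;
+//   end;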
+
+void DynareModel::add_name(const char* name, int flag)
+{
+	if (flag == 1) {
+		// endogenous
+		atoms.register_uniq_endo(name);
+	} else if (flag == 2) {
+		// exogenous
+		atoms.register_uniq_exo(name);
+	} else if (flag == 3) {
+		// parameter
+		atoms.register_uniq_param(name);
+	} else {
+		throw DynareException(__FILE__, __LINE__,
+							  "Unrecognized flag value.");
+	}
+}
+
+void DynareModel::check_model() const
+{
+	if (order == -1)
+		throw DynareException(__FILE__,__LINE__,
+							  "Order of approximation not set in DynareModel::check_model");
+
+	if (atoms.ny() != eqs.nformulas()) {
+		char mes[1000];
+		sprintf(mes, "Model has %d equations for %d endogenous variables", eqs.nformulas(), atoms.ny());
+		throw DynareException(__FILE__, __LINE__, mes);
+	}
+	
+	// check whether all nulary terms of all formulas in eqs are
+	// either constant or assigned to a name
+	for (int i = 0; i < eqs.nformulas(); i++) {
+		int ft = eqs.formula(i);
+		const hash_set<int>& nuls = eqs.nulary_of_term(ft);
+		for (hash_set<int>::const_iterator it = nuls.begin();
+			 it != nuls.end(); ++it)
+			if (! atoms.is_constant(*it) && ! atoms.is_named_atom(*it))
+				throw DynareException(__FILE__,__LINE__,
+									  "Dangling nulary term found, internal error.");
+	}
+
+	int mlag, mlead;
+	atoms.exovarspan(mlead, mlag);
+	if (atoms.nexo() > 0 && (mlead != 0 || mlag != 0))
+		throw DynareException(__FILE__,__LINE__,
+							  "The model contains occurrences of lagged/leaded exogenous variables");
+
+	atoms.endovarspan(mlead, mlag);
+	if (mlead > 1 || mlag < -1)
+		throw DynareException(__FILE__,__LINE__,
+							  "The model contains occurrences of too lagged/leaded endogenous variables");
+
+	// check the dimension of vcov matrix
+	if (getAtoms().nexo() != getVcov().nrows())
+		throw DynareException(__FILE__,__LINE__,
+							  "Dimension of VCOV matrix does not correspond to the shocks");
+}
+
+int DynareModel::variable_shift(int t, int tshift)
+{
+	const char* name = atoms.name(t);
+	if (atoms.is_type(name, DynareDynamicAtoms::param) ||
+		atoms.is_constant(t))
+		throw DynareException(__FILE__, __LINE__,
+							  "The tree index is not a variable in DynareModel::variable_shift");
+	int ll = atoms.lead(t) + tshift;
+	int res = atoms.index(name, ll);
+	if (res == -1) {
+		std::string str(name);
+		str += '(';
+		char tmp[50];
+		sprintf(tmp,"%d",ll);
+		str += tmp;
+		str += ')';
+		res = eqs.add_nulary(str.c_str());
+	}
+	return res;
+}
+
+void DynareModel::variable_shift_map(const hash_set<int>& a_set, int tshift,
+									 map<int,int>& s_map)
+{
+	s_map.clear();
+	for (hash_set<int>::const_iterator it = a_set.begin();
+		 it != a_set.end(); ++it) {
+		int t = *it;
+		// make shift map only for non-constants and non-parameters
+		if (! atoms.is_constant(t)) {
+			const char* name = atoms.name(t);
+			if (atoms.is_type(name, DynareDynamicAtoms::endovar) ||
+				atoms.is_type(name, DynareDynamicAtoms::exovar)) {
+				int tt = variable_shift(t, tshift);
+				s_map.insert(map<int,int>::value_type(t,tt));
+			}
+		}
+	}
+}
+
+void DynareModel::termspan(int t, int& mlead, int& mlag) const
+{
+	mlead = INT_MIN;
+	mlag = INT_MAX;
+	const hash_set<int>& nul_terms = eqs.nulary_of_term(t);
+	for (hash_set<int>::const_iterator ni = nul_terms.begin();
+		 ni != nul_terms.end(); ++ni) {
+		if (!atoms.is_constant(*ni) &&
+			(atoms.is_type(atoms.name(*ni), DynareDynamicAtoms::endovar) ||
+			 atoms.is_type(atoms.name(*ni), DynareDynamicAtoms::exovar))) {
+			int ll = atoms.lead(*ni);
+			if (ll < mlag)
+				mlag = ll;
+			if (ll > mlead)
+				mlead = ll;
+		}
+	}
+}
+
+bool DynareModel::is_constant_term(int t) const
+{
+	const hash_set<int>& nul_terms = eqs.nulary_of_term(t);
+	for (hash_set<int>::const_iterator ni = nul_terms.begin();
+		 ni != nul_terms.end(); ++ni)
+		if (! atoms.is_constant(*ni) &&
+			! atoms.is_type(atoms.name(*ni), DynareDynamicAtoms::param))
+			return false;
+	return true;
+}
+
+hash_set<int> DynareModel::get_nonlinear_subterms(int t) const
+{
+	NLSelector nls(*this);
+	return eqs.getTree().select_terms(t, nls);
+}
+
+void DynareModel::substitute_atom_for_term(const char* name, int ll, int t)
+{
+	// if the term t is itself a named atom (parameter, exo, endo),
+	// then we have to unassign it first
+	if (atoms.is_named_atom(t))
+		atoms.unassign_variable(atoms.name(t), atoms.lead(t), t);
+	// assign the already allocated tree index of the term
+	// to the atom name(ll)
+	atoms.assign_variable(name, ll, t);
+	// make operation t nulary in operation tree
+	eqs.nularify(t);
+}
+
+void DynareModel::final_job()
+{
+	if (t_plobjective != -1 && t_pldiscount != -1) {
+		// at this moment include all equations and all variables; in
+		// future we will exclude purely exogenous processes; todo:
+		PlannerBuilder::Tvarset vset;
+		for (int i = 0; i < atoms.ny(); i++)
+			vset.insert(atoms.get_endovars()[i]);
+		PlannerBuilder::Teqset eset;
+		for (int i = 0; i < eqs.nformulas(); i++)
+			eset.push_back(i);
+
+		// construct the planner builder, this adds a lot of stuff to
+		// the model
+		if (pbuilder)
+			delete pbuilder;
+		pbuilder = new PlannerBuilder(*this, vset, eset);
+	}
+
+	// construct ForwSubstBuilder
+	if (fbuilder)
+		delete fbuilder;
+	fbuilder = new ForwSubstBuilder(*this);
+
+	// call parsing_finished (this will define an outer ordering of all variables)
+	atoms.parsing_finished(ogp::VarOrdering::bfspbfpb);
+    // make a copy of atoms and name it old_atoms
+	if (old_atoms)
+		delete old_atoms;
+	old_atoms = new DynareDynamicAtoms(atoms);
+	// construct empty substitutions from old_atoms to atoms
+	if (atom_substs)
+		delete atom_substs;
+	atom_substs = new ogp::AtomSubstitutions(*old_atoms, atoms);
+	// do the actual substitution, it will also call
+	// parsing_finished for atoms which creates internal orderings
+	atoms.substituteAllLagsAndExo1Leads(eqs, *atom_substs);
+}
+
+extern ogp::location_type dynglob_lloc;
+
+DynareParser::DynareParser(const char* stream, int len, int ord)
+	: DynareModel(),
+	  pa_atoms(), paramset(pa_atoms),
+      ia_atoms(), initval(ia_atoms), vcov(),
+	  model_beg(0), model_end(-1),
+	  paramset_beg(0), paramset_end(-1),
+	  initval_beg(0), initval_end(-1),
+	  vcov_beg(0), vcov_end(-1),
+	  order_beg(0), order_end(-1),
+	  plobjective_beg(0), plobjective_end(-1),
+	  pldiscount_beg(0), pldiscount_end(-1)
+{
+	// global parse
+	try {
+		parse_glob(len, stream);
+	} catch (const ogp::ParserException& e) {
+		throw ogp::ParserException(e, dynglob_lloc.off);
+	}
+	// setting parameters parse
+	try {
+		if (paramset_end > paramset_beg)
+			paramset.parse(paramset_end-paramset_beg, stream+paramset_beg);
+	} catch (const ogp::ParserException& e) {
+		throw ogp::ParserException(e, paramset_beg);
+	}
+	// model parse
+	try {
+		if (model_end > model_beg)
+			eqs.parse(model_end-model_beg, stream+model_beg);
+		else
+			throw ogp::ParserException("Model section not found.", 0);
+	} catch (const ogp::ParserException& e) {
+		throw ogp::ParserException(e, model_beg);
+	}
+	// initval setting parse
+	try {
+		if (initval_end > initval_beg)
+			initval.parse(initval_end-initval_beg, stream+initval_beg);
+	} catch (const ogp::ParserException& e) {
+		throw ogp::ParserException(e, initval_beg);
+	}
+	// vcov parse
+	try {
+		if (vcov_end > vcov_beg) {
+			vcov.parse(vcov_end-vcov_beg, stream+vcov_beg);
+		}
+	} catch (const ogp::ParserException& e) {
+		throw ogp::ParserException(e, vcov_beg);
+	}
+	// planner objective parse
+	try {
+		if (plobjective_end > plobjective_beg) {
+			eqs.parse(plobjective_end-plobjective_beg, stream+plobjective_beg);
+			t_plobjective = eqs.pop_last_formula();
+		}
+	} catch (const ogp::ParserException& e) {
+		throw ogp::ParserException(e, plobjective_beg);
+	}
+	// planner discount parse
+	try {
+		if (pldiscount_end > pldiscount_beg) {
+			t_pldiscount = parse_pldiscount(pldiscount_end - pldiscount_beg,
+											stream + pldiscount_beg);
+		}
+	} catch (const ogp::ParserException& e) {
+		throw ogp::ParserException(e, pldiscount_beg);
+	}
+	// order parse
+	try {
+		if (order_end > order_beg) {
+			order = parse_order(order_end - order_beg, stream + order_beg);
+		}
+	} catch (const ogp::ParserException& e) {
+		throw ogp::ParserException(e, order_beg);
+	}
+
+	// check the overridden order
+	if (ord != -1)
+		order = ord;
+
+	// end parsing job, add planner's FOCs, make substitutions
+	DynareModel::final_job();
+
+	// calculate parameters
+	calc_params();
+	// calculate initial values
+	calc_init();
+
+	if (vcov_end > vcov_beg)
+		vcov_mat = new ParsedMatrix(vcov);
+	else {
+		// vcov has not been specified, set it to the unit matrix
+		vcov_mat = new TwoDMatrix(atoms.nexo(), atoms.nexo());
+		vcov_mat->unit();
+	}
+
+	// check the model
+	check_model();
+
+	// differentiate
+	if (order >= 1)
+		eqs.differentiate(order);
+}
+
+DynareParser::DynareParser(const DynareParser& dp)
+	: DynareModel(dp),
+	  pa_atoms(dp.pa_atoms), paramset(dp.paramset, pa_atoms),
+	  ia_atoms(dp.ia_atoms), initval(dp.initval, ia_atoms), vcov(dp.vcov),
+	  model_beg(dp.model_beg), model_end(dp.model_end),
+	  paramset_beg(dp.paramset_beg), paramset_end(dp.paramset_end),
+	  initval_beg(dp.initval_beg), initval_end(dp.initval_end),
+	  vcov_beg(dp.vcov_beg), vcov_end(dp.vcov_end),
+	  order_beg(dp.order_beg), order_end(dp.order_end),
+	  plobjective_beg(dp.plobjective_beg), plobjective_end(dp.plobjective_end),
+	  pldiscount_beg(dp.pldiscount_beg), pldiscount_end(dp.pldiscount_end)
+{
+}
+
+DynareParser::~DynareParser()
+{
+}
+
+void DynareParser::add_name(const char* name, int flag)
+{
+	DynareModel::add_name(name, flag);
+	// register with the static atoms used for atom assignments
+	if (flag == 1) {
+		// endogenous
+		ia_atoms.register_name(name);
+	} else if (flag == 2) {
+		// exogenous
+		ia_atoms.register_name(name);
+	} else if (flag == 3) {
+		// parameter
+		pa_atoms.register_name(name);
+		ia_atoms.register_name(name);
+	} else {
+		throw DynareException(__FILE__, __LINE__,
+							  "Unrecognized flag value.");
+	}
+}
+
+void DynareParser::error(const char* mes)
+{
+	// throwing zero offset since this exception will be caught in
+	// the constructor
+	throw ogp::ParserException(mes, 0);
+}
+
+void DynareParser::print() const
+{
+	DynareModel::print();
+	printf("parameter atoms:\n");
+	paramset.print();
+	printf("initval atoms:\n");
+	initval.print();
+	printf("model position: %d %d\n", model_beg, model_end);
+	printf("paramset position: %d %d\n", paramset_beg, paramset_end);
+	printf("initval position: %d %d\n", initval_beg, initval_end);
+}
+
+/** A global symbol for passing info to the DynareParser from
+ * parser. */
+DynareParser* dynare_parser;
+
+/** The declarations of functions defined in dynglob_ll.cc and
+ * dynglob_tab.cc generated from dynglob.lex and dynglob.y */
+void* dynglob__scan_buffer(char*, size_t);
+void dynglob__destroy_buffer(void*);
+void dynglob_parse();
+extern ogp::location_type dynglob_lloc;
+
+void DynareParser::parse_glob(int length, const char* stream)
+{
+	char* buffer = new char[length+2];
+	strncpy(buffer, stream, length);
+	buffer[length] = '\0';
+	buffer[length+1] = '\0';
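+	// the buffer is sized length+2 and terminated by two NUL bytes because
+	// dynglob__scan_buffer is flex's yy_scan_buffer (renamed via %option
+	// prefix), which requires the scanned buffer to end with two NULs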
+	void* p = dynglob__scan_buffer(buffer, (unsigned int)length+2);
+	dynare_parser = this;
+	dynglob_parse();
+	delete [] buffer;
+	dynglob__destroy_buffer(p);
+}
+
+
+int DynareParser::parse_order(int len, const char* str)
+{
+	char* buf = new char[len+1];
+	strncpy(buf, str, len);
+	buf[len] = '\0';
+	int res;
+	sscanf(buf, "%d", &res);
+	delete [] buf;
+	return res;
+}
+
+int DynareParser::parse_pldiscount(int len, const char* str)
+{
+	char* buf = new char[len+1];
+	strncpy(buf, str, len);
+	buf[len] = '\0';
+	if (! atoms.is_type(buf, DynareDynamicAtoms::param))
+		throw ogp::ParserException(std::string("Name ") + buf + " is not a parameter", 0);
+
+	int t = atoms.index(buf, 0);
+	if (t == -1)
+		t = eqs.add_nulary(buf);
+
+	delete [] buf;
+	return t;
+}	
+
+void DynareParser::calc_params()
+{
+	if (param_vals)
+		delete param_vals;
+
+	param_vals = new Vector(atoms.np());
+	ogp::AtomAsgnEvaluator aae(paramset);
+	aae.eval();
+	for (int i = 0; i < atoms.np(); i++)
+		(*param_vals)[i] = aae.get_value(atoms.get_params()[i]);
+
+	for (unsigned int i = 0; i < atoms.get_params().size(); i++)
+		if (! std::isfinite((*param_vals)[i]))
+			printf("dynare++: warning: value for parameter %s is not finite\n",
+				   atoms.get_params()[i]);
+}
+
+void DynareParser::calc_init()
+{
+	// update initval atom assignments according to the substitutions
+	if (atom_substs)
+		initval.apply_subst(atom_substs->get_old2new());
+
+	// calculate the vector of initial values
+	if (init_vals)
+		delete init_vals;
+	init_vals = new Vector(atoms.ny());
+	ogp::AtomAsgnEvaluator aae(initval);
+	// set parameters
+	for (int ip = 0; ip < atoms.np(); ip++)
+		aae.set_user_value(atoms.get_params()[ip], (*param_vals)[ip]);
+	// set exogenous to zeros
+	for (int ie = 0; ie < atoms.nexo(); ie++)
+		aae.set_user_value(atoms.get_exovars()[ie], 0.0);
+	// evaluate
+	aae.eval();
+	// set results to internally ordered vector init_vals
+	for (int outer = 0; outer < atoms.ny(); outer++) {
+		int i = atoms.outer2y_endo()[outer];
+		(*init_vals)[i] = aae.get_value(atoms.get_endovars()[outer]);
+	}
+
+	// if the planner's FOCs have been added, then add estimate of
+	// Lagrange multipliers to the vector
+	if (pbuilder) {
+		MultInitSS mis(*pbuilder, *param_vals, *init_vals);
+	}
+
+	// if the forward substitution builder has been created, we have
+	// to apply its substitutions and evaluate them
+	if (fbuilder)
+		ogdyn::DynareSteadySubstitutions dss(atoms, eqs.getTree(),
+											 fbuilder->get_aux_map(), *param_vals, *init_vals);
+
+	for (unsigned int i = 0; i < atoms.get_endovars().size(); i++)
+		if (! std::isfinite((*init_vals)[i]))
+			printf("dynare++: warning: initval for <%s> is not finite\n",
+				   atoms.get_endovars()[atoms.y2outer_endo()[i]]);
+}
+
+// this returns false for linear functions
+bool NLSelector::operator()(int t) const
+{
+	const ogp::Operation& op = model.getParser().getTree().operation(t);
+	const DynareDynamicAtoms& atoms = model.getAtoms();
+	// if the term is constant, return false
+	if (model.is_constant_term(t))
+		return false;
+	int nary = op.nary();
+	if (nary == 0) {
+		// a nulary term is non-linear iff it is an endogenous or
+		// exogenous variable
+		return atoms.is_type(atoms.name(t), DynareDynamicAtoms::endovar) ||
+			atoms.is_type(atoms.name(t), DynareDynamicAtoms::exovar);
+	} else if (nary == 1) {
+		// unary minus is linear, all other unary functions are non-linear
+		return op.getCode() != ogp::UMINUS;
+	} else {
+		// if at least one operand is constant, then the TIMES is linear
+		if (op.getCode() == ogp::TIMES)
+			return !(model.is_constant_term(op.getOp1()) ||
+					 model.is_constant_term(op.getOp2()));
+		// both PLUS and MINUS are linear
+		if (op.getCode() == ogp::PLUS ||
+			op.getCode() == ogp::MINUS)
+			return false;
+		// POWER is linear if the exponent or the base is zero or one
+		if (op.getCode() == ogp::POWER)
+			return !(op.getOp1() == ogp::OperationTree::zero ||
+					 op.getOp1() == ogp::OperationTree::one ||
+					 op.getOp2() == ogp::OperationTree::zero ||
+					 op.getOp2() == ogp::OperationTree::one);
+		// DIVIDE is linear if the denominator is constant, or if
+		// the numerator is zero
+		if (op.getCode() == ogp::DIVIDE)
+			return !(op.getOp1() == ogp::OperationTree::zero ||
+					 model.is_constant_term(op.getOp2()));
+	}
+
+	throw DynareException(__FILE__, __LINE__,
+						  "Wrong operation in operation tree");
+	return false;
+}
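+
+// Illustration (variable names are made up): with endogenous c, k and
+// parameter beta, a product of two variables such as c*k is selected as
+// non-linear, while beta*k is not, since one operand of the TIMES is a
+// constant term; sums and differences are never selected.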
+
+DynareSPModel::DynareSPModel(const char** endo, int num_endo,
+							 const char** exo, int num_exo,
+							 const char** par, int num_par,
+							 const char* equations, int len,
+							 int ord)
+	: DynareModel()
+{
+	// set the order
+	order = ord;
+
+	// add names
+	for (int i = 0; i < num_endo; i++)
+		add_name(endo[i], 1);
+	for (int i = 0; i < num_exo; i++)
+		add_name(exo[i], 2);
+	for (int i = 0; i < num_par; i++)
+		add_name(par[i], 3);
+
+	// parse the equations
+	eqs.parse(len, equations);
+
+	// parsing finished
+	atoms.parsing_finished(ogp::VarOrdering::bfspbfpb);
+
+	// create what has to be created from DynareModel
+	param_vals = new Vector(atoms.np());
+	init_vals = new Vector(atoms.ny());
+	vcov_mat = new TwoDMatrix(atoms.nexo(), atoms.nexo());
+
+	// check the model
+	check_model();
+
+	// differentiate
+	if (order >= 1)
+		eqs.differentiate(order);
+}
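+
+// Construction sketch (names, equations and values are illustrative):
+//   const char* endo[] = {"c", "k"};
+//   const char* exo[]  = {"e"};
+//   const char* par[]  = {"beta"};
+//   const char* eq     = "c = k(-1)^beta + e; k = c;";
+//   DynareSPModel m(endo, 2, exo, 1, par, 1, eq, strlen(eq), 2);
+// After construction the caller still has to fill m.getParams(), m.getInit()
+// and m.getVcov(); this constructor only allocates them.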
+
+void ModelSSWriter::write_der0(FILE* fd)
+{
+	write_der0_preamble(fd);
+	write_atom_assignment(fd);
+
+	stop_set.clear();
+	for (int fi = 0; fi < model.eqs.nformulas(); fi++)
+		otree.print_operation_tree(model.eqs.formula(fi), fd, *this);
+
+	write_der0_assignment(fd);
+}
+
+void ModelSSWriter::write_der1(FILE* fd)
+{
+	write_der1_preamble(fd);
+	write_atom_assignment(fd);
+
+	stop_set.clear();
+
+	const vector<int>& variables = model.getAtoms().variables();
+	const vector<int>& eam = model.getAtoms().get_endo_atoms_map();
+	for (int i = 0; i < model.getParser().nformulas(); i++) {
+		const ogp::FormulaDerivatives& fder = model.getParser().derivatives(i);
+		for (unsigned int j = 0; j < eam.size(); j++) {
+			int t = fder.derivative(ogp::FoldMultiIndex(variables.size(), 1, eam[j]));
+			if (t > 0)
+				otree.print_operation_tree(t, fd, *this);
+		}
+	}
+
+	write_der1_assignment(fd);
+}
+
+MatlabSSWriter::MatlabSSWriter(const DynareModel& dm, const char* idd)
+	: ModelSSWriter(dm), id(new char[strlen(idd)+1])
+{
+	strcpy(id, idd);
+}
+
+
+void MatlabSSWriter::write_der0_preamble(FILE* fd) const
+{
+	fprintf(fd,
+			"%% Usage:\n"
+			"%%       out = %s_f(params, y)\n"
+			"%%   where\n"
+			"%%       out    is a (%d,1) column vector of the residuals\n"
+            "%%              of the static system\n",
+			id, model.getAtoms().ny());
+	write_common1_preamble(fd);
+	fprintf(fd,
+			"function out = %s_f(params, y)\n", id);
+	write_common2_preamble(fd);
+}
+
+void MatlabSSWriter::write_der1_preamble(FILE* fd) const
+{
+	fprintf(fd,
+			"%% Usage:\n"
+			"%%       out = %s_ff(params, y)\n"
+			"%%   where\n"
+			"%%       out    is a (%d,%d) matrix of the first order\n"
+			"%%              derivatives of the static system residuals\n"
+			"%%              columns correspond to endo variables in\n"
+            "%%              the ordering as declared\n",
+			id, model.getAtoms().ny(), model.getAtoms().ny());
+	write_common1_preamble(fd);
+	fprintf(fd,
+			"function out = %s_ff(params, y)\n", id);
+	write_common2_preamble(fd);
+}
+
+void MatlabSSWriter::write_common1_preamble(FILE* fd) const
+{
+	fprintf(fd,
+			"%%       params is a (%d,1) vector of parameter values\n"
+			"%%              in the ordering as declared\n"
+			"%%       y      is a (%d,1) vector of endogenous variables\n"
+			"%%              in the ordering as declared\n"
+			"%%\n"
+			"%% Created by Dynare++ v. %s\n", model.getAtoms().np(),
+			model.getAtoms().ny(), DYNVERSION);
+	// write ordering of parameters
+	fprintf(fd, "\n%% params ordering\n%% =====================\n");
+	for (unsigned int ip = 0; ip < model.getAtoms().get_params().size(); ip++) {
+		const char* parname = model.getAtoms().get_params()[ip];
+		fprintf(fd, "%% %s\n", parname);
+	}
+	// write endogenous variables
+	fprintf(fd, "%%\n%% y ordering\n%% =====================\n");
+	for (unsigned int ie = 0; ie < model.getAtoms().get_endovars().size(); ie++) {
+		const char* endoname = model.getAtoms().get_endovars()[ie];
+		fprintf(fd, "%% %s\n", endoname);
+	}
+	fprintf(fd,"\n");
+}
+
+void MatlabSSWriter::write_common2_preamble(FILE* fd) const
+{
+	fprintf(fd, "if size(y) ~= [%d,1]\n\terror('Wrong size of y, must be [%d,1]');\nend\n",
+			model.getAtoms().ny(), model.getAtoms().ny());
+	fprintf(fd, "if size(params) ~= [%d,1]\n\terror('Wrong size of params, must be [%d,1]');\nend\n\n",
+			model.getAtoms().np(), model.getAtoms().np());
+}
+
+void MatlabSSWriter::write_atom_assignment(FILE* fd) const
+{
+	// write OperationTree::num_constants
+	fprintf(fd, "%% hardwired constants\n");
+	ogp::EvalTree etree(model.getParser().getTree(), ogp::OperationTree::num_constants-1);
+	for (int i = 0; i < ogp::OperationTree::num_constants; i++) {
+		format_nulary(i, fd);
+		double g = etree.eval(i);
+		if (std::isnan(g))
+			fprintf(fd, " = NaN;\n");
+		else
+			fprintf(fd, " = %12.8g;\n", g);
+	}
+	// write numerical constants
+	fprintf(fd, "%% numerical constants\n");
+	const ogp::Constants::Tconstantmap& cmap = model.getAtoms().get_constantmap();
+	for (ogp::Constants::Tconstantmap::const_iterator it = cmap.begin();
+		 it != cmap.end(); ++it) {
+		format_nulary((*it).first, fd);
+		fprintf(fd, " = %12.8g;\n", (*it).second);
+	}
+	// write parameters
+	fprintf(fd, "%% parameter values\n");
+	for (unsigned int ip = 0; ip < model.getAtoms().get_params().size(); ip++) {
+		const char* parname = model.getAtoms().get_params()[ip];
+		int t = model.getAtoms().index(parname, 0);
+		if (t == -1) {
+			fprintf(fd, "%% %s not used in the model\n", parname);
+		} else {
+			format_nulary(t, fd);
+			fprintf(fd, " = params(%d); %% %s\n", ip+1, parname);
+		}
+	}
+	// write exogenous variables
+	fprintf(fd, "%% exogenous variables to zeros\n");
+	for (unsigned int ie = 0; ie < model.getAtoms().get_exovars().size(); ie++) {
+		const char* exoname = model.getAtoms().get_exovars()[ie];
+		try {
+			const ogp::DynamicAtoms::Tlagmap& lmap = model.getAtoms().lagmap(exoname);
+			for (ogp::DynamicAtoms::Tlagmap::const_iterator it = lmap.begin();
+				 it != lmap.end(); ++it) {
+				format_nulary((*it).second, fd);
+				fprintf(fd, " = 0.0; %% %s\n", exoname);
+			}
+		} catch (const ogu::Exception& e) {
+			// ignore the error of not found variable in the tree
+		}
+	}
+	// write endogenous variables
+	fprintf(fd, "%% endogenous variables to y\n");
+	for (unsigned int ie = 0; ie < model.getAtoms().get_endovars().size(); ie++) {
+		const char* endoname = model.getAtoms().get_endovars()[ie];
+		const ogp::DynamicAtoms::Tlagmap& lmap = model.getAtoms().lagmap(endoname);
+		for (ogp::DynamicAtoms::Tlagmap::const_iterator it = lmap.begin();
+			 it != lmap.end(); ++it) {
+			format_nulary((*it).second, fd);
+			fprintf(fd, " = y(%d); %% %s\n", ie+1, endoname);
+		}
+	}
+	fprintf(fd,"\n");
+}
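+
+// The generated MATLAB fragment looks roughly like this (tree indices and
+// names are illustrative):
+//   % parameter values
+//   a17 = params(1); % beta
+//   % exogenous variables to zeros
+//   a23 = 0.0; % e
+//   % endogenous variables to y
+//   a25 = y(1); % c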
+
+void MatlabSSWriter::write_der0_assignment(FILE* fd) const
+{
+
+	// initialize out variable
+	fprintf(fd, "%% setting the output variable\n");
+	fprintf(fd, "out = zeros(%d, 1);\n", model.getParser().nformulas());
+
+	// fill out with the terms
+	for (int i = 0; i < model.getParser().nformulas(); i++) {
+		fprintf(fd, "out(%d) = ", i+1);
+		format_term(model.getParser().formula(i), fd);
+		fprintf(fd, ";\n");
+	}
+}
+
+void MatlabSSWriter::write_der1_assignment(FILE* fd) const
+{
+	// initialize out variable
+	fprintf(fd, "%% setting the output variable\n");
+	fprintf(fd, "out = zeros(%d, %d);\n", model.getParser().nformulas(), model.getAtoms().ny());
+
+	// fill out with the terms
+	const vector<int>& variables = model.getAtoms().variables();
+	const vector<int>& eam = model.getAtoms().get_endo_atoms_map();
+	for (int i = 0; i < model.getParser().nformulas(); i++) {
+		const ogp::FormulaDerivatives& fder = model.getParser().derivatives(i);
+		for (unsigned int j = 0; j < eam.size(); j++) {
+			int tvar = variables[eam[j]];
+			const char* name = model.getAtoms().name(tvar);
+			int yi = model.getAtoms().name2outer_endo(name);
+			int t = fder.derivative(ogp::FoldMultiIndex(variables.size(), 1, eam[j]));
+			if (t != ogp::OperationTree::zero) {
+				fprintf(fd, "out(%d,%d) = out(%d,%d) + ", i+1, yi+1, i+1, yi+1);
+				format_term(t, fd);
+				fprintf(fd, "; %% %s(%d)\n", name, model.getAtoms().lead(tvar));
+			}
+		}
+	}
+}
+
+void MatlabSSWriter::format_term(int t, FILE* fd) const
+{
+	fprintf(fd, "t%d", t);
+}
+
+void MatlabSSWriter::format_nulary(int t, FILE* fd) const
+{
+	fprintf(fd, "a%d", t);
+}
+
+void DebugOperationFormatter::format_nulary(int t, FILE* fd) const
+{
+	const DynareDynamicAtoms& a = model.getAtoms();
+
+	if (t == ogp::OperationTree::zero)
+		fprintf(fd, "0");
+	else if (t == ogp::OperationTree::one)
+		fprintf(fd, "1");
+	else if (t == ogp::OperationTree::nan)
+		fprintf(fd, "NaN");
+	else if (t == ogp::OperationTree::two_over_pi)
+		fprintf(fd, "2/sqrt(PI)");
+	else if (a.is_constant(t))
+		fprintf(fd, "%g", a.get_constant_value(t));
+	else {
+		int ll = a.lead(t);
+		const char* name = a.name(t);
+		if (ll == 0)
+			fprintf(fd, "%s", name);
+		else
+			fprintf(fd, "%s(%d)", name, ll);
+	}
+}
diff --git a/dynare++/src/dynare_model.h b/dynare++/src/dynare_model.h
new file mode 100644
index 0000000000000000000000000000000000000000..fa916abb62278b8de8523cc0c88d9a1e6affb507
--- /dev/null
+++ b/dynare++/src/dynare_model.h
@@ -0,0 +1,397 @@
+// Copyright (C) 2005, Ondra Kamenik
+
+// $Id: dynare_model.h 1766 2008-03-31 14:33:02Z kamenik $
+
+#ifndef OGDYN_DYNARE_MODEL
+#define OGDYN_DYNARE_MODEL
+
+#include "parser/cc/matrix_parser.h"
+#include "parser/cc/atom_assignings.h"
+
+#include "dynare_atoms.h"
+#include "twod_matrix.h"
+
+#include "Vector.h"
+#include "GeneralMatrix.h"
+
+#include <map>
+#include <ext/hash_set>
+
+namespace ogdyn {
+	using __gnu_cxx::hash_set;
+	using std::map;
+
+	/** This represents an interval in a string by the pair of
+	 * positions (including the first, excluding the second). A
+	 * position is given by the line and the column within the line
+	 * (both starting from 1). */
+	struct PosInterval {
+		int fl;
+		int fc;
+		int ll;
+		int lc;
+		PosInterval() {}
+		PosInterval(int ifl, int ifc, int ill, int ilc)
+			: fl(ifl), fc(ifc), ll(ill), lc(ilc) {}
+		const PosInterval& operator=(const PosInterval& pi)
+			{fl = pi.fl; fc = pi.fc; ll = pi.ll; lc = pi.lc; return *this;}
+		/** This returns the interval beginning and interval length
+		 * within the given string. */
+		void translate(const char* beg, int len, const char*& ibeg, int& ilen) const;
+		/** Debug print. */
+		void print() const
+			{printf("fl=%d fc=%d ll=%d lc=%d\n",fl,fc,ll,lc);}
+	};
+
+	/** This class is basically a GeneralMatrix but is created from
+	 * parsed matrix data. */
+	class ParsedMatrix : public TwoDMatrix {
+	public:
+		/** Construct the object from the parsed data of ogp::MatrixParser. */
+		ParsedMatrix(const ogp::MatrixParser& mp);
+	};
+
+
+	class PlannerBuilder;
+	class PlannerInfo;
+	class ForwSubstBuilder;
+	class ForwSubstInfo;
+	class MultInitSS;
+	class ModelSSWriter;
+
+	/** A subclass is responsible for creating param_vals, init_vals,
+	 * and vcov_mat. */
+	class DynareModel {
+		friend class PlannerBuilder;
+		friend class ForwSubstBuilder;
+		friend class MultInitSS;
+		friend class ModelSSWriter;
+	protected:
+		/** All atoms for whole model. */
+		DynareDynamicAtoms atoms;
+		/** Parsed model equations. */
+		ogp::FormulaParser eqs;
+		/** Order of approximation. */
+		int order;
+		/** A vector of parameters values created by a subclass. It
+		 * is stored with natural ordering (outer) of the parameters
+		 * given by atoms. */
+		Vector* param_vals;
+		/** A vector of initial values created by a subclass. It is
+		 * stored with internal ordering given by atoms. */
+		Vector* init_vals;
+		/** A matrix for vcov. It is created by a subclass. */
+		TwoDMatrix* vcov_mat;
+		/** Tree index of the planner objective. If there was no
+		 * planner objective keyword, the value is set to -1. */
+		int t_plobjective;
+		/** Tree index of the planner discount. If there was no
+		 * planner discount keyword, the value is set to -1. */
+		int t_pldiscount;
+		/** Pointer to PlannerBuilder, which is created only if the
+		 * planner's FOC are added to the model. */
+		PlannerBuilder* pbuilder;
+		/** Pointer to an object which builds auxiliary variables and
+		 * equations to rewrite a model containing multiple leads to
+		 * an equivalent model having only +1 leads. */
+		ForwSubstBuilder* fbuilder;
+		/** Pointer to AtomSubstitutions which are created when the
+		 * atoms are being substituted because of multiple lags
+		 * etc. It also uses an old copy of the atoms, which is
+		 * created for this purpose. */
+		ogp::AtomSubstitutions* atom_substs;
+		/** Pointer to a copy of original atoms before substitutions
+		 * took place. */
+		ogp::SAtoms* old_atoms;
+	public:
+		/** Initializes the object to an empty state. */
+		DynareModel();
+		/** Construct a new deep copy. */
+		DynareModel(const DynareModel& dm);
+		virtual ~DynareModel();		
+		virtual DynareModel* clone() const = 0;
+		const DynareDynamicAtoms& getAtoms() const
+			{return atoms;}
+		const ogp::FormulaParser& getParser() const
+			{return eqs;}
+		int getOrder() const
+			{return order;}
+		/** Return the vector of parameter values. */
+		const Vector& getParams() const
+			{return *param_vals;}
+		Vector& getParams()
+			{return *param_vals;}
+		/** Return the vector of initial values of endo variables. */
+		const Vector& getInit() const
+			{return *init_vals;} 
+		Vector& getInit()
+			{return *init_vals;}
+		/** Return the vcov matrix. */
+		const TwoDMatrix& getVcov() const
+			{return *vcov_mat;}
+		TwoDMatrix& getVcov()
+			{return *vcov_mat;}
+		/** Return planner info. */
+		const PlannerInfo* get_planner_info() const;
+		/** Return forward substitutions info. */
+		const ForwSubstInfo* get_forw_subst_info() const;
+		/** Return substitutions info. */
+		const ogp::SubstInfo* get_subst_info() const;
+		/** This sets initial values given in outer ordering. */
+		void setInitOuter(const Vector& x);
+		/** This returns true if the given term is a function of
+		 * hardwired constants, numerical constants and parameters. */
+		bool is_constant_term(int t) const;
+		/** Debug print. */
+		void print() const;
+		/** Dump the model to the output stream. This includes
+		 * variable declarations, parameter values, model code,
+		 * initval, vcov and order. */
+		void dump_model(std::ostream& os) const;
+	protected:
+		/** Adds a name of endogenous, exogenous or a parameter. The
+		 * sort is governed by the flag. See dynglob.y for values of
+		 * the flag. This is used by a subclass when declaring the
+		 * names. */
+		void add_name(const char* name, int flag);
+		/** This checks the model consistency. This includes: number
+		 * of endo variables and number of equations, min and max lag
+		 * of endogenous variables and occurrences of exogenous
+		 * variables. It throws an exception if there is a problem. */
+		void check_model() const;
+		/** This shifts the given variable identified by the tree
+		 * index in time. So if the given tree index represents a(+3)
+		 * and the tshift is -4, the method returns tree index of the
+		 * a(-1). If a(-1) doesn't exist, it is added to the tree. If
+		 * it exists, its tree index is returned. If the tree index
+		 * doesn't correspond to an endogenous nor exogenous variable,
+		 * an exception is thrown. */
+		int variable_shift(int t, int tshift);
+		/** For the given set of atoms identified by tree indices and
+		 * given time shift, this method returns a map mapping each
+		 * variable in the given set to its time shifted variable. The
+		 * map is passed through the reference and is cleared in the
+		 * beginning. */
+		void variable_shift_map(const hash_set<int>& a_set, int tshift,
+								map<int,int>& s_map);
+		/** This returns maximum lead and minimum lag of an endogenous
+		 * or exogenous variable in the given term. If there are no
+		 * endo or exo variables, then it returns the least integer as
+		 * max lead and the greatest integer as min lag. */
+		void termspan(int t, int& mlead, int& mlag) const;
+		/** This function returns a set of non-linear subterms of the
+		 * given term, these are terms whose linear combination
+		 * constitutes the given term. */
+		hash_set<int> get_nonlinear_subterms(int t) const;
+		/** This method assigns already used tree index of some term
+		 * to the not-yet used atom name with the given lead/lag. In
+		 * this way, all occurrences of term t are substituted with
+		 * the atom name(ll). The method also handles rewriting the
+		 * operation tree, including derivatives of the term t. */
+		void substitute_atom_for_term(const char* name, int ll, int t);
+		/** This performs a final job after the model is parsed. It
+		 * creates the PlannerBuilder object if the planner's FOC are
+		 * needed, then it creates ForwSubstBuilder handling multiple
+		 * leads and finally it creates the substitution object saving
+		 * old atoms and performs the substitutions. */
+		void final_job();
+	};
+
+	/** This class constructs DynareModel from dynare++ model file. It
+	 * parses variable declarations, model equations, parameter
+	 * assignments, initval assignments, vcov matrix and order of
+	 * approximation. */
+	class DynareParser : public DynareModel {
+	protected:
+		/** Static atoms for parameter assignments. */
+		DynareStaticAtoms pa_atoms;
+		/** Assignments for the parameters. */
+		ogp::AtomAssignings paramset;
+		/** Static atoms for initval assignments. */
+		DynareStaticAtoms ia_atoms;
+		/** Assignments for the initval. */
+		ogp::AtomAssignings initval;
+		/** Matrix parser for vcov. */
+		ogp::MatrixParser vcov;
+	public:
+		/** This, in fact, creates DynareModel from the given string
+		 * of the given length corresponding to the Dynare++ model
+		 * file. If the given ord is not -1, then it overrides setting
+		 * in the model file. */
+		DynareParser(const char* str, int len, int ord);
+		DynareParser(const DynareParser& p);
+		virtual ~DynareParser();
+		DynareModel* clone() const
+			{return new DynareParser(*this);}
+		/** Adds a name of endogenous, exogenous or a parameter. This
+		 * adds the name to the parent class DynareModel and also
+		 * registers the name with either paramset or initval. */
+		void add_name(const char* name, int flag);
+		/** Sets position of the model section. Called from
+		 * dynglob.y. */
+		void set_model_pos(int off1, int off2)
+			{model_beg = off1; model_end = off2;}
+		/** Sets position of the section setting parameters. Called
+		 * from dynglob.y. */
+		void set_paramset_pos(int off1, int off2)
+			{paramset_beg = off1; paramset_end = off2;}
+		/** Sets position of the initval section. Called from
+		 * dynglob.y. */
+		void set_initval_pos(int off1, int off2)
+			{initval_beg = off1; initval_end = off2;}
+		/** Sets position of the vcov section. Called from
+		 * dynglob.y. */
+		void set_vcov_pos(int off1, int off2)
+			{vcov_beg = off1; vcov_end = off2;}
+		/** Sets position of the order section. The string there is
+		 * later parsed as an integer and used as the order. Called
+		 * from dynglob.y. */
+		void set_order_pos(int off1, int off2)
+			{order_beg = off1; order_end = off2;}
+		/** Sets position of the planner_objective section. Called
+		 * from dynglob.y. */
+		void set_pl_objective_pos(int off1, int off2)
+			{plobjective_beg = off1; plobjective_end = off2;}
+		/** Sets position of the planner_discount section. Called from
+		 * dynglob.y. */
+		void set_pl_discount_pos(int off1, int off2)
+			{pldiscount_beg = off1; pldiscount_end = off2;}		
+		/** Processes a syntax error from bison. */
+		void error(const char* mes);
+		/** Debug print. */
+		void print() const;
+	protected:
+		void parse_glob(int length, const char* stream);
+		int parse_order(int length, const char* stream);
+		int parse_pldiscount(int length, const char* stream);
+		/** Evaluate paramset assignings and set param_vals. */
+		void calc_params();
+		/** Evaluate initval assignings and set init_vals. */
+		void calc_init();
+		/** Do the final job. This includes building the planner
+		 * problem (if any), substituting for multiple lags and for
+		 * one-period leads of exogenous variables, and calculating an
+		 * initial guess of the Lagrange multipliers in the social
+		 * planner problem. Precondition: everything is parsed and the
+		 * parameters are calculated; postcondition: the initvals
+		 * vector is calculated and parsing_finished is called for the
+		 * expanded vectors. */
+		void final_job();
+	private:
+		int model_beg, model_end;
+		int paramset_beg, paramset_end;
+		int initval_beg, initval_end;
+		int vcov_beg, vcov_end;
+		int order_beg, order_end;
+		int plobjective_beg, plobjective_end;
+		int pldiscount_beg, pldiscount_end;
+	};
+
+	/** Semiparsed model. The equations are given by a string,
+	 * everything else by C/C++ objects. The initial values are set
+	 * manually after the creation of this object. This implies that
+	 * no automatic substitutions can be done here, which in turn
+	 * implies that we can handle neither a social planner nor
+	 * substitutions of multiple lags. */
+	class DynareSPModel : public DynareModel {
+	public:
+		DynareSPModel(const char** endo, int num_endo,
+					  const char** exo, int num_exo,
+					  const char** par, int num_par,
+					  const char* equations, int len, int ord);
+		DynareSPModel(const DynareSPModel& dm)
+			: DynareModel(dm) {}
+		~DynareSPModel() {}
+		virtual DynareModel* clone() const
+			{return new DynareSPModel(*this);}
+	};
+
+	/** This class implements a selector of operations which correspond
+	 * to non-linear functions. This inherits from ogp::opselector and
+	 * is used to calculate non-linear subterms in
+	 * DynareModel::get_nonlinear_subterms(). */
+	class NLSelector : public ogp::opselector {
+	private:
+		const DynareModel& model;
+	public:
+		NLSelector(const DynareModel& m) : model(m) {}
+		bool operator()(int t) const;
+	};
+
+	/** This class writes a mathematical code evaluating the system of
+	 * equations and the first derivatives at zero shocks and at the
+	 * given (static) state. Static means that lags and leads are
+	 * ignored. */
+	class ModelSSWriter : public ogp::DefaultOperationFormatter {
+	protected:
+		const DynareModel& model;
+	public:
+		ModelSSWriter(const DynareModel& m)
+			: DefaultOperationFormatter(m.eqs.getTree()),
+			  model(m) {}
+		/** This writes the evaluation of the system. It calls pure
+		 * virtual methods for writing a preamble, then assignment of
+		 * atoms, and then assignment for resulting object. These are
+		 * language dependent and are implemented in the subclass. */
+		void write_der0(FILE* fd);
+		/** This writes the evaluation of the first order derivative of
+		 * the system. It calls pure virtual methods for writing a
+		 * preamble, assignment of atoms, and assignment of the
+		 * resulting objects. */
+		void write_der1(FILE* fd);
+	protected:
+		virtual void write_der0_preamble(FILE* fd) const =0;
+		virtual void write_der1_preamble(FILE* fd) const =0;
+		virtual void write_atom_assignment(FILE* fd) const =0;
+		virtual void write_der0_assignment(FILE* fd) const =0;
+		virtual void write_der1_assignment(FILE* fd) const =0;
+	};
+
+
+	class MatlabSSWriter : public ModelSSWriter {
+	protected:
+		/** Identifier used in function names. */
+		char* id;
+	public:
+		MatlabSSWriter(const DynareModel& dm, const char* idd);
+		virtual ~MatlabSSWriter()
+			{delete [] id;}
+	protected:
+		// from ModelSSWriter
+		void write_der0_preamble(FILE* fd) const;
+		void write_der1_preamble(FILE* fd) const;
+		/** This writes atom assignments. We have four kinds of atoms
+		 * set here: endogenous vars taken from the function argument
+		 * y, parameter values taken from the function argument
+		 * params, numerical constants, and the
+		 * OperationTree::num_constants hardwired constants in
+		 * ogp::OperationTree. */
+		void write_atom_assignment(FILE* fd) const;
+		void write_der0_assignment(FILE* fd) const;
+		void write_der1_assignment(FILE* fd) const;
+		/** This prints t10 for t=10. */
+		void format_term(int t, FILE* fd) const;
+		/** This prints a10 for t=10. The atoms a10 are supposed to be
+		 * set by write_atom_assignments(). */
+		void format_nulary(int t, FILE* fd) const;
+	private:
+		void write_common1_preamble(FILE* fd) const;
+		void write_common2_preamble(FILE* fd) const;
+	};
+
+	/** This class implements OperationFormatter for debugging
+	 * purposes. It renders atoms in a more friendly way than the
+	 * ogp::DefaultOperationFormatter. */
+	class DebugOperationFormatter : public ogp::DefaultOperationFormatter {
+	protected:
+		const DynareModel& model;
+	public:
+		DebugOperationFormatter(const DynareModel& m)
+			: DefaultOperationFormatter(m.getParser().getTree()),
+			  model(m) {}
+		void format_nulary(int t, FILE* fd) const;
+	};
+};
+
+#endif
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/src/dynare_params.cpp b/dynare++/src/dynare_params.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..55283413f03a8eb64decacf4558d8c406cadd005
--- /dev/null
+++ b/dynare++/src/dynare_params.cpp
@@ -0,0 +1,262 @@
+// $Id: dynare_params.cpp 2348 2009-03-24 11:55:16Z kamenik $
+// Copyright 2004, Ondra Kamenik
+
+#include "dynare_params.h"
+
+#include <getopt.h>
+#include <stdio.h>
+#include <string.h>
+
+const char* help_str = 
+"usage: dynare++ [--help] [--version] [options] <model file>\n"
+"\n"
+"    --help               print this message and return\n"
+"    --version            print version and return\n"
+"\n"
+"options:\n"
+"    --per <num>          number of periods simulated [100]\n"
+"    --sim <num>          number of simulations [80]\n"
+"    --rtper <num>        number of RT periods simulated [0]\n"
+"    --rtsim <num>        number of RT simulations [0]\n"
+"    --condper <num>      number of periods in cond. simulations [0]\n"
+"    --condsim <num>      number of conditional simulations [0]\n"
+"    --steps <num>        steps towards stoch. SS [0=deter.]\n"
+"    --centralize         centralize the rule [do centralize]\n"
+"    --no-centralize      do not centralize the rule [do centralize]\n"
+"    --prefix <string>    prefix of variables in Mat-4 file [\"dyn\"]\n"
+"    --seed <num>         random number generator seed [934098]\n"
+"    --order <num>        order of approximation [no default]\n"
+"    --threads <num>      number of max parallel threads [2]\n"
+"    --ss-tol <num>       steady state calcs tolerance [1.e-13]\n"
+"    --check pesPES       check model residuals [no checks]\n"
+"                         lower/upper case switches off/on\n"
+"                           pP  checking along simulation path\n"
+"                           eE  checking on ellipse\n"
+"                           sS  checking along shocks\n"
+"    --check-evals <num>  max number of evals per residual [1000]\n"
+"    --check-num <num>    number of checked points [10]\n"
+"    --check-scale <num>  scaling of checked points [2.0]\n"
+"    --no-irfs            shuts down IRF simulations [do IRFs]\n"
+"    --irfs               performs IRF simulations [do IRFs]\n"
+"    --qz-criterium <num> threshold for stable eigenvalues [1.000001]\n"
+"\n\n";
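+
+// Example invocation (the model file name is illustrative):
+//   dynare++ --order 4 --per 200 --sim 50 --seed 1 model.mod
+// The model file must be the last argument; unrecognized options are
+// reported and ignored.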
+
+// returns the pointer to the first character after the last slash or
+// backslash in the string
+const char* dyn_basename(const char* str);
+
+DynareParams::DynareParams(int argc, char** argv)
+	: modname(NULL), num_per(100), num_sim(80), 
+	  num_rtper(0), num_rtsim(0),
+	  num_condper(0), num_condsim(0),
+	  num_threads(2), num_steps(0),
+	  prefix("dyn"), seed(934098), order(-1), ss_tol(1.e-13),
+	  check_along_path(false), check_along_shocks(false),
+	  check_on_ellipse(false), check_evals(1000), check_num(10), check_scale(2.0),
+	  do_irfs_all(true), do_centralize(true), qz_criterium(1.0+1e-6),
+	  help(false), version(false)
+{
+	if (argc == 1 || !strcmp(argv[1],"--help")) {
+		help = true;
+		return;
+	}
+	if (argc == 1 || !strcmp(argv[1],"--version")) {
+		version = true;
+		return;
+	}
+
+	modname = argv[argc-1];
+	argc--;
+
+	struct option const opts [] = {
+		{"periods", required_argument, NULL, opt_per},
+		{"per", required_argument, NULL, opt_per},
+		{"simulations", required_argument, NULL, opt_sim},
+		{"sim", required_argument, NULL, opt_sim},
+		{"rtperiods", required_argument, NULL, opt_rtper},
+		{"rtper", required_argument, NULL, opt_rtper},
+		{"rtsimulations", required_argument, NULL, opt_rtsim},
+		{"rtsim", required_argument, NULL, opt_rtsim},
+		{"condperiods", required_argument, NULL, opt_condper},
+		{"condper", required_argument, NULL, opt_condper},
+		{"condsimulations", required_argument, NULL, opt_condsim},
+		{"condsim", required_argument, NULL, opt_condsim},
+		{"prefix", required_argument, NULL, opt_prefix},
+		{"threads", required_argument, NULL, opt_threads},
+		{"steps", required_argument, NULL, opt_steps},
+		{"seed", required_argument, NULL, opt_seed},
+		{"order", required_argument, NULL, opt_order},
+		{"ss-tol", required_argument, NULL, opt_ss_tol},
+		{"check", required_argument, NULL, opt_check},
+		{"check-scale", required_argument, NULL, opt_check_scale},
+		{"check-evals", required_argument, NULL, opt_check_evals},
+		{"check-num", required_argument, NULL, opt_check_num},
+		{"qz-criterium",required_argument, NULL, opt_qz_criterium},
+		{"no-irfs", no_argument, NULL, opt_noirfs},
+		{"irfs", no_argument, NULL, opt_irfs},
+		{"centralize", no_argument, NULL, opt_centralize},
+		{"no-centralize", no_argument, NULL, opt_no_centralize},
+		{"help", no_argument, NULL, opt_help},
+		{"version", no_argument, NULL, opt_version},
+		{NULL, 0, NULL, 0}
+	};
+
+	int ret;
+	int index;
+	while (-1 != (ret = getopt_long(argc, argv, "", opts, &index))) {
+		switch (ret) {
+		case opt_per:
+			if (1 != sscanf(optarg, "%d", &num_per))
+				fprintf(stderr, "Couldn't parse integer %s, ignored\n", optarg);
+			break;
+		case opt_sim:
+			if (1 != sscanf(optarg, "%d", &num_sim))
+				fprintf(stderr, "Couldn't parse integer %s, ignored\n", optarg);
+			break;
+		case opt_rtper:
+			if (1 != sscanf(optarg, "%d", &num_rtper))
+				fprintf(stderr, "Couldn't parse integer %s, ignored\n", optarg);
+			break;
+		case opt_rtsim:
+			if (1 != sscanf(optarg, "%d", &num_rtsim))
+				fprintf(stderr, "Couldn't parse integer %s, ignored\n", optarg);
+			break;
+		case opt_condper:
+			if (1 != sscanf(optarg, "%d", &num_condper))
+				fprintf(stderr, "Couldn't parse integer %s, ignored\n", optarg);
+			break;
+		case opt_condsim:
+			if (1 != sscanf(optarg, "%d", &num_condsim))
+				fprintf(stderr, "Couldn't parse integer %s, ignored\n", optarg);
+			break;
+		case opt_prefix:
+			prefix = optarg;
+			break;
+		case opt_threads:
+			if (1 != sscanf(optarg, "%d", &num_threads))
+				fprintf(stderr, "Couldn't parse integer %s, ignored\n", optarg);
+			break;
+		case opt_steps:
+			if (1 != sscanf(optarg, "%d", &num_steps))
+				fprintf(stderr, "Couldn't parse integer %s, ignored\n", optarg);
+			break;
+		case opt_seed:
+			if (1 != sscanf(optarg, "%d", &seed))
+				fprintf(stderr, "Couldn't parse integer %s, ignored\n", optarg);
+			break;
+		case opt_order:
+			if (1 != sscanf(optarg, "%d", &order))
+				fprintf(stderr, "Couldn't parse integer %s, ignored\n", optarg);
+			break;
+		case opt_ss_tol:
+			if (1 != sscanf(optarg, "%lf", &ss_tol))
+				fprintf(stderr, "Couldn't parse float %s, ignored\n", optarg);
+			break;
+		case opt_check:
+			processCheckFlags(optarg);
+			break;
+		case opt_check_scale:
+			if (1 != sscanf(optarg, "%lf", &check_scale))
+				fprintf(stderr, "Couldn't parse float %s, ignored\n", optarg);
+			break;
+		case opt_check_evals:
+			if (1 != sscanf(optarg, "%d", &check_evals))
+				fprintf(stderr, "Couldn't parse integer %s, ignored\n", optarg);
+			break;
+		case opt_check_num:
+			if (1 != sscanf(optarg, "%d", &check_num))
+				fprintf(stderr, "Couldn't parse integer %s, ignored\n", optarg);
+			break;
+		case opt_noirfs:
+			irf_list.clear();
+			do_irfs_all = false;
+			break;
+		case opt_irfs:
+			processIRFList(argc, argv);
+			if (irf_list.empty())
+				do_irfs_all = true;
+			else
+				do_irfs_all = false;
+			break;
+		case opt_centralize:
+			do_centralize = true;
+			break;
+		case opt_no_centralize:
+			do_centralize = false;
+			break;
+		case opt_qz_criterium:
+			if (1 != sscanf(optarg, "%lf", &qz_criterium))
+				fprintf(stderr, "Couldn't parse float %s, ignored\n", optarg);
+			break;
+		case opt_help:
+			help = true;
+			break;
+		case opt_version:
+			version = true;
+			break;
+		case '?':
+			fprintf(stderr, "Unknown option, ignored\n");
+			break;
+		}
+	}
+
+	// make basename (get rid of the extension)
+	basename = dyn_basename(modname);
+	std::string::size_type i = basename.rfind('.');
+	if (i != std::string::npos)
+		basename.erase(i);
+}
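+
+// Typical use from a driver (a sketch):
+//   DynareParams params(argc, argv);
+//   if (params.help) { params.printHelp(); return 0; }
+//   // ... otherwise params.modname, params.order, params.seed etc. are set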
+
+void DynareParams::printHelp() const
+{
+	printf("%s", help_str);
+}
+
+void DynareParams::processCheckFlags(const char* flags)
+{
+	for (unsigned int i = 0; i < strlen(flags); i++) {
+		switch (flags[i]) {
+		case 'p':
+			check_along_path = false;
+			break;
+		case 'P':
+			check_along_path = true;
+			break;
+		case 'e':
+			check_on_ellipse = false;
+			break;
+		case 'E':
+			check_on_ellipse = true;
+			break;
+		case 's':
+			check_along_shocks = false;
+			break;
+		case 'S':
+			check_along_shocks = true;
+			break;
+		default:
+			fprintf(stderr, "Unknown check type selection character <%c>, ignored.\n", flags[i]);
+		}
+	}
+}
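+
+// For instance, --check PeS sets check_along_path and check_along_shocks
+// and clears check_on_ellipse: an upper-case letter switches the
+// corresponding check on, a lower-case letter switches it off.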
+
+void DynareParams::processIRFList(int argc, char** argv)
+{
+	irf_list.clear();
+	while (optind < argc && *(argv[optind]) != '-') {
+		irf_list.push_back(argv[optind]);
+		optind++;
+	}
+}
+
+const char* dyn_basename(const char* str)
+{
+	int i = strlen(str);
+	while (i > 0 && str[i-1] != '/' && str[i-1] != '\\')
+		i--;
+	return str+i;
+}
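+
+// e.g. dyn_basename("models/nk.mod") returns a pointer to "nk.mod";
+// backslashes are treated the same way as forward slashes. The extension
+// is stripped later, in the DynareParams constructor.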
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/src/dynare_params.h b/dynare++/src/dynare_params.h
new file mode 100644
index 0000000000000000000000000000000000000000..3c6a65adc833d47327cd4b5250ff7f2e2623b9cb
--- /dev/null
+++ b/dynare++/src/dynare_params.h
@@ -0,0 +1,73 @@
+// $Id: dynare_params.h 2347 2009-03-24 11:54:29Z kamenik $
+
+// Copyright 2004, Ondra Kamenik
+
+/*
+along shocks: m    mult    max_evals
+ellipse:      m    mult    max_evals  (10*m) (0.5*mult)
+simul:        m            max_evals  (10*m)
+
+--check-scale 2.0 --check-evals 1000 --check-num 10 --check PES
+ */
+
+#ifndef DYNARE_PARAMS_H
+#define DYNARE_PARAMS_H
+
+#include <vector>
+#include <string>
+
+struct DynareParams {
+	const char* modname;
+	std::string basename;
+	int num_per;
+	int num_sim;
+	int num_rtper;
+	int num_rtsim;
+	int num_condper;
+	int num_condsim;
+	int num_threads;
+	int num_steps;
+	const char* prefix;
+	int seed;
+	int order;
+	/** Tolerance used for steady state calcs. */
+	double ss_tol;
+	bool check_along_path;
+	bool check_along_shocks;
+	bool check_on_ellipse;
+	int check_evals;
+	int check_num;
+	double check_scale;
+	/** Flag for doing IRFs even if the irf_list is empty. */
+	bool do_irfs_all;
+	/** List of shocks for which IRF will be calculated. */
+	std::vector<const char*> irf_list;
+	bool do_centralize;
+	double qz_criterium;
+	bool help;
+	bool version;
+	DynareParams(int argc, char** argv);
+	void printHelp() const;
+	int getCheckShockPoints() const
+		{return check_num;}
+	double getCheckShockScale() const
+		{return check_scale;}
+	int getCheckEllipsePoints() const
+		{return 10*check_num;}
+	double getCheckEllipseScale() const
+		{return 0.5*check_scale;}
+	int getCheckPathPoints() const
+		{return 10*check_num;}
+private:
+	enum {opt_per, opt_sim, opt_rtper, opt_rtsim, opt_condper, opt_condsim, opt_prefix, opt_threads,
+		  opt_steps, opt_seed, opt_order, opt_ss_tol, opt_check,
+		  opt_check_along_path, opt_check_along_shocks, opt_check_on_ellipse,
+		  opt_check_evals, opt_check_scale, opt_check_num, opt_noirfs, opt_irfs,
+                  opt_help, opt_version, opt_centralize, opt_no_centralize, opt_qz_criterium};
+	void processCheckFlags(const char* flags);
+	/** This gathers the strings from argv[optind] onward that do not
+	 * start with '-' into irf_list. It stops before the last item,
+	 * since that is the model file. */
+	void processIRFList(int argc, char** argv);
+};
+
+#endif
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/src/dynglob.lex b/dynare++/src/dynglob.lex
new file mode 100644
index 0000000000000000000000000000000000000000..aaf4e4a5f264341799b316a43e18bce88cdf671b
--- /dev/null
+++ b/dynare++/src/dynglob.lex
@@ -0,0 +1,68 @@
+%{
+#include "parser/cc/location.h"
+#include "dynglob_tab.hh"
+
+	extern YYLTYPE dynglob_lloc;
+
+#define YY_USER_ACTION SET_LLOC(dynglob_);
+%}
+
+%option nounput
+%option noyy_top_state
+%option stack
+%option yylineno
+%option prefix="dynglob_"
+%option never-interactive
+%x CMT
+
+%%
+
+ /* comments */
+<*>"/*"            {yy_push_state(CMT);}
+<CMT>[^*\n]*
+<CMT>"*"+[^*/\n]*
+<CMT>"*"+"/"       {yy_pop_state();}
+<CMT>[\n]
+"//".*\n
+
+ /* initial spaces or tabs are ignored */
+
+[ \t\r\n\0]
+var                {return VAR;}
+varexo             {return VAREXO;}
+parameters         {return PARAMETERS;}
+model              {return MODEL;}
+end                {return END;}
+initval            {return INITVAL;}
+order              {return ORDER;}
+vcov               {return VCOV;}
+planner_objective  {return PLANNEROBJECTIVE;}
+planner_discount   {return PLANNERDISCOUNT;}
+
+ /* names */
+[A-Za-z_][A-Za-z0-9_]* {
+	dynglob_lval.string = dynglob_text;
+	return NAME;
+}
+
+;                  {return SEMICOLON;}
+,                  {return COMMA;}
+=                  {return EQUAL_SIGN;}
+\[                 {return LEFT_BRACKET;}
+\]                 {return RIGHT_BRACKET;}
+. {
+	dynglob_lval.character = dynglob_text[0];
+	return CHARACTER;
+}
+
+%%
+
+int dynglob_wrap()
+{
+	return 1;
+}
+
+void dynglob__destroy_buffer(void* p)
+{
+	dynglob__delete_buffer((YY_BUFFER_STATE)p);
+}
diff --git a/dynare++/src/dynglob.y b/dynare++/src/dynglob.y
new file mode 100644
index 0000000000000000000000000000000000000000..1b609008ebb8113ec615e6bd90a9a0511140dd05
--- /dev/null
+++ b/dynare++/src/dynglob.y
@@ -0,0 +1,119 @@
+%{
+#include "parser/cc/location.h"
+#include "dynare_model.h"
+#include "dynglob_tab.hh"
+
+#include <stdio.h>
+
+	int dynglob_error(char*);
+	int dynglob_lex(void);
+	extern int dynglob_lineno;
+	extern ogdyn::DynareParser* dynare_parser;
+	int symblist_flag;
+
+	static void print_token_value1 (FILE *, int, YYSTYPE);
+#define YYPRINT(file, type, value) print_token_value1 (file, type, value)
+
+%}
+
+%union {
+	int integer;
+	char *string;
+	char character;
+}
+
+%token  END INITVAL MODEL PARAMETERS VAR VAREXO SEMICOLON COMMA EQUAL_SIGN CHARACTER
+%token  VCOV LEFT_BRACKET RIGHT_BRACKET ORDER PLANNEROBJECTIVE PLANNERDISCOUNT
+%token <string> NAME;
+
+%name-prefix="dynglob_"
+
+%locations
+%error-verbose
+
+%%
+
+dynare_file : preamble paramset model rest {
+	dynare_parser->set_paramset_pos(@2.off, @3.off);}
+  | preamble model rest {
+	dynare_parser->set_paramset_pos(0, 0);}
+  | preamble paramset planner model rest {
+	dynare_parser->set_paramset_pos(@2.off, @3.off);}
+  ;
+
+preamble : preamble preamble_statement | preamble_statement;
+
+preamble_statement : var | varexo | parameters;
+
+var : VAR {symblist_flag=1;} symblist SEMICOLON;
+
+varexo : VAREXO {symblist_flag=2;} symblist SEMICOLON;
+
+parameters : PARAMETERS {symblist_flag=3;} symblist SEMICOLON;
+
+
+symblist : symblist NAME          {dynare_parser->add_name($2,symblist_flag);}
+     | symblist COMMA NAME        {dynare_parser->add_name($3,symblist_flag);}
+     | NAME                       {dynare_parser->add_name($1,symblist_flag);}
+     ;
+
+paramset : recnameset;
+
+recnameset : recnameset onenameset | onenameset;
+
+onenameset : NAME EQUAL_SIGN material SEMICOLON;
+
+material : material CHARACTER | material NAME | NAME | CHARACTER;
+
+model : MODEL SEMICOLON equations END SEMICOLON {
+	dynare_parser->set_model_pos(@3.off, @4.off);
+};
+
+equations : equations equation | equation;
+
+equation : material EQUAL_SIGN material SEMICOLON | material SEMICOLON;
+
+rest : rest_statement | rest rest_statement;
+
+rest_statement : initval | vcov | order | planner;
+
+initval : INITVAL SEMICOLON recnameset END SEMICOLON {
+	dynare_parser->set_initval_pos(@3.off, @4.off);
+};
+
+vcov : VCOV EQUAL_SIGN LEFT_BRACKET m_material RIGHT_BRACKET SEMICOLON {
+	dynare_parser->set_vcov_pos(@4.off, @5.off);
+};
+
+m_material : m_material CHARACTER | m_material NAME | m_material SEMICOLON | m_material COMMA | CHARACTER | NAME | SEMICOLON | COMMA; 
+
+order : ORDER EQUAL_SIGN material SEMICOLON {
+    dynare_parser->set_order_pos(@3.off, @4.off);
+};
+
+planner : planner_objective planner_discount
+  | planner_discount planner_objective
+;
+
+planner_objective : PLANNEROBJECTIVE material SEMICOLON {
+	dynare_parser->set_pl_objective_pos(@2.off, @3.off);
+};
+
+planner_discount : PLANNERDISCOUNT NAME SEMICOLON {
+	dynare_parser->set_pl_discount_pos(@2.off, @3.off);
+};
+
+%%
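+
+/* Illustrative sketch only (not part of the original grammar file): a
+ * minimal input that this grammar accepts.  All variable, shock and
+ * parameter names below are made up for the example.
+ *
+ *   var C K;
+ *   varexo EPS;
+ *   parameters alpha beta;
+ *
+ *   alpha = 0.36;
+ *   beta = 0.99;
+ *
+ *   model;
+ *   C + K = exp(EPS)*K(-1)^alpha + (1-0.1)*K(-1);
+ *   1/C = beta*(1/C(+1))*(alpha*exp(EPS(+1))*K^(alpha-1) + 1-0.1);
+ *   end;
+ *
+ *   initval;
+ *   K = 3.0;
+ *   C = 1.0;
+ *   EPS = 0;
+ *   end;
+ *
+ *   vcov = [ 0.01 ];
+ *   order = 2;
+ */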
+
+int dynglob_error(char* mes)
+{
+	dynare_parser->error(mes);
+	return 0;
+}
+
+static void print_token_value1(FILE* file, int type, YYSTYPE value)
+{
+	if (type == NAME)
+		fprintf(file, "%s", value.string);
+	if (type == CHARACTER)
+		fprintf(file, "%c", value.character);
+}
diff --git a/dynare++/src/forw_subst_builder.cpp b/dynare++/src/forw_subst_builder.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..430245e1cc104da2488ff96628c05bc2da0a74ac
--- /dev/null
+++ b/dynare++/src/forw_subst_builder.cpp
@@ -0,0 +1,122 @@
+// Copyright (C) 2006, Ondra Kamenik
+
+// $Id$
+
+#include "forw_subst_builder.h"
+
+using namespace ogdyn;
+
+ForwSubstBuilder::ForwSubstBuilder(DynareModel& m)
+	: model(m)
+{
+	info.num_new_terms -= model.getParser().getTree().get_num_op();
+
+	// go through all equations
+	int neq = model.eqs.nformulas();
+	for (int i = 0; i < neq; i++) {
+		int ft = model.eqs.formula(i);
+		int mlead, mlag;
+		model.termspan(ft, mlead, mlag);
+		// if equation is too forward looking
+		if (mlead > 1) {
+			info.num_affected_equations++;
+			// break it to non-linear terms
+			hash_set<int> nlt = model.get_nonlinear_subterms(ft);
+			int j = 0; // indexes subterms
+			// and make substitutions for all these non-linear subterms
+			for (hash_set<int>::const_iterator it = nlt.begin();
+				 it != nlt.end(); ++it, ++j)
+				substitute_for_term(*it, i, j);
+		}
+	}
+	// unassign all variables with lead greater than 1
+	unassign_gt_1_leads();
+
+	// forget the derivatives in the tree because some variables could
+	// have been unassigned
+	model.eqs.getTree().forget_derivative_maps();
+
+	info.num_new_terms += model.getParser().getTree().get_num_op();
+}
+
+void ForwSubstBuilder::substitute_for_term(int t, int i, int j)
+{
+	int mlead, mlag;
+	model.termspan(t, mlead, mlag);
+	if (mlead > 1) {
+		info.num_subst_terms++;
+		// Example for comments: let t = f(x(+4))
+		// first make lagsubst be substitution setting f(x(+4)) to f(x(+1))
+		// this is lag = -3 (1-mlead)
+		map<int,int> lagsubst;
+		model.variable_shift_map(model.eqs.nulary_of_term(t), 1-mlead, lagsubst);
+		int lagt = model.eqs.add_substitution(t, lagsubst);
+		// now maxlead of lagt is +1
+		// add AUXLD_*_*_1 = f(x(+1)) to the model
+		char name[100];
+		sprintf(name, "AUXLD_%d_%d_%d", i, j, 1);
+		model.atoms.register_uniq_endo(name);
+		info.num_aux_variables++;
+		const char* ss = model.atoms.get_name_storage().query(name);
+		int auxt = model.eqs.add_nulary(name);
+		model.eqs.add_formula(model.eqs.add_binary(ogp::MINUS, auxt, lagt));
+		aux_map.insert(Tsubstmap::value_type(ss, lagt));
+		// now add variables and equations
+		// AUXLD_*_*_2 = AUXLD_*_*_1(+1) through
+		// AUXLD_*_*_{mlead-1} = AUXLD_*_*_{mlead-2}(+1)
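+		// For the f(x(+4)) example (mlead=4) this loop adds
+		//   AUXLD_i_j_2 = AUXLD_i_j_1(+1)
+		//   AUXLD_i_j_3 = AUXLD_i_j_2(+1)
+		// and the final substitution below replaces f(x(+4)) in the
+		// original equation by AUXLD_i_j_3(+1).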
+		for (int ll = 1; ll <= mlead-2; ll++) {
+			// create AUXLD_*_*_{ll}(+1)
+			sprintf(name, "AUXLD_%d_%d_%d(+1)", i, j, ll);
+			int lastauxt_lead = model.eqs.add_nulary(name);
+			// create AUXLD_*_*{ll+1}
+			sprintf(name, "AUXLD_%d_%d_%d", i, j, ll+1);
+			model.atoms.register_uniq_endo(name);
+			info.num_aux_variables++;
+			ss = model.atoms.get_name_storage().query(name);
+			auxt = model.eqs.add_nulary(name);
+			// add AUXLD_*_*_{ll+1} = AUXLD_*_*_{ll}(+1)
+			model.eqs.add_formula(model.eqs.add_binary(ogp::MINUS, auxt, lastauxt_lead));
+			// add substitution to the map; todo: this
+			// works well because in the context where
+			// aux_map is used the timing doesn't matter,
+			// however, it is misleading, needs to be
+			// changed
+			aux_map.insert(Tsubstmap::value_type(ss, lagt));
+		}
+
+		// now we have to substitute AUXLD_*_*_{mlead-1}(+1) for t
+		model.substitute_atom_for_term(ss, +1, t);
+	}
+}
+
+void ForwSubstBuilder::unassign_gt_1_leads(const char* name)
+{
+	const char* ss = model.atoms.get_name_storage().query(name);
+	int mlead, mlag;
+	model.atoms.varspan(name, mlead, mlag);
+	for (int ll = 2; ll <= mlead; ll++) {
+		int t = model.atoms.index(ss, ll);
+		if (t != -1)
+			model.atoms.unassign_variable(ss, ll, t);
+	}
+}
+
+void ForwSubstBuilder::unassign_gt_1_leads()
+{
+	const vector<const char*>& endovars = model.atoms.get_endovars();
+	for (unsigned int i = 0; i < endovars.size(); i++)
+		unassign_gt_1_leads(endovars[i]);
+	const vector<const char*>& exovars = model.atoms.get_exovars();
+	for (unsigned int i = 0; i < exovars.size(); i++)
+		unassign_gt_1_leads(exovars[i]);	
+}
+
+ForwSubstBuilder::ForwSubstBuilder(const ForwSubstBuilder& b, DynareModel& m)
+	: model(m)
+{
+	for (Tsubstmap::const_iterator it = b.aux_map.begin();
+		 it != b.aux_map.end(); ++it) {
+		const char* ss = m.atoms.get_name_storage().query((*it).first);
+		aux_map.insert(Tsubstmap::value_type(ss, (*it).second));
+	}
+}
diff --git a/dynare++/src/forw_subst_builder.h b/dynare++/src/forw_subst_builder.h
new file mode 100644
index 0000000000000000000000000000000000000000..7f703092d8d7bb6d9aa7749ea7d90f79e7000be6
--- /dev/null
+++ b/dynare++/src/forw_subst_builder.h
@@ -0,0 +1,83 @@
+// Copyright (C) 2006, Ondra Kamenik
+
+// $Id$
+
+#ifndef FORW_SUBST_BUILDER_H
+#define FORW_SUBST_BUILDER_H
+
+
+#include "dynare_model.h"
+
+namespace ogdyn {
+
+	/** This struct encapsulates information about the process of
+	 * forward substitutions. */
+	struct ForwSubstInfo {
+		int num_affected_equations;
+		int num_subst_terms;
+		int num_aux_variables;
+		int num_new_terms;
+		ForwSubstInfo()
+			: num_affected_equations(0),
+			  num_subst_terms(0),
+			  num_aux_variables(0),
+			  num_new_terms(0) {}
+	};
+
+	class ForwSubstBuilder {
+		typedef map<int, const char*> Ttermauxmap;
+	protected:
+		/** Reference to the model, to which we will add equations and
+		 * change some equations. */
+		DynareModel& model;
+		/** A map mapping new auxiliary variables to the terms in the
+		 * tree in the DynareModel. */
+		Tsubstmap aux_map;
+		/** Information about the substitutions. */
+		ForwSubstInfo info;
+	public:
+		/** Do all the jobs needed. This scans all equations in the
+		 * model, and for equations containing forward looking
+		 * variables greater than 1 lead, it makes corresponding
+		 * substitutions. Basically, it breaks each equation to its
+		 * non-linear components and creates substitutions for these
+		 * components, not for the whole equation. This is because the
+		 * expectation operator can go through the linear part of the
+		 * function. This will save us many occurrences of other
+		 * variables involved in the equation. */
+		ForwSubstBuilder(DynareModel& m);
+		/** Copy constructor with a new instance of the model. */
+		ForwSubstBuilder(const ForwSubstBuilder& b, DynareModel& m);
+		/** Return the auxiliary variable mapping. */
+		const Tsubstmap& get_aux_map() const
+			{return aux_map;}
+		/** Return the information. */
+		const ForwSubstInfo& get_info() const
+			{return info;}
+	private:
+		ForwSubstBuilder(const ForwSubstBuilder& b);
+		/** This method takes a nonlinear term t, and if it has leads
+		 * of greater than 1, then it substitutes the term for the new
+		 * variable (or string of variables). Note that the
+		 * substitution is done by DynamicAtoms::assign_variable. This
+		 * means that the substitution is made for all other
+		 * occurrences of t in the model. So there is no need to
+		 * track already substituted terms. The other two
+		 * parameters are just for identification of the new auxiliary
+		 * variables. When called from the constructor, i is an
+		 * equation number, j is an order of the non-linear term in
+		 * the equation. */
+		void substitute_for_term(int t, int i, int j);
+		/** This is called just at the end of the job. It unassigns
+		 * all nulary terms with a lead greater than 1. */
+		void unassign_gt_1_leads();
+		/** This unassigns all leads greater than 1 of the given name. */
+		void unassign_gt_1_leads(const char* name);
+	};
+};
+
+#endif
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/src/main.cpp b/dynare++/src/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..71ed9c28d292d55b4dcdf4149c4bb722a128a8b1
--- /dev/null
+++ b/dynare++/src/main.cpp
@@ -0,0 +1,186 @@
+#include "dynare3.h"
+#include "dynare_exception.h"
+#include "dynare_params.h"
+
+#include "utils/cc/exception.h"
+#include "parser/cc/parser_exception.h"
+#include "../sylv/cc/SylvException.h"
+#include "../kord/random.h"
+#include "../kord/global_check.h"
+#include "../kord/approximation.h"
+
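+/* Overview of the driver (descriptive note): parse the command line,
+ * build the Dynare model from the model file, compute the k-order
+ * approximation around the (stochastic) steady state, optionally run
+ * the accuracy checks, run the requested simulations and IRFs, and
+ * write everything to the MAT-4 file <basename>.mat together with a
+ * journal <basename>.jnl and two Matlab files for the steady state
+ * system (<basename>_f.m and <basename>_ff.m). */
+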
+int main(int argc, char** argv)
+{
+	DynareParams params(argc, argv);
+	if (params.help) {
+		params.printHelp();
+		return 0;
+	}
+	if (params.version) {
+		printf("Dynare++ v. %s. Copyright (C) 2004,2005,2006 Ondra Kamenik\n",
+			   DYNVERSION);
+		printf("Dynare++ comes with ABSOLUTELY NO WARRANTY and is distributed under\n");
+		printf("GPL: modules integ, tl, kord, sylv, src, extern and documentation\n");
+		printf("LGPL: modules parser, utils\n");
+		printf(" for GPL  see http://www.gnu.org/licenses/gpl.html\n");
+		printf(" for LGPL see http://www.gnu.org/licenses/lgpl.html\n");
+		return 0;
+	}
+	THREAD_GROUP::max_parallel_threads = params.num_threads;
+
+	try {
+		// make journal name and journal
+		std::string jname(params.basename);
+		jname += ".jnl";
+		Journal journal(jname.c_str());
+
+		// make dynare object
+		Dynare dynare(params.modname, params.order, params.ss_tol, journal);
+		// make list of shocks for which we will do IRFs
+		vector<int> irf_list_ind;
+		if (params.do_irfs_all)
+			for (int i = 0; i < dynare.nexog(); i++)
+				irf_list_ind.push_back(i);
+		else
+			irf_list_ind = ((const DynareNameList&)dynare.getExogNames()).selectIndices(params.irf_list);
+
+		// write matlab files
+		FILE* mfd;
+		std::string mfile1(params.basename);
+		mfile1 += "_f.m";
+		if (NULL == (mfd=fopen(mfile1.c_str(), "w"))) {
+			fprintf(stderr, "Couldn't open %s for writing.\n", mfile1.c_str());
+			exit(1);
+		}
+		ogdyn::MatlabSSWriter writer0(dynare.getModel(), params.basename.c_str());
+		writer0.write_der0(mfd);
+		fclose(mfd);
+
+		std::string mfile2(params.basename);
+		mfile2 += "_ff.m";
+		if (NULL == (mfd=fopen(mfile2.c_str(), "w"))) {
+			fprintf(stderr, "Couldn't open %s for writing.\n", mfile2.c_str());
+			exit(1);
+		}
+		ogdyn::MatlabSSWriter writer1(dynare.getModel(), params.basename.c_str());
+		writer1.write_der1(mfd);
+		fclose(mfd);
+
+		// open mat file
+		std::string matfile(params.basename);
+		matfile += ".mat";
+		FILE* matfd = NULL;
+		if (NULL == (matfd=fopen(matfile.c_str(), "wb"))) {
+			fprintf(stderr, "Couldn't open %s for writing.\n", matfile.c_str());
+			exit(1);
+		}
+
+		// write info about the model (dimensions and variables)
+		dynare.writeMat4(matfd, params.prefix);
+		// write the dump file corresponding to the input
+		dynare.writeDump(params.basename);
+
+
+		system_random_generator.initSeed(params.seed);
+
+		tls.init(dynare.order(),
+				 dynare.nstat()+2*dynare.npred()+3*dynare.nboth()+
+				 2*dynare.nforw()+dynare.nexog());
+
+		Approximation app(dynare, journal, params.num_steps, params.do_centralize, params.qz_criterium);
+		try {
+			app.walkStochSteady();
+		} catch (const KordException& e) {
+			// tell about the exception and continue
+			printf("Caught (not yet fatal) Kord exception: ");
+			e.print();
+			JournalRecord rec(journal);
+			rec << "Solution routine not finished (" << e.get_message()
+				<< "), see what happens" << endrec; 
+		}
+
+		std::string ss_matrix_name(params.prefix);
+		ss_matrix_name += "_steady_states";
+		ConstTwoDMatrix(app.getSS()).writeMat4(matfd, ss_matrix_name.c_str());
+
+		// check the approximation
+		if (params.check_along_path || params.check_along_shocks
+			|| params.check_on_ellipse) {
+			GlobalChecker gcheck(app, THREAD_GROUP::max_parallel_threads, journal);
+			if (params.check_along_shocks)
+				gcheck.checkAlongShocksAndSave(matfd, params.prefix,
+											   params.getCheckShockPoints(),
+											   params.getCheckShockScale(),
+											   params.check_evals);
+			if (params.check_on_ellipse)
+				gcheck.checkOnEllipseAndSave(matfd, params.prefix,
+											 params.getCheckEllipsePoints(),
+											 params.getCheckEllipseScale(),
+											 params.check_evals);
+			if (params.check_along_path)
+				gcheck.checkAlongSimulationAndSave(matfd, params.prefix,
+												   params.getCheckPathPoints(),
+												   params.check_evals);
+		}
+
+		// write the folded decision rule to the Mat-4 file
+		app.getFoldDecisionRule().writeMat4(matfd, params.prefix);
+
+		// simulate conditional
+		if (params.num_condper > 0 && params.num_condsim > 0) {
+			SimResultsDynamicStats rescond(dynare.numeq(), params.num_condper);
+			ConstVector det_ss(app.getSS(),0);
+			rescond.simulate(params.num_condsim, app.getFoldDecisionRule(), det_ss, dynare.getVcov(), journal);
+			rescond.writeMat4(matfd, params.prefix);
+		}
+
+		// simulate unconditional
+		//const DecisionRule& dr = app.getUnfoldDecisionRule();
+		const DecisionRule& dr = app.getFoldDecisionRule();
+		if (params.num_per > 0 && params.num_sim > 0) {
+			SimResultsStats res(dynare.numeq(), params.num_per);
+			res.simulate(params.num_sim, dr, dynare.getSteady(), dynare.getVcov(), journal);
+			res.writeMat4(matfd, params.prefix);
+			
+			// impulse response functions
+			if (! irf_list_ind.empty()) {
+				IRFResults irf(dynare, dr, res, irf_list_ind, journal);
+				irf.writeMat4(matfd, params.prefix);
+			}
+		}
+
+		// simulate with real-time statistics
+		if (params.num_rtper > 0 && params.num_rtsim > 0) {
+			RTSimResultsStats rtres(dynare.numeq(), params.num_rtper);
+			rtres.simulate(params.num_rtsim, dr, dynare.getSteady(), dynare.getVcov(), journal);
+			rtres.writeMat4(matfd, params.prefix);
+		}
+
+		fclose(matfd);
+
+	} catch (const KordException& e) {
+		printf("Caught Kord exception: ");
+		e.print();
+		return e.code();
+	} catch (const TLException& e) {
+		printf("Caught TL exception: ");
+		e.print();
+		return 255;
+	} catch (SylvException& e) {
+		printf("Caught Sylv exception: ");
+		e.printMessage();
+		return 255;
+	} catch (const DynareException& e) {
+		printf("Caught Dynare exception: %s\n", e.message());
+		return 255;
+	} catch (const ogu::Exception& e) {
+		printf("Caught ogu::Exception: ");
+		e.print();
+		return 255;
+	} catch (const ogp::ParserException& e) {
+		printf("Caught parser exception: %s\n", e.message());
+		return 255;
+	}
+
+	return 0;
+}
diff --git a/dynare++/src/nlsolve.cpp b/dynare++/src/nlsolve.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c335d8fde735ffb80261f71a0db9d55dfeb4fc96
--- /dev/null
+++ b/dynare++/src/nlsolve.cpp
@@ -0,0 +1,230 @@
+// Copyright (C) 2006, Ondra Kamenik
+
+// $Id: nlsolve.cpp 762 2006-05-22 13:00:07Z kamenik $
+
+#include "nlsolve.h"
+#include "dynare_exception.h"
+
+#include <cmath>
+
+using namespace ogu;
+
+/** Tolerance of the search; it should not be smaller than DBL_EPSILON^(1/2). */
+double GoldenSectionSearch::tol = 1.e-4;
+
+/** This is the golden section fraction (3-sqrt(5))/2, approximately
+ * 0.381966; the trial point is placed this fraction of the way into
+ * the interval. */
+double GoldenSectionSearch::golden = (3.-std::sqrt(5.))/2;
+
+double GoldenSectionSearch::search(OneDFunction& f, double x1, double x2)
+{
+	double b;
+	if (init_bracket(f, x1, x2, b)) {
+		double fb = f.eval(b);
+		double f1 = f.eval(x1);
+		double f2 = f.eval(x2);
+		double dx;
+		do {
+			double w = (b-x1)/(x2-x1);
+			dx = std::abs((1-2*w)*(x2-x1));
+			double x;
+			if (b-x1 > x2-b)
+				x = b - dx;
+			else
+				x = b + dx;
+			double fx = f.eval(x);
+			if (! std::isfinite(fx))
+				return x1;
+			if (b-x1 > x2-b) {
+				// x is on the left from b
+				if (f1 > fx && fx < fb) {
+					// pickup bracket [f1,fx,fb]
+					f2 = fb;
+					x2 = b;
+					fb = fx;
+					b = x;
+				} else {
+					// pickup bracket [fx,fb,fx2]
+					f1 = fx;
+					x1 = x;
+				}
+			} else {
+				// x is on the right from b
+				if (f1 > fb && fb < fx) {
+					// pickup bracket [f1,fb,fx]
+					f2 = fx;
+					x2 = x;
+				} else {
+					// pickup bracket [fb,fx,f2]
+					f1 = fb;
+					x1 = b;
+					fb = fx;
+					b = x;
+				}
+			}
+		} while(dx > tol);
+	}
+	return b;
+}
+
+bool GoldenSectionSearch::init_bracket(OneDFunction& f, double x1, double& x2, double& b)
+{
+	double f1 = f.eval(x1);
+	if (! std::isfinite(f1))
+		throw DynareException(__FILE__, __LINE__,
+							  "Safer point not finite in GoldenSectionSearch::init_bracket");
+
+	int cnt = 0;
+	bool bracket_found = false;
+	do {
+		bool finite_found = search_for_finite(f, x1, x2, b);
+		if (! finite_found) {
+			b = x1;
+			return false;
+		}
+		double f2 = f.eval(x2);
+		double fb = f.eval(b);
+		double bsym = 2*x2 - b;
+		double fbsym = f.eval(bsym);
+		// now we know that f1, f2, and fb are finite
+		if (std::isfinite(fbsym)) {
+			// we have four numbers f1, fb, f2, fbsym, we test for the
+			// following combinations to find the bracket:
+			// [f1,f2,fbsym], [f1,fb,fbsym] and [f1,fb,f2]
+			if (f1 > f2 && f2 < fbsym) {
+				bracket_found = true;
+				b = x2;
+				x2 = bsym;
+			} else if (f1 > fb && fb < fbsym) {
+				bracket_found = true;
+				x2 = bsym;
+			} else if (f1 > fb && fb < f2) {
+				bracket_found = true;
+			} else {
+				double newx2 = b;
+				// choose the smallest value in case we end
+				if (f1 > fbsym) {
+					// the smallest value is on the other end, we do
+					// not want to continue
+					b = bsym;
+					return false;
+				} else {
+					b = x1;
+					// move x2 to b in case we continue
+					x2 = newx2;
+				}
+			}
+		} else {
+			// we have only three numbers, we test for the bracket,
+			// and if not found, we set b as potential result and
+			// shorten x2 as potential init value for next cycle
+			if (f1 > fb && fb < f2)
+				bracket_found = true;
+			else {
+				double newx2 = b;
+				// choose the smaller value in case we end
+				if (f1 > f2)
+					b = x2;
+				else
+					b = x1;
+				// move x2 to b in case we continue
+				x2 = newx2;
+			}
+		}
+		cnt++;
+	} while (! bracket_found && cnt < 5);
+	
+	return bracket_found;
+}
+
+/** This moves x2 toward x1 until the function value at x2 is finite
+ * and b, taken as a golden section between x1 and x2, also yields a
+ * finite function value. */
+bool GoldenSectionSearch::search_for_finite(OneDFunction& f, double x1, double& x2, double& b)
+{
+	int cnt = 0;
+	bool found = false;
+	do {
+		double f2 = f.eval(x2);
+		b = (1-golden)*x1 + golden*x2;
+		double fb = f.eval(b);
+		found = std::isfinite(f2) && std::isfinite(fb);
+		if (! found)
+			x2 = b;
+		cnt++;
+	} while (! found && cnt < 5);
+
+	return found;
+}
+
+void VectorFunction::check_for_eval(const ConstVector& in, Vector& out) const
+{
+	if (inDim() != in.length() || outDim() != out.length())
+		throw DynareException(__FILE__, __LINE__,
+							  "Wrong dimensions in VectorFunction::check_for_eval");
+}
+
+double NLSolver::eval(double lambda)
+{
+	Vector xx((const Vector&)x);
+	xx.add(1-lambda, xcauchy);
+	xx.add(lambda, xnewton);
+	Vector ff(func.outDim());
+	func.eval(xx, ff);
+	return ff.dot(ff);
+}
+
+bool NLSolver::solve(Vector& xx, int& iter)
+{
+	JournalRecord rec(journal);
+	rec << "Iter   lambda      residual" << endrec;
+	JournalRecord rec1(journal);
+	rec1 << "---------------------------" << endrec;
+	char tmpbuf[14];
+
+	x = (const Vector&)xx;
+	iter = 0;
+	// setup fx
+	Vector fx(func.outDim());
+	func.eval(x, fx);
+	if (!fx.isFinite())
+		throw DynareException(__FILE__,__LINE__,
+							  "Initial guess does not yield finite residual in NLSolver::solve");
+	bool converged = fx.getMax() < tol;
+	JournalRecord rec2(journal);
+	sprintf(tmpbuf, "%10.6g", fx.getMax());
+	rec2 << iter << "         N/A   " << tmpbuf << endrec;
+	while (! converged && iter < max_iter) {
+		// setup Jacobian
+		jacob.eval(x);
+		// calculate cauchy step
+		Vector g(func.inDim());
+		g.zeros();
+		ConstTwoDMatrix(jacob).multaVecTrans(g, fx);
+		Vector Jg(func.inDim());
+		Jg.zeros();
+		ConstTwoDMatrix(jacob).multaVec(Jg, g);
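+		// Here g = J^T*f is the gradient of 0.5*||f(x)||^2 and Jg = J*g.
+		// The Cauchy (steepest descent) step is m*g with the step length m
+		// that minimizes ||f + m*J*g||^2, i.e. m = -(g.g)/(Jg.Jg).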
+		double m = -g.dot(g)/Jg.dot(Jg);
+		xcauchy = (const Vector&) g;
+		xcauchy.mult(m);
+		// calculate newton step
+		xnewton = (const Vector&) fx;
+		ConstTwoDMatrix(jacob).multInvLeft(xnewton);
+		xnewton.mult(-1);
+
+		// line search
+		double lambda = GoldenSectionSearch::search(*this, 0, 1);
+		x.add(1-lambda, xcauchy);
+		x.add(lambda, xnewton);
+		// evaluate func
+		func.eval(x, fx);
+		converged = fx.getMax() < tol;
+
+		// iter
+		iter++;
+
+		JournalRecord rec3(journal);
+		sprintf(tmpbuf, "%10.6g", fx.getMax());
+		rec3 << iter << "    " << lambda << "   " << tmpbuf << endrec;
+	}
+	xx = (const Vector&)x;
+
+	return converged;
+}
diff --git a/dynare++/src/nlsolve.h b/dynare++/src/nlsolve.h
new file mode 100644
index 0000000000000000000000000000000000000000..0cd19b1f3d4d3f6986ade8ea5dd921bc19dd0487
--- /dev/null
+++ b/dynare++/src/nlsolve.h
@@ -0,0 +1,94 @@
+// Copyright (C) 2006, Ondra Kamenik
+
+// $Id: nlsolve.h 762 2006-05-22 13:00:07Z kamenik $
+
+#ifndef OGU_NLSOLVE_H
+#define OGU_NLSOLVE_H
+
+#include "twod_matrix.h"
+#include "journal.h"
+
+namespace ogu {
+
+	class OneDFunction {
+	public:
+		virtual ~OneDFunction() {}
+		virtual double eval(double) = 0;
+	};
+
+	class GoldenSectionSearch {
+	protected:
+		static double tol;
+		static double golden;
+	public:
+		static double search(OneDFunction& f, double x1, double x2);
+	protected:
+		/** This initializes a bracket by moving x2 and b (as a golden
+		 * section of x1,x2) so that f(x1)>f(b) && f(b)<f(x2). The point
+		 * x1 is not moved, since it is considered as reliable and f(x1)
+		 * is supposed to be finite. If initialization of a bracket
+		 * succeeded, then [x1, b, x2] is the bracket and true is
+		 * returned. Otherwise, b is the minimum found and false is
+		 * returned. */
+		static bool init_bracket(OneDFunction& f, double x1, double& x2, double& b);
+		/** This supposes that f(x1) is finite and moves x2 toward x1
+		 * until f(x2) and f(b) (with b a golden section of x1,x2) are
+		 * finite. On success, the routine returns true and sets x2 and
+		 * b; otherwise it returns false. */
+		static bool search_for_finite(OneDFunction& f, double x1, double& x2, double& b);
+	};
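+
+	/* A hypothetical usage sketch (the Quadratic class below is
+	 * illustrative only, not part of the library): minimize
+	 * f(x) = (x-2)^2 starting from the interval [0, 5].
+	 *
+	 *   class Quadratic : public OneDFunction {
+	 *   public:
+	 *       double eval(double x)
+	 *           {return (x-2)*(x-2);}
+	 *   };
+	 *
+	 *   Quadratic q;
+	 *   double xmin = GoldenSectionSearch::search(q, 0.0, 5.0);  // approx. 2.0
+	 */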
+
+	class VectorFunction {
+	public:
+		VectorFunction() {}
+		virtual ~VectorFunction() {}
+		virtual int inDim() const = 0;
+		virtual int outDim() const = 0;
+		/** Check dimensions of eval parameters. */
+		void check_for_eval(const ConstVector& in, Vector& out) const;
+		/** Evaluate the vector function. */
+		virtual void eval(const ConstVector& in, Vector& out) = 0;
+	};
+
+	class Jacobian : public TwoDMatrix {
+	public:
+		Jacobian(int n)
+			: TwoDMatrix(n,n) {}
+		virtual ~Jacobian() {}
+		virtual void eval(const Vector& in) = 0;
+	};
+
+	class NLSolver : public OneDFunction {
+	protected:
+		Journal& journal;
+		VectorFunction& func;
+		Jacobian& jacob;
+		const int max_iter;
+		const double tol;
+	private:
+		Vector xnewton;
+		Vector xcauchy;
+		Vector x;
+	public:
+		NLSolver(VectorFunction& f, Jacobian& j, int maxit, double tl, Journal& jr)
+			: journal(jr), func(f), jacob(j), max_iter(maxit), tol(tl),
+			  xnewton(f.inDim()), xcauchy(f.inDim()), x(f.inDim())
+			{xnewton.zeros(); xcauchy.zeros(); x.zeros();}
+		virtual ~NLSolver() {}
+		/** Returns true if the problem has converged. xx as input is the
+		 * starting value, as output it is a solution. */
+		bool solve(Vector& xx, int& iter);
+		/** To implement the OneDFunction interface. It returns
+		 * func(xx)^T*func(xx), where
+		 * xx=x+(1-lambda)*xcauchy+lambda*xnewton. It is non-const only
+		 * because it calls func; x, xnewton and xcauchy are not changed. */
+		double eval(double lambda);
+	};
+
+};
+
+#endif
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/src/planner_builder.cpp b/dynare++/src/planner_builder.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d2a38dfd4f14d3f612da128c2e2b4494acc6f2b5
--- /dev/null
+++ b/dynare++/src/planner_builder.cpp
@@ -0,0 +1,385 @@
+// Copyright (C) 2006, Ondra Kamenik
+
+// $Id$
+
+#include "planner_builder.h"
+#include "dynare_exception.h"
+
+#include <cmath>
+
+using namespace ogdyn;
+
+const IntegerMatrix& IntegerMatrix::operator=(const IntegerMatrix& im)
+{
+	if (nr != im.nr || nc != im.nc)
+		throw DynareException(__FILE__,__LINE__,
+							  "Matrices have different dimensions in IntegerMatrix::operator=");
+	memcpy(data, im.data, nr*nc*sizeof(int));
+	return *this;
+}
+
+const IntegerArray3& IntegerArray3::operator=(const IntegerArray3& ia3)
+{
+	if (n1 != ia3.n1 || n2 != ia3.n2 || n3 != ia3.n3)
+		throw DynareException(__FILE__,__LINE__,
+							  "Arrays have different dimensions in IntegerArray3::operator=");
+	memcpy(data, ia3.data, n1*n2*n3*sizeof(int));
+	return *this;
+}
+
+
+PlannerBuilder::PlannerBuilder(DynareModel& m, const Tvarset& yyset,
+							   const Teqset& ffset)
+	: yset(), fset(ffset), model(m),
+	  tb(model.t_plobjective), tbeta(model.t_pldiscount),
+	  maxlead(model.atoms.get_maxlead()),
+	  minlag(model.atoms.get_minlag()),
+	  diff_b(yyset.size(), 1-minlag),
+	  diff_f(yyset.size(), fset.size(), 1+maxlead-minlag),
+	  static_atoms(),
+	  static_tree(),
+	  diff_b_static(yyset.size(), 1-minlag),
+	  diff_f_static(yyset.size(), fset.size(), 1+maxlead-minlag)
+{
+	info.num_new_terms -= model.getParser().getTree().get_num_op();
+
+	fill_yset(m.atoms.get_name_storage(), yyset);
+
+	add_derivatives_of_b();
+	add_derivatives_of_f();
+	shift_derivatives_of_b();
+	shift_derivatives_of_f();
+	beta_multiply_b();
+	beta_multiply_f();
+	make_static_version();
+	lagrange_mult_f();
+	form_equations();
+
+	info.num_new_terms += model.getParser().getTree().get_num_op();
+}
+
+PlannerBuilder::PlannerBuilder(const PlannerBuilder& pb, ogdyn::DynareModel& m)
+	: yset(), fset(pb.fset), model(m),
+	  tb(pb.tb), tbeta(pb.tbeta),
+	  maxlead(pb.maxlead), minlag(pb.minlag),
+	  diff_b(pb.diff_b), diff_f(pb.diff_f),
+	  static_atoms(pb.static_atoms),
+	  static_tree(pb.static_tree),
+	  diff_b_static(pb.diff_b_static),
+	  diff_f_static(pb.diff_f_static),
+	  aux_map(), static_aux_map()
+	
+{
+	fill_yset(m.atoms.get_name_storage(), pb.yset);
+	fill_aux_map(m.atoms.get_name_storage(), pb.aux_map, pb.static_aux_map);
+}
+
+void PlannerBuilder::add_derivatives_of_b()
+{
+	int yi = 0;
+	for (Tvarset::const_iterator yname = yset.begin();
+		 yname != yset.end(); ++yname, yi++)
+		for (int ll = minlag; ll <= 0; ll++) {
+			int yt = model.atoms.index(*yname, ll);
+			if (yt != -1)
+				diff_b(yi, ll-minlag) = model.eqs.add_derivative(tb, yt);
+			else
+				diff_b(yi, ll-minlag) = ogp::OperationTree::zero;
+		}
+}
+
+void PlannerBuilder::add_derivatives_of_f()
+{
+	int yi = 0;
+	for (Tvarset::const_iterator yname = yset.begin();
+		 yname != yset.end(); ++yname, yi++)
+		for (unsigned int fi = 0; fi < fset.size(); fi++)
+			for (int ll = minlag; ll <= maxlead; ll++) {
+				int yt = model.atoms.index(*yname, ll);
+				if (yt != -1)
+					diff_f(yi, fi, ll-minlag) =
+						model.eqs.add_derivative(model.eqs.formula(fset[fi]), yt);
+				else
+					diff_f(yi, fi, ll-minlag) = ogp::OperationTree::zero;
+			}
+}
+
+void PlannerBuilder::shift_derivatives_of_b()
+{
+	map<int,int> subst;
+	for (int yi = 0; yi < diff_b.nrows(); yi++)
+		for (int ll = minlag; ll < 0; ll++)
+			if (diff_b(yi, ll-minlag) != ogp::OperationTree::zero) {
+				model.variable_shift_map(model.eqs.nulary_of_term(diff_b(yi, ll-minlag)),
+										 -ll, subst);
+				diff_b(yi, ll-minlag) = model.eqs.add_substitution(diff_b(yi, ll-minlag), subst);
+			}
+}
+
+void PlannerBuilder::shift_derivatives_of_f()
+{
+	map<int,int> subst;
+	for (int yi = 0; yi < diff_f.dim1(); yi++)
+		for (int fi = 0; fi < diff_f.dim2(); fi++) {
+			// first do it for leads, which are put under expectation before t: no problem
+			for (int ll = 0; ll <= maxlead; ll++)
+				if (diff_f(yi, fi, ll-minlag) != ogp::OperationTree::zero) {
+					model.variable_shift_map(model.eqs.nulary_of_term(diff_f(yi, fi, ll-minlag)),
+											 -ll, subst);
+					diff_f(yi, fi, ll-minlag) =
+						model.eqs.add_substitution(diff_f(yi, fi, ll-minlag), subst);
+				}
+			// now do it for lags, these are put as leads under
+			// expectations after time t, so we have to introduce
+			// auxiliary variables at time t, and make leads of them here
+			for (int ll = minlag; ll < 0; ll++) {
+				int ft = diff_f(yi, fi, ll-minlag);
+				if (ft != ogp::OperationTree::zero) {
+					// if the ft term has a lead, then we need to
+					// introduce an auxiliary variable z_t, define it
+					// as E_t[ft] and put z_{t-ll} into the
+					// equation. Otherwise, we just put the leaded ft
+					// into the equation directly.
+					int ft_maxlead, ft_minlag;
+					model.termspan(ft, ft_maxlead, ft_minlag);
+					if (ft_maxlead > 0) {
+						// make an auxiliary variable
+						char name[100];
+						sprintf(name, "AUX_%d_%d_%d", yi, fset[fi], -ll);
+						model.atoms.register_uniq_endo(name);
+						info.num_aux_variables++;
+						int taux = model.eqs.add_nulary(name);
+						sprintf(name, "AUX_%d_%d_%d(%d)", yi, fset[fi], -ll, -ll);
+						int taux_leaded = model.eqs.add_nulary(name);
+						// put aux_leaded to the equation
+						diff_f(yi, fi, ll-minlag) = taux_leaded;
+						// save auxiliary variable and the term
+						aux_map.insert(Tsubstmap::value_type(model.atoms.name(taux), ft));
+					} else {
+						// no auxiliary variable is needed and the
+						// term ft can be leaded in place
+						model.variable_shift_map(model.eqs.nulary_of_term(ft), -ll, subst);
+						diff_f(yi, fi, ll-minlag) =
+							model.eqs.add_substitution(ft, subst);
+					}
+				}
+			}
+		}
+}
+
+void PlannerBuilder::beta_multiply_b()
+{
+	int beta_pow = ogp::OperationTree::one;
+	for (int ll = 0; ll >= minlag; ll--,
+			 beta_pow = model.eqs.add_binary(ogp::TIMES, beta_pow, tbeta))
+		for (int yi = 0; yi < diff_b.nrows(); yi++)
+			if (diff_b(yi, ll-minlag) != ogp::OperationTree::zero)
+				diff_b(yi, ll-minlag) =
+					model.eqs.add_binary(ogp::TIMES, beta_pow, diff_b(yi, ll-minlag));
+}
+
+void PlannerBuilder::beta_multiply_f()
+{
+	int beta_pow = ogp::OperationTree::one;
+	for (int ll = 0; ll <= maxlead; ll++,
+			 beta_pow = model.eqs.add_binary(ogp::DIVIDE, beta_pow, tbeta))
+		for (int yi = 0; yi < diff_f.dim1(); yi++)
+			for (int fi = 0; fi < diff_f.dim2(); fi++)
+				if (diff_f(yi, fi, ll-minlag) != ogp::OperationTree::zero)
+					diff_f(yi, fi, ll-minlag) =
+						model.eqs.add_binary(ogp::TIMES, beta_pow, diff_f(yi, fi, ll-minlag));
+
+	beta_pow = ogp::OperationTree::one;
+	for (int ll = 0; ll >= minlag; ll--,
+			 beta_pow = model.eqs.add_binary(ogp::TIMES, beta_pow, tbeta))
+		for (int yi = 0; yi < diff_f.dim1(); yi++)
+			for (int fi = 0; fi < diff_f.dim2(); fi++)
+				if (diff_f(yi, fi, ll-minlag) != ogp::OperationTree::zero)
+					diff_f(yi, fi, ll-minlag) =
+						model.eqs.add_binary(ogp::TIMES, beta_pow, diff_f(yi, fi, ll-minlag));
+}
+
+void PlannerBuilder::make_static_version()
+{
+	// map holding substitutions from dynamic to static
+	ogp::StaticFineAtoms::Tintintmap tmap;
+
+	// fill static atoms with outer ordering
+	static_atoms.import_atoms(model.atoms, static_tree, tmap);
+
+	// go through diff_b and fill diff_b_static
+	for (int ll = minlag; ll <= 0; ll++)
+		for (int yi = 0; yi < diff_b.nrows(); yi++)
+			diff_b_static(yi, ll-minlag) =
+				static_tree.add_substitution(diff_b(yi, ll-minlag),
+											 tmap,  model.eqs.getTree());
+
+	// go through diff_f and fill diff_f_static
+	for (int ll = minlag; ll <= maxlead; ll++)
+		for (int yi = 0; yi < diff_f.dim1(); yi++)
+			for (int fi = 0; fi < diff_f.dim2(); fi++)
+				diff_f_static(yi, fi, ll-minlag) =
+					static_tree.add_substitution(diff_f(yi, fi, ll-minlag),
+												 tmap, model.eqs.getTree());
+
+	// go through aux_map and fill static_aux_map
+	for (Tsubstmap::const_iterator it = aux_map.begin();
+		 it != aux_map.end(); ++it) {
+		int tstatic = static_tree.add_substitution((*it).second, tmap, model.eqs.getTree());
+		const char* name = static_atoms.get_name_storage().query((*it).first);
+		static_aux_map.insert(Tsubstmap::value_type(name, tstatic));
+	}
+}
+
+
+void PlannerBuilder::lagrange_mult_f()
+{
+	// register multipliers
+	char mult_name[100];
+	for (int fi = 0; fi < diff_f.dim2(); fi++) {	
+		sprintf(mult_name, "MULT%d", fset[fi]);
+		model.atoms.register_uniq_endo(mult_name);
+		info.num_lagrange_mults++;
+	}
+	// multiply with the multipliers
+	for (int yi = 0; yi < diff_f.dim1(); yi++)
+		for (int fi = 0; fi < diff_f.dim2(); fi++)
+			for (int ll = minlag; ll <= maxlead; ll++)
+				if (diff_f(yi, fi, ll-minlag) != ogp::OperationTree::zero) {
+					sprintf(mult_name, "MULT%d(%d)", fset[fi], -ll);
+					int tm = model.eqs.add_nulary(mult_name);
+					diff_f(yi, fi, ll-minlag) =
+						model.eqs.add_binary(ogp::TIMES, tm, diff_f(yi, fi, ll-minlag));
+				}
+}
+
+void PlannerBuilder::form_equations()
+{
+	// add planner's FOCs
+	for (int yi = 0; yi < diff_f.dim1(); yi++) {
+		int eq = ogp::OperationTree::zero;
+		for (int ll = minlag; ll <= 0; ll++)
+			eq = model.eqs.add_binary(ogp::PLUS, eq, diff_b(yi, ll-minlag));
+		for (int fi = 0; fi < diff_f.dim2(); fi++)
+			for (int ll = minlag; ll <= maxlead; ll++)
+				eq = model.eqs.add_binary(ogp::PLUS, eq, diff_f(yi, fi, ll-minlag));
+		model.eqs.add_formula(eq);
+	}
+
+	// add equations for auxiliary variables
+	for (Tsubstmap::const_iterator it = aux_map.begin();
+		 it != aux_map.end(); ++it) {
+		int t = model.atoms.index((*it).first, 0);
+		model.eqs.add_formula(model.eqs.add_binary(ogp::MINUS, t, (*it).second));
+	}
+}
+
+void PlannerBuilder::fill_yset(const ogp::NameStorage& ns,
+							   const PlannerBuilder::Tvarset& yyset)
+{
+	for (Tvarset::const_iterator it = yyset.begin(); it != yyset.end(); ++it)
+		yset.insert(ns.query(*it));
+}
+
+void PlannerBuilder::fill_aux_map(const ogp::NameStorage& ns, const Tsubstmap& aaux_map,
+								  const Tsubstmap& astatic_aux_map)
+{
+	// fill aux_map
+	for (Tsubstmap::const_iterator it = aaux_map.begin();
+		 it != aaux_map.end(); ++it)
+		aux_map.insert(Tsubstmap::value_type(ns.query((*it).first), (*it).second));
+
+	// fill static_aux_map
+	for (Tsubstmap::const_iterator it = astatic_aux_map.begin();
+		 it != astatic_aux_map.end(); ++it)
+		static_aux_map.insert(Tsubstmap::value_type(static_atoms.get_name_storage().query((*it).first),
+													(*it).second));
+}
+
+MultInitSS::MultInitSS(const PlannerBuilder& pb, const Vector& pvals, Vector& yy)
+	: builder(pb), b(builder.diff_b_static.nrows()),
+	  F(builder.diff_f_static.dim1(), builder.diff_f_static.dim2())
+{
+	b.zeros();
+	F.zeros();
+
+	// first evaluate substitutions (auxiliary variables) from the builder
+	ogdyn::DynareStaticSteadySubstitutions dss(builder.model.atoms, builder.static_atoms,
+											   builder.static_tree,
+											   builder.static_aux_map, pvals, yy);
+
+	// gather all the terms from builder.diff_b_static and
+	// builder.diff_f_static to the vector, the ordering is important,
+	// since the index of this vector will have to be decoded to the
+	// position in b and F.
+	vector<int> terms;
+	for (int yi = 0; yi < builder.diff_b_static.nrows(); yi++)
+		for (int l = 0; l < builder.diff_b_static.ncols(); l++)
+			terms.push_back(builder.diff_b_static(yi, l));
+	for (int yi = 0; yi < builder.diff_f_static.dim1(); yi++)
+		for (int fi = 0; fi < builder.diff_f_static.dim2(); fi++)
+			for (int l = 0; l < builder.diff_f_static.dim3(); l++)
+				terms.push_back(builder.diff_f_static(yi, fi, l));
+
+	// evaluate the terms, it will call a series of load(i,res), which
+	// sum the results through lags/leads to b and F
+	DynareStaticSteadyAtomValues dssav(builder.model.atoms, builder.static_atoms, pvals, yy);
+	ogp::FormulaCustomEvaluator fe(builder.static_tree, terms);
+	fe.eval(dssav, *this);
+
+	// solve overdetermined system b+F*lambda=0 => lambda=-(F^T*F)^{-1}*F^T*b
+	GeneralMatrix FtF(F, "transpose", F);
+	Vector lambda(builder.diff_f_static.dim2());
+	F.multVecTrans(0.0, lambda, -1.0, b);
+	ConstGeneralMatrix(FtF).multInvLeft(lambda);
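+	// (In the lines above, FtF holds F^T*F, lambda is first set to
+	// -F^T*b by multVecTrans, and multInvLeft then applies (F^T*F)^{-1},
+	// which yields the least squares solution lambda=-(F^T*F)^{-1}*F^T*b
+	// stated in the comment above.)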
+
+	// take values of lambda and put it to yy
+	for (int fi = 0; fi < builder.diff_f_static.dim2(); fi++) {
+		char mult_name[100];
+		sprintf(mult_name, "MULT%d", builder.fset[fi]);
+		int iouter = builder.model.atoms.name2outer_endo(mult_name);
+		int iy = builder.model.atoms.outer2y_endo()[iouter];
+		if (! std::isfinite(yy[iy]))
+			yy[iy] = lambda[fi];
+
+		// go through all substitutions of the multiplier and set them
+		// as well
+		if (builder.model.atom_substs) {
+			const ogp::AtomSubstitutions::Toldnamemap& old2new =
+				builder.model.atom_substs->get_old2new();
+			const ogp::AtomSubstitutions::Toldnamemap::const_iterator it =
+				old2new.find(mult_name);
+			if (it != old2new.end()) {
+				const ogp::AtomSubstitutions::Tshiftnameset& sset = (*it).second;
+				for (ogp::AtomSubstitutions::Tshiftnameset::const_iterator itt = sset.begin();
+					 itt != sset.end(); ++itt) {
+					const char* newname = (*itt).first;
+					int iouter = builder.model.atoms.name2outer_endo(newname);
+					int iy = builder.model.atoms.outer2y_endo()[iouter];
+					if (! std::isfinite(yy[iy]))
+						yy[iy] = lambda[fi];
+				}
+			}
+		}
+	}
+}
+
+void MultInitSS::load(int i, double res)
+{
+	// we can afford it, since the evaluator sets res to exact zero if
+	// the term is zero
+	if (res == 0)
+		return;
+	// decode i and add to either b or F 
+	if (i < builder.diff_b_static.nrows()*builder.diff_b_static.ncols()) {
+		// add to b
+		b[i / builder.diff_b_static.ncols()] += res;
+	} else {
+		// add to F
+		i -= builder.diff_b_static.nrows()*builder.diff_b_static.ncols();
+		int yifi = i / builder.diff_f_static.dim3();
+		int yi = yifi / builder.diff_f_static.dim2();
+		int fi = yifi % builder.diff_f_static.dim2();
+		F.get(yi, fi) += res;
+	}
+}
diff --git a/dynare++/src/planner_builder.h b/dynare++/src/planner_builder.h
new file mode 100644
index 0000000000000000000000000000000000000000..c8c730aac4a66439a2e456ba3e7f38894a9f20c2
--- /dev/null
+++ b/dynare++/src/planner_builder.h
@@ -0,0 +1,281 @@
+// Copyright (C) 2006, Ondra Kamenik
+
+// $Id$
+
+#ifndef PLANNER_BUILDER_H
+#define PLANNER_BUILDER_H
+
+#include "dynare_model.h"
+
+namespace ogdyn {
+
+	using __gnu_cxx::hash_set;
+	using std::map;
+	using std::vector;
+
+	/** This is a two dimensional array of integers. Nothing
+	 * difficult. */ 
+	class IntegerMatrix {
+	protected:
+		/** Number of rows. */
+		int nr;
+		/** Number of columns. */
+		int nc;
+		/** The pointer to the data. */
+		int* data;
+	public:
+		/** Construct uninitialized array. */
+		IntegerMatrix(int nrr, int ncc)
+			: nr(nrr), nc(ncc), data(new int[nr*nc]) {}
+		/** Copy constructor. */
+		IntegerMatrix(const IntegerMatrix& im)
+			: nr(im.nr), nc(im.nc), data(new int[nr*nc])
+			{memcpy(data, im.data, nr*nc*sizeof(int));}
+		virtual ~IntegerMatrix()
+			{delete [] data;}
+		/** Assignment operator. It can only assign an array with the
+		 * same dimensions. */
+		const IntegerMatrix& operator=(const IntegerMatrix& im);
+		int& operator()(int i, int j)
+			{return data[i+j*nr];}
+		const int& operator()(int i, int j) const
+			{return data[i+j*nr];}
+		int nrows() const
+			{return nr;}
+		int ncols() const
+			{return nc;}
+	};
+
+	/** The three dimensional array of integers. Nothing difficult. */
+	class IntegerArray3 {
+	protected:
+		/** First dimension. */
+		int n1;
+		/** Second dimension. */
+		int n2;
+		/** Third dimension. */
+		int n3;
+		/** The data. */
+		int* data;
+	public:
+		/** Construct an uninitialized array. */
+		IntegerArray3(int nn1, int nn2, int nn3)
+			: n1(nn1), n2(nn2), n3(nn3), data(new int[n1*n2*n3]) {}
+		/** Copy constructor. */
+		IntegerArray3(const IntegerArray3& ia3)
+			: n1(ia3.n1), n2(ia3.n2), n3(ia3.n3), data(new int[n1*n2*n3])
+			{memcpy(data, ia3.data, n1*n2*n3*sizeof(int));}
+		virtual ~IntegerArray3()
+			{delete [] data;}
+		/** Assignment operator assigning the arrays with the same dimensions. */
+		const IntegerArray3& operator=(const IntegerArray3& ia3);
+		int& operator()(int i, int j, int k)
+			{return data[i+j*n1+k*n1*n2];}
+		const int& operator()(int i, int j, int k) const
+			{return data[i+j*n1+k*n1*n2];}
+		int dim1() const
+			{return n1;}
+		int dim2() const
+			{return n2;}
+		int dim3() const
+			{return n3;}
+	};
+
+	/** This struct encapsulates information about the building of a
+	 * planner's problem. */
+	struct PlannerInfo {
+		int num_lagrange_mults;
+		int num_aux_variables;
+		int num_new_terms;
+		PlannerInfo()
+			: num_lagrange_mults(0),
+			  num_aux_variables(0),
+			  num_new_terms(0) {}
+	};
+
+	class MultInitSS;
+
+	/** This class builds the first order conditions of the social
+	 * planner problem with constraints being the equations in the
+	 * model. The model is non-const parameter to the constructor
+	 * which adds appropriate FOCs to the system. It also allows for
+	 * an estimation of the Lagrange multipliers given all other
+	 * endogenous variables of the static system. For this purpose we
+	 * need to create static atoms and static versions of all the tree
+	 * index matrices. The algorithm and algebra are documented in
+	 * dynare++-ramsey.pdf. */  
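+	/* A hypothetical usage sketch (the variable name "C" and the
+	 * equation index 0 are illustrative only):
+	 *
+	 *   PlannerBuilder::Tvarset yset;
+	 *   yset.insert("C");                       // optimize wrt C
+	 *   PlannerBuilder::Teqset fset;
+	 *   fset.push_back(0);                      // subject to equation 0
+	 *   PlannerBuilder pb(model, yset, fset);   // adds the FOCs to model
+	 *   const PlannerInfo& pi = pb.get_info();  // what was added
+	 */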
+	class PlannerBuilder {
+		friend class MultInitSS;
+	public:
+		/** Type for a set of variable names. */
+		typedef hash_set<const char*> Tvarset;
+		/** Type for a set of equations. An equation is identified by
+		 * an index to an equation in the equation vector given by
+		 * DynareModel::eqs. The tree index of the i-th formula is
+		 * retrieved as DynareModel::eqs.formula(i). */
+		typedef vector<int> Teqset;
+	protected:
+		/** This is a set of variables wrt which the planner
+		 * optimizes. These could be all endogenous variables, but it
+		 * is beneficial to exclude all variables which are
+		 * deterministic transformations of past exogenous variables,
+		 * since the planner cannot influence them. This could save a
+		 * few equations. This is not changed after it is constructed,
+		 * but it is constructed manually, so it cannot be declared as
+		 * const. */
+		Tvarset yset;
+		/** These are the equation indices constituting the constraints
+		 * for the planner. Again, it is beneficial to exclude all
+		 * equations defining exogenous variables excluded from
+		 * yset. */
+		const Teqset fset;
+		/** Reference to the model. */ 
+		ogdyn::DynareModel& model;
+		/** Tree index of the planner objective. */
+		int tb;
+		/** Tree index of the planner discount parameter. */
+		int tbeta;
+		/** The maximum lead in the model including the planner's
+		 * objective before building the planner's FOCs. */
+		const int maxlead;
+		/** The minimum lag in the model including the planner's objective
+		 * before building the planner's FOCs. */
+		const int minlag;
+		/** Tree indices of formulas in the planner FOCs involving
+		 * derivatives of the planner's objective. Rows correspond to the
+		 * endogenous variables, columns correspond to lags in the
+		 * objective function. The contents of the matrix will evolve as
+		 * the algorithm proceeds. */
+		IntegerMatrix diff_b;
+		/** Tree indices of formulas in the planner FOCs involving
+		 * derivatives of the model equations (constraints). The first
+		 * dimension corresponds to endogenous variables, the second to
+		 * the constraints, the third to lags or leads of endogenous
+		 * variables in the constraints. The contents of the array will
+		 * evolve as the algorithm proceeds.*/
+		IntegerArray3 diff_f;
+		/** Static version of the model atoms. It is needed to build
+		 * static version of diff_b and diff_f. */
+		ogp::StaticFineAtoms static_atoms;
+		/** Static version of all the trees of diff_b and diff_f build
+		 * over static_atoms. */
+		ogp::OperationTree static_tree;
+		/** Tree indices of static version of diff_b over static_atoms and static_tree. */
+		IntegerMatrix diff_b_static;
+		/** Tree indices of static version of diff_f over static_atoms
+		 * and static_tree. This member is created before calling
+		 * lagrange_mult_f(), so it does not contain the
+		 * multiplication with the Lagrange multipliers. */
+		IntegerArray3 diff_f_static;
+		/** Auxiliary variables mapping. During the algorithm, some
+		 * auxiliary variables for the terms might be created, so we
+		 * remember their names and tree indices of the terms. This
+		 * maps a name to the tree index of an expression equal to the
+		 * auxiliary variable at time zero. The auxiliary variables'
+		 * names point to the dynamic atoms storage, the tree indices to
+		 * the dynamic model tree. */
+		Tsubstmap aux_map;
+		/** Static version of aux_map. The names point to static_atoms
+		 * storage, the tree indices to the static_tree. */
+		Tsubstmap static_aux_map;
+		/** Information about the number of various things. */
+		PlannerInfo info;
+	public:
+		/** Build the planner problem for the given model optimizing
+		 * through the given endogenous variables with the given
+		 * constraints. We allow for a selection of a subset of
+		 * equations and variables in order to eliminate exogenous
+		 * predetermined processes which cannot be influenced by the
+		 * social planner. */
+		PlannerBuilder(ogdyn::DynareModel& m, const Tvarset& yyset,
+					   const Teqset& ffset);
+		/** Construct a copy of the builder with provided model, which
+		 * is supposed to be the copy of the model in the builder. */
+		PlannerBuilder(const PlannerBuilder& pb, ogdyn::DynareModel& m);
+		/** Return the information. */
+		const PlannerInfo& get_info() const
+			{return info;}
+	protected:
+		/** Differentiate the planner objective wrt endogenous
+		 * variables with different lags. */
+		void add_derivatives_of_b();
+		/** Differentiate the constraints wrt endogenous variables
+		 * with different lags and leads. */
+		void add_derivatives_of_f();
+		/** Shift derivatives of diff_b. */
+		void shift_derivatives_of_b();
+		/** Shift derivatives of diff_f. */
+		void shift_derivatives_of_f();
+		/** Multiply with the discount factor terms in diff_b. */
+		void beta_multiply_b();
+		/** Multiply with the discount factor terms in diff_f. */
+		void beta_multiply_f();
+		/** Fill static_atoms and static_tree and build diff_b_static,
+		 * diff_f_static and static_aux_map with static versions of diff_b,
+		 * diff_f and aux_map. */
+		void make_static_version();
+		/** Multiply diff_f with the Lagrange multipliers. */
+		void lagrange_mult_f();
+		/** Add the equations to the model, including the equations for the auxiliary variables. */
+		void form_equations();
+	private:
+		/** Fill yset for a given yyset and given name storage. */
+		void fill_yset(const ogp::NameStorage& ns, const Tvarset& yyset);
+		/** Fill aux_map and static_aux_map from the given aaux_map and
+		 * astatic_aux_map, using the given storage of dynamic atoms
+		 * (for aux_map) and the static atoms storage of this object
+		 * (for static_aux_map). */
+		void fill_aux_map(const ogp::NameStorage& ns, const Tsubstmap& aaux_map,
+						  const Tsubstmap& astatic_aux_map);
+		/** Disallow plain copying; a copy must be supplied with a new model instance. */
+		PlannerBuilder(const PlannerBuilder& pb);
+	};
+
+	/** For a given initial guess of the endogenous variables, this
+	 * class calculates an initial guess of the Lagrange multipliers
+	 * of the social planner problem yielding the least squares
+	 * error. It is used by just calling its constructor. The
+	 * constructor takes a non-const reference to the vector of
+	 * endogenous variables, calculates the lambdas and puts their
+	 * values into the vector. The algebra is found in
+	 * dynare++-ramsey.pdf.
+	 *
+	 * The code can be run only after the parsing has been finished in
+	 * atoms. */
+	class MultInitSS : public ogp::FormulaEvalLoader {
+	protected:
+		/** The constant reference to the builder. */
+		const PlannerBuilder& builder;
+		/** The constant term of the problem. Its length is the number
+		 * of endogenous variables wrt which the planner optimizes. */
+		Vector b;
+		/** The matrix of the overdetermined problem. The number of
+		 * rows is equal to the number of endogenous variables wrt
+		 * which the planner optimizes, the number of columns is equal
+		 * to the number of Lagrange multipliers which is equal to
+		 * the number of constraints which is smaller than the number
+		 * of endogenous variables. Hence the system b+F*lambda=0 is
+		 * overdetermined. */
+		GeneralMatrix F;
+	public:
+		/** The constructor of the object which does everything. Its
+		 * main goal is to update yy. Note that if an item of yy
+		 * corresponding to a Lagrange multiplier is already set, it
+		 * is not reset. */
+		MultInitSS(const PlannerBuilder& pb, const Vector& pvals, Vector& yy);
+		/** This loads an evaluated part of b or F: it decodes i and
+		 * adds res to b or F depending on the decoded i. The decoding
+		 * depends on the order in which the terms of
+		 * builder.diff_b_static and builder.diff_f_static were put
+		 * into the ogp::FormulaCustomEvaluator. This is documented in
+		 * the code of the constructor. */
+		void load(int i, double res);
+	};
+};
+
+
+#endif
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/sylv/cc/BlockDiagonal.cpp b/dynare++/sylv/cc/BlockDiagonal.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..89ddc70c539e27ac1ee068695fb74cc784e001fa
--- /dev/null
+++ b/dynare++/sylv/cc/BlockDiagonal.cpp
@@ -0,0 +1,321 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/BlockDiagonal.cpp,v 1.1.1.1 2004/06/04 13:00:20 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#include "BlockDiagonal.h"
+
+#include <stdio.h>
+#include <string.h>
+
+BlockDiagonal::BlockDiagonal(const double* d, int d_size)
+	: QuasiTriangular(d, d_size),
+	  row_len(new int[d_size]), col_len(new int[d_size])
+{
+	for (int i = 0; i < d_size; i++) {
+		row_len[i] = d_size;
+		col_len[i] = 0;
+	}
+}
+
+BlockDiagonal::BlockDiagonal(const QuasiTriangular& t)
+	: QuasiTriangular(t),
+	  row_len(new int[t.numRows()]), col_len(new int[t.numRows()])
+{
+	for (int i = 0; i < t.numRows(); i++) {
+		row_len[i] = t.numRows();
+		col_len[i] = 0;
+	}
+}
+
+BlockDiagonal::BlockDiagonal(int p, const BlockDiagonal& b)
+	: QuasiTriangular(p, b),
+	  row_len(new int[b.numRows()]), col_len(new int[b.numRows()])
+{
+	memcpy(row_len, b.row_len, b.numRows()*sizeof(int));
+	memcpy(col_len, b.col_len, b.numRows()*sizeof(int));
+}
+
+BlockDiagonal::BlockDiagonal(const BlockDiagonal& b)
+	: QuasiTriangular(b),
+	  row_len(new int[b.numRows()]), col_len(new int[b.numRows()])
+{
+	memcpy(row_len, b.row_len, b.numRows()*sizeof(int));
+	memcpy(col_len, b.col_len, b.numRows()*sizeof(int));
+}
+
+/* Put zeros into the upper-right submatrix whose first column is
+ * given by 'edge'. */
+void BlockDiagonal::setZerosToRU(diag_iter edge)
+{
+	int iedge = (*edge).getIndex();
+	for (int i = 0; i < iedge; i++)
+		for (int j = iedge; j < numCols(); j++)
+			get(i,j) = 0.0;
+}
+
+/* Updates row_len and col_len so that there are zeros in the upper-right
+ * part, i.e. so that the matrix takes the form
+ * |T1 0 |
+ * |0  T2|
+ * where the first column of T2 is given by the diagonal iterator 'edge'.
+ *
+ * Note the semantics of row_len and col_len. row_len[i] is the distance
+ * of the right-most non-zero element of the i-th row from the left, and
+ * col_len[j] is the distance of the top-most non-zero element of the j-th
+ * column from the top. (The first element has distance 1.)
+ */
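+/* For illustration (assuming all diagonal blocks are real 1x1 blocks):
+ * for a 4x4 matrix, after setZeroBlockEdge with the block starting at
+ * index 2, row_len becomes {2,2,4,4} and col_len becomes {0,0,2,2},
+ * describing a block structure with two 2x2 diagonal blocks. */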
+void BlockDiagonal::setZeroBlockEdge(diag_iter edge)
+{
+	setZerosToRU(edge);
+
+	int iedge = (*edge).getIndex();
+	for (diag_iter run = diag_begin(); run != edge; ++run) {
+		int ind = (*run).getIndex();
+		if (row_len[ind] > iedge) {
+			row_len[ind] = iedge;
+			if (!(*run).isReal())
+				row_len[ind+1] = iedge;
+		} 
+	}
+	for (diag_iter run = edge; run != diag_end(); ++run) {
+		int ind = (*run).getIndex();
+		if (col_len[ind] < iedge) {
+			col_len[ind] = iedge;
+			if (!(*run).isReal())
+				col_len[ind+1] = iedge;
+		}
+	}
+}
+
+BlockDiagonal::const_col_iter
+BlockDiagonal::col_begin(const DiagonalBlock& b) const
+{
+	int jbar = b.getIndex();
+	int d_size = diagonal.getSize();
+	return const_col_iter(&getData()[jbar*d_size + col_len[jbar]], d_size,
+						  b.isReal(), col_len[jbar]);
+}
+
+BlockDiagonal::col_iter
+BlockDiagonal::col_begin(const DiagonalBlock& b)
+{
+	int jbar = b.getIndex();
+	int d_size = diagonal.getSize();
+	return col_iter(&getData()[jbar*d_size + col_len[jbar]], d_size,
+					b.isReal(), col_len[jbar]);
+}
+
+
+BlockDiagonal::const_row_iter
+BlockDiagonal::row_end(const DiagonalBlock& b) const
+{
+	int jbar = b.getIndex();
+	int d_size = diagonal.getSize();
+	return const_row_iter(&getData()[d_size*row_len[jbar]+jbar], d_size,
+						  b.isReal(), row_len[jbar]);
+}
+
+BlockDiagonal::row_iter
+BlockDiagonal::row_end(const DiagonalBlock& b)
+{
+	int jbar = b.getIndex();
+	int d_size = diagonal.getSize();
+	return row_iter(&getData()[d_size*row_len[jbar]+jbar], d_size,
+					b.isReal(), row_len[jbar]);
+}
+
+int BlockDiagonal::getNumZeros() const
+{
+	int sum = 0;
+	for (int i = 0; i < diagonal.getSize(); i++) {
+		sum += diagonal.getSize() - row_len[i];
+	}
+	return sum;
+}
+
+QuasiTriangular::const_diag_iter
+BlockDiagonal::findBlockStart(const_diag_iter from) const
+{
+	if (from != diag_end()) {
+		++from;
+		while (from != diag_end() &&
+			   col_len[(*from).getIndex()] != (*from).getIndex())
+			++from;
+	}
+	return from;
+}
+
+int BlockDiagonal::getLargestBlock() const
+{
+	int largest = 0;
+	const_diag_iter start = diag_begin();
+	const_diag_iter end = findBlockStart(start);
+	while (start != diag_end()) {
+		int si = (*start).getIndex();
+		int ei = diagonal.getSize();
+		if (end != diag_end())
+			ei = (*end).getIndex();
+		if (largest < ei-si)
+			largest = ei-si;
+		start = end;
+		end = findBlockStart(start);
+	}
+	return largest;
+}
+
+
+void BlockDiagonal::savePartOfX(int si, int ei, const KronVector& x, Vector& work)
+{
+	for (int i = si; i < ei; i++) {
+		ConstKronVector xi(x, i);
+		Vector target(work, (i-si)*xi.length(), xi.length());
+		target = xi;
+	}
+}
+
+void BlockDiagonal::multKronBlock(const_diag_iter start, const_diag_iter end,
+								  KronVector& x, Vector& work) const
+{
+	int si = (*start).getIndex();
+	int ei = diagonal.getSize();
+	if (end != diag_end())
+		ei = (*end).getIndex();
+	savePartOfX(si, ei, x, work);
+
+	for (const_diag_iter di = start; di != end; ++di) {
+		int jbar = (*di).getIndex();
+		if ((*di).isReal()) {
+			KronVector xi(x, jbar);
+			xi.zeros();
+			Vector wi(work, (jbar-si)*xi.length(), xi.length());
+			xi.add(*((*di).getAlpha()), wi);
+			for (const_row_iter ri = row_begin(*di); ri != row_end(*di); ++ri) {
+				int col = ri.getCol();
+				Vector wj(work, (col-si)*xi.length(), xi.length());
+				xi.add(*ri, wj);
+			}
+		} else {
+			KronVector xi(x, jbar);
+			KronVector xii(x, jbar+1);
+			xi.zeros();
+			xii.zeros();
+			Vector wi(work, (jbar-si)*xi.length(), xi.length());
+			Vector wii(work, (jbar+1-si)*xi.length(), xi.length());
+			xi.add(*((*di).getAlpha()), wi);
+			xi.add((*di).getBeta1(), wii);
+			xii.add((*di).getBeta2(), wi);
+			xii.add(*((*di).getAlpha()), wii);
+			for (const_row_iter ri = row_begin(*di); ri != row_end(*di); ++ri) {
+				int col = ri.getCol();
+				Vector wj(work, (col-si)*xi.length(), xi.length());
+				xi.add(ri.a(), wj);
+				xii.add(ri.b(), wj);
+			}
+		}
+	}
+}
+
+void BlockDiagonal::multKronBlockTrans(const_diag_iter start, const_diag_iter end,
+									   KronVector& x, Vector& work) const
+{
+	int si = (*start).getIndex();
+	int ei = diagonal.getSize();
+	if (end != diag_end())
+		ei = (*end).getIndex();
+	savePartOfX(si, ei, x, work);
+
+	for (const_diag_iter di = start; di != end; ++di) {
+		int jbar = (*di).getIndex();
+		if ((*di).isReal()) {
+			KronVector xi(x, jbar);
+			xi.zeros();
+			Vector wi(work, (jbar-si)*xi.length(), xi.length());
+			xi.add(*((*di).getAlpha()), wi);
+			for (const_col_iter ci = col_begin(*di); ci != col_end(*di); ++ci) {
+				int row = ci.getRow();
+				Vector wj(work, (row-si)*xi.length(), xi.length());
+				xi.add(*ci, wj);
+			}
+		} else {
+			KronVector xi(x, jbar);
+			KronVector xii(x, jbar+1);
+			xi.zeros();
+			xii.zeros();
+			Vector wi(work, (jbar-si)*xi.length(), xi.length());
+			Vector wii(work, (jbar+1-si)*xi.length(), xi.length());
+			xi.add(*((*di).getAlpha()), wi);
+			xi.add((*di).getBeta2(), wii);
+			xii.add((*di).getBeta1(), wi);
+			xii.add(*((*di).getAlpha()), wii);
+			for (const_col_iter ci = col_begin(*di); ci != col_end(*di); ++ci) {
+				int row = ci.getRow();
+				Vector wj(work, (row-si)*xi.length(), xi.length());
+				xi.add(ci.a(), wj);
+				xii.add(ci.b(), wj);
+			}
+		}
+	}
+}
+
+void BlockDiagonal::multKron(KronVector& x) const
+{
+	int largest = getLargestBlock();
+	Vector work(largest*x.getN()*power(x.getM(),x.getDepth()-1));
+	const_diag_iter start = diag_begin();
+	const_diag_iter end = findBlockStart(start);
+	while (start != diag_end()) {
+		multKronBlock(start, end, x, work);
+		start = end;
+		end = findBlockStart(start);
+	}
+}
+
+
+void BlockDiagonal::multKronTrans(KronVector& x) const
+{
+	int largest = getLargestBlock();
+	Vector work(largest*x.getN()*power(x.getM(),x.getDepth()-1));
+	const_diag_iter start = diag_begin();
+	const_diag_iter end = findBlockStart(start);
+	while (start != diag_end()) {
+		multKronBlockTrans(start, end, x, work);
+		start = end;
+		end = findBlockStart(start);
+	}
+}
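+
+/* Explanatory note (added sketch, not part of the original code): the
+   multKron and multKronTrans drivers above apply the block-diagonal matrix
+   blockwise. For a block spanning diagonal indices si..ei-1, the
+   corresponding slices of x are first saved into 'work' (savePartOfX) and
+   then overwritten with the block-times-work products, so 'work' only needs
+   room for the largest block:
+   getLargestBlock()*x.getN()*power(x.getM(), x.getDepth()-1) doubles. */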
+
+void BlockDiagonal::printInfo() const
+{
+	printf("Block sizes:");
+	int num_blocks = 0;
+	const_diag_iter start = diag_begin();
+	const_diag_iter end = findBlockStart(start);
+	while (start != diag_end()) {
+		int si = (*start).getIndex();
+		int ei = diagonal.getSize();
+		if (end != diag_end())
+			ei = (*end).getIndex();
+		printf(" %d", ei-si);
+		num_blocks++;
+		start = end;
+		end = findBlockStart(start);
+	}
+	printf("\nNum blocks: %d\n", num_blocks);
+	printf("There are %d zeros out of %d\n",
+		   getNumZeros(), getNumOffdiagonal());
+}
+
+int BlockDiagonal::getNumBlocks() const
+{
+	int num_blocks = 0;
+	const_diag_iter start = diag_begin();
+	const_diag_iter end = findBlockStart(start);
+	while (start != diag_end()) {
+		num_blocks++;
+		start = end;
+		end = findBlockStart(start);
+	}
+	return num_blocks;
+}
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/sylv/cc/BlockDiagonal.h b/dynare++/sylv/cc/BlockDiagonal.h
new file mode 100644
index 0000000000000000000000000000000000000000..c2b94313a21e5b70c88872ba3ca981acf19804c4
--- /dev/null
+++ b/dynare++/sylv/cc/BlockDiagonal.h
@@ -0,0 +1,53 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/BlockDiagonal.h,v 1.1.1.1 2004/06/04 13:00:20 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#ifndef BLOCK_DIAGONAL_H
+#define BLOCK_DIAGONAL_H
+
+#include "QuasiTriangular.h"
+
+
+class BlockDiagonal : public QuasiTriangular {
+	int* const row_len;
+	int* const col_len;
+public:
+	BlockDiagonal(const double* d, int d_size);
+	BlockDiagonal(int p, const BlockDiagonal& b);
+	BlockDiagonal(const BlockDiagonal& b);
+	BlockDiagonal(const QuasiTriangular& t);
+	const BlockDiagonal& operator=(const QuasiTriangular& t)
+		{GeneralMatrix::operator=(t); return *this;}
+	const BlockDiagonal& operator=(const BlockDiagonal& b);
+	~BlockDiagonal() {delete [] row_len; delete [] col_len;}
+	void setZeroBlockEdge(diag_iter edge);
+	int getNumZeros() const;
+	int getNumBlocks() const;
+	int getLargestBlock() const;
+	void printInfo() const;
+
+	void multKron(KronVector& x) const;
+	void multKronTrans(KronVector& x) const;
+
+	const_col_iter col_begin(const DiagonalBlock& b) const;
+	col_iter col_begin(const DiagonalBlock& b);
+	const_row_iter row_end(const DiagonalBlock& b) const;
+	row_iter row_end(const DiagonalBlock& b);
+	QuasiTriangular* clone() const
+		{return new BlockDiagonal(*this);}
+private:
+	void setZerosToRU(diag_iter edge);
+	const_diag_iter findBlockStart(const_diag_iter from) const;
+	static void savePartOfX(int si, int ei, const KronVector& x, Vector& work);
+	void multKronBlock(const_diag_iter start, const_diag_iter end,
+					   KronVector& x, Vector& work) const;
+	void multKronBlockTrans(const_diag_iter start, const_diag_iter end,
+							KronVector& x, Vector& work) const;
+};
+
+#endif /* BLOCK_DIAGONAL_H */
+
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/sylv/cc/GeneralMatrix.cpp b/dynare++/sylv/cc/GeneralMatrix.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b9db8d0f82c5bdca299c6a7f1fb015a8374a3d56
--- /dev/null
+++ b/dynare++/sylv/cc/GeneralMatrix.cpp
@@ -0,0 +1,482 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/GeneralMatrix.cpp,v 1.4 2004/11/24 20:41:59 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+
+#include "SylvException.h"
+#include "GeneralMatrix.h"
+
+#include "cppblas.h"
+#include "cpplapack.h"
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <cmath>
+#include <limits>
+
+int GeneralMatrix::md_length = 32;
+
+GeneralMatrix::GeneralMatrix(const GeneralMatrix& m)
+	: data(m.rows*m.cols), rows(m.rows), cols(m.cols), ld(m.rows)
+{
+	copy(m);
+}
+
+GeneralMatrix::GeneralMatrix(const ConstGeneralMatrix& m)
+	: data(m.rows*m.cols), rows(m.rows), cols(m.cols), ld(m.rows)
+{
+	copy(m);
+}
+
+GeneralMatrix::GeneralMatrix(const GeneralMatrix& m, const char* dummy)
+	: data(m.rows*m.cols), rows(m.cols), cols(m.rows), ld(m.cols)
+{
+	for (int i = 0; i < m.rows; i++)
+		for (int j = 0; j < m.cols; j++)
+			get(j,i) = m.get(i,j);
+}
+
+GeneralMatrix::GeneralMatrix(const ConstGeneralMatrix& m, const char* dummy)
+	: data(m.rows*m.cols), rows(m.cols), cols(m.rows), ld(m.cols)
+{
+	for (int i = 0; i < m.rows; i++)
+		for (int j = 0; j < m.cols; j++)
+			get(j,i) = m.get(i,j);
+}
+
+
+GeneralMatrix::GeneralMatrix(const GeneralMatrix& m, int i, int j, int nrows, int ncols)
+	: data(nrows*ncols), rows(nrows), cols(ncols), ld(nrows)
+{
+	copy(m, i, j);
+}
+
+GeneralMatrix::GeneralMatrix(GeneralMatrix& m, int i, int j, int nrows, int ncols)
+	: data(m.base()+m.ld*j+i, m.ld*(ncols-1)+nrows), rows(nrows), cols(ncols), ld(m.ld)
+{}
+
+GeneralMatrix::GeneralMatrix(const GeneralMatrix& a, const GeneralMatrix& b)
+	: data(a.rows*b.cols), rows(a.rows), cols(b.cols), ld(a.rows)
+{
+	gemm("N", a, "N", b, 1.0, 0.0);
+}
+
+GeneralMatrix::GeneralMatrix(const GeneralMatrix& a, const GeneralMatrix& b, const char* dum)
+	: data(a.rows*b.rows), rows(a.rows), cols(b.rows), ld(a.rows)
+{
+	gemm("N", a, "T", b, 1.0, 0.0);
+}
+
+GeneralMatrix::GeneralMatrix(const GeneralMatrix& a, const char* dum, const GeneralMatrix& b)
+	: data(a.cols*b.cols), rows(a.cols), cols(b.cols), ld(a.cols)
+{
+	gemm("T", a, "N", b, 1.0, 0.0);
+}
+
+GeneralMatrix::GeneralMatrix(const GeneralMatrix& a, const char* dum1,
+							 const GeneralMatrix& b, const char* dum2)
+	: data(a.cols*b.rows), rows(a.cols), cols(b.rows), ld(a.cols)
+{
+	gemm("T", a, "T", b, 1.0, 0.0);
+}
+
+
+
+GeneralMatrix::~GeneralMatrix()
+{
+}
+
+
+
+void GeneralMatrix::place(const ConstGeneralMatrix& m, int i, int j)
+{
+	if (i + m.numRows() > numRows() ||
+		j + m.numCols() > numCols())
+		throw SYLV_MES_EXCEPTION("Bad submatrix placement, matrix dimensions exceeded.");
+
+	GeneralMatrix tmpsub(*this, i, j, m.numRows(), m.numCols());
+	tmpsub.copy(m);
+}
+
+void GeneralMatrix::mult(const ConstGeneralMatrix& a, const ConstGeneralMatrix& b)
+{
+	gemm("N", a, "N", b, 1.0, 0.0);
+}
+
+void GeneralMatrix::multAndAdd(const ConstGeneralMatrix& a, const ConstGeneralMatrix& b,
+							   double mult)
+{
+	gemm("N", a, "N", b, mult, 1.0);
+}
+
+void GeneralMatrix::multAndAdd(const ConstGeneralMatrix& a, const ConstGeneralMatrix& b,
+							   const char* dum, double mult)
+{
+	gemm("N", a, "T", b, mult, 1.0);
+}
+
+void GeneralMatrix::multAndAdd(const ConstGeneralMatrix& a, const char* dum,
+							   const ConstGeneralMatrix& b, double mult)
+{
+	gemm("T", a, "N", b, mult, 1.0);
+}
+
+void GeneralMatrix::multAndAdd(const ConstGeneralMatrix& a, const char* dum1,
+							   const ConstGeneralMatrix& b, const char* dum2, double mult)
+{
+	gemm("T", a, "T", b, mult, 1.0);
+}
+
+void GeneralMatrix::addOuter(const ConstVector& a, double mult)
+{
+	if (numRows() != numCols())
+		throw SYLV_MES_EXCEPTION("Matrix is not square in GeneralMatrix::addOuter.");
+	if (numRows() != a.length())
+		throw SYLV_MES_EXCEPTION("Wrong length of a vector in GeneralMatrix::addOuter.");
+
+	// BLAS dsyr (symmetric rank-1 update) writes only one triangle, so we update both triangles manually
+	for (int i = 0; i < numRows(); i++)
+		for (int j = i; j < numRows(); j++) {
+			double s = mult*a[i]*a[j];
+			get(i,j) = get(i,j) + s;
+			if (i != j)
+				get(j,i) = get(j,i) + s;
+		}
+}
+
+
+void GeneralMatrix::multRight(const ConstGeneralMatrix& m)
+{
+	gemm_partial_right("N", m, 1.0, 0.0);
+}
+
+void GeneralMatrix::multLeft(const ConstGeneralMatrix& m)
+{
+	gemm_partial_left("N", m, 1.0, 0.0);
+}
+
+void GeneralMatrix::multRightTrans(const ConstGeneralMatrix& m)
+{
+	gemm_partial_right("T", m, 1.0, 0.0);
+}
+
+void GeneralMatrix::multLeftTrans(const ConstGeneralMatrix& m)
+{
+	gemm_partial_left("T", m, 1.0, 0.0);
+}
+
+// here we must be careful about the leading dimension (ld)
+void GeneralMatrix::zeros()
+{
+	if (ld == rows)
+		data.zeros();
+	else {
+		for (int i = 0; i < rows; i++) 
+			for (int j = 0; j < cols; j++)
+				get(i,j) = 0;
+	}
+}
+
+void GeneralMatrix::unit()
+{
+	for (int i = 0; i < rows; i++)
+		for (int j = 0; j < cols; j++)
+			if (i == j)
+				get(i,j) = 1.0;
+			else
+				get(i,j) = 0.0;
+}
+
+void GeneralMatrix::nans()
+{
+	for (int i = 0; i < rows; i++) 
+		for (int j = 0; j < cols; j++)
+			get(i,j) = std::numeric_limits<double>::quiet_NaN();
+}
+
+void GeneralMatrix::infs()
+{
+	for (int i = 0; i < rows; i++) 
+		for (int j = 0; j < cols; j++)
+			get(i,j) = std::numeric_limits<double>::infinity();
+}
+
+
+// here we must be careful about the leading dimension (ld)
+void GeneralMatrix::mult(double a)
+{
+	if (ld == rows)
+		data.mult(a);
+	else {
+		for (int i = 0; i < rows; i++) 
+			for (int j = 0; j < cols; j++)
+				get(i,j) *= a;
+	}
+}
+
+// here we must be careful about the leading dimension (ld)
+void GeneralMatrix::add(double a, const ConstGeneralMatrix& m)
+{
+	if (m.numRows() != rows || m.numCols() != cols)
+		throw SYLV_MES_EXCEPTION("Matrix has different size in GeneralMatrix::add.");
+
+	if (ld == rows && m.ld == m.rows)
+		data.add(a, m.data);
+	else {
+		for (int i = 0; i < rows; i++) 
+			for (int j = 0; j < cols; j++)
+				get(i,j) += a*m.get(i,j);
+	}
+}
+
+void GeneralMatrix::add(double a, const ConstGeneralMatrix& m, const char* dum)
+{
+	if (m.numRows() != cols || m.numCols() != rows)
+		throw SYLV_MES_EXCEPTION("Matrix has different size in GeneralMatrix::add.");
+
+	for (int i = 0; i < rows; i++) 
+		for (int j = 0; j < cols; j++)
+			get(i,j) += a*m.get(j,i);
+}
+
+void GeneralMatrix::copy(const ConstGeneralMatrix& m, int ioff, int joff)
+{
+	for (int i = 0; i < rows; i++)
+		for (int j = 0; j < cols; j++)
+			get(i,j) = m.get(i+ioff,j+joff);
+}
+
+void GeneralMatrix::gemm(const char* transa, const ConstGeneralMatrix& a,
+						 const char* transb, const ConstGeneralMatrix& b,
+						 double alpha, double beta)
+{
+	int opa_rows = a.numRows();
+	int opa_cols = a.numCols();
+	if (!strcmp(transa, "T")) {
+		opa_rows = a.numCols();
+		opa_cols = a.numRows();
+	}
+	int opb_rows = b.numRows();
+	int opb_cols = b.numCols();
+	if (!strcmp(transb, "T")) {
+		opb_rows = b.numCols();
+		opb_cols = b.numRows();
+	}
+
+	if (opa_rows != numRows() ||
+		opb_cols != numCols() ||
+		opa_cols != opb_rows) {
+		throw SYLV_MES_EXCEPTION("Wrong dimensions for matrix multiplication.");
+	}
+
+	int m = opa_rows;
+	int n = opb_cols;
+	int k = opa_cols;
+	int lda = a.ld;
+	int ldb = b.ld;
+	int ldc = ld;
+	if (lda > 0 && ldb > 0 && ldc > 0) {
+		BLAS_dgemm(transa, transb, &m, &n, &k, &alpha, a.data.base(), &lda,
+				   b.data.base(), &ldb, &beta, data.base(), &ldc); 
+	} else if (numRows()*numCols() > 0) {
+		if (beta == 0.0)
+			zeros();
+		else
+			mult(beta);
+	}
+}
+
+void GeneralMatrix::gemm_partial_left(const char* trans, const ConstGeneralMatrix& m,
+									  double alpha, double beta)
+{
+	int icol;
+	for (icol = 0; icol + md_length < cols; icol += md_length) {
+		GeneralMatrix incopy((const GeneralMatrix&)*this, 0, icol, rows, md_length);
+		GeneralMatrix inplace((GeneralMatrix&)*this, 0, icol, rows, md_length);
+		inplace.gemm(trans, m, "N", ConstGeneralMatrix(incopy), alpha, beta);
+	}
+	if (cols > icol) {
+		GeneralMatrix incopy((const GeneralMatrix&)*this, 0, icol, rows, cols - icol);
+		GeneralMatrix inplace((GeneralMatrix&)*this, 0, icol, rows, cols - icol);
+		inplace.gemm(trans, m, "N", ConstGeneralMatrix(incopy), alpha, beta);
+	}
+}
+
+void GeneralMatrix::gemm_partial_right(const char* trans, const ConstGeneralMatrix& m,
+									   double alpha, double beta)
+{
+	int irow;
+	for (irow = 0; irow + md_length < rows; irow += md_length) {
+		GeneralMatrix incopy((const GeneralMatrix&)*this, irow, 0, md_length, cols);
+		GeneralMatrix inplace((GeneralMatrix&)*this, irow, 0, md_length, cols);
+		inplace.gemm("N", ConstGeneralMatrix(incopy), trans, m, alpha, beta);
+	}
+	if (rows > irow) {
+		GeneralMatrix incopy((const GeneralMatrix&)*this, irow, 0, rows - irow, cols);
+		GeneralMatrix inplace((GeneralMatrix&)*this, irow, 0, rows - irow, cols);
+		inplace.gemm("N", ConstGeneralMatrix(incopy), trans, m, alpha, beta);
+	}
+}
+
+ConstGeneralMatrix::ConstGeneralMatrix(const GeneralMatrix& m, int i, int j, int nrows, int ncols)
+	: data(m.getData(), j*m.getLD()+i, (ncols-1)*m.getLD()+nrows), rows(nrows), cols(ncols), ld(m.getLD())
+{
+	// one could check here that the submatrix lies fully within the matrix
+}
+
+ConstGeneralMatrix::ConstGeneralMatrix(const ConstGeneralMatrix& m, int i, int j, int nrows, int ncols)
+	: data(m.getData(), j*m.getLD()+i, (ncols-1)*m.getLD()+nrows), rows(nrows), cols(ncols), ld(m.getLD())
+{
+	// one could check here that the submatrix lies fully within the matrix
+}
+
+
+ConstGeneralMatrix::ConstGeneralMatrix(const GeneralMatrix& m)
+		: data(m.data), rows(m.rows), cols(m.cols), ld(m.ld) {}
+
+double ConstGeneralMatrix::getNormInf() const
+{
+	double norm = 0.0;
+	for (int i = 0; i < numRows(); i++) {
+		ConstVector rowi(data.base()+i, ld, cols);
+		double normi = rowi.getNorm1();
+		if (norm < normi)
+			norm = normi;
+	}
+	return norm;
+}
+
+double ConstGeneralMatrix::getNorm1() const
+{
+	double norm = 0.0;
+	for (int j = 0; j < numCols(); j++) {
+		ConstVector colj(data.base()+ld*j, 1, rows);
+		double normj = colj.getNorm1();
+		if (norm < normj)
+			norm = normj;
+	}
+	return norm;
+}
+
+void ConstGeneralMatrix::multVec(double a, Vector& x, double b, const ConstVector& d) const
+{
+	if (x.length() != rows || cols != d.length()) {
+		throw SYLV_MES_EXCEPTION("Wrong dimensions for vector multiply.");
+	}
+	if (rows > 0) {
+		int mm = rows;
+		int nn = cols;
+		double alpha = b;
+		int lda = ld;
+		int incx = d.skip();
+		double beta = a;
+		int incy = x.skip();
+		BLAS_dgemv("N", &mm, &nn, &alpha, data.base(), &lda, d.base(), &incx,
+				   &beta, x.base(), &incy);
+	}
+	
+}
+
+void ConstGeneralMatrix::multVecTrans(double a, Vector& x, double b,
+									  const ConstVector& d) const
+{
+	if (x.length() != cols || rows != d.length()) {
+		throw SYLV_MES_EXCEPTION("Wrong dimensions for vector multiply.");
+	}
+	if (rows > 0) {
+		int mm = rows;
+		int nn = cols;
+		double alpha = b;
+		int lda = ld; // leading dimension, not 'rows' (they differ for submatrix views)
+		int incx = d.skip();
+		double beta = a;
+		int incy = x.skip();
+		BLAS_dgemv("T", &mm, &nn, &alpha, data.base(), &lda, d.base(), &incx,
+				   &beta, x.base(), &incy);
+	}
+}
+
+/* m = inv(this)*m */
+void ConstGeneralMatrix::multInvLeft(const char* trans, int mrows, int mcols, int mld, double* d) const
+{
+	if (rows != cols) {
+		throw SYLV_MES_EXCEPTION("The matrix is not square for inversion.");
+	}
+	if (cols != mrows) {
+		throw SYLV_MES_EXCEPTION("Wrong dimensions for matrix inverse mutliply.");
+	}
+
+	if (rows > 0) {
+		GeneralMatrix inv(*this);
+		int* ipiv = new int[rows];
+		int info;
+		LAPACK_dgetrf(&rows, &rows, inv.getData().base(), &rows, ipiv, &info);
+		LAPACK_dgetrs(trans, &rows, &mcols, inv.base(), &rows, ipiv, d,
+					  &mld, &info);
+		delete [] ipiv;
+	}
+}
+
+/* m = inv(this)*m */
+void ConstGeneralMatrix::multInvLeft(GeneralMatrix& m) const
+{
+	multInvLeft("N", m.numRows(), m.numCols(), m.getLD(), m.getData().base());
+}
+
+/* m = inv(this')*m */
+void ConstGeneralMatrix::multInvLeftTrans(GeneralMatrix& m) const
+{
+	multInvLeft("T", m.numRows(), m.numCols(), m.getLD(), m.getData().base());
+}
+
+/* d = inv(this)*d */
+void ConstGeneralMatrix::multInvLeft(Vector& d) const
+{
+	if (d.skip() != 1) {
+		throw SYLV_MES_EXCEPTION("Skip!=1 not implemented in ConstGeneralMatrix::multInvLeft(Vector&)");
+	}
+
+	multInvLeft("N", d.length(), 1, d.length(), d.base());
+}
+
+/* d = inv(this')*d */
+void ConstGeneralMatrix::multInvLeftTrans(Vector& d) const
+{
+	if (d.skip() != 1) {
+		throw SYLV_MES_EXCEPTION("Skip!=1 not implemented in ConstGeneralMatrix::multInvLeft(Vector&)");
+	}
+
+	multInvLeft("T", d.length(), 1, d.length(), d.base());
+}
+
+
+bool ConstGeneralMatrix::isFinite() const
+{
+	for (int i = 0; i < numRows(); i++)
+		for (int j = 0; j < numCols(); j++)
+			if (! std::isfinite(get(i,j)))
+				return false;
+	return true;
+}
+
+bool ConstGeneralMatrix::isZero() const
+{
+	for (int i = 0; i < numRows(); i++)
+		for (int j = 0; j < numCols(); j++)
+			if (get(i,j) != 0.0)
+				return false;
+	return true;
+}
+
+void ConstGeneralMatrix::print() const
+{
+	printf("rows=%d, cols=%d\n",rows, cols);
+	for (int i = 0; i < rows; i++) {
+		printf("row %d:\n",i);
+		for (int j = 0; j < cols; j++) {
+			printf("%6.3g ",get(i,j));
+		}
+		printf("\n");
+	}
+}
diff --git a/dynare++/sylv/cc/GeneralMatrix.h b/dynare++/sylv/cc/GeneralMatrix.h
new file mode 100644
index 0000000000000000000000000000000000000000..18413b762d67f970ab53197e2af757ac8dada72e
--- /dev/null
+++ b/dynare++/sylv/cc/GeneralMatrix.h
@@ -0,0 +1,284 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/GeneralMatrix.h,v 1.3 2004/11/24 20:41:59 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#ifndef GENERAL_MATRIX_H
+#define GENERAL_MATRIX_H
+
+#include "Vector.h"
+
+class GeneralMatrix;
+
+class ConstGeneralMatrix {
+	friend class GeneralMatrix;
+protected:
+	ConstVector data;
+	int rows;
+	int cols;
+	int ld;
+public:
+	ConstGeneralMatrix(const double* d, int m, int n)
+		: data(d, m*n), rows(m), cols(n), ld(m) {}
+	ConstGeneralMatrix(const GeneralMatrix& m);
+	ConstGeneralMatrix(const GeneralMatrix& m, int i, int j, int nrows, int ncols);
+	ConstGeneralMatrix(const ConstGeneralMatrix& m, int i, int j, int nrows, int ncols);
+	virtual ~ConstGeneralMatrix() {}
+
+	const double& get(int i, int j) const
+		{return data[j*ld+i];}
+	int numRows() const {return rows;}
+	int numCols() const {return cols;}
+	int getLD() const {return ld;}
+	const double* base() const {return data.base();}
+	const ConstVector& getData() const {return data;}
+
+	double getNormInf() const;
+	double getNorm1() const;
+	/* x = scalar(a)*x + scalar(b)*this*d */
+	void multVec(double a, Vector& x, double b, const ConstVector& d) const;
+	/* x = scalar(a)*x + scalar(b)*this'*d */
+	void multVecTrans(double a, Vector& x, double b, const ConstVector& d) const;
+	/* x = x + this*d */
+	void multaVec(Vector& x, const ConstVector& d) const
+		{multVec(1.0, x, 1.0, d);}
+	/* x = x + this'*d */
+	void multaVecTrans(Vector& x, const ConstVector& d) const
+		{multVecTrans(1.0, x, 1.0, d);}
+	/* x = x - this*d */
+	void multsVec(Vector& x, const ConstVector& d) const
+		{multVec(1.0, x, -1.0, d);}
+	/* x = x - this'*d */
+	void multsVecTrans(Vector& x, const ConstVector& d) const
+		{multVecTrans(1.0, x, -1.0, d);}
+	/* m = inv(this)*m */
+	void multInvLeft(GeneralMatrix& m) const;
+	/* m = inv(this')*m */
+	void multInvLeftTrans(GeneralMatrix& m) const;
+	/* d = inv(this)*d */
+	void multInvLeft(Vector& d) const;
+	/* d = inv(this')*d */
+	void multInvLeftTrans(Vector& d) const;
+
+	bool isFinite() const;
+	/** Returns true if the matrix is exactly zero. */
+	bool isZero() const;
+
+	virtual void print() const;
+protected:
+	void multInvLeft(const char* trans, int mrows, int mcols, int mld, double* d) const;
+};
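+
+/* Illustrative usage sketch (hypothetical names, added for clarity; not part
+   of the original interface). Given a ConstGeneralMatrix A wrapping existing
+   data and vectors x and d of matching lengths:
+
+     A.multVec(1.0, x, -0.5, d);   // x = x - 0.5*A*d
+     A.multaVecTrans(x, d);        // x = x + A'*d
+
+   Element (i,j) is data[j*ld+i], so the same class can describe either a
+   full matrix (ld == rows) or a submatrix view of a larger one (ld > rows). */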
+
+
+class GeneralMatrix {
+	friend class ConstGeneralMatrix;
+protected:
+	Vector data;
+	int rows;
+	int cols;
+	int ld;
+public:
+	GeneralMatrix(int m, int n)
+		: data(m*n), rows(m), cols(n), ld(m) {}
+	GeneralMatrix(const double* d, int m, int n)
+		: data(d, m*n), rows(m), cols(n), ld(m) {}
+	GeneralMatrix(double* d, int m, int n)
+		: data(d, m*n), rows(m), cols(n), ld(m) {}
+	GeneralMatrix(const GeneralMatrix& m);
+	GeneralMatrix(const ConstGeneralMatrix& m);
+	GeneralMatrix(const GeneralMatrix&m, const char* dummy); // transpose
+	GeneralMatrix(const ConstGeneralMatrix&m, const char* dummy); // transpose
+	GeneralMatrix(const GeneralMatrix& m, int i, int j, int nrows, int ncols);
+	GeneralMatrix(GeneralMatrix& m, int i, int j, int nrows, int ncols);
+	/* this = a*b */
+	GeneralMatrix(const GeneralMatrix& a, const GeneralMatrix& b);
+	/* this = a*b' */
+	GeneralMatrix(const GeneralMatrix& a, const GeneralMatrix& b, const char* dum);
+	/* this = a'*b */
+	GeneralMatrix(const GeneralMatrix& a, const char* dum, const GeneralMatrix& b);
+	/* this = a'*b */
+	GeneralMatrix(const GeneralMatrix& a, const char* dum1,
+				  const GeneralMatrix& b, const char* dum2);
+
+	virtual ~GeneralMatrix();
+	const GeneralMatrix& operator=(const GeneralMatrix& m)
+		{data=m.data; rows=m.rows; cols=m.cols; ld=m.ld; return *this;}
+
+	const double& get(int i, int j) const
+		{return data[j*ld+i];}
+	double& get(int i, int j)
+		{return data[j*ld+i];}
+	int numRows() const {return rows;}
+	int numCols() const {return cols;}
+	int getLD() const {return ld;}
+	double* base() {return data.base();}
+	const double* base() const {return data.base();}
+	Vector& getData() {return data;}
+	const Vector& getData() const {return data;}
+
+	double getNormInf() const
+		{return ConstGeneralMatrix(*this).getNormInf();}
+	double getNorm1() const
+		{return ConstGeneralMatrix(*this).getNorm1();}
+
+	/* place matrix m to the position (i,j) */
+	void place(const ConstGeneralMatrix& m, int i, int j);
+	void place(const GeneralMatrix& m, int i, int j)
+		{place(ConstGeneralMatrix(m), i, j);}
+
+	/* this = a*b */
+	void mult(const ConstGeneralMatrix& a, const ConstGeneralMatrix& b);
+	void mult(const GeneralMatrix& a, const GeneralMatrix& b)
+		{mult(ConstGeneralMatrix(a), ConstGeneralMatrix(b));}
+
+	/* this = this + scalar*a*b */
+	void multAndAdd(const ConstGeneralMatrix& a, const ConstGeneralMatrix& b,
+					double mult=1.0);
+	void multAndAdd(const GeneralMatrix& a, const GeneralMatrix& b,
+					double mult=1.0)
+		{multAndAdd(ConstGeneralMatrix(a), ConstGeneralMatrix(b), mult);}
+
+	/* this = this + scalar*a*b' */
+	void multAndAdd(const ConstGeneralMatrix& a, const ConstGeneralMatrix& b,
+					const char* dum, double mult=1.0);
+	void multAndAdd(const GeneralMatrix& a, const GeneralMatrix& b,
+					const char* dum, double mult=1.0)
+		{multAndAdd(ConstGeneralMatrix(a), ConstGeneralMatrix(b), dum, mult);}
+
+	/* this = this + scalar*a'*b */
+	void multAndAdd(const ConstGeneralMatrix& a, const char* dum, const ConstGeneralMatrix& b,
+					double mult=1.0);
+	void multAndAdd(const GeneralMatrix& a, const char* dum, const GeneralMatrix& b,
+					double mult=1.0)
+		{multAndAdd(ConstGeneralMatrix(a), dum, ConstGeneralMatrix(b), mult);}
+
+	/* this = this + scalar*a'*b' */
+	void multAndAdd(const ConstGeneralMatrix& a, const char* dum1,
+					const ConstGeneralMatrix& b, const char* dum2, double mult=1.0);
+	void multAndAdd(const GeneralMatrix& a, const char* dum1,
+					const GeneralMatrix& b, const char* dum2, double mult=1.0)
+		{multAndAdd(ConstGeneralMatrix(a), dum1, ConstGeneralMatrix(b),dum2, mult);}
+
+	/* this = this + scalar*a*a' */
+	void addOuter(const ConstVector& a, double mult=1.0);
+	void addOuter(const Vector& a, double mult=1.0)
+		{addOuter(ConstVector(a), mult);}
+
+	/* this = this * m */
+	void multRight(const ConstGeneralMatrix& m);
+	void multRight(const GeneralMatrix& m)
+		{multRight(ConstGeneralMatrix(m));}
+
+	/* this = m * this */
+	void multLeft(const ConstGeneralMatrix& m);
+	void multLeft(const GeneralMatrix& m)
+		{multLeft(ConstGeneralMatrix(m));}
+
+	/* this = this * m' */
+	void multRightTrans(const ConstGeneralMatrix& m);
+	void multRightTrans(const GeneralMatrix& m)
+		{multRightTrans(ConstGeneralMatrix(m));}
+
+	/* this = m' * this */
+	void multLeftTrans(const ConstGeneralMatrix& m);
+	void multLeftTrans(const GeneralMatrix& m)
+		{multLeftTrans(ConstGeneralMatrix(m));}
+
+	/* x = scalar(a)*x + scalar(b)*this*d */
+	void multVec(double a, Vector& x, double b, const ConstVector& d) const
+		{ConstGeneralMatrix(*this).multVec(a, x, b, d);}
+
+	/* x = scalar(a)*x + scalar(b)*this'*d */
+	void multVecTrans(double a, Vector& x, double b, const ConstVector& d) const
+		{ConstGeneralMatrix(*this).multVecTrans(a, x, b, d);}
+
+	/* x = x + this*d */
+	void multaVec(Vector& x, const ConstVector& d) const
+		{ConstGeneralMatrix(*this).multaVec(x, d);}
+
+	/* x = x + this'*d */
+	void multaVecTrans(Vector& x, const ConstVector& d) const
+		{ConstGeneralMatrix(*this).multaVecTrans(x, d);}
+
+	/* x = x - this*d */
+	void multsVec(Vector& x, const ConstVector& d) const
+		{ConstGeneralMatrix(*this).multsVec(x, d);}
+
+	/* x = x - this'*d */
+	void multsVecTrans(Vector& x, const ConstVector& d) const
+		{ConstGeneralMatrix(*this).multsVecTrans(x, d);}
+
+	/* this = zero */
+	void zeros();
+
+	/** this = unit (on main diagonal) */
+	void unit();
+
+	/* this = NaN */
+	void nans();
+
+	/* this = Inf */
+	void infs();
+
+	/* this = scalar*this */
+	void mult(double a);
+
+	/* this = this + scalar*m */
+	void add(double a, const ConstGeneralMatrix& m);
+	void add(double a, const GeneralMatrix& m)
+		{add(a, ConstGeneralMatrix(m));}
+
+	/* this = this + scalar*m' */
+	void add(double a, const ConstGeneralMatrix& m, const char* dum);
+	void add(double a, const GeneralMatrix& m, const char* dum)
+		{add(a, ConstGeneralMatrix(m), dum);}
+
+	bool isFinite() const
+		{return (ConstGeneralMatrix(*this)).isFinite();}
+
+	bool isZero() const
+		{return (ConstGeneralMatrix(*this)).isZero();}
+
+	virtual void print() const
+		{ConstGeneralMatrix(*this).print();}
+private:
+	void copy(const ConstGeneralMatrix& m, int ioff = 0, int joff = 0);
+	void copy(const GeneralMatrix& m, int ioff = 0, int joff = 0)
+		{copy(ConstGeneralMatrix(m), ioff, joff);}
+
+	void gemm(const char* transa, const ConstGeneralMatrix& a,
+			  const char* transb, const ConstGeneralMatrix& b,
+			  double alpha, double beta);
+	void gemm(const char* transa, const GeneralMatrix& a,
+			  const char* transb, const GeneralMatrix& b,
+			  double alpha, double beta)
+		{gemm(transa, ConstGeneralMatrix(a), transb, ConstGeneralMatrix(b),
+			  alpha, beta);}
+
+	/* this = this * op(m) (without whole copy of this) */
+	void gemm_partial_right(const char* trans, const ConstGeneralMatrix& m,
+							double alpha, double beta);
+	void gemm_partial_right(const char* trans, const GeneralMatrix& m,
+							double alpha, double beta)
+		{gemm_partial_right(trans, ConstGeneralMatrix(m), alpha, beta);}
+
+	/* this = op(m) *this (without whole copy of this) */
+	void gemm_partial_left(const char* trans, const ConstGeneralMatrix& m,
+						   double alpha, double beta);
+	void gemm_partial_left(const char* trans, const GeneralMatrix& m,
+						   double alpha, double beta)
+		{gemm_partial_left(trans, ConstGeneralMatrix(m), alpha, beta);}
+
+	/* number of rows/columns for copy used in gemm_partial_* */
+	static int md_length;
+};
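+
+/* Illustrative usage sketch (hypothetical pointers da, db; added for
+   clarity, not part of the original interface). The const char* "dummy"
+   arguments only select the transposed variant; any non-null string such as
+   "T" can be passed:
+
+     GeneralMatrix a(da, 3, 4);           // 3x4, column-major data in da
+     GeneralMatrix b(db, 3, 4);           // 3x4
+     GeneralMatrix atb(a, "T", b);        // 4x4, equals a'*b
+     atb.multAndAdd(a, "T", b, 0.5);      // atb = atb + 0.5*a'*b
+     GeneralMatrix sub(atb, 1, 1, 2, 2);  // 2x2 view into atb (the const
+                                          // overload would copy instead)
+*/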
+
+
+
+
+
+#endif /* GENERAL_MATRIX_H */
+
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/sylv/cc/GeneralSylvester.cpp b/dynare++/sylv/cc/GeneralSylvester.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..4cfc33131b869f536cb08c46df2043cfee42f5d7
--- /dev/null
+++ b/dynare++/sylv/cc/GeneralSylvester.cpp
@@ -0,0 +1,138 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/GeneralSylvester.cpp,v 1.1.1.1 2004/06/04 13:00:20 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#include "GeneralSylvester.h"
+#include "SchurDecomp.h"
+#include "SylvException.h"
+#include "TriangularSylvester.h"
+#include "IterativeSylvester.h"
+
+#include <time.h>
+
+GeneralSylvester::GeneralSylvester(int ord, int n, int m, int zero_cols,
+								   const double* da, const double* db,
+								   const double* dc, const double* dd,
+								   const SylvParams& ps)
+	: pars(ps), 
+	  mem_driver(pars, 1, m, n, ord), order(ord), a(da, n),
+	  b(db, n, n-zero_cols), c(dc, m), d(dd, n, power(m, order)),
+	  solved(false)
+{
+	init();
+}
+
+GeneralSylvester::GeneralSylvester(int ord, int n, int m, int zero_cols,
+								   const double* da, const double* db,
+								   const double* dc, double* dd,
+								   const SylvParams& ps)
+	: pars(ps),
+	  mem_driver(pars, 0, m, n, ord), order(ord), a(da, n),
+	  b(db, n, n-zero_cols), c(dc, m), d(dd, n, power(m, order)),
+	  solved(false)
+{
+	init();
+}
+
+GeneralSylvester::GeneralSylvester(int ord, int n, int m, int zero_cols,
+								   const double* da, const double* db,
+								   const double* dc, const double* dd,
+								   bool alloc_for_check)
+	: pars(alloc_for_check), 
+	  mem_driver(pars, 1, m, n, ord), order(ord), a(da, n),
+	  b(db, n, n-zero_cols), c(dc, m), d(dd, n, power(m, order)),
+	  solved(false)
+{
+	init();
+}
+
+GeneralSylvester::GeneralSylvester(int ord, int n, int m, int zero_cols,
+								   const double* da, const double* db,
+								   const double* dc, double* dd,
+								   bool alloc_for_check)
+	: pars(alloc_for_check),
+	  mem_driver(pars, 0, m, n, ord), order(ord), a(da, n),
+	  b(db, n, n-zero_cols), c(dc, m), d(dd, n, power(m, order)),
+	  solved(false)
+{
+	init();
+}
+
+void GeneralSylvester::init()
+{
+	GeneralMatrix ainvb(b);
+	double rcond1;
+	double rcondinf;
+	a.multInvLeft2(ainvb, d, rcond1, rcondinf);
+	pars.rcondA1 = rcond1;
+	pars.rcondAI = rcondinf;
+	bdecomp = new SchurDecompZero(ainvb);
+	cdecomp = new SimilarityDecomp(c.getData().base(), c.numRows(), *(pars.bs_norm));
+	cdecomp->check(pars, c);
+	cdecomp->infoToPars(pars);
+	if (*(pars.method) == SylvParams::recurse)
+		sylv = new TriangularSylvester(*bdecomp, *cdecomp);
+	else
+		sylv = new IterativeSylvester(*bdecomp, *cdecomp);
+}
+
+void GeneralSylvester::solve()
+{
+	if (solved)
+		throw SYLV_MES_EXCEPTION("Attempt to run solve() more than once.");
+
+	mem_driver.setStackMode(true);
+
+	clock_t start = clock();
+	// multiply d
+	d.multLeftITrans(bdecomp->getQ());
+	d.multRightKron(cdecomp->getQ(), order);
+	// convert to KronVector
+	KronVector dkron(d.getData(), getM(), getN(), order);
+	// solve
+	sylv->solve(pars, dkron);
+	// multiply d back
+	d.multLeftI(bdecomp->getQ());
+	d.multRightKron(cdecomp->getInvQ(), order);
+	clock_t end = clock();
+	pars.cpu_time = ((double)(end-start))/CLOCKS_PER_SEC;
+
+	mem_driver.setStackMode(false);
+
+	solved = true;
+}
+
+void GeneralSylvester::check(const double* ds)
+{
+	if (!solved)
+		throw SYLV_MES_EXCEPTION("Cannot run check on system, which is not solved yet.");
+
+	mem_driver.setStackMode(true);
+
+	// calculate the residual dcheck = A*X + B*X*(C\otimes...\otimes C) - D
+	SylvMatrix dcheck(d.numRows(), d.numCols());
+	dcheck.multLeft(b.numRows()-b.numCols(), b, d);
+	dcheck.multRightKron(c, order);
+	dcheck.multAndAdd(a,d);
+	ConstVector dv(ds, d.numRows()*d.numCols());
+	dcheck.getData().add(-1.0, dv);
+	// calculate relative norms
+	pars.mat_err1 = dcheck.getNorm1()/d.getNorm1();
+	pars.mat_errI = dcheck.getNormInf()/d.getNormInf();
+	pars.mat_errF = dcheck.getData().getNorm()/d.getData().getNorm();
+	pars.vec_err1 = dcheck.getData().getNorm1()/d.getData().getNorm1();
+	pars.vec_errI = dcheck.getData().getMax()/d.getData().getMax();
+
+	mem_driver.setStackMode(false);
+}
+
+GeneralSylvester::~GeneralSylvester()
+{
+	delete bdecomp;
+	delete cdecomp;
+	delete sylv;
+}
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/sylv/cc/GeneralSylvester.h b/dynare++/sylv/cc/GeneralSylvester.h
new file mode 100644
index 0000000000000000000000000000000000000000..a81f5a2d4561fdf2e0f51dbedb9f7fc3d4f872c7
--- /dev/null
+++ b/dynare++/sylv/cc/GeneralSylvester.h
@@ -0,0 +1,61 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/GeneralSylvester.h,v 1.1.1.1 2004/06/04 13:00:20 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#ifndef GENERAL_SYLVESTER_H
+#define GENERAL_SYLVESTER_H
+
+#include "SylvMatrix.h"
+#include "SylvMemory.h"
+#include "SimilarityDecomp.h"
+#include "SylvesterSolver.h"
+
+class GeneralSylvester {
+	SylvParams pars;
+	SylvMemoryDriver mem_driver;
+	int order;
+	const SqSylvMatrix a;
+	const SylvMatrix b;
+	const SqSylvMatrix c;
+	SylvMatrix d;
+	bool solved;
+	SchurDecompZero* bdecomp;
+	SimilarityDecomp* cdecomp;
+	SylvesterSolver* sylv;
+public:
+	/* construct with my copy of d*/
+	GeneralSylvester(int ord, int n, int m, int zero_cols,
+					 const double* da, const double* db,
+					 const double* dc, const double* dd,
+					 const SylvParams& ps);
+	GeneralSylvester(int ord, int n, int m, int zero_cols,
+					 const double* da, const double* db,
+					 const double* dc, const double* dd,
+					 bool alloc_for_check = false);
+	/* construct with provided storage for d */
+	GeneralSylvester(int ord, int n, int m, int zero_cols,
+					 const double* da, const double* db,
+					 const double* dc, double* dd,
+					 bool alloc_for_check = false);
+	GeneralSylvester(int ord, int n, int m, int zero_cols,
+					 const double* da, const double* db,
+					 const double* dc, double* dd,
+					 const SylvParams& ps);
+	virtual ~GeneralSylvester();
+	int getM() const {return c.numRows();}
+	int getN() const {return a.numRows();}
+	const double* getResult() const {return d.base();}
+	const SylvParams& getParams() const {return pars;}
+	SylvParams& getParams() {return pars;}
+	void solve();
+	void check(const double* ds);
+private:
+	void init();
+};
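+
+/* Minimal usage sketch (hypothetical data pointers, added for clarity).
+   Judging from check() in GeneralSylvester.cpp, the class solves
+   A*X + B*X*(C\otimes...\otimes C) = D with 'order' Kronecker factors of C;
+   a is n x n, b is passed as n x (n-zero_cols), c is m x m and d is
+   n x m^order:
+
+     GeneralSylvester gs(order, n, m, zero_cols, da, db, dc, dd, false);
+     gs.solve();                        // overwrites its copy of D with X
+     const double* x = gs.getResult();  // column-major, n x m^order
+     gs.check(dd);                      // fills error norms in getParams()
+*/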
+
+#endif /* GENERAL_SYLVESTER_H */
+
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/sylv/cc/IterativeSylvester.cpp b/dynare++/sylv/cc/IterativeSylvester.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e585f655af909abb7df393dc0b6e98106b572067
--- /dev/null
+++ b/dynare++/sylv/cc/IterativeSylvester.cpp
@@ -0,0 +1,53 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/IterativeSylvester.cpp,v 1.1.1.1 2004/06/04 13:00:20 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#include "IterativeSylvester.h"
+#include "KronUtils.h"
+
+void IterativeSylvester::solve(SylvParams& pars, KronVector& x) const
+{
+	int max_steps = *(pars.max_num_iter);
+	int steps = 1;
+	double max_norm = *(pars.convergence_tol);
+	double norm = performFirstStep(x);
+
+	QuasiTriangular* kpow = matrixK->clone();
+	QuasiTriangular* fpow = matrixF->clone();
+	while (steps < max_steps && norm > max_norm) {
+		kpow->multRight(SqSylvMatrix(*kpow)); // square K; pass a copy, since *kpow is overwritten in place
+		fpow->multRight(SqSylvMatrix(*fpow)); // likewise square F
+		norm = performStep(*kpow, *fpow, x);
+		steps++;
+	}
+
+	delete fpow;
+	delete kpow;
+
+	pars.converged = (norm <= max_norm);
+	pars.iter_last_norm = norm;
+	pars.num_iter = steps;
+}
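+
+/* Note (added explanation, an interpretation of the loop above): with
+   M = F'\otimes...\otimes F'\otimes K, the code appears to implement the
+   squaring ("doubling") expansion of (I+M)^{-1} d:
+
+     (I+M)^{-1} d = (I-M)(I+M^2)(I+M^4)... d
+
+   performFirstStep computes x = d - M*d, and each further step squares K and
+   F and applies x += M^(2^i)*x, until the correction norm falls below
+   convergence_tol or max_num_iter is reached. */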
+
+double IterativeSylvester::performFirstStep(KronVector& x) const
+{
+	KronVector xtmp((const KronVector&)x);
+	KronUtils::multKron(*matrixF, *matrixK, xtmp);
+	x.add(-1., xtmp);
+	double norm = xtmp.getMax();
+	return norm;
+}
+
+double IterativeSylvester::performStep(const QuasiTriangular& k, const QuasiTriangular& f,
+									   KronVector& x)
+{
+	KronVector xtmp((const KronVector&)x);
+	KronUtils::multKron(f, k, xtmp);
+	x.add(1.0, xtmp);
+	double norm = xtmp.getMax();
+	return norm;
+}
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/sylv/cc/IterativeSylvester.h b/dynare++/sylv/cc/IterativeSylvester.h
new file mode 100644
index 0000000000000000000000000000000000000000..cb69fbf7cab6b9ec5e03eb0a044f22255f718b43
--- /dev/null
+++ b/dynare++/sylv/cc/IterativeSylvester.h
@@ -0,0 +1,33 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/IterativeSylvester.h,v 1.1.1.1 2004/06/04 13:00:20 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#ifndef ITERATIVE_SYLVESTER_H
+#define ITERATIVE_SYLVESTER_H
+
+#include "SylvesterSolver.h"
+#include "KronVector.h"
+#include "QuasiTriangular.h"
+#include "SimilarityDecomp.h"
+
+class IterativeSylvester : public SylvesterSolver {
+public:
+	IterativeSylvester(const QuasiTriangular& k, const QuasiTriangular& f)
+		: SylvesterSolver(k, f) {}
+	IterativeSylvester(const SchurDecompZero& kdecomp, const SchurDecomp& fdecomp)
+		: SylvesterSolver(kdecomp, fdecomp) {}
+	IterativeSylvester(const SchurDecompZero& kdecomp, const SimilarityDecomp& fdecomp)
+		: SylvesterSolver(kdecomp, fdecomp) {}
+	void solve(SylvParams& pars, KronVector& x) const;
+private:
+	double performFirstStep(KronVector& x) const;
+	static double performStep(const QuasiTriangular& k, const QuasiTriangular& f,
+							  KronVector& x);
+};
+
+#endif /* ITERATIVE_SYLVESTER_H */
+
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/sylv/cc/KronUtils.cpp b/dynare++/sylv/cc/KronUtils.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..013d16520537eba421a47a4edf6e9d592f6d723b
--- /dev/null
+++ b/dynare++/sylv/cc/KronUtils.cpp
@@ -0,0 +1,53 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/KronUtils.cpp,v 1.1.1.1 2004/06/04 13:00:31 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#include "KronUtils.h"
+
+void KronUtils::multAtLevel(int level, const QuasiTriangular& t,
+							KronVector& x)
+{
+	if (0 < level && level < x.getDepth()) {
+		for (int i = 0; i < x.getM(); i++) {
+			KronVector xi(x, i);
+			multAtLevel(level, t, xi);
+		}
+	} else if (0 == level && 0 < x.getDepth()) {
+		GeneralMatrix tmp(x.base(), x.getN(), power(x.getM(),x.getDepth()));
+		t.multLeftOther(tmp);
+	} else if (0 == level && 0 == x.getDepth()) {
+		Vector b((const Vector&)x);
+		t.multVec(x,b);
+	} else { // 0 < level == depth
+		t.multKron(x);
+	}
+}
+
+void KronUtils::multAtLevelTrans(int level, const QuasiTriangular& t,
+								 KronVector& x)
+{
+	if (0 < level && level < x.getDepth()) {
+		for (int i = 0; i < x.getM(); i++) {
+			KronVector xi(x, i);
+			multAtLevelTrans(level, t, xi);
+		}
+	} else if (0 == level && 0 < x.getDepth()) {
+		GeneralMatrix tmp(x.base(), x.getN(), power(x.getM(),x.getDepth()));
+		t.multLeftOtherTrans(tmp);
+	} else if (level == 0 && 0 == x.getDepth()) {
+		Vector b((const Vector&)x);
+		t.multVecTrans(x,b);
+	} else { // 0 < level == depth
+		t.multKronTrans(x);
+	}
+}
+
+void KronUtils::multKron(const QuasiTriangular& f, const QuasiTriangular& k,
+						 KronVector& x)
+{
+	multAtLevel(0, k, x);
+	if (x.getDepth() > 0) {
+		for (int level = 1; level <= x.getDepth(); level++)
+			multAtLevelTrans(level, f, x);
+	}
+}
diff --git a/dynare++/sylv/cc/KronUtils.h b/dynare++/sylv/cc/KronUtils.h
new file mode 100644
index 0000000000000000000000000000000000000000..2ebeeee30cbd7abd70bf70581aa640c311be9b3f
--- /dev/null
+++ b/dynare++/sylv/cc/KronUtils.h
@@ -0,0 +1,32 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/KronUtils.h,v 1.1.1.1 2004/06/04 13:00:31 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#ifndef KRON_UTILS_H
+#define KRON_UTILS_H
+
+#include "KronVector.h"
+#include "QuasiTriangular.h"
+
+class KronUtils {
+public:
+	/* multiplies x in place by I_m\otimes...\otimes I_m\otimes T\otimes I_m\otimes...\otimes I_m\otimes I_n.
+	   T must be (m,m); the number of \otimes factors is x.getDepth(); level
+	   is the number of I_m factors standing between T and I_n, plus 1. If
+	   level=0, we multiply by I_m\otimes...\otimes I_m\otimes T, where T is (n,n) */
+	static void multAtLevel(int level, const QuasiTriangular& t,
+							KronVector& x);
+	static void multAtLevelTrans(int level, const QuasiTriangular& t,
+								 KronVector& x);
+
+	/* multiplies x=(F'\otimes F'\otimes..\otimes K)x */
+	static void multKron(const QuasiTriangular& f, const QuasiTriangular& k,
+						 KronVector& x);
+};
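+
+/* Illustrative sketch (hypothetical sizes, added for clarity): for a
+   KronVector x with m=2, n=3, depth=2 (length 2*2*3 = 12),
+
+     KronUtils::multAtLevel(0, T, x);  // x = (I_2\otimes I_2\otimes T)x, T is 3x3
+     KronUtils::multAtLevel(2, S, x);  // x = (S\otimes I_2\otimes I_3)x, S is 2x2
+     KronUtils::multKron(F, K, x);     // x = (F'\otimes F'\otimes K)x
+
+   multKron applies K at level 0 and then F' (via multAtLevelTrans) at levels
+   1..depth; this is how IterativeSylvester evaluates the operator. */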
+
+#endif /* KRON_UTILS_H */
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/sylv/cc/KronVector.cpp b/dynare++/sylv/cc/KronVector.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..71da28f7b6150b00167f83eb96ef0f17ee50e75a
--- /dev/null
+++ b/dynare++/sylv/cc/KronVector.cpp
@@ -0,0 +1,109 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/KronVector.cpp,v 1.1.1.1 2004/06/04 13:00:31 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#include "KronVector.h"
+#include "SylvException.h"
+
+int power(int m, int depth)
+{
+	int p = 1;
+	for (int i = 0; i < depth; i++) {
+		p *= m;
+	}
+	return p;
+}
+
+KronVector::KronVector(int mm, int nn, int dp)
+	: Vector(power(mm,dp)*nn), m(mm), n(nn), depth(dp)
+{}
+
+KronVector::KronVector(Vector& v, int mm, int nn, int dp)
+	: Vector(v), m(mm), n(nn), depth(dp)
+{
+	len = power(m,depth)*n;
+	if (v.length() != length()) {
+		throw SYLV_MES_EXCEPTION("Bad conversion KronVector from Vector.");
+	}
+}
+
+KronVector::KronVector(KronVector& v, int i)
+	: Vector(v, i*power(v.m,v.depth-1)*v.n, power(v.m, v.depth-1)*v.n), m(v.m), n(v.n),
+	  depth(v.depth-1)
+{
+	if (depth < 0) {
+		throw SYLV_MES_EXCEPTION("Bad KronVector pick, depth < 0.");
+	}
+}
+
+KronVector::KronVector(const ConstKronVector& v)
+	: Vector(v.length()), m(v.getM()), n(v.getN()), depth(v.getDepth())
+{
+	Vector::operator=(v);
+}
+
+const KronVector& KronVector::operator=(const ConstKronVector& v)
+{
+	Vector::operator=(v);
+	m=v.getM();
+	n=v.getN();
+	depth = v.getDepth();
+	return *this;
+}
+
+const KronVector& KronVector::operator=(const Vector& v)
+{
+	if (length() != v.length()) {
+		throw SYLV_MES_EXCEPTION("Wrong lengths for vector operator =.");
+	}
+	Vector::operator=(v);
+	return *this;
+}
+
+
+
+ConstKronVector::ConstKronVector(const KronVector& v)
+	: ConstVector(v), m(v.getM()), n(v.getN()), depth(v.getDepth())
+{}
+
+ConstKronVector::ConstKronVector(const ConstKronVector& v)
+	: ConstVector(power(v.getM(),v.getDepth())*v.getN()), m(v.getM()), n(v.getN()),
+	  depth(v.getDepth())	  
+{}
+
+ConstKronVector::ConstKronVector(const Vector& v, int mm, int nn, int dp)
+	: ConstVector(v), m(mm), n(nn), depth(dp)
+{
+	len = power(m,depth)*n;
+	if (v.length() != length()) {
+		throw SYLV_MES_EXCEPTION("Bad conversion KronVector from Vector.");
+	}
+}
+
+ConstKronVector::ConstKronVector(const ConstVector& v, int mm, int nn, int dp)
+	: ConstVector(v), m(mm), n(nn), depth(dp)
+{
+	len = power(m,depth)*n;
+	if (v.length() != length()) {
+		throw SYLV_MES_EXCEPTION("Bad conversion KronVector from Vector.");
+	}
+}
+
+ConstKronVector::ConstKronVector(const KronVector& v, int i)
+	: ConstVector(v, i*power(v.getM(),v.getDepth()-1)*v.getN(),
+				  power(v.getM(),v.getDepth()-1)*v.getN()),
+	  m(v.getM()), n(v.getN()), depth(v.getDepth()-1)
+{
+	if (depth < 0) {
+		throw SYLV_MES_EXCEPTION("Bad KronVector pick, depth < 0.");
+	}
+}
+
+ConstKronVector::ConstKronVector(const ConstKronVector& v, int i)
+	: ConstVector(v, i*power(v.m,v.depth-1)*v.n, power(v.m,v.depth-1)*v.n),
+	  m(v.getM()), n(v.getN()), depth(v.getDepth()-1)
+{
+	if (depth < 0) {
+		throw SYLV_MES_EXCEPTION("Bad KronVector pick, depth < 0.");
+	}
+}
diff --git a/dynare++/sylv/cc/KronVector.h b/dynare++/sylv/cc/KronVector.h
new file mode 100644
index 0000000000000000000000000000000000000000..db721c3b73b928b3ae9443d30baece834a6c6069
--- /dev/null
+++ b/dynare++/sylv/cc/KronVector.h
@@ -0,0 +1,58 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/KronVector.h,v 1.1.1.1 2004/06/04 13:00:31 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#ifndef KRON_VECTOR_H
+#define KRON_VECTOR_H
+
+#include "Vector.h"
+
+class ConstKronVector;
+
+class KronVector : public Vector {
+protected:
+	int m;
+	int n;
+	int depth;
+public:
+	KronVector() : Vector((double*)0, 0), m(0), n(0), depth(0)  {}
+	KronVector(int mm, int nn, int dp); // new instance
+	KronVector(Vector& v, int mm, int nn, int dp); // conversion
+	KronVector(KronVector&, int i); // picks i-th subvector
+	KronVector(const ConstKronVector& v); // new instance and copy
+	const KronVector& operator=(KronVector& v)
+		{Vector::operator=(v); m=v.m; n=v.n; depth = v.depth; return *this;}
+	const KronVector& operator=(const KronVector& v)
+		{Vector::operator=(v); m=v.m; n=v.n; depth = v.depth; return *this;}
+	const KronVector& operator=(const ConstKronVector& v);
+	const KronVector& operator=(const Vector& v);
+	int getM() const {return m;}
+	int getN() const {return n;}
+	int getDepth() const {return depth;}
+};
+
+class ConstKronVector : public ConstVector
+{
+protected:
+	int m;
+	int n;
+	int depth;
+public:
+	ConstKronVector(const KronVector& v);
+	ConstKronVector(const ConstKronVector& v);
+	ConstKronVector(const Vector& v, int mm, int nn, int dp);
+	ConstKronVector(const ConstVector& v, int mm, int nn, int dp);
+	ConstKronVector(const KronVector& v, int i);
+	ConstKronVector(const ConstKronVector& v, int i);
+	int getM() const {return m;}
+	int getN() const {return n;}
+	int getDepth() const {return depth;}
+};
+
+int power(int m, int depth);
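+
+/* Illustrative note (hypothetical numbers, added for clarity): a KronVector
+   with m=2, n=3 and depth=2 has length power(2,2)*3 = 12. KronVector(v, i)
+   picks the i-th of the m=2 sub-vectors, each of length power(2,1)*3 = 6 and
+   of depth 1; this is the recursion used in KronUtils::multAtLevel. */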
+
+#endif /* KRON_VECTOR_H */
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/sylv/cc/Makefile b/dynare++/sylv/cc/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..05aa17a0061a894f976367fe0174e860e57dc398
--- /dev/null
+++ b/dynare++/sylv/cc/Makefile
@@ -0,0 +1,35 @@
+# $Header: /var/lib/cvs/dynare_cpp/sylv/cc/Makefile,v 1.4 2005/01/18 21:28:26 kamenik Exp $
+
+# Tag $Name:  $
+
+include ../../Makefile.include
+
+CC_FLAGS := -I../testing -I../cc $(CC_FLAGS)
+
+ifeq ($(DEBUG),yes)
+	CC_FLAGS := $(CC_FLAGS) -g
+else
+	CC_FLAGS := $(CC_FLAGS) -O2
+endif
+
+ifeq ($(OS),Windows_NT)
+	CC_FLAGS := -mno-cygwin -mthreads $(CC_FLAGS)
+endif
+
+
+objects := $(patsubst %.cpp,%.o,$(wildcard *.cpp)) 
+headers := $(wildcard *.h)
+
+all: $(objects)
+
+sylvester.a: $(objects)
+	ar cr sylvester.a $(objects)
+	ranlib sylvester.a
+
+clear:
+	rm -f *.o
+	rm -f sylvester.a
+
+%.o : %.cpp $(headers)
+	$(CC) $(CC_FLAGS) $(EXTERN_DEFS) -c $*.cpp
+
diff --git a/dynare++/sylv/cc/QuasiTriangular.cpp b/dynare++/sylv/cc/QuasiTriangular.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..744a0df5fa442b99c8e631678df608f5ac862cdb
--- /dev/null
+++ b/dynare++/sylv/cc/QuasiTriangular.cpp
@@ -0,0 +1,682 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/QuasiTriangular.cpp,v 1.1.1.1 2004/06/04 13:00:31 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#include "QuasiTriangular.h"
+#include "SylvException.h"
+#include "SchurDecomp.h"
+
+#include "cppblas.h"
+
+#include <stdio.h>
+#include <cmath>
+
+using namespace std;
+
+double DiagonalBlock::getDeterminant() const
+{
+	return (*alpha)*(*alpha) + getSBeta();
+}
+
+double DiagonalBlock::getSBeta() const
+{
+	return -(*beta1)*(*beta2);
+}
+
+double DiagonalBlock::getSize() const
+{
+	if (real)
+		return abs(*alpha);
+	else
+		return sqrt(getDeterminant());
+}
+
+// This function makes the Diagonal inconsistent; it should only be used
+// on temporary matrices that will not be used any more, e.g. in
+// QuasiTriangular::solve (where we need fast performance)
+void DiagonalBlock::setReal()
+{
+	*beta1 = 0;
+	*beta2 = 0;
+	real = true;
+}
+
+void DiagonalBlock::checkBlock(const double* d, int d_size)
+{
+	const double* a1 = d + jbar*d_size+jbar;
+	const double* b1 = a1 + d_size;
+	const double* b2 = a1 + 1;
+	const double* a2 = b1 + 1;
+	if (a1 != alpha.a1)
+		throw SYLV_MES_EXCEPTION("Bad alpha1.");
+	if (!real && b1 != beta1)
+		throw SYLV_MES_EXCEPTION("Bad beta1.");
+	if (!real && b2 != beta2)
+		throw SYLV_MES_EXCEPTION("Bad beta2.");
+	if (!real && a2 != alpha.a2)
+		throw SYLV_MES_EXCEPTION("Bad alpha2.");
+}
+
+Diagonal::Diagonal(double* data, int d_size)
+{
+	int nc = getNumComplex(data, d_size); // returns nc <= d_size/2
+	num_all = d_size - nc;
+	num_real = d_size - 2*nc;
+
+	int jbar = 0;
+	int j = 0;
+	while (j < num_all) {		
+		int id = jbar*d_size + jbar; // index of diagonal block in data
+		int ill = id + 1; // index of element below the diagonal
+		int iur = id + d_size; // index of element right to diagonal
+		int idd = id + d_size + 1; // index of element next on diagonal
+		if ((jbar < d_size-1) && !isZero(data[ill])) {
+			// not the last column and there is a nonzero element below the diagonal
+			DiagonalBlock b(jbar, false, &data[id], &data[idd],
+							&data[iur], &data[ill]);
+			blocks.push_back(b);
+			jbar++;
+		} else {
+			// the last column, or a zero below the diagonal
+			DiagonalBlock b(jbar, true, &data[id], &data[id], NULL, NULL);
+			blocks.push_back(b);
+		}
+		jbar++;
+		j++;
+	}
+}
+
+
+Diagonal::Diagonal(double* data, const Diagonal& d)
+{
+	num_all = d.num_all;
+	num_real = d.num_real;
+	int d_size = d.getSize();
+	for (const_diag_iter it = d.begin(); it != d.end(); ++it) {
+		const DiagonalBlock& dit = *it;
+		double* beta1 = NULL;
+		double* beta2 = NULL;
+		int id = dit.getIndex()*(d_size+1);
+		int idd = id;
+		if (! dit.isReal()) {
+			beta1 = &data[id+d_size];
+			beta2 = &data[id+1];
+			idd = id + d_size + 1;
+		}
+		DiagonalBlock b(dit.getIndex(), dit.isReal(),
+						&data[id], &data[idd], beta1, beta2);
+		blocks.push_back(b);
+	}
+}
+
+
+void Diagonal::copy(const Diagonal& d)
+{
+	num_all = d.num_all;
+	num_real = d.num_real;
+	blocks = d.blocks;
+}
+
+int Diagonal::getNumComplex(const double* data, int d_size)
+{
+	int num_complex = 0;
+	int in = 1;
+	for (int i = 0; i < d_size-1; i++, in = in + d_size + 1) {
+		if (! isZero(data[in])) {
+			num_complex++;
+			if (in < d_size - 2 && ! isZero(data[in + d_size +1])) {
+				throw SYLV_MES_EXCEPTION("Matrix is not quasi-triangular");
+			}
+		}
+	}
+	return num_complex;
+}
+
+void Diagonal::changeBase(double* p)
+{
+	int d_size = getSize();
+	for (diag_iter it = begin(); it != end(); ++it) {
+		const DiagonalBlock& b = *it;
+		int jbar = b.getIndex();
+		int base = d_size*jbar + jbar;
+		if (b.isReal()) {
+			DiagonalBlock bnew(jbar, true, &p[base], &p[base],
+							   NULL, NULL);
+			*it = bnew;
+		} else {
+			DiagonalBlock bnew(jbar, false, &p[base], &p[base+d_size+1],
+							   &p[base+d_size], &p[base+1]);
+			*it = bnew;			
+		}
+	}
+}
+
+void Diagonal::getEigenValues(Vector& eig) const
+{
+	int d_size = getSize();
+	if (eig.length() != 2*d_size) {
+		char mes[500];
+		sprintf(mes, "Wrong length of vector for eigenvalues len=%d, should be=%d.\n",
+				eig.length(), 2*d_size);
+		throw SYLV_MES_EXCEPTION(mes);
+	}
+	for (const_diag_iter it = begin(); it != end(); ++it) {
+		const DiagonalBlock& b = *it;
+		int ind = b.getIndex();
+		eig[2*ind] = *(b.getAlpha());
+		if (b.isReal()) {
+			eig[2*ind+1] = 0.0;
+		} else {
+			double beta = sqrt(b.getSBeta());
+			eig[2*ind+1] = beta;
+			eig[2*ind+2] = eig[2*ind];
+			eig[2*ind+3] = -beta;
+		}
+	}
+}
+
+/* Logically swaps the blocks 'it' and '++it'. Remember that the addresses
+ * alpha, beta1 and beta2 have to be moved as well. This is a dirty (but the
+ * most efficient) way to do it. */
+void Diagonal::swapLogically(diag_iter it)
+{
+	diag_iter itp = it;
+	++itp;
+
+	if ((*it).isReal() && !(*itp).isReal()) {
+		// first is real, second is complex
+		double* d1 = (*it).alpha.a1;
+		double* d2 = (*itp).alpha.a1;
+		double* d3 = (*itp).alpha.a2;
+		// swap
+		DiagonalBlock new_it((*it).jbar, d1, d2);
+		*it = new_it;
+		DiagonalBlock new_itp((*itp).jbar+1, d3);
+		*itp = new_itp;
+	} else if (!(*it).isReal() && (*itp).isReal()) {
+		// first is complex, second is real
+		double* d1 = (*it).alpha.a1;
+		double* d2 = (*it).alpha.a2;
+		double* d3 = (*itp).alpha.a1;
+		// swap
+		DiagonalBlock new_it((*it).jbar, d1);
+		*it = new_it;
+		DiagonalBlock new_itp((*itp).jbar-1, d2, d3);
+		*itp = new_itp;
+	}
+}
+
+void Diagonal::checkConsistency(diag_iter it)
+{
+	if (!(*it).isReal() && isZero((*it).getBeta2())) {
+		(*it).getBeta2() = 0.0; // put exact zero
+		int jbar = (*it).getIndex();
+		double* d2 = (*it).alpha.a2;
+		(*it).alpha.a2 = (*it).alpha.a1;
+		(*it).real = true;
+		(*it).beta1 = 0;
+		(*it).beta2 = 0;
+		DiagonalBlock b(jbar+1, d2);
+		blocks.insert((++it).iter(), b);
+		num_real += 2;
+		num_all++;
+	}		
+}
+
+double Diagonal::getAverageSize(diag_iter start, diag_iter end)
+{
+	double res = 0;
+	int num = 0;
+	for (diag_iter run = start; run != end; ++run) {
+		num++;
+		res += (*run).getSize();
+	}
+	if (num > 0)
+		res = res/num;
+	return res;
+}
+
+Diagonal::diag_iter Diagonal::findClosestBlock(diag_iter start, diag_iter end, double a)
+{
+	diag_iter closest = start;
+	double minim = 1.0e100;
+	for (diag_iter run = start; run != end; ++run) {
+		double dist = abs(a - (*run).getSize());
+		if (dist < minim) {
+			minim = dist;
+			closest = run;
+		}
+	}
+	return closest;
+}
+
+Diagonal::diag_iter Diagonal::findNextLargerBlock(diag_iter start, diag_iter end, double a)
+{
+	diag_iter closest = start;
+	double minim = 1.0e100;
+	for (diag_iter run = start; run != end; ++run) {
+		double dist = (*run).getSize() - a;
+		if ((0 <= dist) && (dist < minim)) {
+			minim = dist;
+			closest = run;
+		}
+	}
+	return closest;
+}	
+
+void Diagonal::print() const
+{
+	printf("Num real: %d, num complex: %d\n",getNumReal(), getNumComplex());
+	for (const_diag_iter it = begin(); it != end(); ++it) {
+		if ((*it).isReal()) {
+			printf("real: jbar=%d, alpha=%f\n", (*it).getIndex(), *((*it).getAlpha()));
+		}
+		else {
+			printf("complex: jbar=%d, alpha=%f, beta1=%f, beta2=%f\n",
+				   (*it).getIndex(), *((*it).getAlpha()), (*it).getBeta1(), (*it).getBeta2());
+		}
+	}
+}
+
+double Diagonal::EPS = 1.0e-300;
+
+bool Diagonal::isZero(double p)
+{
+	return (abs(p)<EPS);
+}
+
+
+QuasiTriangular::const_col_iter
+QuasiTriangular::col_begin(const DiagonalBlock& b) const
+{
+	int jbar = b.getIndex();
+	int d_size = diagonal.getSize();
+	return const_col_iter(&getData()[jbar*d_size], d_size, b.isReal(), 0);
+}
+
+QuasiTriangular::col_iter
+QuasiTriangular::col_begin(const DiagonalBlock& b)
+{
+	int jbar = b.getIndex();
+	int d_size = diagonal.getSize();
+	return col_iter(&getData()[jbar*d_size], d_size, b.isReal(), 0);
+}
+
+QuasiTriangular::const_row_iter
+QuasiTriangular::row_begin(const DiagonalBlock& b) const
+{
+	int jbar = b.getIndex();
+	int d_size = diagonal.getSize();
+	int off = jbar*d_size+jbar+d_size;
+	int col = jbar+1;
+	if (!b.isReal()) {
+		off = off + d_size;
+		col++;
+	}
+	return const_row_iter(&getData()[off], d_size, b.isReal(), col);
+}
+
+
+QuasiTriangular::row_iter
+QuasiTriangular::row_begin(const DiagonalBlock& b)
+{
+	int jbar = b.getIndex();
+	int d_size = diagonal.getSize();
+	int off = jbar*d_size+jbar+d_size;
+	int col = jbar+1;
+	if (!b.isReal()) {
+		off = off + d_size;
+		col++;
+	}
+	return row_iter(&getData()[off], d_size, b.isReal(), col);
+}
+
+QuasiTriangular::const_col_iter
+QuasiTriangular::col_end(const DiagonalBlock& b) const
+{
+	int jbar = b.getIndex();
+	int d_size = diagonal.getSize();
+	return const_col_iter(getData().base()+jbar*d_size+jbar, d_size, b.isReal(),
+						  jbar);
+}
+
+QuasiTriangular::col_iter
+QuasiTriangular::col_end(const DiagonalBlock& b)
+{
+	int jbar = b.getIndex();
+	int d_size = diagonal.getSize();
+	return col_iter(&getData()[jbar*d_size+jbar], d_size, b.isReal(), jbar);
+}
+
+QuasiTriangular::const_row_iter
+QuasiTriangular::row_end(const DiagonalBlock& b) const
+{
+	int jbar = b.getIndex();
+	int d_size = diagonal.getSize();
+	return const_row_iter(&getData()[d_size*d_size+jbar], d_size, b.isReal(),
+						  d_size);
+}
+
+QuasiTriangular::row_iter
+QuasiTriangular::row_end(const DiagonalBlock& b)
+{
+	int jbar = b.getIndex();
+	int d_size = diagonal.getSize();
+	return row_iter(&getData()[d_size*d_size+jbar], d_size, b.isReal(), d_size);
+}
+
+QuasiTriangular::QuasiTriangular(double r, const QuasiTriangular& t)
+	: SqSylvMatrix(t.numRows()), diagonal(getData().base(), t.diagonal)
+{
+	setMatrix(r, t);
+}
+
+QuasiTriangular::QuasiTriangular(double r, const QuasiTriangular& t,
+								 double rr, const QuasiTriangular& tt)
+	: SqSylvMatrix(t.numRows()), diagonal(getData().base(), t.diagonal)
+{
+	setMatrix(r, t);
+	addMatrix(rr, tt);
+}
+
+QuasiTriangular::QuasiTriangular(const QuasiTriangular& t)
+	: SqSylvMatrix(t), diagonal(getData().base(), t.diagonal)
+{
+}
+
+QuasiTriangular::QuasiTriangular(const double* d, int d_size)
+	: SqSylvMatrix(d, d_size), diagonal(getData().base(), d_size)
+{}
+
+QuasiTriangular::~QuasiTriangular()
+{
+}
+
+QuasiTriangular::QuasiTriangular(int p, const QuasiTriangular& t)
+	: SqSylvMatrix(t.numRows()), diagonal(getData().base(), t.diagonal)
+{
+	Vector aux(t.getData());
+	int d_size = diagonal.getSize();
+	double alpha = 1.0;
+	double beta = 0.0;
+	BLAS_dgemm("N", "N", &d_size, &d_size, &d_size, &alpha, aux.base(),
+			   &d_size, t.getData().base(), &d_size, &beta, getData().base(), &d_size);
+}
+
+QuasiTriangular::QuasiTriangular(const SchurDecomp& decomp)
+	: SqSylvMatrix(decomp.getT()),
+	  diagonal(getData().base(), decomp.getDim())
+{
+}
+
+/* this pads the matrix with initial zero columns */
+QuasiTriangular::QuasiTriangular(const SchurDecompZero& decomp)
+	: SqSylvMatrix(decomp.getDim())
+{
+	// nullify first decomp.getZeroCols() columns
+	int zeros = decomp.getZeroCols()*decomp.getDim();
+	Vector zv(getData(), 0, zeros);
+	zv.zeros();
+	// fill right upper part with decomp.getRU()
+	for (int i = 0; i < decomp.getRU().numRows(); i++) {
+		for (int j = 0; j < decomp.getRU().numCols(); j++) {
+			getData()[(j+decomp.getZeroCols())*decomp.getDim()+i] = decomp.getRU().get(i,j);
+		}
+	}
+	// fill right lower part with decomp.getT()
+	for (int i = 0; i < decomp.getT().numRows(); i++) {
+		for (int j = 0; j < decomp.getT().numCols(); j++) {
+			getData()[(j+decomp.getZeroCols())*decomp.getDim()+decomp.getZeroCols()+i] = 
+				decomp.getT().get(i,j);
+		}
+	}
+	// construct diagonal
+	Diagonal* const d = new Diagonal(getData().base(), decomp.getDim());
+	diagonal = *d;
+	delete d;
+}
+
+void QuasiTriangular::setMatrix(double r, const QuasiTriangular& t)
+{
+	getData().zeros();
+	getData().add(r, t.getData());
+}
+
+void QuasiTriangular::setMatrixViaIter(double r, const QuasiTriangular& t)
+{
+	register double rr = r;
+	diag_iter dil = diag_begin();
+	const_diag_iter dir = t.diag_begin();
+	for ( ; dil != diag_end(); ++dil, ++dir) {
+		(*dil).getAlpha() = rr*(*(*dir).getAlpha());
+		if (! (*dil).isReal()) {
+			(*dil).getBeta1() = rr*(*dir).getBeta1();
+			(*dil).getBeta2() = rr*(*dir).getBeta2();
+		}
+		col_iter cil = col_begin(*dil);
+		const_col_iter cir = t.col_begin(*dir);
+		for ( ; cil != col_end(*dil); ++cil, ++cir) {
+			if ((*dil).isReal()) {
+				*cil = rr*(*cir);
+			} else {
+				cil.a() = rr*cir.a();
+				cil.b() = rr*cir.b();
+			}
+		}
+	}
+}
+
+void QuasiTriangular::addMatrix(double r, const QuasiTriangular& t)
+{
+	getData().add(r, t.getData());
+}
+
+void QuasiTriangular::addMatrixViaIter(double r, const QuasiTriangular& t)
+{
+	register double rr = r;
+	diag_iter dil = diag_begin();
+	const_diag_iter dir = t.diag_begin();
+	for ( ; dil != diag_end(); ++dil, ++dir) {
+		(*dil).getAlpha() = (*(*dil).getAlpha()) + rr*(*(*dir).getAlpha());
+		if (! (*dil).isReal()) {
+			(*dil).getBeta1() += rr*(*dir).getBeta1();
+			(*dil).getBeta2() += rr*(*dir).getBeta2();
+		}
+		col_iter cil = col_begin(*dil);
+		const_col_iter cir = t.col_begin(*dir);
+		for ( ; cil != col_end(*dil); ++cil, ++cir) {
+			if ((*dil).isReal()) {
+				*cil += rr*(*cir);
+			} else {
+				cil.a() += rr*cir.a();
+				cil.b() += rr*cir.b();
+			}
+		}
+	}
+}
+
+void QuasiTriangular::addUnit()
+{
+	for (diag_iter di = diag_begin(); di != diag_end(); ++di) {
+		(*di).getAlpha() = *((*di).getAlpha()) + 1.0;
+	}
+}
+
+void QuasiTriangular::solve(Vector& x, const ConstVector& b, double& eig_min)
+{
+	x = b;
+	solvePre(x, eig_min);
+}
+
+void QuasiTriangular::solveTrans(Vector& x, const ConstVector& b, double& eig_min)
+{
+	x = b;
+	solvePreTrans(x, eig_min);
+}
+
+void QuasiTriangular::solvePre(Vector& x, double& eig_min)
+{
+	addUnit();
+	for (diag_iter di = diag_begin(); di != diag_end(); ++di) {
+		double eig_size;
+		if (!(*di).isReal()) {
+			eig_size = (*di).getDeterminant();
+			eliminateLeft((*di).getIndex()+1, (*di).getIndex(), x);
+		} else {
+			eig_size = *(*di).getAlpha()*(*(*di).getAlpha());
+		}
+		if (eig_size < eig_min)
+			eig_min = eig_size;
+	}
+
+	int nn = diagonal.getSize();
+	int lda = diagonal.getSize();
+	int incx = x.skip();
+	BLAS_dtrsv("U", "N", "N", &nn, getData().base(), &lda, x.base(), &incx);
+}
+
+void QuasiTriangular::solvePreTrans(Vector& x, double& eig_min)
+{
+	addUnit();
+	for (diag_iter di = diag_begin(); di != diag_end(); ++di) {
+		double eig_size;
+		if (!(*di).isReal()) {
+			eig_size = (*di).getDeterminant();
+			eliminateRight((*di).getIndex()+1, (*di).getIndex(), x);
+		} else {
+			eig_size = *(*di).getAlpha()*(*(*di).getAlpha());
+		}
+		if (eig_size < eig_min)
+			eig_min = eig_size;
+	}
+	
+	int nn = diagonal.getSize();
+	int lda = diagonal.getSize();
+	int incx = x.skip();
+	BLAS_dtrsv("U", "T", "N", &nn, getData().base(), &lda, x.base(), &incx);
+}
+
+
+/* calculates x = Tb */
+void QuasiTriangular::multVec(Vector& x, const ConstVector& b) const
+{
+	x = b;
+	int nn = diagonal.getSize();
+	int lda = diagonal.getSize();
+	int incx = x.skip();
+	BLAS_dtrmv("U", "N", "N", &nn, getData().base(), &lda, x.base(), &incx);
+	for (const_diag_iter di = diag_begin(); di != diag_end(); ++di) {
+		if (!(*di).isReal()) {
+			int jbar = (*di).getIndex();
+			x[jbar+1] += (*di).getBeta2()*(b[jbar]);
+		}
+	}
+}
+
+
+void QuasiTriangular::multVecTrans(Vector& x, const ConstVector& b) const
+{
+	x = b;
+	int nn = diagonal.getSize();
+	int lda = diagonal.getSize();
+	int incx = x.skip();
+	BLAS_dtrmv("U", "T", "N", &nn, getData().base(), &lda, x.base(), &incx);
+	for (const_diag_iter di = diag_begin(); di != diag_end(); ++di) {
+		if (!(*di).isReal()) {
+			int jbar = (*di).getIndex();
+			x[jbar] += (*di).getBeta2()*b[jbar+1];
+		}
+	}	
+}
+
+void QuasiTriangular::multaVec(Vector& x, const ConstVector& b) const
+{
+	Vector tmp((const Vector&) x); // new copy
+	multVec(x, b);
+	x.add(1.0, tmp);
+}
+
+void QuasiTriangular::multaVecTrans(Vector& x, const ConstVector& b) const
+{
+	Vector tmp((const Vector&) x); // new copy
+	multVecTrans(x, b);
+	x.add(1.0, tmp);
+}
+
+/* calculates x=x+(T\otimes I)b, where size of I is given by b (KronVector) */
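+/* implementation note: by the identity (T\otimes I)vec(B) = vec(B*T'), the
+ * vector b is viewed as an (id x m) matrix b_resh and x_resh += b_resh*T' is
+ * added via multAndAdd; multaKronTrans below uses the same identity without
+ * the transpose */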
+void QuasiTriangular::multaKron(KronVector& x, const ConstKronVector& b) const
+{
+	int id = b.getN()*power(b.getM(), b.getDepth()-1);
+	ConstGeneralMatrix b_resh(b.base(), id, b.getM());
+	GeneralMatrix x_resh(x.base(), id, b.getM());
+	x_resh.multAndAdd(b_resh, ConstGeneralMatrix(*this), "trans");
+}
+
+
+/* calculates x=x+(T'\otimes I)b, where size of I is given by b (KronVector) */
+void
+QuasiTriangular::multaKronTrans(KronVector& x, const ConstKronVector& b) const
+{
+	int id = b.getN()*power(b.getM(), b.getDepth()-1);
+	ConstGeneralMatrix b_resh(b.base(), id, b.getM());
+	GeneralMatrix x_resh(x.base(), id, b.getM());
+	x_resh.multAndAdd(b_resh, ConstGeneralMatrix(*this));
+}
+
+
+void QuasiTriangular::multKron(KronVector& x) const
+{
+	KronVector b((const KronVector&)x); // make copy
+	x.zeros();
+	multaKron(x, b);
+}
+
+void
+QuasiTriangular::multKronTrans(KronVector& x) const
+{
+	KronVector b((const KronVector&)x); // make copy
+	x.zeros();
+	multaKronTrans(x, b);
+}
+
+void QuasiTriangular::multLeftOther(GeneralMatrix& a) const
+{
+	a.multLeft(*this);
+}
+
+void QuasiTriangular::multLeftOtherTrans(GeneralMatrix& a) const
+{
+	a.multLeftTrans(*this);
+}
+
+void QuasiTriangular::swapDiagLogically(diag_iter it)
+{
+	diagonal.swapLogically(it);
+}
+
+void QuasiTriangular::checkDiagConsistency(diag_iter it)
+{
+	diagonal.checkConsistency(it);
+}
+
+double QuasiTriangular::getAverageDiagSize(diag_iter start, diag_iter end)
+{
+	return diagonal.getAverageSize(start, end);
+}
+
+QuasiTriangular::diag_iter
+QuasiTriangular::findClosestDiagBlock(diag_iter start, diag_iter end, double a)
+{
+	return diagonal.findClosestBlock(start, end, a);
+}
+
+QuasiTriangular::diag_iter
+QuasiTriangular::findNextLargerBlock(diag_iter start, diag_iter end, double a)
+{
+	return diagonal.findNextLargerBlock(start, end, a);
+}
+
+int QuasiTriangular::getNumOffdiagonal() const
+{
+	return 	diagonal.getSize()*(diagonal.getSize()-1)/2 - diagonal.getNumComplex();
+}
diff --git a/dynare++/sylv/cc/QuasiTriangular.h b/dynare++/sylv/cc/QuasiTriangular.h
new file mode 100644
index 0000000000000000000000000000000000000000..ff7281141912a1c9e8d15a32e54090ad5f365573
--- /dev/null
+++ b/dynare++/sylv/cc/QuasiTriangular.h
@@ -0,0 +1,339 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/QuasiTriangular.h,v 1.1.1.1 2004/06/04 13:00:44 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#ifndef QUASI_TRIANGULAR_H
+#define QUASI_TRIANGULAR_H
+
+#include "Vector.h"
+#include "KronVector.h"
+#include "SylvMatrix.h"
+
+#include <list>
+
+using namespace std;
+
+class DiagonalBlock;
+class Diagonal;
+class DiagPair {
+private:
+	double* a1;
+	double* a2;
+public:
+	DiagPair() {}
+	DiagPair(double* aa1, double* aa2) {a1 = aa1; a2 = aa2;}
+	DiagPair(const DiagPair& p) {a1 = p.a1; a2 = p.a2;}
+	const DiagPair& operator=(const DiagPair& p) {a1 = p.a1; a2 = p.a2; return *this;}
+	const DiagPair& operator=(double v) {*a1 = v; *a2 = v; return *this;}
+	const double& operator*() const {return *a1;}
+	/** here we must not define double& operator*(), since it would not
+	 rewrite both values; operator= is used for that instead */
+	friend class Diagonal;
+	friend class DiagonalBlock;
+};
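+
+/* usage sketch (hypothetical doubles x and y standing in for the two storage
+ * locations of a diagonal entry):
+ *
+ *   double x = 1.5, y = 0.0;
+ *   DiagPair dp(&x, &y);
+ *   dp = 2.0;        // writes through both pointers: x == y == 2.0
+ *   double a = *dp;  // reads through the first pointer only
+ */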
+
+class DiagonalBlock {
+private:
+	int jbar;
+	bool real;
+	DiagPair alpha;
+	double* beta1;
+	double* beta2;
+
+	void copy(const DiagonalBlock& b) {
+		jbar = b.jbar;
+		real = b.real;
+		alpha = b.alpha;
+		beta1 = b.beta1;
+		beta2 = b.beta2;
+	}
+
+public:
+	DiagonalBlock() {}
+	DiagonalBlock(int jb, bool r, double* a1, double* a2,
+				  double* b1, double* b2)
+		: alpha(a1, a2)
+		{
+			jbar = jb;
+			real = r;
+			beta1 = b1;
+			beta2 = b2;
+		}
+	// construct complex block
+	DiagonalBlock(int jb, double* a1, double* a2)
+		: alpha(a1, a2)
+		{
+			jbar = jb;
+			real = false;
+			beta1 = a2 - 1;
+			beta2 = a1 + 1;
+		}
+	// construct real block
+	DiagonalBlock(int jb, double* a1)
+		: alpha(a1, a1)
+		{
+			jbar = jb;
+			real = true;
+			beta1 = 0;
+			beta2 = 0;
+		}
+	DiagonalBlock(const DiagonalBlock& b)
+		{copy(b);}
+	const DiagonalBlock& operator=(const DiagonalBlock& b)
+		{copy(b); return *this;}
+	int getIndex() const
+		{return jbar;}
+	bool isReal() const
+		{return real;}
+	const DiagPair& getAlpha() const
+		{return alpha;}
+	DiagPair& getAlpha()
+		{return alpha;}
+	double& getBeta1() const
+		{return *beta1;}
+	double& getBeta2() const
+		{return *beta2;}
+	double getDeterminant() const;
+	double getSBeta() const;
+	double getSize() const;
+	void setReal();
+	// for debugging
+	void checkBlock(const double* d, int d_size);
+	friend class Diagonal;
+};
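+
+/* layout note (column-major storage, leading dimension d_size): for a complex
+ * block the two diagonal entries pointed to by a1 and a2 hold the common
+ * value alpha, beta1 = a2 - 1 is the entry at (jbar, jbar+1) and
+ * beta2 = a1 + 1 is the entry at (jbar+1, jbar), so the 2x2 block reads
+ *     | alpha  beta1 |
+ *     | beta2  alpha |
+ */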
+
+template <class _Tdiag, class _Tblock, class _Titer>
+struct _diag_iter {
+	typedef _diag_iter<_Tdiag, _Tblock, _Titer> _Self;
+	_Tdiag diag;
+	_Titer it;
+public:
+	_diag_iter(_Tdiag d, _Titer iter) : diag(d), it(iter) {}
+	_Tblock operator*() const {return *it;}
+	_Self& operator++() {++it; return *this;}
+	_Self& operator--() {--it; return *this;}
+	bool operator==(const _Self& x) const {return x.it == it;}
+	bool operator!=(const _Self& x) const {return x.it != it;}
+	const _Self& operator=(const _Self& x) {it = x.it; return *this;}
+	_Titer iter() const {return it;}
+};
+
+class Diagonal {
+public:
+	typedef _diag_iter<const Diagonal&, const DiagonalBlock&, list<DiagonalBlock>::const_iterator> const_diag_iter;
+	typedef _diag_iter<Diagonal&, DiagonalBlock&, list<DiagonalBlock>::iterator> diag_iter;
+private:
+	int num_all;
+	list<DiagonalBlock> blocks;
+	int num_real;
+	void copy(const Diagonal&);
+public:
+	Diagonal() : num_all(0), num_real(0) {}
+	Diagonal(double* data, int d_size);
+	Diagonal(double* data, const Diagonal& d);
+	Diagonal(const Diagonal& d) {copy(d);}
+	const Diagonal& operator =(const Diagonal& d) {copy(d); return *this;}
+	virtual ~Diagonal() {}
+
+	int getNumComplex() const {return num_all - num_real;}
+	int getNumReal() const {return num_real;}
+	int getSize() const {return getNumReal() + 2*getNumComplex();}
+	int getNumBlocks() const {return num_all;}
+	void getEigenValues(Vector& eig) const;
+	void swapLogically(diag_iter it);
+	void checkConsistency(diag_iter it);
+	double getAverageSize(diag_iter start, diag_iter end);
+	diag_iter findClosestBlock(diag_iter start, diag_iter end, double a);
+	diag_iter findNextLargerBlock(diag_iter start, diag_iter end, double a);
+	void print() const;
+
+	diag_iter begin()
+		{return diag_iter(*this, blocks.begin());}
+	const_diag_iter begin() const
+		{return const_diag_iter(*this, blocks.begin());}
+	diag_iter end()
+		{return diag_iter(*this, blocks.end());}
+	const_diag_iter end() const
+		{return const_diag_iter(*this, blocks.end());}
+
+	/* redefine the pointers so that the data start at p */
+	void changeBase(double* p);
+private:
+	static double EPS;
+	static int getNumComplex(const double* data, int d_size);
+	static bool isZero(double p);
+};
+
+template <class _TRef, class _TPtr>
+struct _matrix_iter {
+	typedef _matrix_iter<_TRef, _TPtr> _Self;
+	int d_size;
+	bool real;
+	_TPtr ptr;
+public:
+	_matrix_iter(_TPtr base, int ds, bool r)
+		{ptr = base; d_size = ds; real = r;}
+	virtual ~_matrix_iter() {}
+	const _Self& operator=(const _Self& it)
+		{ptr = it.ptr; d_size = it.d_size; real = it.real; return *this;}
+	bool operator==(const _Self& it) const
+		{return ptr == it.ptr;}
+	bool operator!=(const _Self& it) const
+		{return ptr != it.ptr;}
+	_TRef operator*() const
+		{return *ptr;}
+	_TRef a() const
+		{return *ptr;}
+	virtual _Self& operator++() =0;
+};
+
+template <class _TRef, class _TPtr>
+class _column_iter : public _matrix_iter<_TRef, _TPtr> {
+	typedef _matrix_iter<_TRef, _TPtr> _Tparent; 
+	typedef _column_iter<_TRef, _TPtr> _Self;
+	int row;
+public:
+	_column_iter(_TPtr base, int ds, bool r, int rw)
+		: _matrix_iter<_TRef, _TPtr>(base, ds, r), row(rw) {};
+	_Self& operator++()
+		{_Tparent::ptr++; row++; return *this;}
+	_TRef b() const
+		{
+			if (_Tparent::real) {
+				return *(_Tparent::ptr);
+			} else {
+				return *(_Tparent::ptr+_Tparent::d_size);
+			}
+		}
+	int getRow() const {return row;}
+};
+
+template <class _TRef, class _TPtr>
+class _row_iter : public _matrix_iter<_TRef, _TPtr> {
+	typedef _matrix_iter<_TRef, _TPtr> _Tparent; 
+	typedef _row_iter<_TRef, _TPtr> _Self;
+	int col;
+public:
+	_row_iter(_TPtr base, int ds, bool r, int cl)
+		: _matrix_iter<_TRef, _TPtr>(base, ds, r), col(cl) {};
+	_Self& operator++()
+		{_Tparent::ptr += _Tparent::d_size; col++; return *this;}
+	virtual _TRef b() const
+		{
+			if (_Tparent::real) {
+				return *(_Tparent::ptr);
+			}else {
+				return *(_Tparent::ptr+1);
+			}
+		}
+	int getCol() const {return col;}
+};
+
+class SchurDecomp;
+class SchurDecompZero;
+
+class QuasiTriangular : public SqSylvMatrix {
+public:
+	typedef _column_iter<const double&, const double*> const_col_iter;
+	typedef _column_iter<double&, double*> col_iter;
+	typedef _row_iter<const double&, const double*> const_row_iter;
+	typedef _row_iter<double&, double*> row_iter;	
+	typedef Diagonal::const_diag_iter const_diag_iter;
+	typedef Diagonal::diag_iter diag_iter;
+protected:
+	Diagonal diagonal;
+public:
+	QuasiTriangular(const double* d, int d_size);
+	QuasiTriangular(double r, const QuasiTriangular& t);
+	QuasiTriangular(double r, const QuasiTriangular& t,
+					double rr, const QuasiTriangular& tt);
+	QuasiTriangular(int p, const QuasiTriangular& t);
+	QuasiTriangular(const SchurDecomp& decomp);
+	QuasiTriangular(const SchurDecompZero& decomp);
+	QuasiTriangular(const QuasiTriangular& t);
+	virtual ~QuasiTriangular();
+	const Diagonal& getDiagonal() const {return diagonal;}
+	int getNumOffdiagonal() const;
+	void swapDiagLogically(diag_iter it);
+	void checkDiagConsistency(diag_iter it);
+	double getAverageDiagSize(diag_iter start, diag_iter end);
+	diag_iter findClosestDiagBlock(diag_iter start, diag_iter end, double a);
+	diag_iter findNextLargerBlock(diag_iter start, diag_iter end, double a);
+
+
+	/* (I+T)y = x, y-->x  */
+	virtual void solvePre(Vector& x, double& eig_min);
+	/* (I+T')y = x, y-->x */
+	virtual void solvePreTrans(Vector& x, double& eig_min);
+	/* (I+T)x = b */
+	virtual void solve(Vector& x, const ConstVector& b, double& eig_min);
+	/* (I+T')x = b */
+	virtual void solveTrans(Vector& x, const ConstVector& b, double& eig_min);
+	/* x = Tb */
+	virtual void multVec(Vector& x, const ConstVector& b) const;
+	/* x = T'b */
+	virtual void multVecTrans(Vector& x, const ConstVector& b) const;
+	/* x = x + Tb */
+	virtual void multaVec(Vector& x, const ConstVector& b) const;
+	/* x = x + T'b */
+	virtual void multaVecTrans(Vector& x, const ConstVector& b) const;
+	/* x = (T\otimes I)x */
+	virtual void multKron(KronVector& x) const;
+	/* x = (T'\otimes I)x */
+	virtual void multKronTrans(KronVector& x) const;
+	/* A = T*A */
+	virtual void multLeftOther(GeneralMatrix& a) const;
+	/* A = T'*A */
+	virtual void multLeftOtherTrans(GeneralMatrix& a) const;
+
+	const_diag_iter diag_begin() const
+		{return diagonal.begin();}
+	diag_iter diag_begin()
+		{return diagonal.begin();}
+	const_diag_iter diag_end() const 
+		{return diagonal.end();}
+	diag_iter diag_end()
+		{return diagonal.end();}
+
+	/* iterators for off diagonal elements */
+	virtual const_col_iter col_begin(const DiagonalBlock& b) const;
+	virtual col_iter col_begin(const DiagonalBlock& b);
+	virtual const_row_iter row_begin(const DiagonalBlock& b) const;
+	virtual row_iter row_begin(const DiagonalBlock& b);
+	virtual const_col_iter col_end(const DiagonalBlock& b) const;
+	virtual col_iter col_end(const DiagonalBlock& b);
+	virtual const_row_iter row_end(const DiagonalBlock& b) const;
+	virtual row_iter row_end(const DiagonalBlock& b);
+
+	/* clone */
+	virtual QuasiTriangular* clone() const
+		{return new QuasiTriangular(*this);}
+	virtual QuasiTriangular* clone(int p, const QuasiTriangular& t) const
+		{return new QuasiTriangular(p, t);}
+	virtual QuasiTriangular* clone(double r) const
+		{return new QuasiTriangular(r, *this);}
+	virtual QuasiTriangular* clone(double r, double rr, const QuasiTriangular& tt) const
+		{return new QuasiTriangular(r, *this, rr, tt);}
+protected:
+	void setMatrix(double r, const QuasiTriangular& t);
+	void addMatrix(double r, const QuasiTriangular& t);
+private:
+	void addUnit();
+	/* x = x + (T\otimes I)b */
+	void multaKron(KronVector& x, const ConstKronVector& b) const;
+	/* x = x + (T'\otimes I)b */
+	void multaKronTrans(KronVector& x, const ConstKronVector& b) const;
+	/* implementation via iterators, useful for large matrices */
+	void setMatrixViaIter(double r, const QuasiTriangular& t);
+	void addMatrixViaIter(double r, const QuasiTriangular& t);	
+	/* hide noneffective implementations of parents */
+	void multsVec(Vector& x, const ConstVector& d) const;
+	void multsVecTrans(Vector& x, const ConstVector& d) const;
+};
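+
+/* minimal usage sketch; d is hypothetical column-major data of an n x n
+ * matrix already in real Schur form, and the Vector/ConstVector constructors
+ * from Vector.h (not shown in this file) are assumed to take a raw pointer
+ * and a length:
+ *
+ *   QuasiTriangular t(d, n);
+ *   Vector x(n);
+ *   ConstVector b(rhs, n);
+ *   double eig_min = 1.0e30;
+ *   t.solve(x, b, eig_min);   // solves (I+T)x = b, tracking the smallest eigenvalue size
+ *   t.multVec(x, b);          // x = T*b
+ */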
+
+#endif /* QUASI_TRIANGULAR_H */
+
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/sylv/cc/QuasiTriangularZero.cpp b/dynare++/sylv/cc/QuasiTriangularZero.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..49054970d396d96892b58bb1f29466aa44532e24
--- /dev/null
+++ b/dynare++/sylv/cc/QuasiTriangularZero.cpp
@@ -0,0 +1,148 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/QuasiTriangularZero.cpp,v 1.1.1.1 2004/06/04 13:00:44 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#include "QuasiTriangularZero.h"
+#include "SchurDecomp.h"
+#include "SylvMatrix.h"
+#include "SylvException.h"
+
+#include <stdio.h>
+
+QuasiTriangularZero::QuasiTriangularZero(int num_zeros, const double* d,
+										 int d_size)
+	: QuasiTriangular(SqSylvMatrix(GeneralMatrix(d, num_zeros+d_size, d_size),
+								   num_zeros, 0, d_size).getData().base(),
+					  d_size),
+	  nz(num_zeros),
+	  ru(GeneralMatrix(d, num_zeros+d_size, d_size), 0, 0, num_zeros, d_size)
+{
+}
+
+QuasiTriangularZero::QuasiTriangularZero(double r,
+										 const QuasiTriangularZero& t)
+	: QuasiTriangular(r, t),
+	  nz(t.nz),
+	  ru(t.ru)
+{
+	ru.mult(r);
+}
+
+QuasiTriangularZero::QuasiTriangularZero(double r,
+										 const QuasiTriangularZero& t,
+										 double rr,
+										 const QuasiTriangularZero& tt)
+	: QuasiTriangular(r, t, rr, tt),
+	  nz(t.nz),
+	  ru(t.ru)
+{
+	ru.mult(r);
+	ru.add(rr, tt.ru);
+}
+
+QuasiTriangularZero::QuasiTriangularZero(int p, const QuasiTriangularZero& t)
+	: QuasiTriangular(p, t),
+	  nz(t.nz),
+	  ru(t.ru)
+{
+	ru.multRight(t);
+}
+
+QuasiTriangularZero::QuasiTriangularZero(const SchurDecompZero& decomp)
+	: QuasiTriangular(decomp.getT().getData().base(),
+					  decomp.getT().numRows()),
+	  nz(decomp.getZeroCols()),
+	  ru(decomp.getRU())
+{
+}
+
+QuasiTriangularZero::QuasiTriangularZero(const QuasiTriangular& t)
+	: QuasiTriangular(t),
+	  nz(0), ru(0, t.getDiagonal().getSize())
+{
+}
+
+QuasiTriangularZero::~QuasiTriangularZero()
+{
+}
+
+void QuasiTriangularZero::solvePre(Vector& x, double& eig_min)
+{
+	Vector xu(x, 0, nz);
+	Vector xl(x, nz, x.length()-nz);
+	QuasiTriangular::solvePre(xl, eig_min);
+	ru.multsVec(xu, xl);
+	if (nz > 0)
+		eig_min = (eig_min > 1.0)? 1.0 : eig_min;
+}
+
+void QuasiTriangularZero::solvePreTrans(Vector& x, double& eig_min)
+{
+	Vector xu(x, 0, nz);
+	Vector xl(x, nz, x.length()-nz);
+	ru.multsVecTrans(xl, xu);
+	QuasiTriangular::solvePreTrans(xl, eig_min);
+	if (nz > 0)
+		eig_min = (eig_min > 1.0)? 1.0 : eig_min;
+}
+
+void QuasiTriangularZero::multVec(Vector& x, const ConstVector& b) const
+{
+	x.zeros();
+	multaVec(x, b);
+}
+
+void QuasiTriangularZero::multVecTrans(Vector& x, const ConstVector& b) const
+{
+	x.zeros();
+	multaVecTrans(x, b);
+}
+
+void QuasiTriangularZero::multaVec(Vector& x, const ConstVector& b) const
+{
+	ConstVector bl(b, nz, b.length()-nz);
+	Vector xu(x, 0, nz);
+	Vector xl(x, nz, x.length()-nz);
+	xu.zeros();
+	ru.multaVec(xu, bl);
+	QuasiTriangular::multVec(xl, bl);
+}
+
+void QuasiTriangularZero::multaVecTrans(Vector& x, const ConstVector& b) const
+{
+	ConstVector bu(b, 0, b.length());
+	ConstVector bl(b, nz, b.length()-nz);
+	Vector xu(x, 0, nz);
+	Vector xl(x, nz, x.length()-nz);
+	xu.zeros();
+	QuasiTriangular::multVecTrans(xl, bl);
+	ru.multaVecTrans(xl, bu);
+}
+
+void QuasiTriangularZero::multLeftOther(GeneralMatrix& a) const
+{
+	GeneralMatrix a1(a, 0, 0, nz, a.numCols());
+	GeneralMatrix a2(a, nz, 0, a.numRows()-nz, a.numCols());
+	a1.mult(ru, a2);
+	QuasiTriangular::multLeftOther(a2);
+}
+
+void QuasiTriangularZero::print() const
+{
+	printf("super=\n");
+	QuasiTriangular::print();
+	printf("nz=%d\n",nz);
+	printf("ru=\n");
+	ru.print();
+}
+
+void QuasiTriangularZero::multKron(KronVector& x) const
+{
+	throw SYLV_MES_EXCEPTION("Attempt to run QuasiTriangularZero::multKron.");
+}
+
+void QuasiTriangularZero::multKronTrans(KronVector& x) const
+{
+	throw SYLV_MES_EXCEPTION("Attempt to run QuasiTriangularZero::multKronTrans.");
+}
+
diff --git a/dynare++/sylv/cc/QuasiTriangularZero.h b/dynare++/sylv/cc/QuasiTriangularZero.h
new file mode 100644
index 0000000000000000000000000000000000000000..6396eed18e00d7582162336fcc9b85ab31d7505d
--- /dev/null
+++ b/dynare++/sylv/cc/QuasiTriangularZero.h
@@ -0,0 +1,48 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/QuasiTriangularZero.h,v 1.1.1.1 2004/06/04 13:00:44 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#ifndef QUASI_TRIANGULAR_ZERO_H
+#define QUASI_TRIANGULAR_ZERO_H
+
+#include "QuasiTriangular.h"
+#include "GeneralMatrix.h"
+
+class QuasiTriangularZero : public QuasiTriangular {
+	int nz; // number of zero columns
+	GeneralMatrix ru; // data in right upper part (nz,d_size)
+public:
+	QuasiTriangularZero(int num_zeros, const double* d, int d_size);
+	QuasiTriangularZero(double r, const QuasiTriangularZero& t);
+	QuasiTriangularZero(double r, const QuasiTriangularZero& t,
+						double rr, const QuasiTriangularZero& tt);
+	QuasiTriangularZero(int p, const QuasiTriangularZero& t);
+	QuasiTriangularZero(const QuasiTriangular& t);
+	QuasiTriangularZero(const SchurDecompZero& decomp);
+	~QuasiTriangularZero();
+	void solvePre(Vector& x, double& eig_min);
+	void solvePreTrans(Vector& x, double& eig_min);
+	void multVec(Vector& x, const ConstVector& b) const;
+	void multVecTrans(Vector& x, const ConstVector& b) const;
+	void multaVec(Vector& x, const ConstVector& b) const;
+	void multaVecTrans(Vector& x, const ConstVector& b) const;
+	void multKron(KronVector& x) const;
+	void multKronTrans(KronVector& x) const;
+	void multLeftOther(GeneralMatrix& a) const;
+	/* clone */
+	virtual QuasiTriangular* clone() const
+		{return new QuasiTriangularZero(*this);}
+	virtual QuasiTriangular* clone(int p, const QuasiTriangular& t) const
+		{return new QuasiTriangularZero(p, (const QuasiTriangularZero&)t);}
+	virtual QuasiTriangular* clone(double r) const
+		{return new QuasiTriangularZero(r, *this);}
+	virtual QuasiTriangular* clone(double r, double rr, const QuasiTriangular& tt) const
+		{return new QuasiTriangularZero(r, *this, rr, (const QuasiTriangularZero&)tt);}
+	void print() const;
+};
+
+#endif /* QUASI_TRIANGULAR_ZERO_H */
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/sylv/cc/SchurDecomp.cpp b/dynare++/sylv/cc/SchurDecomp.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..63fe59cebec3b39bf6706f8cd038c9819d049a1e
--- /dev/null
+++ b/dynare++/sylv/cc/SchurDecomp.cpp
@@ -0,0 +1,71 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/SchurDecomp.cpp,v 1.1.1.1 2004/06/04 13:00:44 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#include "SchurDecomp.h"
+
+#include "cpplapack.h"
+
+SchurDecomp::SchurDecomp(const SqSylvMatrix& m)
+	: q_destroy(true), t_destroy(true)
+{
+	int rows = m.numRows();
+	q = new SqSylvMatrix(rows);
+	SqSylvMatrix auxt(m);
+	int sdim;
+	double* const wr = new double[rows];
+	double* const wi = new double[rows];
+	int lwork = 6*rows;
+	double* const work = new double[lwork];
+	int info;
+	LAPACK_dgees("V", "N", 0, &rows, auxt.base(), &rows, &sdim,
+				 wr, wi, q->base(), &rows,
+				 work, &lwork, 0, &info);
+	delete [] work;
+	delete [] wi;
+	delete [] wr;
+	t = new QuasiTriangular(auxt.base(), rows);
+}
+
+SchurDecomp::SchurDecomp(const QuasiTriangular& tr)
+	: q_destroy(true), t_destroy(true)
+{
+	q = new SqSylvMatrix(tr.numRows());
+	q->setUnit();
+	t = new QuasiTriangular(tr);
+}
+
+SchurDecomp::SchurDecomp(QuasiTriangular& tr)
+	: q_destroy(true), t_destroy(false)
+{
+	q = new SqSylvMatrix(tr.numRows());
+	q->setUnit();
+	t = &tr;
+}
+
+SchurDecomp::~SchurDecomp()
+{
+	if (t_destroy)
+		delete t;
+	if (q_destroy)
+		delete q;
+}
+
+int SchurDecomp::getDim() const
+{
+	return t->numRows();
+}
+
+SchurDecompZero::SchurDecompZero(const GeneralMatrix& m)
+	: SchurDecomp(SqSylvMatrix(m, m.numRows()-m.numCols(), 0, m.numCols())),
+	  ru(m, 0, 0, m.numRows()-m.numCols(), m.numCols())
+{
+	ru.multRight(getQ());
+}
+
+int SchurDecompZero::getDim() const
+{
+	return getT().numRows()+ru.numRows();
+}
+
+
diff --git a/dynare++/sylv/cc/SchurDecomp.h b/dynare++/sylv/cc/SchurDecomp.h
new file mode 100644
index 0000000000000000000000000000000000000000..644e5d5c5c1233781c6a03404539dddebffd340f
--- /dev/null
+++ b/dynare++/sylv/cc/SchurDecomp.h
@@ -0,0 +1,43 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/SchurDecomp.h,v 1.1.1.1 2004/06/04 13:00:44 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#ifndef SCHUR_DECOMP_H
+#define SCHUR_DECOMP_H
+
+#include "SylvMatrix.h"
+#include "QuasiTriangular.h"
+
+class QuasiTriangular;
+class SchurDecomp {
+	bool q_destroy;
+	SqSylvMatrix* q;
+	bool t_destroy;
+	QuasiTriangular* t;
+public:
+	SchurDecomp(const SqSylvMatrix& m);
+	SchurDecomp(const QuasiTriangular& tr);
+	SchurDecomp(QuasiTriangular& tr);
+	const SqSylvMatrix& getQ() const {return *q;}
+	const QuasiTriangular& getT() const {return *t;}
+	SqSylvMatrix& getQ() {return *q;}
+	QuasiTriangular& getT() {return *t;}
+	virtual int getDim() const;
+	virtual ~SchurDecomp();
+};
+
+class SchurDecompZero : public SchurDecomp {
+	GeneralMatrix ru; /* right upper matrix */
+public:
+	SchurDecompZero(const GeneralMatrix& m);
+	const GeneralMatrix& getRU() const {return ru;}
+	int getDim() const;
+	int getZeroCols() const {return ru.numRows();}
+};
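+
+/* usage sketch; d is hypothetical column-major data of an n x n matrix:
+ *
+ *   SqSylvMatrix a(d, n);
+ *   SchurDecomp sd(a);                      // A = Q*T*Q' via LAPACK dgees
+ *   const SqSylvMatrix& q = sd.getQ();
+ *   const QuasiTriangular& t = sd.getT();
+ */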
+
+#endif /* SCHUR_DECOMP_H */
+
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/sylv/cc/SchurDecompEig.cpp b/dynare++/sylv/cc/SchurDecompEig.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..623ad3c5a9503e84800a9f41b25492e596cdf131
--- /dev/null
+++ b/dynare++/sylv/cc/SchurDecompEig.cpp
@@ -0,0 +1,91 @@
+#include "SchurDecompEig.h"
+#include "SylvException.h"
+#include "cpplapack.h"
+
+/* Bubble a diagonal 1-1 or 2-2 block from position 'from' to position
+ * 'to'. If an eigenvalue cannot be swapped with its neighbour, that
+ * neighbour is also bubbled to the front. The method returns a new
+ * position 'to', at which the block originally pointed to by 'to'
+ * ends up. 'from' must be greater than 'to'.
+ */
+SchurDecompEig::diag_iter
+SchurDecompEig::bubbleEigen(diag_iter from, diag_iter to)
+{
+	diag_iter run = from;
+	while (run != to) {
+		diag_iter runm = run;
+		if (!tryToSwap(run, runm) && runm == to) {
+			++to;
+		} else {
+			// bubble all eigenvalues from runm(incl.) to run(excl.),
+			// this includes either bubbling generated eigenvalues due
+			// to split, or an eigenvalue which couldn't be swapped
+			while (runm != run) {
+				to = bubbleEigen(runm, to);
+				++runm;
+			}
+		}
+	}
+	return to;
+}
+
+/* This tries to swap two neighbouring eigenvalues, 'it' and '--it',
+ * and returns 'itadd'. If the blocks can be swapped, new eigenvalues
+ * can emerge due to possible 2-2 block splits. 'it' then points to
+ * the last eigenvalue coming from the block originally pointed to by
+ * 'it', and 'itadd' points to the first. On swap failure, 'it' is
+ * not changed, and 'itadd' points to the previous eigenvalue (which
+ * must be moved backwards first). In either case, it is necessary to
+ * resolve the eigenvalues from 'itadd' to 'it' before 'it' itself can
+ * be resolved.
+ * Success is signalled by returning true.
+ */
+bool SchurDecompEig::tryToSwap(diag_iter& it, diag_iter& itadd)
+{
+	itadd = it;
+	--itadd;
+
+	int n = getDim();
+	int ifst = (*it).getIndex() + 1;
+	int ilst = (*itadd).getIndex() + 1;
+	double* work = new double[n];
+	int info;
+	LAPACK_dtrexc("V", &n, getT().base(), &n, getQ().base(), &n, &ifst, &ilst, work,
+				  &info);
+	delete [] work;
+	if (info < 0) {
+		throw SYLV_MES_EXCEPTION("Wrong argument to LAPACK_dtrexc.");
+	}
+
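+	// dtrexc reports info == 1 when the neighbouring blocks are too close
+	// to be swapped stably; such a swap is treated as a failure here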
+	if (info == 0) {
+		// swap successful
+		getT().swapDiagLogically(itadd);
+		//check for 2-2 block splits
+		getT().checkDiagConsistency(it);
+		getT().checkDiagConsistency(itadd);
+		// and go back by 'it' in NEW eigenvalue set
+		--it;
+		return true;
+	}
+	return false;
+}
+
+
+void SchurDecompEig::orderEigen()
+{
+	diag_iter run = getT().diag_begin();
+	diag_iter runp = run;
+	++runp;
+	double last_size = 0.0;
+	while (runp != getT().diag_end()) {
+		diag_iter least = getT().findNextLargerBlock(run, getT().diag_end(),
+													 last_size);
+		last_size = (*least).getSize();
+		if (run == least)
+			++run;
+		else
+			run = bubbleEigen(least, run);
+		runp = run;
+		++runp;
+	}
+}
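+
+/* usage sketch: reorder an existing real Schur factorization so that the
+ * diagonal blocks appear in (roughly) increasing size, as measured by
+ * DiagonalBlock::getSize(), with Q and T updated consistently:
+ *
+ *   SchurDecompEig sd(a);   // a is a hypothetical SqSylvMatrix
+ *   sd.orderEigen();
+ */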
diff --git a/dynare++/sylv/cc/SchurDecompEig.h b/dynare++/sylv/cc/SchurDecompEig.h
new file mode 100644
index 0000000000000000000000000000000000000000..0e0da38d5c83ff2c455528ecb2258f9415f52aac
--- /dev/null
+++ b/dynare++/sylv/cc/SchurDecompEig.h
@@ -0,0 +1,31 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/SchurDecompEig.h,v 1.1.1.1 2004/06/04 13:00:44 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+// contains algorithms for eigenvalue reordering
+
+#ifndef SCHUR_DECOMP_EIG_H
+#define SCHUR_DECOMP_EIG_H
+
+#include "SchurDecomp.h"
+#include "QuasiTriangular.h"
+
+class SchurDecompEig : public SchurDecomp {
+public:
+	typedef QuasiTriangular::diag_iter diag_iter;
+	SchurDecompEig(const SqSylvMatrix& m) : SchurDecomp(m) {}
+	SchurDecompEig(const QuasiTriangular& tr) : SchurDecomp(tr) {};
+	SchurDecompEig(QuasiTriangular& tr) : SchurDecomp(tr) {}
+	diag_iter bubbleEigen(diag_iter from, diag_iter to);
+	void orderEigen();
+protected:
+	bool tryToSwap(diag_iter& it, diag_iter& itadd);
+};
+
+#endif /* SCHUR_DECOMP_EIG_H */
+
+
+// Local Variables:
+// mode:C++
+// End:
+
diff --git a/dynare++/sylv/cc/SimilarityDecomp.cpp b/dynare++/sylv/cc/SimilarityDecomp.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3b62e957ff461ce2b24ee099df1eb16942440979
--- /dev/null
+++ b/dynare++/sylv/cc/SimilarityDecomp.cpp
@@ -0,0 +1,160 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/SimilarityDecomp.cpp,v 1.1.1.1 2004/06/04 13:00:44 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#include "SimilarityDecomp.h"
+#include "SchurDecomp.h"
+#include "SchurDecompEig.h"
+#include "SylvException.h"
+
+#include "cpplapack.h"
+
+#include <cmath>
+
+SimilarityDecomp::SimilarityDecomp(const double* d, int d_size, double log10norm)
+{
+	SchurDecomp sd(SqSylvMatrix(d, d_size));
+	q = new SqSylvMatrix(sd.getQ());
+	b = new BlockDiagonal(sd.getT());
+	invq = new SqSylvMatrix(d_size);
+	invq->setUnit();
+	invq->multLeftTrans(sd.getQ());
+	double norm = pow(10.0, log10norm);
+	diagonalize(norm);
+}
+
+SimilarityDecomp::~SimilarityDecomp()
+{
+	delete invq;
+	delete b;
+	delete q;
+}
+
+void SimilarityDecomp::getXDim(diag_iter start, diag_iter end,
+							   int &rows, int& cols) const
+{
+	int si = (*start).getIndex();
+	int ei = (*end).getIndex();
+	cols = b->numRows() - ei;
+	rows = ei - si;
+}
+
+/* Find the solution X for the diagonal block given by start (incl.) and
+ * end (excl.). If the solution cannot be found, or its largest entry
+ * exceeds 'norm', X is left unchanged and false is returned.
+ */
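+/* note: with isgn = -1 the LAPACK dtrsyl call below solves the Sylvester
+ * equation A*X - X*B = scale*C for the diagonal blocks A, B and the
+ * off-diagonal block C; only solutions with scale == 1 and all entries within
+ * the given bound are accepted, since X then enters the transformation built
+ * in updateTransform(). */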
+bool SimilarityDecomp::solveX(diag_iter start, diag_iter end,
+							  GeneralMatrix& X, double norm) const
+{
+	int si = (*start).getIndex();
+	int ei = (*end).getIndex();
+
+	SqSylvMatrix A((const GeneralMatrix&)*b, si, si, X.numRows());
+	SqSylvMatrix B((const GeneralMatrix&)*b, ei, ei, X.numCols());
+	GeneralMatrix C((const GeneralMatrix&)*b, si, ei, X.numRows(), X.numCols());
+
+	int isgn = -1;
+	int m = A.numRows();
+	int n = B.numRows();
+	double scale;
+	int info;
+	LAPACK_dtrsyl("N", "N", &isgn, &m, &n, A.base(), &m, B.base(), &n,
+				  C.base(), &m, &scale, &info);
+	if (info < -1)
+		throw SYLV_MES_EXCEPTION("Wrong parameter to LAPACK dtrsyl.");
+
+	if (info == 1 || scale < 1)
+		return false;
+	if (C.getData().getMax() > norm)
+		return false;
+	
+	X = C;
+	return true;
+}
+
+/* multiply Q from the right by (I -X; 0 I) and invQ from the left by (I X; 0 I). As a side effect, X is set to -X. */
+void SimilarityDecomp::updateTransform(diag_iter start, diag_iter end,
+									   GeneralMatrix& X)
+{
+	int si = (*start).getIndex();
+	int ei = (*end).getIndex();
+	
+	SqSylvMatrix iX(q->numRows());
+	iX.setUnit();
+	iX.place(X, si, ei);
+	invq->GeneralMatrix::multLeft(iX);
+
+	iX.setUnit();
+	X.mult(-1.0);
+	iX.place(X, si, ei);
+	q->multRight(iX);
+}
+
+void SimilarityDecomp::bringGuiltyBlock(diag_iter start, diag_iter& end)
+{
+	double av = b->getAverageDiagSize(start, end);
+	diag_iter guilty = b->findClosestDiagBlock(end, b->diag_end(), av);
+	SchurDecompEig sd((QuasiTriangular&)*b); // works on b including diagonal structure
+	end = sd.bubbleEigen(guilty, end); // iterators are valid
+	++end;
+	q->multRight(sd.getQ());
+	invq->multLeftTrans(sd.getQ());
+}
+
+void SimilarityDecomp::diagonalize(double norm)
+{
+	diag_iter start = b->diag_begin();
+	diag_iter end = start;
+	++end;
+
+	while (end != b->diag_end()) {
+		int xrows;
+		int xcols;
+		getXDim(start, end, xrows, xcols);
+		GeneralMatrix X(xrows, xcols);
+		if (solveX(start, end, X, norm)) {
+			updateTransform(start, end, X);
+			b->setZeroBlockEdge(end);
+			start = end;
+			++end;
+		} else {
+			bringGuiltyBlock(start, end); // moves with end
+		}
+	}
+}
+
+void SimilarityDecomp::check(SylvParams& pars, const GeneralMatrix& m) const
+{
+	// M - Q*B*inv(Q)
+	SqSylvMatrix c(getQ(), getB());
+	c.multRight(getInvQ());
+	c.add(-1.0, m);
+	pars.f_err1 = c.getNorm1();
+	pars.f_errI = c.getNormInf();
+
+	// I - Q*inv(Q)
+	c.setUnit();
+	c.mult(-1);
+	c.multAndAdd(getQ(), getInvQ());
+	pars.viv_err1 = c.getNorm1();
+	pars.viv_errI = c.getNormInf();
+
+	// I - inv(Q)*Q
+	c.setUnit();
+	c.mult(-1);
+	c.multAndAdd(getInvQ(), getQ());
+	pars.ivv_err1 = c.getNorm1();
+	pars.ivv_errI = c.getNormInf();	
+}
+
+void SimilarityDecomp::infoToPars(SylvParams& pars) const
+{
+	pars.f_blocks = getB().getNumBlocks();
+	pars.f_largest = getB().getLargestBlock();
+	pars.f_zeros = getB().getNumZeros();
+	pars.f_offdiag = getB().getNumOffdiagonal();
+}
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/sylv/cc/SimilarityDecomp.h b/dynare++/sylv/cc/SimilarityDecomp.h
new file mode 100644
index 0000000000000000000000000000000000000000..14f79297a21656827f7173b68fc92954bf4c1a2d
--- /dev/null
+++ b/dynare++/sylv/cc/SimilarityDecomp.h
@@ -0,0 +1,41 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/SimilarityDecomp.h,v 1.1.1.1 2004/06/04 13:00:44 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#ifndef SIMILARITY_DECOMP_H
+#define SIMILARITY_DECOMP_H
+
+#include "SylvMatrix.h"
+#include "BlockDiagonal.h"
+#include "SylvParams.h"
+
+class SimilarityDecomp {
+	SqSylvMatrix* q;
+	BlockDiagonal* b;
+	SqSylvMatrix* invq;
+	typedef BlockDiagonal::diag_iter diag_iter;
+public:
+	SimilarityDecomp(const double* d, int d_size, double log10norm = 3.0);
+	virtual ~SimilarityDecomp();
+	const SqSylvMatrix& getQ() const
+		{return *q;}
+	const SqSylvMatrix& getInvQ() const
+		{return *invq;}
+	const BlockDiagonal& getB() const
+		{return *b;}
+	void check(SylvParams& pars, const GeneralMatrix& m) const;
+	void infoToPars(SylvParams& pars) const;
+protected:
+	void getXDim(diag_iter start, diag_iter end, int& rows, int& cols) const;
+	bool solveX(diag_iter start, diag_iter end, GeneralMatrix& X, double norm) const;
+	void updateTransform(diag_iter start, diag_iter end, GeneralMatrix& X);
+	void bringGuiltyBlock(diag_iter start, diag_iter& end);
+	void diagonalize(double norm);
+};
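+
+/* usage sketch; d is hypothetical column-major data of an n x n matrix:
+ *
+ *   SimilarityDecomp decomp(d, n);            // default bound 10^3 on entries of X
+ *   const BlockDiagonal& b = decomp.getB();   // block diagonal factor
+ *   const SqSylvMatrix& q = decomp.getQ();    // M equals q*b*getInvQ() up to the errors reported by check()
+ */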
+
+#endif /* SIMILARITY_DECOMP_H */
+
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/sylv/cc/SylvException.cpp b/dynare++/sylv/cc/SylvException.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..71466ea5ee1f40e0441490f65b85d4d7116a9c29
--- /dev/null
+++ b/dynare++/sylv/cc/SylvException.cpp
@@ -0,0 +1,69 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/SylvException.cpp,v 1.2 2004/10/01 10:30:40 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#include "SylvException.h"
+
+#include <string.h>
+#include <stdio.h>
+
+SylvException::SylvException(const char* f, int l, const SylvException* s)
+{
+	strcpy(file,f);
+	line = l;
+	source = s;
+}
+
+SylvException::~SylvException()
+{
+	if (source != NULL) {
+		delete source;
+	}
+}
+
+void SylvException::printMessage() const
+{
+	char mes[1500];
+	mes[0] = '\0';
+	printMessage(mes, 1499);
+	printf("%s", mes);
+}
+
+int SylvException::printMessage(char* str, int maxlen) const
+{
+	int remain = maxlen;
+	if (source != NULL) {
+		remain = source->printMessage(str, maxlen);
+	}
+	char aux[100];
+	sprintf(aux, "From %s:%d\n", file, line);
+	int newremain = remain - strlen(aux);
+	if (newremain < 0) {
+		aux[remain] = '\0';
+		newremain = 0;
+	}
+	strcat(str, aux);
+	return newremain;
+}
+
+SylvExceptionMessage::SylvExceptionMessage(const char* f, int i,
+										   const char* mes)
+	: SylvException(f,i,NULL)
+{
+	strcpy(message,mes);
+}
+
+int SylvExceptionMessage::printMessage(char* str, int maxlen) const
+{
+	char aux[600];
+	sprintf(aux, "At %s:%d:%s\n", file, line, message);
+	int newremain = maxlen - strlen(aux);
+	if (newremain < 0) {
+		aux[maxlen] = '\0';
+		newremain = 0;
+	}
+	strcat(str, aux);
+	return newremain;
+}
+
+
diff --git a/dynare++/sylv/cc/SylvException.h b/dynare++/sylv/cc/SylvException.h
new file mode 100644
index 0000000000000000000000000000000000000000..f3c22338a673772f3dfb531d6b369a45e8597671
--- /dev/null
+++ b/dynare++/sylv/cc/SylvException.h
@@ -0,0 +1,39 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/SylvException.h,v 1.1.1.1 2004/06/04 13:00:44 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#ifndef SYLV_EXCEPTION_H
+#define SYLV_EXCEPTION_H
+
+#include "SylvMemory.h"
+
+
+class SylvException : public MallocAllocator {
+protected:
+	char file[50];
+	int line;
+	const SylvException* source;
+public:
+	SylvException(const char* f, int l, const SylvException* s);
+	virtual ~SylvException();
+	virtual int printMessage(char* str, int maxlen) const;
+	void printMessage() const;
+};
+
+class SylvExceptionMessage : public SylvException {
+	char message[500];
+public:
+	SylvExceptionMessage(const char* f, int l, const char* mes);
+	virtual int printMessage(char* str, int maxlen) const;
+};
+
+// define macros:
+#define SYLV_EXCEPTION(exc) (SylvException(__FILE__, __LINE__, exc))
+#define SYLV_MES_EXCEPTION(mes) (SylvExceptionMessage(__FILE__, __LINE__, mes)) 
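+
+/* usage sketch: the macros capture __FILE__ and __LINE__ automatically
+ *
+ *   try {
+ *       throw SYLV_MES_EXCEPTION("Wrong dimensions.");
+ *   } catch (const SylvException& e) {
+ *       e.printMessage();   // prints "At <file>:<line>:Wrong dimensions."
+ *   }
+ */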
+
+#endif /* SYLV_EXCEPTION_H */
+
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/sylv/cc/SylvMatrix.cpp b/dynare++/sylv/cc/SylvMatrix.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e228d73a970478d299a0de075e1aede28d502b2c
--- /dev/null
+++ b/dynare++/sylv/cc/SylvMatrix.cpp
@@ -0,0 +1,251 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/SylvMatrix.cpp,v 1.1.1.1 2004/06/04 13:00:44 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#include "SylvException.h"
+#include "SylvMatrix.h"
+
+#include "cppblas.h"
+#include "cpplapack.h"
+
+#include <stdio.h>
+#include <string.h>
+#include <cmath>
+
+void SylvMatrix::multLeftI(const SqSylvMatrix& m)
+{
+	int off = rows - m.numRows();
+	if (off < 0) {
+		throw SYLV_MES_EXCEPTION("Wrong matrix dimensions for multLeftI.");
+	}
+	GeneralMatrix subtmp(*this, off, 0, m.numRows(), cols);
+	subtmp.multLeft(m);
+}
+
+void SylvMatrix::multLeftITrans(const SqSylvMatrix& m)
+{
+	int off = rows - m.numRows();
+	if (off < 0) {
+		throw SYLV_MES_EXCEPTION("Wrong matrix dimensions for multLeftITrans.");
+	}
+	GeneralMatrix subtmp(*this, off, 0, m.numRows(), cols);
+	subtmp.multLeftTrans(m);
+}
+
+
+void SylvMatrix::multLeft(int zero_cols, const GeneralMatrix& a, const GeneralMatrix& b)
+{
+	int off = a.numRows() - a.numCols();
+	if (off < 0 || a.numRows() != rows || off != zero_cols ||
+		rows != b.numRows() || cols != b.numCols()) {
+		throw SYLV_MES_EXCEPTION("Wrong matrix dimensions for multLeft.");
+	}
+	// here we cannot call SylvMatrix::gemm since it would require
+	// another copy of the (usually big) b (we cannot take an in-place
+	// submatrix of a const GeneralMatrix)
+	if (a.getLD() > 0 && ld > 0) {
+		int mm = a.numRows();
+		int nn = cols;
+		int kk = a.numCols();
+		double alpha = 1.0;
+		int lda = a.getLD();
+		int ldb = ld;
+		double beta = 0.0;
+		int ldc = ld;
+		BLAS_dgemm("N", "N", &mm, &nn, &kk, &alpha, a.getData().base(), &lda,
+				   b.getData().base()+off, &ldb, &beta, data.base(), &ldc);
+	}
+}
+
+
+void SylvMatrix::multRightKron(const SqSylvMatrix& m, int order)
+{
+	if (power(m.numRows(), order) != cols) {
+		throw SYLV_MES_EXCEPTION("Wrong number of cols for right kron multiply.");
+	}
+	KronVector auxrow(m.numRows(), m.numRows(), order-1);
+	for (int i = 0; i < rows; i++) {
+		Vector rowi(data.base()+i, rows, cols);
+		KronVector rowikron(rowi, m.numRows(), m.numRows(), order-1);
+		auxrow = rowi; // copy data
+		m.multVecKronTrans(rowikron, auxrow);
+	}
+}
+
+void SylvMatrix::multRightKronTrans(const SqSylvMatrix& m, int order)
+{
+	if (power(m.numRows(), order) != cols) {
+		throw SYLV_MES_EXCEPTION("Wrong number of cols for right kron multiply.");
+	}
+	
+	KronVector auxrow(m.numRows(), m.numRows(), order-1);
+	for (int i = 0; i < rows; i++) {
+		Vector rowi(data.base()+i, rows, cols);
+		KronVector rowikron(rowi, m.numRows(), m.numRows(), order-1);
+		auxrow = rowi; // copy data
+		m.multVecKron(rowikron, auxrow);
+	}
+}
+
+void SylvMatrix::eliminateLeft(int row, int col, Vector& x)
+{
+	double d = get(col, col);
+	double e = get(row, col);
+	if (std::abs(d) > std::abs(e)) {
+		get(row, col) = 0.0;
+		double mult = e/d;
+		for (int i = col + 1; i < numCols(); i++) {
+			get(row, i) = get(row, i) - mult*get(col, i);
+		}
+		x[row] = x[row] - mult*x[col];
+	} else if (std::abs(e) > std::abs(d)) {
+		get(row, col) = 0.0;
+		get(col, col) = e;
+		double mult = d/e;
+		for (int i = col + 1; i < numCols(); i++) {
+			double tx = get(col, i);
+			double ty = get(row, i);
+			get(col, i) = ty;
+			get(row, i) = tx - mult*ty;
+		}
+		double tx = x[col];
+		double ty = x[row];
+		x[col] = ty;
+		x[row] = tx - mult*ty;
+	}
+}
+
+void SylvMatrix::eliminateRight(int row, int col, Vector& x)
+{
+	double d = get(row, row);
+	double e = get(row, col);
+	
+	if (std::abs(d) > std::abs(e)) {
+		get(row, col) = 0.0;
+		double mult = e/d;
+		for (int i = 0; i < row; i++) {
+			get(i, col) = get(i, col) - mult*get(i, row);
+		}
+		x[col] = x[col] - mult*x[row];
+	} else if (std::abs(e) > std::abs(d)) {
+		get(row, col) = 0.0;
+		get(row, row) = e;
+		double mult = d/e;
+		for (int i = 0; i < row; i++) {
+			double tx = get(i, row);
+			double ty = get(i, col);
+			get(i, row) = ty;
+			get(i, col) = tx - mult*ty;
+		}
+		double tx = x[row];
+		double ty = x[col];
+		x[row] = ty;
+		x[col] = tx - mult*ty;
+	}
+}
+
+
+
+SqSylvMatrix::SqSylvMatrix(const GeneralMatrix& a, const GeneralMatrix& b)
+	: SylvMatrix(a,b)
+{
+	if (rows != cols)
+		throw SYLV_MES_EXCEPTION("Wrong matrix dimensions in multiplication constructor of square matrix.");
+}
+
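+/* recursion note: writing d as m stacked chunks d_0..d_{m-1}, the product
+ * (A \otimes K)*d, where K is the same Kronecker power of one lower depth,
+ * has chunks x_i = sum_j A(i,j)*(K*d_j); the first loop below computes the
+ * K*d_j recursively and the second loop mixes them with the entries of A. */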
+void SqSylvMatrix::multVecKron(KronVector& x, const KronVector& d) const
+{
+	x.zeros();
+	if (d.getDepth() == 0) {
+		multaVec(x, d);
+	} else {
+		KronVector aux(x.getM(), x.getN(), x.getDepth());
+		for (int i = 0; i < x.getM(); i++) {
+			KronVector auxi(aux, i);
+			ConstKronVector di(d, i);
+			multVecKron(auxi, di);
+		}
+		for (int i = 0; i < rows; i++) {
+			KronVector xi(x, i);
+			for (int j = 0; j < cols; j++) {
+				KronVector auxj(aux, j);
+				xi.add(get(i,j),auxj);
+			}
+		}
+	}
+}
+
+
+void SqSylvMatrix::multVecKronTrans(KronVector& x, const KronVector& d) const
+{
+	x.zeros();
+	if (d.getDepth() == 0) {
+		multaVecTrans(x, d);
+	} else {
+		KronVector aux(x.getM(), x.getN(), x.getDepth());
+		for (int i = 0; i < x.getM(); i++) {
+			KronVector auxi(aux, i);
+			ConstKronVector di(d, i);
+			multVecKronTrans(auxi, di);
+		}
+		for (int i = 0; i < rows; i++) {
+			KronVector xi(x, i);
+			for (int j = 0; j < cols; j++) {
+				KronVector auxj(aux, j);
+				xi.add(get(j,i), auxj);
+			}
+		}
+	}
+}
+
+void SqSylvMatrix::multInvLeft2(GeneralMatrix& a, GeneralMatrix& b,
+								double& rcond1, double& rcondinf) const
+{
+	if (rows != a.numRows() || rows != b.numRows()) {
+		throw SYLV_MES_EXCEPTION("Wrong dimensions for multInvLeft2.");
+	}
+	// PLU factorization
+	Vector inv(data);
+	int * const ipiv = new int[rows];
+	int info;
+	LAPACK_dgetrf(&rows, &rows, inv.base(), &rows, ipiv, &info);
+	// solve a
+	int acols = a.numCols();
+	double* abase = a.base();
+	LAPACK_dgetrs("N", &rows, &acols, inv.base(), &rows, ipiv,
+				  abase, &rows, &info);
+	// solve b
+	int bcols = b.numCols();
+	double* bbase = b.base();
+	LAPACK_dgetrs("N", &rows, &bcols, inv.base(), &rows, ipiv,
+				  bbase, &rows, &info);
+	delete [] ipiv;
+
+	// condition numbers
+	double* const work = new double[4*rows];
+	int* const iwork = new int[rows];
+	double norm1 = getNorm1();
+	LAPACK_dgecon("1", &rows, inv.base(), &rows, &norm1, &rcond1, 
+				  work, iwork, &info);
+	double norminf = getNormInf();
+	LAPACK_dgecon("I", &rows, inv.base(), &rows, &norminf, &rcondinf, 
+				  work, iwork, &info);
+	delete [] iwork;
+	delete [] work;
+}
+
+void SqSylvMatrix::setUnit()
+{
+	for (int i = 0; i < rows; i++) {
+		for (int j = 0; j < cols; j++) {
+			if (i==j)
+				get(i,j) = 1.0;
+			else 
+				get(i,j) = 0.0;
+		}
+	}
+}
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/sylv/cc/SylvMatrix.h b/dynare++/sylv/cc/SylvMatrix.h
new file mode 100644
index 0000000000000000000000000000000000000000..99ab504ae0f62e3aa49122cb0bd4aa02594dfe51
--- /dev/null
+++ b/dynare++/sylv/cc/SylvMatrix.h
@@ -0,0 +1,81 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/SylvMatrix.h,v 1.1.1.1 2004/06/04 13:00:44 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#ifndef SYLV_MATRIX_H
+#define SYLV_MATRIX_H
+
+#include "GeneralMatrix.h"
+#include "KronVector.h"
+
+class SqSylvMatrix;
+
+class SylvMatrix : public GeneralMatrix {
+public:
+	SylvMatrix(int m, int n)
+		: GeneralMatrix(m,n) {}
+	SylvMatrix(const double* d, int m, int n)
+		: GeneralMatrix(d, m, n) {}
+	SylvMatrix(double* d, int m, int n)
+		: GeneralMatrix(d, m, n) {}
+	SylvMatrix(const GeneralMatrix& m)
+		: GeneralMatrix(m) {}
+	SylvMatrix(const GeneralMatrix& m, int i, int j, int nrows, int ncols)
+		: GeneralMatrix(m, i, j, nrows, ncols) {}
+	SylvMatrix(GeneralMatrix& m, int i, int j, int nrows, int ncols)
+		: GeneralMatrix(m, i, j, nrows, ncols) {}
+	SylvMatrix(const GeneralMatrix& a, const GeneralMatrix& b)
+		: GeneralMatrix(a, b) {}
+
+	/* this = |I 0|* this
+	          |0 m|         */
+	void multLeftI(const SqSylvMatrix& m);
+	/* this = |I  0|* this
+	          |0 m'|         */
+	void multLeftITrans(const SqSylvMatrix& m);
+	/* this = |0 a|*b, so that |0 a| is square */
+	void multLeft(int zero_cols, const GeneralMatrix& a, const GeneralMatrix& b);
+	/* this = this * (m\otimes m..\otimes m) */
+	void multRightKron(const SqSylvMatrix& m, int order);
+	/* this = this * (m'\otimes m'..\otimes m') */
+	void multRightKronTrans(const SqSylvMatrix& m, int order);
+	/* this = P*this, x = P*x, where P is gauss transformation setting
+	 * a given element to zero */
+	void eliminateLeft(int row, int col, Vector& x);
+	/* this = this*P, x = P'*x, where P is gauss transformation setting
+	 * a given element to zero */
+	void eliminateRight(int row, int col, Vector& x);
+};
+
+
+class SqSylvMatrix : public SylvMatrix {
+public:
+	SqSylvMatrix(int m) : SylvMatrix(m, m) {}
+	SqSylvMatrix(const double* d, int m) : SylvMatrix(d, m, m) {}
+	SqSylvMatrix(double* d, int m) : SylvMatrix(d, m, m) {}
+	SqSylvMatrix(const SqSylvMatrix& m) : SylvMatrix(m) {}
+	SqSylvMatrix(const GeneralMatrix& m, int i, int j, int nrows)
+		: SylvMatrix(m, i, j, nrows, nrows) {} 
+	SqSylvMatrix(GeneralMatrix& m, int i, int j, int nrows)
+		: SylvMatrix(m, i, j, nrows, nrows) {} 
+	SqSylvMatrix(const GeneralMatrix& a, const GeneralMatrix& b);
+	const SqSylvMatrix& operator=(const SqSylvMatrix& m)
+		{GeneralMatrix::operator=(m); return *this;}
+	/* x = (this \otimes this..\otimes this)*d */
+	void multVecKron(KronVector& x, const KronVector& d) const;
+	/* x = (this' \otimes this'..\otimes this')*d */
+	void multVecKronTrans(KronVector& x, const KronVector& d) const;
+	/* a = inv(this)*a, b=inv(this)*b */
+	void multInvLeft2(GeneralMatrix& a, GeneralMatrix& b,
+					  double& rcond1, double& rcondinf) const;
+	/* this = I */
+	void setUnit();
+};
+
+
+#endif /* SYLV_MATRIX_H */
+
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/sylv/cc/SylvMemory.cpp b/dynare++/sylv/cc/SylvMemory.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d9f7efa1abbcaf022d8474b124815c332a053f1b
--- /dev/null
+++ b/dynare++/sylv/cc/SylvMemory.cpp
@@ -0,0 +1,221 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/SylvMemory.cpp,v 1.1.1.1 2004/06/04 13:00:49 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#include "SylvMemory.h"
+#include "SylvException.h"
+#include "KronVector.h"
+
+#ifdef MATLAB
+#include "mex.h"
+#endif
+
+#include <math.h> 
+#include <stdio.h>
+#include <stdlib.h>
+
+/**********************************************************/
+/*   SylvMemoryPool                                       */
+/**********************************************************/
+
+SylvMemoryPool memory_pool;
+
+SylvMemoryPool::SylvMemoryPool()
+	: base(0), length(0), allocated(0), stack_mode(false)
+{
+}
+
+void SylvMemoryPool::init(size_t size)
+{
+#ifdef USE_MEMORY_POOL
+	length = size;
+
+#ifdef MATLAB
+	if (base)
+		throw SYLV_MES_EXCEPTION("Attempt to use matlab memory pool twice.");
+	base = (char*) mxMalloc(length);
+#else
+	base = (char*) malloc(length);
+#endif
+
+#else
+	throw SYLV_MES_EXCEPTION("SylvMemoryPool::init() called for non memory pool code.");
+#endif
+}
+
+void* SylvMemoryPool::allocate(size_t size)
+{
+#ifdef USE_MEMORY_POOL
+	if (allocated + size < length) {
+		char* res = base + allocated;
+		allocated += size;
+		return res;
+	} else {
+		throw SYLV_MES_EXCEPTION("Run out of memory space");
+	}
+#else
+	throw SYLV_MES_EXCEPTION("SylvMemoryPool::allocate() called for non memory pool code.");
+#endif
+}
+
+void SylvMemoryPool::free(void* p)
+{
+#ifdef USE_MEMORY_POOL
+	int offset = ((char*)p) - base;
+
+#ifdef DEBUG
+	if (offset < 0)
+		throw SYLV_MES_EXCEPTION("SylvMemoryPool::free() frees wrong address < begin.");
+	if (offset >= (int)length)
+		throw SYLV_MES_EXCEPTION("SylvMemoryPool::free() frees wrong address > end.");
+#endif	
+
+	if (stack_mode && offset >= 0 && offset < (int)allocated)
+		allocated = offset;
+
+#else
+	throw SYLV_MES_EXCEPTION("SylvMemoryPool::free() called for non memory pool code.");
+#endif
+}
+
+void SylvMemoryPool::setStackMode(bool mode)
+{
+	stack_mode = mode;
+}
+
+SylvMemoryPool::~SylvMemoryPool()
+{
+	reset();
+}
+
+void SylvMemoryPool::reset()
+{
+#ifndef MATLAB
+	free(base);
+	base = 0;
+	allocated = 0;
+	length = 0;
+	stack_mode = false;
+#endif
+}
+
+/**********************************************************/
+/*   global new and delete                                */
+/**********************************************************/
+
+#ifdef USE_MEMORY_POOL
+
+void* operator new(size_t size)
+{
+	return memory_pool.allocate(size);
+}
+
+void* operator new[](size_t size)
+{
+	return memory_pool.allocate(size);
+}
+
+void operator delete(void* p)
+{
+	memory_pool.free(p);
+}
+
+void operator delete[](void* p)
+{
+	memory_pool.free(p);
+}
+
+#endif
+
+/**********************************************************/
+/*   saved version of global new and delete               */
+/**********************************************************/
+
+#ifdef USE_MEMORY_POOL
+void* MallocAllocator::operator new(size_t size)
+{
+#ifdef MATLAB
+	throw SYLV_MES_EXCEPTION("Attempt to call wrong memory allocator.");
+#else
+	void* res = malloc(size);
+	if (!res) 
+		throw SYLV_MES_EXCEPTION("Malloc unable to allocate memory.");
+	return res;
+#endif
+}
+
+void* MallocAllocator::operator new[](size_t size)
+{
+#ifdef MATLAB
+	throw SYLV_MES_EXCEPTION("Attempt to call wrong memory allocator.");
+#else
+	void* res = malloc(size);
+	if (!res)
+		throw SYLV_MES_EXCEPTION("Malloc unable to allocate memory.");
+	return res;
+#endif
+}
+
+void MallocAllocator::operator delete(void* p)
+{
+#ifdef MATLAB
+	throw SYLV_MES_EXCEPTION("Attempt to call wrong memory destructor.");
+#else
+	free(p);
+#endif
+}
+
+void MallocAllocator::operator delete[](void* p)
+{
+#ifdef MATLAB
+	throw SYLV_MES_EXCEPTION("Attempt to call wrong memory destructor.");
+#else
+	free(p);
+#endif
+}
+
+#endif
+
+
+/**********************************************************/
+/*   SylvMemoryDriver                                     */
+/**********************************************************/
+
+void SylvMemoryDriver::allocate(int num_d, int m, int n, int order)
+{
+#ifdef USE_MEMORY_POOL
+	int x_cols = power(m,order);
+	int total = num_d*x_cols*n; // storage for big matrices
+	total += x_cols; // storage for one extra row of a big matrix
+	int dig_vectors = (int)ceil(((double)(power(m,order)-1))/(m-1));
+	total += 8*n*dig_vectors; // storage for kron vectors instantiated during the solution
+	total += 50*(m*m+n*n); // some storage for small square matrices
+	total *= sizeof(double); // everything in doubles
+	memory_pool.init(total);
+#endif
+}
+
+
+SylvMemoryDriver::SylvMemoryDriver(int num_d, int m, int n, int order)
+{
+	allocate(num_d, m, n, order);
+}
+
+SylvMemoryDriver::SylvMemoryDriver(const SylvParams& pars, int num_d,
+								   int m, int n, int order)
+{
+	if (*(pars.method) == SylvParams::iter)
+		num_d++;
+	if (*(pars.want_check))
+		num_d++;
+	allocate(num_d, m, n, order);
+}
+
+SylvMemoryDriver::~SylvMemoryDriver()
+{
+	memory_pool.reset();
+}
+
+void SylvMemoryDriver::setStackMode(bool mode) {
+	memory_pool.setStackMode(mode);
+}
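+
+/* usage sketch: when compiled with USE_MEMORY_POOL, a driver sized from the
+ * problem dimensions is created up front so that subsequent new/delete calls
+ * are served from the pool (hypothetical dimensions num_d, m, n, order):
+ *
+ *   SylvMemoryDriver driver(num_d, m, n, order);
+ *   SylvMemoryDriver::setStackMode(true);   // frees behave like stack pops
+ *   // ... run the solver ...
+ *   SylvMemoryDriver::setStackMode(false);
+ */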
diff --git a/dynare++/sylv/cc/SylvMemory.h b/dynare++/sylv/cc/SylvMemory.h
new file mode 100644
index 0000000000000000000000000000000000000000..187aac0b508b54941593220388f12cd154a2c38d
--- /dev/null
+++ b/dynare++/sylv/cc/SylvMemory.h
@@ -0,0 +1,63 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/SylvMemory.h,v 1.1.1.1 2004/06/04 13:00:49 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#ifndef SYLV_MEMORY_H
+#define SYLV_MEMORY_H
+
+#include "SylvParams.h"
+
+#include <new>
+
+class MallocAllocator {
+#ifdef USE_MEMORY_POOL
+public:
+	void* operator new(size_t size);
+	void* operator new[](size_t size);
+	void operator delete(void* p);
+	void operator delete[](void* p);
+#endif
+};
+
+#ifdef USE_MEMORY_POOL
+void* operator new(size_t size);
+void* operator new[](size_t size);
+void operator delete(void* p);
+void operator delete[](void* p);
+#endif
+
+class SylvMemoryPool {
+	char* base;
+	size_t length;
+	size_t allocated;
+	bool stack_mode;
+	SylvMemoryPool(const SylvMemoryPool&);
+	const SylvMemoryPool& operator=(const SylvMemoryPool&);
+public:
+	SylvMemoryPool();
+	~SylvMemoryPool();
+	void init(size_t size);
+	void* allocate(size_t size);
+	void free(void* p);
+	void reset();
+	void setStackMode(bool);
+};
+
+class SylvMemoryDriver {
+	SylvMemoryDriver(const SylvMemoryDriver&);
+	const SylvMemoryDriver& operator=(const SylvMemoryDriver&);
+public:
+	SylvMemoryDriver(int num_d, int m, int n, int order);
+	SylvMemoryDriver(const SylvParams& pars, int num_d, int m, int n, int order);
+	static void setStackMode(bool);
+	~SylvMemoryDriver();
+protected:
+	void allocate(int num_d, int m, int n, int order);
+};
+
+#endif /* SYLV_MEMORY_H */
+
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/sylv/cc/SylvParams.cpp b/dynare++/sylv/cc/SylvParams.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9c00a438ded284380c901203f13c8fc7a14b2770
--- /dev/null
+++ b/dynare++/sylv/cc/SylvParams.cpp
@@ -0,0 +1,230 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/SylvParams.cpp,v 1.1.1.1 2004/06/04 13:00:52 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#include "SylvParams.h"
+
+
+void SylvParams::print(const char* prefix) const
+{
+	print(stdout, prefix);
+}
+
+void SylvParams::print(FILE* fdesc, const char* prefix) const
+{
+	rcondA1.print(fdesc, prefix,  "reci. cond1 A      ", "%8.4g");
+	rcondAI.print(fdesc, prefix,  "reci. condInf A    ", "%8.4g");
+	bs_norm.print(fdesc, prefix,  "log10 diag norm    ", "%8.4g");
+	f_err1.print(fdesc, prefix,   "abs. err 1 F diag  ", "%8.4g");
+	f_errI.print(fdesc, prefix,   "abs. err I F diag  ", "%8.4g");
+	viv_err1.print(fdesc, prefix, "abs. err 1 V*invV  ", "%8.4g");
+	viv_errI.print(fdesc, prefix, "abs. err I V*invV  ", "%8.4g");
+	ivv_err1.print(fdesc, prefix, "abs. err 1 invV*V  ", "%8.4g");
+	ivv_errI.print(fdesc, prefix, "abs. err I invV*V  ", "%8.4g");
+	f_blocks.print(fdesc, prefix, "num blocks in F    ", "%d");
+	f_largest.print(fdesc, prefix,"largest block in F ", "%d");
+	f_zeros.print(fdesc, prefix,  "num zeros in F     ", "%d");
+	f_offdiag.print(fdesc, prefix,"num offdiag in F   ", "%d");
+	if (*method == iter) {
+		converged.print(fdesc, prefix,       "converged          ", "%d");
+		convergence_tol.print(fdesc, prefix, "convergence tol.   ", "%8.4g");
+		iter_last_norm.print(fdesc, prefix,  "last norm          ", "%8.4g");
+		max_num_iter.print(fdesc, prefix,    "max num iter       ", "%d");
+		num_iter.print(fdesc, prefix,        "num iter           ", "%d");
+	} else {
+		eig_min.print(fdesc, prefix,         "minimum eigenvalue ", "%8.4g");
+	}
+	mat_err1.print(fdesc, prefix, "rel. matrix norm1  ", "%8.4g");
+	mat_errI.print(fdesc, prefix, "rel. matrix normInf", "%8.4g");
+	mat_errF.print(fdesc, prefix, "rel. matrix normFro", "%8.4g");
+	vec_err1.print(fdesc, prefix, "rel. vector norm1  ", "%8.4g");
+	vec_errI.print(fdesc, prefix, "rel. vector normInf", "%8.4g");
+	cpu_time.print(fdesc, prefix, "time (CPU secs)    ", "%8.4g");
+}
+
+void SylvParams::copy(const SylvParams& p)
+{
+	method = p.method;
+	convergence_tol = p.convergence_tol;
+	max_num_iter = p.max_num_iter;
+	bs_norm = p.bs_norm;
+	want_check = p.want_check;
+	converged = p.converged;
+	iter_last_norm = p.iter_last_norm;
+	num_iter = p.num_iter;
+	f_err1 = p.f_err1;
+	f_errI = p.f_errI;
+	viv_err1 = p.viv_err1;
+	viv_errI = p.viv_errI;
+	ivv_err1 = p.ivv_err1;
+	ivv_errI = p.ivv_errI;
+	f_blocks = p.f_blocks;
+	f_largest = p.f_largest;
+	f_zeros = p.f_zeros;
+	f_offdiag = p.f_offdiag;
+	rcondA1 = p.rcondA1;
+	rcondAI = p.rcondAI;
+	eig_min = p.eig_min;
+	mat_err1 = p.mat_err1;
+	mat_errI = p.mat_errI;
+	mat_errF = p.mat_errF;
+	vec_err1 = p.vec_err1;
+	vec_errI = p.vec_errI;
+	cpu_time = p.cpu_time;
+}
+
+void SylvParams::setArrayNames(int& num, const char** names) const
+{
+	num = 0;
+	if (method.getStatus() != undef)
+		names[num++] = "method";
+	if (convergence_tol.getStatus() != undef)
+		names[num++] = "convergence_tol";
+	if (max_num_iter.getStatus() != undef)
+		names[num++] = "max_num_iter";
+	if (bs_norm.getStatus() != undef)
+		names[num++] = "bs_norm";
+	if (converged.getStatus() != undef)
+		names[num++] = "converged";
+	if (iter_last_norm.getStatus() != undef)
+		names[num++] = "iter_last_norm";
+	if (num_iter.getStatus() != undef)
+		names[num++] = "num_iter";
+	if (f_err1.getStatus() != undef)
+		names[num++] = "f_err1";
+	if (f_errI.getStatus() != undef)
+		names[num++] = "f_errI";
+	if (viv_err1.getStatus() != undef)
+		names[num++] = "viv_err1";
+	if (viv_errI.getStatus() != undef)
+		names[num++] = "viv_errI";
+	if (ivv_err1.getStatus() != undef)
+		names[num++] = "ivv_err1";
+	if (ivv_errI.getStatus() != undef)
+		names[num++] = "ivv_errI";
+	if (f_blocks.getStatus() != undef)
+		names[num++] = "f_blocks";
+	if (f_largest.getStatus() != undef)
+		names[num++] = "f_largest";
+	if (f_zeros.getStatus() != undef)
+		names[num++] = "f_zeros";
+	if (f_offdiag.getStatus() != undef)
+		names[num++] = "f_offdiag";
+	if (rcondA1.getStatus() != undef)
+		names[num++] = "rcondA1";
+	if (rcondAI.getStatus() != undef)
+		names[num++] = "rcondAI";
+	if (eig_min.getStatus() != undef)
+		names[num++] = "eig_min";
+	if (mat_err1.getStatus() != undef)
+		names[num++] = "mat_err1";
+	if (mat_errI.getStatus() != undef)
+		names[num++] = "mat_errI";
+	if (mat_errF.getStatus() != undef)
+		names[num++] = "mat_errF";
+	if (vec_err1.getStatus() != undef)
+		names[num++] = "vec_err1";
+	if (vec_errI.getStatus() != undef)
+		names[num++] = "vec_errI";
+	if (cpu_time.getStatus() != undef)
+		names[num++] = "cpu_time";
+}
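+
+// The order in which names are appended above must match the order of the
+// mxSetFieldByNumber() calls in createStructArray() below, since the
+// struct fields are filled positionally.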
+
+#ifdef MATLAB
+mxArray* SylvParams::DoubleParamItem::createMatlabArray() const
+{
+	return mxCreateScalarDouble(value);
+}
+
+mxArray* SylvParams::IntParamItem::createMatlabArray() const
+{
+	mxArray* res = mxCreateNumericMatrix(1, 1, mxINT32_CLASS, mxREAL);
+	*((int*)mxGetData(res)) = value;
+	return res;
+}
+
+mxArray* SylvParams::BoolParamItem::createMatlabArray() const
+{
+	if (value)
+		return mxCreateString("true");
+	else
+		return mxCreateString("false");
+}
+
+mxArray* SylvParams::MethodParamItem::createMatlabArray() const
+{
+	if (value == iter)
+		return mxCreateString("iterative");
+	else
+		return mxCreateString("recursive");
+}
+
+mxArray* SylvParams::createStructArray() const
+{
+	const char* names[50];
+	int num;
+	setArrayNames(num, names);
+	const int dims[] = {1, 1};
+	mxArray* const res = mxCreateStructArray(2, dims, num, names);
+
+	int i = 0;
+	if (method.getStatus() != undef)
+		mxSetFieldByNumber(res, 0, i++, method.createMatlabArray());
+	if (convergence_tol.getStatus() != undef)
+		mxSetFieldByNumber(res, 0, i++, convergence_tol.createMatlabArray());
+	if (max_num_iter.getStatus() != undef)
+		mxSetFieldByNumber(res, 0, i++, max_num_iter.createMatlabArray());
+	if (bs_norm.getStatus() != undef)
+		mxSetFieldByNumber(res, 0, i++, bs_norm.createMatlabArray());
+	if (converged.getStatus() != undef)
+		mxSetFieldByNumber(res, 0, i++, converged.createMatlabArray());
+	if (iter_last_norm.getStatus() != undef)
+		mxSetFieldByNumber(res, 0, i++, iter_last_norm.createMatlabArray());
+	if (num_iter.getStatus() != undef)
+		mxSetFieldByNumber(res, 0, i++, num_iter.createMatlabArray());
+	if (f_err1.getStatus() != undef)
+		mxSetFieldByNumber(res, 0, i++, f_err1.createMatlabArray());
+	if (f_errI.getStatus() != undef)
+		mxSetFieldByNumber(res, 0, i++, f_errI.createMatlabArray());
+	if (viv_err1.getStatus() != undef)
+		mxSetFieldByNumber(res, 0, i++, viv_err1.createMatlabArray());
+	if (viv_errI.getStatus() != undef)
+		mxSetFieldByNumber(res, 0, i++, viv_errI.createMatlabArray());
+	if (ivv_err1.getStatus() != undef)
+		mxSetFieldByNumber(res, 0, i++, ivv_err1.createMatlabArray());
+	if (ivv_errI.getStatus() != undef)
+		mxSetFieldByNumber(res, 0, i++, ivv_errI.createMatlabArray());
+	if (f_blocks.getStatus() != undef)
+		mxSetFieldByNumber(res, 0, i++, f_blocks.createMatlabArray());
+	if (f_largest.getStatus() != undef)
+		mxSetFieldByNumber(res, 0, i++, f_largest.createMatlabArray());
+	if (f_zeros.getStatus() != undef)
+		mxSetFieldByNumber(res, 0, i++, f_zeros.createMatlabArray());
+	if (f_offdiag.getStatus() != undef)
+		mxSetFieldByNumber(res, 0, i++, f_offdiag.createMatlabArray());
+	if (rcondA1.getStatus() != undef)
+		mxSetFieldByNumber(res, 0, i++, rcondA1.createMatlabArray());
+	if (rcondAI.getStatus() != undef)
+		mxSetFieldByNumber(res, 0, i++, rcondAI.createMatlabArray());
+	if (eig_min.getStatus() != undef)
+		mxSetFieldByNumber(res, 0, i++, eig_min.createMatlabArray());
+	if (mat_err1.getStatus() != undef)
+		mxSetFieldByNumber(res, 0, i++, mat_err1.createMatlabArray());
+	if (mat_errI.getStatus() != undef)
+		mxSetFieldByNumber(res, 0, i++, mat_errI.createMatlabArray());
+	if (mat_errF.getStatus() != undef)
+		mxSetFieldByNumber(res, 0, i++, mat_errF.createMatlabArray());
+	if (vec_err1.getStatus() != undef)
+		mxSetFieldByNumber(res, 0, i++, vec_err1.createMatlabArray());
+	if (vec_errI.getStatus() != undef)
+		mxSetFieldByNumber(res, 0, i++, vec_errI.createMatlabArray());
+	if (cpu_time.getStatus() != undef)
+		mxSetFieldByNumber(res, 0, i++, cpu_time.createMatlabArray());
+
+	return res;
+}
+#endif
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/sylv/cc/SylvParams.h b/dynare++/sylv/cc/SylvParams.h
new file mode 100644
index 0000000000000000000000000000000000000000..afb5b874a4bdf61dd9e1afa22161f971a6bdb1a7
--- /dev/null
+++ b/dynare++/sylv/cc/SylvParams.h
@@ -0,0 +1,162 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/SylvParams.h,v 1.1.1.1 2004/06/04 13:00:54 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#ifndef SYLV_PARAMS_H
+#define SYLV_PARAMS_H
+
+#include <stdio.h>
+#include <string.h>
+
+#ifdef MATLAB
+#include "mex.h"
+#endif
+
+typedef enum {def, changed, undef} status;
+
+template <class _Type>
+struct ParamItem {
+protected:
+	typedef ParamItem<_Type> _Self;
+	status s;
+	_Type value;
+public:
+	ParamItem()
+		{s = undef;}
+	ParamItem(_Type val)
+		{value = val; s = def;}
+	ParamItem(const _Self& item)
+		{value = item.value; s = item.s;}
+	const _Self& operator=(const _Self& item)
+		{value = item.value; s = item.s; return *this;}
+	const _Self& operator=(const _Type& val)
+		{value = val; s = changed; return *this;}
+	_Type operator*() const
+		{return value;}
+	status getStatus() const
+		{return s;}
+	void print(FILE* f, const char* prefix, const char* str, const char* fmt) const
+		{
+			if (s == undef)
+				return;
+			char out[1000];
+			strcpy(out, prefix);
+			strcat(out, str);
+			strcat(out, "= ");
+			strcat(out, fmt);
+			if (s == def)
+				strcat(out, " <default>");
+			strcat(out,"\n");
+			fprintf(f, out, value);
+		} 
+};
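+
+/* A small illustration of the status tracking above: a default-constructed
+ * item is undef and is skipped by print(); construction from a value marks
+ * it def (printed with a "<default>" suffix); a later assignment marks it
+ * changed.
+ *
+ *   ParamItem<double> tol;        // undef, never printed
+ *   ParamItem<double> norm(1.3);  // def
+ *   norm = 1.1;                   // changed
+ */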
+
+class SylvParams {
+public:
+	typedef enum {iter, recurse} solve_method;
+
+protected:
+	class DoubleParamItem : public ParamItem<double> {
+	public:
+		DoubleParamItem() : ParamItem<double>() {}
+		DoubleParamItem(double val) : ParamItem<double>(val) {}
+		DoubleParamItem(const DoubleParamItem& item) : ParamItem<double>(item) {}
+		const DoubleParamItem& operator=(const double& val)
+			{ParamItem<double>::operator=(val); return *this;}
+#ifdef MATLAB
+		mxArray* createMatlabArray() const;
+#endif
+	};
+
+	class IntParamItem : public ParamItem<int> {
+	public:
+		IntParamItem() : ParamItem<int>() {}
+		IntParamItem(int val) : ParamItem<int>(val) {}
+		IntParamItem(const IntParamItem& item) : ParamItem<int>(item) {}
+		const IntParamItem& operator=(const int& val)
+			{ParamItem<int>::operator=(val); return *this;}
+#ifdef MATLAB
+		mxArray* createMatlabArray() const;
+#endif
+	};
+
+	class BoolParamItem : public ParamItem<bool> {
+	public:
+		BoolParamItem() : ParamItem<bool>() {}
+		BoolParamItem(bool val) : ParamItem<bool>(val) {}
+		BoolParamItem(const BoolParamItem& item) : ParamItem<bool>(item) {}
+		const BoolParamItem& operator=(const bool& val)
+			{ParamItem<bool>::operator=(val); return *this;}
+#ifdef MATLAB
+		mxArray* createMatlabArray() const;
+#endif
+	};
+
+	class MethodParamItem : public ParamItem<solve_method> {
+	public:
+		MethodParamItem() : ParamItem<solve_method>() {}
+		MethodParamItem(solve_method val) : ParamItem<solve_method>(val) {}
+		MethodParamItem(const MethodParamItem& item) : ParamItem<solve_method>(item) {}
+		const MethodParamItem& operator=(const solve_method& val)
+			{ParamItem<solve_method>::operator=(val); return *this;}
+#ifdef MATLAB
+		mxArray* createMatlabArray() const;
+#endif
+	};
+
+public:
+	// input parameters
+	MethodParamItem method; // method of solution: iter/recurse
+	DoubleParamItem convergence_tol; // norm for what we consider converged
+	IntParamItem max_num_iter; // max number of iterations
+	DoubleParamItem bs_norm; // Bavely Stewart log10 of norm for diagonalization
+	BoolParamItem want_check; // true => allocate extra space for checks
+	// output parameters
+	BoolParamItem converged; // true if converged
+	DoubleParamItem iter_last_norm; // norm of the last iteration
+	IntParamItem num_iter; // number of iterations
+	DoubleParamItem f_err1; // norm 1 of diagonalization abs. error C-V*F*inv(V)
+	DoubleParamItem f_errI; // norm Inf of diagonalization abs. error C-V*F*inv(V)
+	DoubleParamItem viv_err1; // norm 1 of error I-V*inv(V)
+	DoubleParamItem viv_errI; // norm Inf of error I-V*inv(V)
+	DoubleParamItem ivv_err1; // norm 1 of error I-inv(V)*V
+	DoubleParamItem ivv_errI; // norm Inf of error I-inv(V)*V
+	IntParamItem f_blocks; // number of diagonal blocks of F
+	IntParamItem f_largest; // size of largest diagonal block in F
+	IntParamItem f_zeros; // number of off diagonal zeros in F
+	IntParamItem f_offdiag; // number of all off diagonal elements in F
+	DoubleParamItem rcondA1; // reciprocal cond 1 number of A
+	DoubleParamItem rcondAI; // reciprocal cond Inf number of A
+	DoubleParamItem eig_min; // minimum eigenvalue of the solved system
+	DoubleParamItem mat_err1; // rel. matrix 1 norm of A*X-B*X*kron(C,..,C)-D
+	DoubleParamItem mat_errI; // rel. matrix Inf norm of A*X-B*X*kron(C,..,C)-D
+	DoubleParamItem mat_errF; // rel. matrix Frob. norm of A*X-B*X*kron(C,..,C)-D
+	DoubleParamItem vec_err1; // rel. vector 1 norm of A*X-B*X*kron(C,..,C)-D
+	DoubleParamItem vec_errI; // rel. vector Inf norm of A*X-B*X*kron(C,..,C)-D
+	DoubleParamItem cpu_time; // time of the job in CPU seconds
+	// note: remember to change copy() if adding/removing member
+
+	SylvParams(bool wc = false)
+		: method(recurse), convergence_tol(1.e-30), max_num_iter(15),
+		  bs_norm(1.3), want_check(wc) {}
+	SylvParams(const SylvParams& p)
+		{copy(p);}
+	const SylvParams& operator=(const SylvParams& p)
+		{copy(p); return *this;}
+	~SylvParams() {}
+	void print(const char* prefix) const;
+	void print(FILE* fdesc, const char* prefix) const;
+	void setArrayNames(int& num, const char** names) const;
+#ifdef MATLAB
+	mxArray* createStructArray() const;
+#endif
+private:
+	void copy(const SylvParams& p);
+};
+
+#endif /* SYLV_PARAMS_H */
+
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/sylv/cc/SylvesterSolver.h b/dynare++/sylv/cc/SylvesterSolver.h
new file mode 100644
index 0000000000000000000000000000000000000000..df9bcce4520476fbb131628967a3f3f5cea2322c
--- /dev/null
+++ b/dynare++/sylv/cc/SylvesterSolver.h
@@ -0,0 +1,51 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/SylvesterSolver.h,v 1.1.1.1 2004/06/04 13:00:54 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#ifndef SYLVESTER_SOLVER_H
+#define SYLVESTER_SOLVER_H
+
+#include "KronVector.h"
+#include "QuasiTriangular.h"
+#include "QuasiTriangularZero.h"
+#include "SimilarityDecomp.h"
+#include "SylvParams.h"
+#include "SchurDecomp.h"
+
+class SylvesterSolver {
+protected:
+	const QuasiTriangular* const matrixK;
+	const QuasiTriangular* const matrixF;
+private:
+	/* return true when it is more efficient to use QuasiTriangular
+	 * than QuasiTriangularZero */
+	static bool zeroPad(const SchurDecompZero& kdecomp) {
+		return ((kdecomp.getZeroCols()*3 < kdecomp.getDim()*2) ||
+				(kdecomp.getZeroCols() < 10));
+	}
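+	/* For example, with kdecomp.getDim() == 100 the plain QuasiTriangular
+	 * representation is chosen whenever fewer than 67 columns are zero
+	 * (and always when fewer than 10 are); otherwise the sparser
+	 * QuasiTriangularZero representation is used by the constructors
+	 * below. */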
+public:
+	SylvesterSolver(const QuasiTriangular& k, const QuasiTriangular& f)
+		: matrixK(new QuasiTriangular(k)),
+		  matrixF(new QuasiTriangular(f))
+		{}
+	SylvesterSolver(const SchurDecompZero& kdecomp, const SchurDecomp& fdecomp)
+		: matrixK((zeroPad(kdecomp)) ?
+				  new QuasiTriangular(kdecomp) : new QuasiTriangularZero(kdecomp)),
+		  matrixF(new QuasiTriangular(fdecomp))
+		{}
+	SylvesterSolver(const SchurDecompZero& kdecomp, const SimilarityDecomp& fdecomp)
+		: matrixK((zeroPad(kdecomp)) ?
+				  new QuasiTriangular(kdecomp) : new QuasiTriangularZero(kdecomp)),
+		  matrixF(new BlockDiagonal(fdecomp.getB()))
+		{}
+	virtual ~SylvesterSolver()
+		{delete matrixK; delete matrixF;}
+	virtual void solve(SylvParams& pars, KronVector& x) const = 0;
+};
+
+#endif /* SYLVESTER_SOLVER_H */
+
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/sylv/cc/SymSchurDecomp.cpp b/dynare++/sylv/cc/SymSchurDecomp.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3c11314ac5d60936abb942d8df650bfb7615b32f
--- /dev/null
+++ b/dynare++/sylv/cc/SymSchurDecomp.cpp
@@ -0,0 +1,101 @@
+/* $Header$ */
+
+/* Tag $Name$ */
+
+#include "SymSchurDecomp.h"
+#include "SylvException.h"
+
+#include "cpplapack.h"
+
+#include <algorithm>
+#include <cmath>
+
+SymSchurDecomp::SymSchurDecomp(const GeneralMatrix& mata)
+	: lambda(mata.numRows()), q(mata.numRows())
+{
+	// check mata is square
+	if (mata.numRows() != mata.numCols())
+		throw SYLV_MES_EXCEPTION("Matrix is not square in SymSchurDecomp constructor");
+
+	// prepare for dsyevr
+	const char* jobz = "V";
+	const char* range = "A";
+	const char* uplo = "U";
+	int n = mata.numRows();
+	GeneralMatrix tmpa(mata);
+	double* a = tmpa.base();
+	int lda = tmpa.getLD();
+	double dum;
+	double* vl = &dum;
+	double* vu = &dum;
+	int idum;
+	int* il = &idum;
+	int* iu = &idum;
+	double abstol = 0.0;
+	int m = n;
+	double* w = lambda.base();
+	double* z = q.base();
+	int ldz = q.getLD();
+	int* isuppz = new int[2*std::max(1,m)];
+	double tmpwork;
+	int lwork = -1;
+	int tmpiwork;
+	int liwork = -1;
+	int info;
+
+	// query for lwork and liwork
+	LAPACK_dsyevr(jobz, range, uplo, &n, a, &lda, vl, vu, il, iu, &abstol,
+				  &m, w, z, &ldz, isuppz, &tmpwork, &lwork, &tmpiwork, &liwork, &info);
+	lwork = (int)tmpwork;
+	liwork = tmpiwork;
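+	// (lwork == -1 and liwork == -1 above form the standard LAPACK
+	// workspace query: the first dsyevr call only reports the optimal
+	// sizes in tmpwork and tmpiwork without computing the decomposition.)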
+	// allocate work arrays
+	double* work = new double[lwork];
+	int* iwork = new int[liwork];
+	
+	// do the calculation
+	LAPACK_dsyevr(jobz, range, uplo, &n, a, &lda, vl, vu, il, iu, &abstol,
+				  &m, w, z, &ldz, isuppz, work, &lwork, iwork, &liwork, &info);
+
+	if (info < 0)
+		throw SYLV_MES_EXCEPTION("Internal error in SymSchurDecomp constructor");
+	if (info > 0)
+		throw SYLV_MES_EXCEPTION("Internal LAPACK error in DSYEVR");
+
+	delete [] work;
+	delete [] iwork;
+	delete [] isuppz;
+}
+
+void SymSchurDecomp::getFactor(GeneralMatrix& f) const
+{
+	if (f.numRows() != q.numRows())
+		throw SYLV_MES_EXCEPTION("Wrong dimension of factor matrix in SymSchurDecomp::getFactor");
+	if (f.numRows() != f.numCols())
+		throw SYLV_MES_EXCEPTION("Factor matrix is not square in SymSchurDecomp::getFactor");
+	if (! isPositiveSemidefinite())
+		throw SYLV_MES_EXCEPTION("Symmetric decomposition not positive semidefinite in SymSchurDecomp::getFactor");
+
+	f = q;
+	for (int i = 0; i < f.numCols(); i++) {
+		Vector fi(f, i);
+		fi.mult(std::sqrt(lambda[i]));
+	}
+}
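+
+// Note: since A = Q*diag(lambda)*Q^T, scaling the i-th column of Q by
+// sqrt(lambda[i]) in getFactor() yields F with F*F^T = A.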
+
+
+// LAPACK documents that the eigenvalues are returned in ascending order,
+// but we do not rely on that here
+bool SymSchurDecomp::isPositiveSemidefinite() const
+{
+	for (int i = 0; i < lambda.length(); i++)
+		if (lambda[i] < 0)
+			return false;
+	return true;
+}
+
+void SymSchurDecomp::correctDefinitness(double tol)
+{
+	for (int i = 0; i < lambda.length(); i++)
+		if (lambda[i] < 0 && lambda[i] > - tol)
+			lambda[i] = 0.0;
+}
diff --git a/dynare++/sylv/cc/SymSchurDecomp.h b/dynare++/sylv/cc/SymSchurDecomp.h
new file mode 100644
index 0000000000000000000000000000000000000000..7840421118a55fdb68f6e196c2ef3cd391153d69
--- /dev/null
+++ b/dynare++/sylv/cc/SymSchurDecomp.h
@@ -0,0 +1,41 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/SchurDecomp.h,v 1.1.1.1 2004/06/04 13:00:44 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#ifndef SYM_SCHUR_DECOMP_H
+#define SYM_SCHUR_DECOMP_H
+
+#include "SylvMatrix.h"
+
+class SymSchurDecomp {
+protected:
+	Vector lambda;
+	SqSylvMatrix q;
+public:
+	/** Calculates A = Q*Lambda*Q^T, where A is assumed to be
+	 * symmetric and Lambda real diagonal, hence a vector. */
+	SymSchurDecomp(const GeneralMatrix& a);
+	SymSchurDecomp(const SymSchurDecomp& ssd)
+		: lambda(ssd.lambda), q(ssd.q) {}
+	virtual ~SymSchurDecomp() {}
+	const Vector& getLambda() const
+		{return lambda;}
+	const SqSylvMatrix& getQ() const
+		{return q;}
+	/** Returns a factor F such that F*F^T = A; raises an exception if A
+	 * is not positive semidefinite. F must be square. */
+	void getFactor(GeneralMatrix& f) const;
+	/** Returns true if A is positive semidefinite. */
+	bool isPositiveSemidefinite() const;
+	/** Corrects definiteness: sets all eigenvalues between minus the
+	 * tolerance and zero to zero. */
+	void correctDefinitness(double tol);
+
+};
+
+#endif
+
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/sylv/cc/TriangularSylvester.cpp b/dynare++/sylv/cc/TriangularSylvester.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..64471afe8011fb5ab99175e81238daa43b8c33ed
--- /dev/null
+++ b/dynare++/sylv/cc/TriangularSylvester.cpp
@@ -0,0 +1,392 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/TriangularSylvester.cpp,v 1.1.1.1 2004/06/04 13:00:59 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#include "TriangularSylvester.h"
+#include "QuasiTriangularZero.h"
+#include "KronUtils.h"
+#include "BlockDiagonal.h"
+
+#include <stdio.h>
+#include <cmath>
+
+double TriangularSylvester::diag_zero = 1.e-15;
+double TriangularSylvester::diag_zero_sq = 1.e-30;
+
+TriangularSylvester::TriangularSylvester(const QuasiTriangular& k,
+										 const QuasiTriangular& f)
+	: SylvesterSolver(k, f),
+	  matrixKK(matrixK->clone(2, *matrixK)),
+	  matrixFF(new QuasiTriangular(2, *matrixF))
+{
+}
+
+TriangularSylvester::TriangularSylvester(const SchurDecompZero& kdecomp,
+										 const SchurDecomp& fdecomp)
+	: SylvesterSolver(kdecomp, fdecomp),
+	  matrixKK(matrixK->clone(2, *matrixK)),
+	  matrixFF(new QuasiTriangular(2, *matrixF))
+{
+}
+
+TriangularSylvester::TriangularSylvester(const SchurDecompZero& kdecomp,
+										 const SimilarityDecomp& fdecomp)
+	: SylvesterSolver(kdecomp, fdecomp),
+	  matrixKK(matrixK->clone(2, *matrixK)),
+	  matrixFF(new BlockDiagonal(2, *matrixF))
+{
+}
+
+TriangularSylvester::~TriangularSylvester()
+{
+	delete matrixKK;
+	delete matrixFF;
+}
+
+void TriangularSylvester::print() const
+{
+	printf("matrix K (%d):\n",matrixK->getDiagonal().getSize());
+	matrixK->print();
+	printf("matrix F (%d):\n",matrixF->getDiagonal().getSize());
+	matrixF->print();
+}
+
+void TriangularSylvester::solve(SylvParams& pars, KronVector& d) const
+{
+	double eig_min = 1e30;
+	solvi(1., d, eig_min);
+	pars.eig_min = sqrt(eig_min);
+}
+
+void TriangularSylvester::solvi(double r, KronVector& d, double& eig_min) const
+{
+	if (d.getDepth() == 0) {
+		QuasiTriangular* t = matrixK->clone(r);
+		t->solvePre(d, eig_min);
+		delete t;
+	} else {
+		for (const_diag_iter di = matrixF->diag_begin();
+			 di != matrixF->diag_end();
+			 ++di) {
+			if ((*di).isReal()) {
+				solviRealAndEliminate(r, di, d, eig_min);
+			} else {
+				solviComplexAndEliminate(r, di, d, eig_min);
+			}
+		}
+	}
+}
+
+
+void TriangularSylvester::solvii(double alpha, double beta1, double beta2,
+								 KronVector& d1, KronVector& d2,
+								 double& eig_min) const
+{
+	KronVector d1tmp(d1);
+	KronVector d2tmp(d2);
+	linEval(alpha, beta1, beta2, d1, d2, d1tmp, d2tmp);
+	solviip(alpha, beta1*beta2, d1, eig_min);
+	solviip(alpha, beta1*beta2, d2, eig_min);
+}
+
+
+void TriangularSylvester::solviip(double alpha, double betas,
+								  KronVector& d, double& eig_min) const
+{
+	// quick exit to solvi if betas is small
+	if (betas < diag_zero_sq) {
+		solvi(alpha, d, eig_min);
+		solvi(alpha, d, eig_min);
+		return;
+	}
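+	// (With betas this small the quadratic operator degenerates to the
+	// square of the linear one, so solvi with the same alpha is simply
+	// applied twice above.)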
+
+	if (d.getDepth() == 0) {
+		double aspbs = alpha*alpha+betas;
+		QuasiTriangular* t= matrixK->clone(2*alpha, aspbs, *matrixKK);
+		t->solvePre(d, eig_min);
+		delete t;
+	} else {
+		const_diag_iter di = matrixF->diag_begin();
+		const_diag_iter dsi = matrixFF->diag_begin();
+		for (; di != matrixF->diag_end(); ++di, ++dsi) {
+			if ((*di).isReal()) {
+				solviipRealAndEliminate(alpha, betas, di, dsi, d, eig_min);
+			} else {
+				solviipComplexAndEliminate(alpha, betas, di, dsi, d, eig_min);
+			}
+		}
+	}
+}
+
+
+void TriangularSylvester::solviRealAndEliminate(double r, const_diag_iter di,
+												KronVector& d, double& eig_min) const
+{
+	// di is real
+	int jbar = (*di).getIndex();
+	double f = *((*di).getAlpha());
+	KronVector dj(d, jbar);
+	// solve system
+	if (std::abs(r*f) > diag_zero) {
+		solvi(r*f, dj, eig_min);
+	}
+	// calculate y
+	KronVector y((const KronVector&)dj);
+	KronUtils::multKron(*matrixF, *matrixK, y);
+	y.mult(r);
+	double divisor = 1.0;
+	solviEliminateReal(di, d, y, divisor);
+}
+
+void TriangularSylvester::solviEliminateReal(const_diag_iter di, KronVector& d,
+											 const KronVector& y, double divisor) const
+{
+	for (const_row_iter ri = matrixF->row_begin(*di);
+		 ri != matrixF->row_end(*di);
+		 ++ri) {
+		KronVector dk(d, ri.getCol());
+		dk.add(-(*ri)/divisor, y);
+	}
+}
+
+void TriangularSylvester::solviComplexAndEliminate(double r, const_diag_iter di,
+												   KronVector& d, double& eig_min) const
+{
+	// di is complex
+	int jbar = (*di).getIndex();
+	// pick data
+	double alpha = *(*di).getAlpha();
+	double beta1 = (*di).getBeta2();
+	double beta2 = -(*di).getBeta1();
+	double aspbs = (*di).getDeterminant();
+	KronVector dj(d, jbar);
+	KronVector djj(d, jbar+1);
+	// solve
+	if (r*r*aspbs > diag_zero_sq) { 
+		solvii(r*alpha, r*beta1, r*beta2, dj, djj, eig_min);
+	}
+	KronVector y1(dj);
+	KronVector y2(djj);
+	KronUtils::multKron(*matrixF, *matrixK, y1);
+	KronUtils::multKron(*matrixF, *matrixK, y2);
+	y1.mult(r);
+	y2.mult(r);
+	double divisor = 1.0;
+	solviEliminateComplex(di, d, y1, y2, divisor);
+}
+
+void TriangularSylvester::solviEliminateComplex(const_diag_iter di, KronVector& d,
+												const KronVector& y1, const KronVector& y2,
+												double divisor) const
+{
+	for (const_row_iter ri = matrixF->row_begin(*di);
+		 ri != matrixF->row_end(*di);
+		 ++ri) {
+		KronVector dk(d, ri.getCol());
+		dk.add(-ri.a()/divisor, y1);
+		dk.add(-ri.b()/divisor, y2);
+	}
+}
+
+void TriangularSylvester::solviipRealAndEliminate(double alpha, double betas,
+												  const_diag_iter di, const_diag_iter dsi,
+												  KronVector& d, double& eig_min) const
+{
+	// di and dsi are real
+	int jbar = (*di).getIndex();
+	double aspbs = alpha*alpha+betas;
+	// pick data
+	double f = *((*di).getAlpha());
+	double fs = f*f;
+	KronVector dj(d, jbar);
+	// solve
+	if (fs*aspbs > diag_zero_sq) {
+		solviip(f*alpha, fs*betas, dj, eig_min);
+	}
+	KronVector y1((const KronVector&)dj);
+	KronVector y2((const KronVector&)dj);
+	KronUtils::multKron(*matrixF, *matrixK, y1);
+	y1.mult(2*alpha);
+	KronUtils::multKron(*matrixFF, *matrixKK, y2);
+	y2.mult(aspbs);
+	double divisor = 1.0;
+	double divisor2 = 1.0;
+	solviipEliminateReal(di, dsi, d, y1, y2, divisor, divisor2);
+}
+
+void TriangularSylvester::solviipEliminateReal(const_diag_iter di, const_diag_iter dsi,
+											   KronVector& d,
+											   const KronVector& y1, const KronVector& y2,
+											   double divisor, double divisor2) const
+{
+	const_row_iter ri = matrixF->row_begin(*di);
+	const_row_iter rsi = matrixFF->row_begin(*dsi);
+	for (; ri != matrixF->row_end(*di); ++ri, ++rsi) {
+		KronVector dk(d, ri.getCol());
+		dk.add(-(*ri)/divisor, y1);
+		dk.add(-(*rsi)/divisor2, y2);
+	}
+} 
+
+void TriangularSylvester::solviipComplexAndEliminate(double alpha, double betas,
+													 const_diag_iter di, const_diag_iter dsi,
+													 KronVector& d, double& eig_min) const
+{
+	// di and dsi are complex
+	int jbar = (*di).getIndex();
+	double aspbs = alpha*alpha+betas;
+	// pick data
+	double gamma = *((*di).getAlpha());
+	double delta1 = (*di).getBeta2(); // swap because of transpose
+	double delta2 = -(*di).getBeta1();
+	double gspds = (*di).getDeterminant();
+	KronVector dj(d, jbar);
+	KronVector djj(d, jbar+1);
+	if (gspds*aspbs > diag_zero_sq) {
+		solviipComplex(alpha, betas, gamma, delta1, delta2, dj, djj, eig_min);
+	}
+	// here dj, djj is solution, set y1, y2, y11, y22
+	// y1
+	KronVector y1((const KronVector&) dj);
+	KronUtils::multKron(*matrixF, *matrixK, y1);
+	y1.mult(2*alpha);
+	// y11
+	KronVector y11((const KronVector&) djj);
+	KronUtils::multKron(*matrixF, *matrixK, y11);
+	y11.mult(2*alpha);
+	// y2
+	KronVector y2((const KronVector&) dj);
+	KronUtils::multKron(*matrixFF, *matrixKK, y2);
+	y2.mult(aspbs);
+	// y22
+	KronVector y22((const KronVector&) djj);
+	KronUtils::multKron(*matrixFF, *matrixKK, y22);
+	y22.mult(aspbs);
+
+	double divisor = 1.0;
+	solviipEliminateComplex(di, dsi, d, y1, y11, y2, y22, divisor);
+}
+
+
+void TriangularSylvester::solviipComplex(double alpha, double betas, double gamma,
+										 double delta1, double delta2,
+										 KronVector& d1, KronVector& d2,
+										 double& eig_min) const
+{
+	KronVector d1tmp(d1);
+	KronVector d2tmp(d2);
+	quaEval(alpha, betas, gamma, delta1, delta2,
+			d1, d2, d1tmp, d2tmp);
+	double delta = sqrt(delta1*delta2);
+	double beta = sqrt(betas);
+	double a1 = alpha*gamma - beta*delta;
+	double b1 = alpha*delta + gamma*beta;
+	double a2 = alpha*gamma + beta*delta;
+	double b2 = alpha*delta - gamma*beta;
+	solviip(a2, b2*b2, d1, eig_min);
+	solviip(a1, b1*b1, d1, eig_min);
+	solviip(a2, b2*b2, d2, eig_min);
+	solviip(a1, b1*b1, d2, eig_min);
+}
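+
+// Note on solviipComplex(): with beta = sqrt(betas) and
+// delta = sqrt(delta1*delta2), the pairs (a1,b1) and (a2,b2) above are the
+// real and (up to sign) imaginary parts of the products
+// (alpha +/- i*beta)*(gamma + i*delta), so the original operator is split
+// into two quadratic solviip() solves per right-hand side.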
+
+void TriangularSylvester::solviipEliminateComplex(const_diag_iter di, const_diag_iter dsi,
+												  KronVector& d,
+												  const KronVector& y1, const KronVector& y11,
+												  const KronVector& y2, const KronVector& y22,
+												  double divisor) const
+{
+	const_row_iter ri = matrixF->row_begin(*di);
+	const_row_iter rsi = matrixFF->row_begin(*dsi);
+	for (; ri != matrixF->row_end(*di); ++ri, ++rsi) {
+		KronVector dk(d, ri.getCol());
+		dk.add(-ri.a()/divisor, y1);
+		dk.add(-ri.b()/divisor, y11);
+		dk.add(-rsi.a()/divisor, y2);
+		dk.add(-rsi.b()/divisor, y22);
+	}
+}
+
+void TriangularSylvester::linEval(double alpha, double beta1, double beta2,
+								  KronVector& x1, KronVector& x2,
+								  const ConstKronVector& d1, const ConstKronVector& d2) const
+{
+	KronVector d1tmp(d1); // make copy
+	KronVector d2tmp(d2); // make copy
+	KronUtils::multKron(*matrixF, *matrixK, d1tmp);
+	KronUtils::multKron(*matrixF, *matrixK, d2tmp);
+	x1 = d1;
+	x2 = d2;
+	Vector::mult2a(alpha, beta1, -beta2, x1, x2, d1tmp, d2tmp);
+}
+
+void TriangularSylvester::quaEval(double alpha, double betas,
+								  double gamma, double delta1, double delta2,
+								  KronVector& x1, KronVector& x2,
+								  const ConstKronVector& d1, const ConstKronVector& d2) const
+{
+	KronVector d1tmp(d1); // make copy
+	KronVector d2tmp(d2); // make copy
+	KronUtils::multKron(*matrixF, *matrixK, d1tmp);
+	KronUtils::multKron(*matrixF, *matrixK, d2tmp);
+	x1 = d1;
+	x2 = d2;
+	Vector::mult2a(2*alpha*gamma, 2*alpha*delta1, -2*alpha*delta2,
+				   x1, x2, d1tmp, d2tmp);
+	d1tmp = d1; // restore to d1
+	d2tmp = d2; // restore to d2
+	KronUtils::multKron(*matrixFF, *matrixKK, d1tmp);
+	KronUtils::multKron(*matrixFF, *matrixKK, d2tmp);
+	double aspbs = alpha*alpha + betas;
+	double gspds = gamma*gamma - delta1*delta2;
+	Vector::mult2a(aspbs*gspds, 2*aspbs*gamma*delta1, -2*aspbs*gamma*delta2,
+				   x1, x2, d1tmp, d2tmp);
+}
+
+
+double TriangularSylvester::getEigSep(int depth) const
+{
+	int f_size = matrixF->getDiagonal().getSize();
+	Vector feig(2*f_size);
+	matrixF->getDiagonal().getEigenValues(feig);
+	int k_size = matrixK->getDiagonal().getSize();
+	Vector keig(2*k_size);
+	matrixK->getDiagonal().getEigenValues(keig);
+	
+	KronVector eig(f_size, 2*k_size, depth);
+	multEigVector(eig, feig, keig);
+
+	double min = 1.0e20;
+	for (int i = 0; i < eig.length()/2; i++) {
+		double alpha = eig[2*i];
+		double beta = eig[2*i+1];
+		double ss = (alpha+1)*(alpha+1)+beta*beta;
+		if (min > ss)
+			min = ss;
+	}
+
+	return min;
+}
+
+void TriangularSylvester::multEigVector(KronVector& eig, const Vector& feig,
+										const Vector& keig)
+{
+	int depth = eig.getDepth();
+	int m = eig.getM();
+	int n = eig.getN();
+
+	if (depth == 0) {
+		eig = keig;
+	} else {
+		KronVector aux(m, n, depth-1);
+		multEigVector(aux, feig, keig);
+		for (int i = 0; i < m; i++) {
+			KronVector eigi(eig, i);
+			eigi.zeros();
+			eigi.add(&feig[2*i], aux);
+		}
+	}
+}
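+
+// Note: multEigVector() builds, level by level, the Kronecker product of
+// the complex eigenvalue vectors of F and K; the complex add() (zaxpy)
+// multiplies each pair in feig with the product accumulated so far, so
+// eig ends up holding all the products stored as (real, imaginary) pairs.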
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/sylv/cc/TriangularSylvester.h b/dynare++/sylv/cc/TriangularSylvester.h
new file mode 100644
index 0000000000000000000000000000000000000000..b4908729e1044826ce43b72983c6f2246151e7fa
--- /dev/null
+++ b/dynare++/sylv/cc/TriangularSylvester.h
@@ -0,0 +1,115 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/TriangularSylvester.h,v 1.1.1.1 2004/06/04 13:01:03 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#ifndef TRIANGULAR_SYLVESTER_H
+#define TRIANGULAR_SYLVESTER_H
+
+#include "SylvesterSolver.h"
+#include "KronVector.h"
+#include "QuasiTriangular.h"
+#include "QuasiTriangularZero.h"
+#include "SimilarityDecomp.h"
+
+class TriangularSylvester : public SylvesterSolver {
+	const QuasiTriangular* const matrixKK;
+	const QuasiTriangular* const matrixFF;
+public:
+	TriangularSylvester(const QuasiTriangular& k, const QuasiTriangular& f);
+	TriangularSylvester(const SchurDecompZero& kdecomp, const SchurDecomp& fdecomp);
+	TriangularSylvester(const SchurDecompZero& kdecomp, const SimilarityDecomp& fdecomp);
+	virtual ~TriangularSylvester();
+	void print() const;
+	void solve(SylvParams& pars, KronVector& d) const;
+
+	void solvi(double r, KronVector& d, double& eig_min) const;
+	void solvii(double alpha, double beta1, double beta2,
+				KronVector& d1, KronVector& d2,
+				double& eig_min) const;
+	void solviip(double alpha, double betas,
+				 KronVector& d, double& eig_min) const;
+	/* evaluates:
+	   |x1|   |d1| |alpha -beta1|                       |d1|
+	   |  | = |  |+|            |\otimes F'...\otimes K |  |
+	   |x2|   |d2| |beta2  alpha|                       |d2|
+	*/
+	void linEval(double alpha, double beta1, double beta2,
+				 KronVector& x1, KronVector& x2,
+				 const ConstKronVector& d1, const ConstKronVector& d2) const;
+	void linEval(double alpha, double beta1, double beta2,
+				 KronVector& x1, KronVector& x2,
+				 const KronVector& d1, const KronVector& d2) const
+		{linEval(alpha, beta1, beta2, x1, x2,
+				 ConstKronVector(d1), ConstKronVector(d2));}
+
+	/* evaluates:
+	   |x1|   |d1|          |gamma -delta1|                       |d1|
+	   |  | = |  | + 2alpha*|             |\otimes F'...\otimes K |  | +
+	   |x2|   |d2|          |delta2  gamma|                       |d2|
+
+                                     |gamma -delta1|^2                       |d1|
+                   + (alpha^2+betas)*|             |\otimes F'2...\otimes K2 |  |
+                                     |delta2  gamma|                         |d2|
+	*/
+	void quaEval(double alpha, double betas,
+				 double gamma, double delta1, double delta2,
+				 KronVector& x1, KronVector& x2,
+				 const ConstKronVector& d1, const ConstKronVector& d2) const;
+	void quaEval(double alpha, double betas,
+				 double gamma, double delta1, double delta2,
+				 KronVector& x1, KronVector& x2,
+				 const KronVector& d1, const KronVector& d2) const
+		{quaEval(alpha, betas, gamma, delta1, delta2, x1, x2,
+				 ConstKronVector(d1), ConstKronVector(d2));}
+private:
+	/* returns the square of the size of the minimal eigenvalue of the
+	   solved system; now obsolete */
+	double getEigSep(int depth) const;
+	/* recursively calculates the Kronecker product of complex vectors (used in getEigSep) */
+	static void multEigVector(KronVector& eig, const Vector& feig, const Vector& keig);
+	/* auxiliary typedefs */
+	typedef QuasiTriangular::const_diag_iter const_diag_iter;
+	typedef QuasiTriangular::const_row_iter const_row_iter;
+	/* called from solvi */
+	void solviRealAndEliminate(double r, const_diag_iter di,
+							   KronVector& d, double& eig_min) const;
+	void solviComplexAndEliminate(double r, const_diag_iter di,
+								  KronVector& d, double& eig_min) const;
+	/* called from solviip */
+	void solviipRealAndEliminate(double alpha, double betas,
+								 const_diag_iter di, const_diag_iter dsi,
+								 KronVector& d, double& eig_min) const;
+	void solviipComplexAndEliminate(double alpha, double betas,
+									const_diag_iter di, const_diag_iter dsi,
+									KronVector& d, double& eig_min) const;
+	/* eliminations */
+	void solviEliminateReal(const_diag_iter di, KronVector& d,
+							const KronVector& y, double divisor) const;
+	void solviEliminateComplex(const_diag_iter di, KronVector& d,
+							   const KronVector& y1, const KronVector& y2,
+							   double divisor) const;
+	void solviipEliminateReal(const_diag_iter di, const_diag_iter dsi,
+							  KronVector& d,
+							  const KronVector& y1, const KronVector& y2,
+							  double divisor, double divisor2) const;
+	void solviipEliminateComplex(const_diag_iter di, const_diag_iter dsi,
+								 KronVector& d,
+								 const KronVector& y1, const KronVector& y11,
+								 const KronVector& y2, const KronVector& y22,
+								 double divisor) const;
+	/* Lemma 2 */
+	void solviipComplex(double alpha, double betas, double gamma,
+						double delta1, double delta2,
+						KronVector& d1, KronVector& d2,
+						double& eig_min) const;
+	/* norms for what we consider zero on diagonal of F */
+	static double diag_zero;
+	static double diag_zero_sq; // square of diag_zero
+};
+
+#endif /* TRIANGULAR_SYLVESTER_H */
+
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/sylv/cc/Vector.cpp b/dynare++/sylv/cc/Vector.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..931e094e921ef0d056bfbb7def30bc028e9ce5e8
--- /dev/null
+++ b/dynare++/sylv/cc/Vector.cpp
@@ -0,0 +1,372 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/Vector.cpp,v 1.1.1.1 2004/06/04 13:01:13 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+
+#include "Vector.h"
+#include "GeneralMatrix.h"
+#include "SylvException.h"
+#include "cppblas.h"
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <cmath>
+#include <algorithm>
+#include <limits>
+
+using namespace std;
+
+ZeroPad zero_pad;
+
+Vector::Vector(const Vector& v)
+	: len(v.length()), s(1), data(new double[len]), destroy(true)
+{
+	copy(v.base(), v.skip());
+}
+
+Vector::Vector(const ConstVector& v)
+	: len(v.length()), s(1), data(new double[len]), destroy(true)
+{
+	copy(v.base(), v.skip());
+}
+
+const Vector& Vector::operator=(const Vector& v)
+{
+	if (this == &v)
+		return *this;
+
+	if (v.length() != length()) {
+		throw SYLV_MES_EXCEPTION("Attempt to assign vectors with different lengths.");
+	}
+	if (s == v.s &&
+		((data <= v.data && v.data < data+len*s) ||
+		 (v.data <= data && data < v.data+v.len*v.s)) &&
+		(data-v.data) % s == 0) {
+		printf("this destroy=%d, v destroy=%d, data-v.data=%d, len=%d\n",
+			   destroy, v.destroy, (int)(data-v.data), len);
+		throw SYLV_MES_EXCEPTION("Attempt to assign overlapping vectors.");
+	}
+	copy(v.base(), v.skip());
+	return *this;
+}
+
+const Vector& Vector::operator=(const ConstVector& v)
+{
+	if (v.length() != length()) {
+		throw SYLV_MES_EXCEPTION("Attempt to assign vectors with different lengths.");
+	}
+	if (v.skip() == 1 && skip() == 1 && (
+			(base() < v.base() + v.length() && base() >= v.base()) ||
+			(base() + length() < v.base() + v.length() &&
+			 base() + length() > v.base()))) {
+		throw SYLV_MES_EXCEPTION("Attempt to assign overlapping vectors.");
+	}
+	copy(v.base(), v.skip());
+	return *this;
+}
+
+void Vector::copy(const double* d, int inc)
+{
+	int n = length();
+	int incy = skip();
+	BLAS_dcopy(&n, d, &inc, base(), &incy);
+}
+
+Vector::Vector(Vector& v, int off, int l)
+	: len(l), s(v.skip()), data(v.base()+off*v.skip()), destroy(false)
+{
+	if (off < 0 || off + length() > v.length())
+		throw SYLV_MES_EXCEPTION("Subvector not contained in supvector.");
+}
+
+Vector::Vector(const Vector& v, int off, int l)
+	: len(l), s(1), data(new double[len]), destroy(true)
+{
+	if (off < 0 || off + length() > v.length())
+		throw SYLV_MES_EXCEPTION("Subvector not contained in supvector.");
+	copy(v.base()+off*v.skip(), v.skip());
+}
+
+Vector::Vector(GeneralMatrix& m, int col)
+	: len(m.numRows()), s(1), data(&(m.get(0, col))), destroy(false)
+{
+}
+
+Vector::Vector(int row, GeneralMatrix& m)
+	: len(m.numCols()), s(m.getLD()), data(&(m.get(row, 0))), destroy(false)
+{
+}
+
+bool Vector::operator==(const Vector& y) const
+{
+	return ConstVector(*this) == y;
+}
+
+bool Vector::operator!=(const Vector& y) const
+{
+	return ConstVector(*this) != y;
+}
+
+bool Vector::operator<(const Vector& y) const
+{
+	return ConstVector(*this) < y;
+}
+
+bool Vector::operator<=(const Vector& y) const
+{
+	return ConstVector(*this) <= y;
+}
+
+bool Vector::operator>(const Vector& y) const
+{
+	return ConstVector(*this) > y;
+}
+
+bool Vector::operator>=(const Vector& y) const
+{
+	return ConstVector(*this) >= y;
+}
+
+void Vector::zeros()
+{
+	if (skip() == 1) {
+		double* p = base();
+		for (int i = 0; i < length()/ZeroPad::length;
+			 i++, p += ZeroPad::length)
+			memcpy(p, zero_pad.getBase(), sizeof(double)*ZeroPad::length);
+		for ( ; p < base()+length(); p++)
+			*p = 0.0;
+	} else {
+		for (int i = 0; i < length(); i++)
+			operator[](i) = 0.0;
+	}
+}
+
+void Vector::nans()
+{
+	for (int i = 0; i < length(); i++)
+		operator[](i) = std::numeric_limits<double>::quiet_NaN();
+}
+
+void Vector::infs()
+{
+	for (int i = 0; i < length(); i++)
+		operator[](i) = std::numeric_limits<double>::infinity();
+}
+
+Vector::~Vector()
+{
+	if (destroy) {
+		delete [] data;
+	}
+}
+
+void Vector::rotatePair(double alpha, double beta1, double beta2, int i)
+{
+	double tmp = alpha*operator[](i) - beta1*operator[](i+1);
+	operator[](i+1) = alpha*operator[](i+1) - beta2*operator[](i);
+	operator[](i) = tmp;
+}
+
+void Vector::add(double r, const Vector& v)
+{
+	add(r, ConstVector(v));
+}
+
+void Vector::add(double r, const ConstVector& v)
+{
+	int n = length();
+	int incx = v.skip();
+	int incy = skip();
+	BLAS_daxpy(&n, &r, v.base(), &incx, base(), &incy);
+}
+
+void Vector::add(const double* z, const Vector& v)
+{
+	add(z, ConstVector(v));
+}
+
+void Vector::add(const double* z, const ConstVector& v)
+{
+	int n = length()/2;
+	int incx = v.skip();
+	int incy = skip();
+	BLAS_zaxpy(&n, z, v.base(), &incx, base(), &incy);
+}
+
+void Vector::mult(double r)
+{
+	int n = length();
+	int incx = skip();
+	BLAS_dscal(&n, &r, base(), &incx);
+}
+
+void Vector::mult2(double alpha, double beta1, double beta2,
+				   Vector& x1, Vector& x2,
+				   const Vector& b1, const Vector& b2)
+{
+	x1.zeros();
+	x2.zeros();
+	mult2a(alpha, beta1, beta2, x1, x2, b1, b2);
+}
+
+void Vector::mult2a(double alpha, double beta1, double beta2,
+					Vector& x1, Vector& x2,
+					const Vector& b1, const Vector& b2)
+{
+	x1.add(alpha, b1);
+	x1.add(-beta1, b2);
+	x2.add(alpha, b2);
+	x2.add(-beta2, b1);
+}
+
+double Vector::getNorm() const
+{
+	ConstVector v(*this);
+	return v.getNorm();
+}
+
+double Vector::getMax() const
+{
+	ConstVector v(*this);
+	return v.getMax();
+}
+
+double Vector::getNorm1() const
+{
+	ConstVector v(*this);
+	return v.getNorm1();
+}
+
+double Vector::dot(const Vector& y) const
+{
+	return ConstVector(*this).dot(ConstVector(y));
+}
+
+bool Vector::isFinite() const
+{
+	return (ConstVector(*this)).isFinite();
+}
+
+void Vector::print() const
+{
+	for (int i = 0; i < length(); i++) {
+		printf("%d\t%8.4g\n", i, operator[](i));
+	}
+}
+
+
+ConstVector::ConstVector(const Vector& v, int off, int l) 
+	: BaseConstVector(l, v.skip(), v.base() + v.skip()*off)
+{
+	if (off < 0 || off + length() > v.length()) {
+		throw SYLV_MES_EXCEPTION("Subvector not contained in supvector.");
+	}
+}
+
+ConstVector::ConstVector(const ConstVector& v, int off, int l) 
+	: BaseConstVector(l, v.skip(), v.base() + v.skip()*off)
+{
+	if (off < 0 || off + length() > v.length()) {
+		throw SYLV_MES_EXCEPTION("Subvector not contained in supvector.");
+	}
+}
+
+ConstVector::ConstVector(const double* d, int skip, int l)
+	: BaseConstVector(l, skip, d)
+{
+}
+
+ConstVector::ConstVector(const ConstGeneralMatrix& m, int col)
+	: BaseConstVector(m.numRows(), 1, &(m.get(0, col)))
+{
+}
+
+ConstVector::ConstVector(int row, const ConstGeneralMatrix& m)
+	: BaseConstVector(m.numCols(), m.getLD(), &(m.get(row, 0)))
+{
+}
+
+bool ConstVector::operator==(const ConstVector& y) const
+{
+	if (length() != y.length())
+		return false;
+	if (length() == 0)
+		return true;
+	int i = 0;
+	while (i < length() && operator[](i) == y[i])
+		i++;
+	return i == length();
+}
+
+bool ConstVector::operator<(const ConstVector& y) const
+{
+	int i = std::min(length(), y.length());
+	int ii = 0;
+	while (ii < i && operator[](ii) == y[ii])
+		ii++;
+	if (ii < i)
+		return operator[](ii) < y[ii];
+	else
+		return length() < y.length();
+}
+
+double ConstVector::getNorm() const
+{
+	double s = 0;
+	for (int i = 0; i < length(); i++) {
+		s+=operator[](i)*operator[](i);
+	}
+	return sqrt(s);
+}
+
+double ConstVector::getMax() const
+{
+	double r = 0;
+	for (int i = 0; i < length(); i++) {
+		if (abs(operator[](i))>r)
+			r = abs(operator[](i));
+	}
+	return r;
+}
+
+double ConstVector::getNorm1() const
+{
+	double norm = 0.0;
+	for (int i = 0; i < length(); i++) {
+		norm += abs(operator[](i));
+	}
+	return norm;
+}
+
+double ConstVector::dot(const ConstVector& y) const
+{
+	if (length() != y.length())
+		throw SYLV_MES_EXCEPTION("Vector has different length in ConstVector::dot.");
+	int n = length();
+	int incx = skip();
+	int incy = y.skip();
+	return BLAS_ddot(&n, base(), &incx, y.base(), &incy);
+}
+
+bool ConstVector::isFinite() const
+{
+	int i = 0;
+	while (i < length() && isfinite(operator[](i)))
+		i++;
+	return i == length();
+}
+
+void ConstVector::print() const
+{
+	for (int i = 0; i < length(); i++) {
+		printf("%d\t%8.4g\n", i, operator[](i));
+	}
+}
+
+
+ZeroPad::ZeroPad()
+{
+	for (int i = 0; i < length; i++)
+		pad[i] = 0.0;
+}
diff --git a/dynare++/sylv/cc/Vector.h b/dynare++/sylv/cc/Vector.h
new file mode 100644
index 0000000000000000000000000000000000000000..be96c7aa557f5502e828a297a2c2aee43bf23946
--- /dev/null
+++ b/dynare++/sylv/cc/Vector.h
@@ -0,0 +1,175 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/Vector.h,v 1.1.1.1 2004/06/04 13:01:13 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#ifndef VECTOR_H
+#define VECTOR_H
+
+/* NOTE: Vector and ConstVector have no common superclass, in order to
+ * avoid the virtual method invocation mechanism. Some members and
+ * methods are thus duplicated. */
+
+#include <stdio.h>
+
+class GeneralMatrix;
+class ConstVector;
+
+class Vector {
+protected:
+	int len;
+	int s;
+	double* data;
+	bool destroy;
+public:
+	Vector() : len(0), s(1), data(0), destroy(false) {}
+	Vector(int l) : len(l), s(1), data(new double[l]), destroy(true) {}
+	Vector(Vector& v) : len(v.length()), s(v.skip()), data(v.base()), destroy(false) {}
+	Vector(const Vector& v);
+	Vector(const ConstVector& v);
+	Vector(const double* d, int l)
+		: len(l), s(1), data(new double[len]), destroy(true)
+		{copy(d, 1);}
+	Vector(double* d, int l)
+		: len(l), s(1), data(d), destroy(false) {}
+	Vector(double* d, int skip, int l)
+		: len(l), s(skip), data(d), destroy(false) {}
+	Vector(Vector& v, int off, int l);
+	Vector(const Vector& v, int off, int l);
+	Vector(GeneralMatrix& m, int col);
+	Vector(int row, GeneralMatrix& m);
+	const Vector& operator=(const Vector& v);
+	const Vector& operator=(const ConstVector& v);
+	double& operator[](int i)
+		{return data[s*i];}
+	const double& operator[](int i) const
+		{return data[s*i];}
+	const double* base() const
+		{return data;}
+	double* base()
+		{return data;}
+	int length() const
+		{return len;}
+	int skip() const
+		{return s;}
+
+	/** Exact equality. */
+	bool operator==(const Vector& y) const;
+	bool operator!=(const Vector& y) const;
+	/** Lexicographic ordering. */
+	bool operator<(const Vector& y) const;
+	bool operator<=(const Vector& y) const;
+	bool operator>(const Vector& y) const;
+	bool operator>=(const Vector& y) const;
+
+	virtual ~Vector();
+	void zeros();
+	void nans();
+	void infs();
+	bool toBeDestroyed() const {return destroy;}
+	void rotatePair(double alpha, double beta1, double beta2, int i);
+	void add(double r, const Vector& v);
+	void add(double r, const ConstVector& v);
+	void add(const double* z, const Vector& v);
+	void add(const double* z, const ConstVector& v);
+	void mult(double r);
+	double getNorm() const;
+	double getMax() const;
+	double getNorm1() const;
+	double dot(const Vector& y) const;
+	bool isFinite() const;
+	void print() const;
+
+	/* multiplies | alpha -beta1|           |b1|   |x1|
+                  |             |\otimes I .|  | = |  |
+                  | -beta2 alpha|           |b2|   |x2|
+	*/
+	static void mult2(double alpha, double beta1, double beta2,
+					  Vector& x1, Vector& x2,
+					  const Vector& b1, const Vector& b2);
+	/* same, but adds instead of set */
+	static void mult2a(double alpha, double beta1, double beta2,
+					   Vector& x1, Vector& x2,
+					   const Vector& b1, const Vector& b2);
+	/* same, but subtracts instead of add */
+	static void mult2s(double alpha, double beta1, double beta2,
+					   Vector& x1, Vector& x2,
+					   const Vector& b1, const Vector& b2)
+		{mult2a(-alpha, -beta1, -beta2, x1, x2, b1, b2);}
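+	/* For vectors of length 1 mult2 reduces to
+	     x1[0] = alpha*b1[0] - beta1*b2[0];
+	     x2[0] = alpha*b2[0] - beta2*b1[0];
+	   which matches the matrix above. */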
+private:
+	void copy(const double* d, int inc);
+	const Vector& operator=(int); // must not be used (not implemented)
+	const Vector& operator=(double); // must not be used (not implemented)
+};
+
+
+class BaseConstVector {
+protected:
+	int len;
+	int s;
+	const double* data;
+public:
+	BaseConstVector(int l, int si, const double* d) : len(l), s(si), data(d) {}
+	BaseConstVector(const BaseConstVector& v) : len(v.len), s(v.s), data(v.data) {}
+	const BaseConstVector& operator=(const BaseConstVector& v)
+		{len = v.len; s = v.s; data = v.data; return *this;}
+	const double& operator[](int i) const
+		{return data[s*i];}
+	const double* base() const
+		{return data;}
+	int length() const
+		{return len;}
+	int skip() const
+		{return s;}
+};
+
+class ConstGeneralMatrix;
+
+class ConstVector : public BaseConstVector {
+public:
+	ConstVector(const Vector& v) : BaseConstVector(v.length(), v.skip(), v.base()) {}
+	ConstVector(const ConstVector& v) : BaseConstVector(v) {}
+	ConstVector(const double* d, int l) : BaseConstVector(l, 1, d) {}
+	ConstVector(const Vector& v, int off, int l);
+	ConstVector(const ConstVector& v, int off, int l);
+	ConstVector(const double* d, int skip, int l);
+	ConstVector(const ConstGeneralMatrix& m, int col);
+	ConstVector(int row, const ConstGeneralMatrix& m);
+	
+	virtual ~ConstVector() {}
+	/** Exact equality. */
+	bool operator==(const ConstVector& y) const;
+	bool operator!=(const ConstVector& y) const
+		{return ! operator==(y);}
+	/** Lexicographic ordering. */
+	bool operator<(const ConstVector& y) const;
+	bool operator<=(const ConstVector& y) const
+		{return operator<(y) || operator==(y);}
+	bool operator>(const ConstVector& y) const
+		{return ! operator<=(y);}
+	bool operator>=(const ConstVector& y) const
+		{return ! operator<(y);}
+
+	double getNorm() const;
+	double getMax() const;
+	double getNorm1() const;
+	double dot(const ConstVector& y) const;
+	bool isFinite() const;
+	void print() const;
+};
+
+class ZeroPad {
+public:
+	static const int length = 16;
+private:
+	double pad[16];
+public:
+	ZeroPad();
+	const double* getBase() const {return pad;} 
+};
+
+#endif /* VECTOR_H */
+
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/sylv/cc/cppblas.h b/dynare++/sylv/cc/cppblas.h
new file mode 100644
index 0000000000000000000000000000000000000000..65df7aeb4bcad6abf6f504e5267f16ecce062e85
--- /dev/null
+++ b/dynare++/sylv/cc/cppblas.h
@@ -0,0 +1,68 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/cppblas.h,v 1.2 2004/11/24 20:42:52 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#ifndef CPPBLAS_H
+#define CPPBLAS_H
+
+#if defined(MATLAB) && !defined(__linux__)
+#define BLAS_dgemm dgemm
+#define BLAS_dgemv dgemv
+#define BLAS_dtrsv dtrsv
+#define BLAS_dtrmv dtrmv
+#define BLAS_daxpy daxpy
+#define BLAS_dcopy dcopy
+#define BLAS_zaxpy zaxpy
+#define BLAS_dscal dscal
+#define BLAS_dtrsm dtrsm
+#define BLAS_ddot  ddot
+#else /* defined(MATLAB) && !defined(__linux__) */
+#define BLAS_dgemm dgemm_
+#define BLAS_dgemv dgemv_
+#define BLAS_dtrsv dtrsv_
+#define BLAS_dtrmv dtrmv_
+#define BLAS_daxpy daxpy_
+#define BLAS_dcopy dcopy_
+#define BLAS_zaxpy zaxpy_
+#define BLAS_dscal dscal_
+#define BLAS_dtrsm dtrsm_
+#define BLAS_ddot  ddot_
+#endif /* defined(MATLAB) && !defined(__linux__) */
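+
+/* The underscore-suffixed names are the usual Fortran symbol mangling of
+ * g77/gfortran builds; the BLAS shipped with MATLAB on non-Linux platforms
+ * exports the plain names, hence the switch above. */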
+
+#define BLCHAR const char*
+#define CONST_BLINT const int*
+#define CONST_BLDOU const double*
+#define BLDOU double*
+
+extern "C" {
+	void BLAS_dgemm(BLCHAR transa, BLCHAR transb, CONST_BLINT m, CONST_BLINT n,
+					CONST_BLINT k, CONST_BLDOU alpha, CONST_BLDOU a, CONST_BLINT lda,
+					CONST_BLDOU b, CONST_BLINT ldb, CONST_BLDOU beta,
+					BLDOU c, CONST_BLINT ldc);
+	void BLAS_dgemv(BLCHAR trans, CONST_BLINT m, CONST_BLINT n, CONST_BLDOU alpha,
+					CONST_BLDOU a, CONST_BLINT lda, CONST_BLDOU x, CONST_BLINT incx,
+					CONST_BLDOU beta, BLDOU y, CONST_BLINT incy);
+	void BLAS_dtrsv(BLCHAR uplo, BLCHAR trans, BLCHAR diag, CONST_BLINT n,
+					CONST_BLDOU a, CONST_BLINT lda, BLDOU x, CONST_BLINT incx);
+	void BLAS_dtrmv(BLCHAR uplo, BLCHAR trans, BLCHAR diag, CONST_BLINT n,
+					CONST_BLDOU a, CONST_BLINT lda, BLDOU x, CONST_BLINT incx);
+	void BLAS_daxpy(CONST_BLINT n, CONST_BLDOU a, CONST_BLDOU x, CONST_BLINT incx,
+					BLDOU y, CONST_BLINT incy);
+	void BLAS_dcopy(CONST_BLINT n, CONST_BLDOU x, CONST_BLINT incx,
+					BLDOU y, CONST_BLINT incy);
+	void BLAS_zaxpy(CONST_BLINT n, CONST_BLDOU a, CONST_BLDOU x, CONST_BLINT incx,
+					BLDOU y, CONST_BLINT incy);
+	void BLAS_dscal(CONST_BLINT n, CONST_BLDOU a, BLDOU x, CONST_BLINT incx);
+	void BLAS_dtrsm(BLCHAR side, BLCHAR uplo, BLCHAR transa, BLCHAR diag, CONST_BLINT m,
+					CONST_BLINT n, CONST_BLDOU alpha, CONST_BLDOU a, CONST_BLINT lda,
+					BLDOU b, CONST_BLINT ldb);
+	double BLAS_ddot(CONST_BLINT n, CONST_BLDOU x, CONST_BLINT incx, CONST_BLDOU y,
+					 CONST_BLINT incy);
+};
+
+
+#endif /* CPPBLAS_H */
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/sylv/cc/cpplapack.h b/dynare++/sylv/cc/cpplapack.h
new file mode 100644
index 0000000000000000000000000000000000000000..bee6a4394160e29289cec8f2e3502337d6cae4e6
--- /dev/null
+++ b/dynare++/sylv/cc/cpplapack.h
@@ -0,0 +1,82 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/cc/cpplapack.h,v 1.3 2004/11/24 20:43:10 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#ifndef CPPLAPACK_H
+#define CPPLAPACK_H
+
+#if defined(MATLAB) && !defined(__linux__)
+#define LAPACK_dgetrs dgetrs
+#define LAPACK_dgetrf dgetrf
+#define LAPACK_dgees  dgees
+#define LAPACK_dgecon dgecon
+#define LAPACK_dtrexc dtrexc
+#define LAPACK_dtrsyl dtrsyl
+#define LAPACK_dpotrf dpotrf
+#define LAPACK_dgges  dgges
+#define LAPACK_dsyev  dsyev
+#define LAPACK_dsyevr dsyevr
+#else /* MATLAB */
+#define LAPACK_dgetrs dgetrs_
+#define LAPACK_dgetrf dgetrf_
+#define LAPACK_dgees  dgees_
+#define LAPACK_dgecon dgecon_
+#define LAPACK_dtrexc dtrexc_
+#define LAPACK_dtrsyl dtrsyl_
+#define LAPACK_dpotrf dpotrf_
+#define LAPACK_dgges  dgges_
+#define LAPACK_dsyev  dsyev_
+#define LAPACK_dsyevr dsyevr_
+#endif /* MATLAB */
+
+#define LACHAR const char*
+#define CONST_LAINT const int*
+#define LAINT int*
+#define CONST_LADOU const double*
+#define LADOU double*
+typedef int (*DGGESCRIT)(const double*, const double*, const double*);
+
+extern "C" {
+	void LAPACK_dgetrs(LACHAR trans, CONST_LAINT n, CONST_LAINT nrhs,
+					   CONST_LADOU a, CONST_LAINT lda, CONST_LAINT ipiv,
+					   LADOU b, CONST_LAINT ldb, LAINT info);
+	void LAPACK_dgetrf(CONST_LAINT m, CONST_LAINT n, LADOU a,
+					   CONST_LAINT lda, LAINT ipiv, LAINT info);
+	void  LAPACK_dgees(LACHAR jobvs, LACHAR sort, const void* select,
+					   CONST_LAINT n, LADOU a, CONST_LAINT lda, LAINT sdim,
+					   LADOU wr, LADOU wi, LADOU vs, CONST_LAINT ldvs,
+					   LADOU work, CONST_LAINT lwork, const void* bwork, LAINT info);
+	void LAPACK_dgecon(LACHAR norm, CONST_LAINT n, CONST_LADOU a, CONST_LAINT lda,
+					   CONST_LADOU anorm, LADOU rnorm, LADOU work, LAINT iwork,
+					   LAINT info);
+	void LAPACK_dtrexc(LACHAR compq, CONST_LAINT n, LADOU t, CONST_LAINT ldt,
+					   LADOU q, CONST_LAINT ldq, LAINT ifst, LAINT ilst, LADOU work,
+					   LAINT info);
+	void LAPACK_dtrsyl(LACHAR trana, LACHAR tranb, CONST_LAINT isgn, CONST_LAINT m,
+					   CONST_LAINT n, CONST_LADOU a, CONST_LAINT lda, CONST_LADOU b,
+					   CONST_LAINT ldb, LADOU c, CONST_LAINT ldc, LADOU scale,
+					   LAINT info);
+	void LAPACK_dpotrf(LACHAR uplo, CONST_LAINT n, LADOU a, CONST_LAINT lda,
+					   LAINT info);
+	void LAPACK_dgges(LACHAR jobvsl, LACHAR jobvsr, LACHAR sort, DGGESCRIT delztg,
+					  CONST_LAINT n, LADOU a, CONST_LAINT lda, LADOU b, CONST_LAINT ldb,
+					  LAINT sdim, LADOU alphar, LADOU alphai, LADOU beta,
+					  LADOU vsl, CONST_LAINT ldvsl, LADOU vsr, CONST_LAINT ldvsr,
+					  LADOU work, CONST_LAINT lwork, LAINT bwork, LAINT info);
+	void LAPACK_dsyev(LACHAR jobz, LACHAR uplo, CONST_LAINT n, LADOU a, CONST_LAINT lda,
+					  LADOU w, LADOU work, CONST_LAINT lwork, LAINT info); 
+	void LAPACK_dsyevr(LACHAR jobz, LACHAR range, LACHAR uplo, CONST_LAINT n, LADOU a,
+					   CONST_LAINT lda, LADOU lv, LADOU vu, CONST_LAINT il, CONST_LAINT iu,
+					   CONST_LADOU abstol, LAINT m, LADOU w, LADOU z, CONST_LAINT ldz,
+					   LAINT isuppz, LADOU work, CONST_LAINT lwork, LAINT iwork, CONST_LAINT liwork,
+					   LAINT info);
+};
+
+
+#endif /* CPPLAPACK_H */
+
+
+// Local Variables:
+// mode:C++
+// End:
+
diff --git a/dynare++/sylv/change_log.html b/dynare++/sylv/change_log.html
new file mode 100644
index 0000000000000000000000000000000000000000..8fd665f356e4165822b91b00d02f1a56e1845783
--- /dev/null
+++ b/dynare++/sylv/change_log.html
@@ -0,0 +1,140 @@
+<HTML>
+<!-- $Header: /var/lib/cvs/dynare_cpp/sylv/change_log.html,v 1.1.1.1 2004/06/04 13:00:05 kamenik Exp $ -->
+<!-- Tag $Name:  $ -->
+<TITLE>
+Sylvester Solver Change Log
+</TITLE>
+<BODY>
+<TABLE CELLSPACING=2 ALIGN="CENTER" BORDER=1>
+<TR>
+<TD BGCOLOR="#d0d0d0" WIDTH="85"> Tag    </TD>
+<TD BGCOLOR="#d0d0d0" WIDTH="80"> Date   </TD>
+<TD BGCOLOR="#d0d0d0" WIDTH="600"> Description/Changes</TD>
+</TR>
+<TR>
+<TD></TD>
+<TD>2003/09/10</TD>
+<TD>Initial version solving triangular system put to repository</TD>
+</TR>
+<TR>
+<TD></TD>
+<TD></TD>
+<TD>Implemented solution of general case.</TD>
+</TR>
+<TR>
+<TD></TD>
+<TD></TD>
+<TD>Implemented a memory pool (Paris).</TD>
+</TR>
+<TR>
+<TD></TD>
+<TD></TD>
+<TD>Implemented MEX interface to the routine (Paris).</TD>
+</TR>
+<TR>
+<TD></TD>
+<TD></TD>
+<TD>Implemented QuasiTriangularZero (Paris) (not fully used yet).</TD>
+</TR>
+<TR>
+<TD>rel-1</TD>
+<TD>2003/10/02</TD>
+<TD>Version sent to Michel.</TD>
+</TR>
+<TR>
+<TD></TD>
+<TD></TD>
+<TD>Inheritance streamlined, QuasiTriangular inherits from GeneralMatrix.</TD>
+</TR>
+<TR>
+<TD></TD>
+<TD></TD>
+<TD>Implemented block diagonalization algorithm.</TD>
+</TR>
+<TR>
+<TD></TD>
+<TD></TD>
+<TD>Solution routines rewritten so that the output overwrites the input,
+a considerable memory improvement.</TD>
+</TR>
+<TR>
+<TD></TD>
+<TD></TD>
+<TD>MEX interface now links with LAPACK library from Matlab.</TD>
+</TR>
+<TR>
+<TD></TD>
+<TD></TD>
+<TD>Added a hack to MEX library loading in order to avoid a Matlab crash on Windows.</TD>
+</TR>
+<TR>
+<TD>rel-2</TD>
+<TD>2003/10/15</TD>
+<TD>Version sent to Michel.</TD>
+</TR>
+<TR>
+<TD></TD>
+<TD></TD>
+<TD>KronUtils now overwrites the input with the output, using less memory.</TD>
+</TR>
+<TR>
+<TD></TD>
+<TD></TD>
+<TD>Added iterative solution algorithm (doubling).</TD>
+</TR>
+<TR>
+<TD></TD>
+<TD></TD>
+<TD>Introduced abstraction for set of parameters (SylvParams).</TD>
+</TR>
+<TR>
+<TD></TD>
+<TD></TD>
+<TD>Algorithm enabled to solve problems with singular C.</TD>
+</TR>
+<TR>
+<TD></TD>
+<TD></TD>
+<TD>Implemented a class chooser choosing between QuasiTriangularZero
+and QuasiTriangular (padded with zeros) depending on the size of the
+problem. Full use of QuasiTriangularZero.</TD>
+</TR>
+<TR>
+<TD></TD>
+<TD></TD>
+<TD>Reimplemented QuasiTriangular::solve; off-diagonal elements are
+eliminated by Gaussian elimination with partial pivoting, not by transformation of
+complex eigenvalues. More stable for ill-conditioned eigenvalues.</TD>
+</TR>
+<TR>
+<TD></TD>
+<TD></TD>
+<TD>Reimplemented calculation of eliminating vectors, much more
+numerically stable now.</TD>
+</TR>
+<TR>
+<TD></TD>
+<TD></TD>
+<TD>Implemented algorithm for ordering of eigenvalues (not used now,
+no numerical improvements).</TD>
+</TR>
+<TR>
+<TD>rel-3</TD>
+<TD>2003/12/4</TD>
+<TD>Version sent to Michel.</TD>
+</TR>
+<TR>
+<TD></TD>
+<TD></TD>
+<TD>GeneralMatrix separated for use outside; in the sylv module we use
+its subclass SylvMatrix. Implemented ConstGeneralMatrix (useful outside).
+</TD>
+</TR>
+<TR>
+<TD>rel-4</TD>
+<TD>2004/6/4</TD>
+<TD>Version that was moved to the pythie.cepremap.cnrs.fr repository.</TD>
+</TR>
+</TABLE>
+</BODY>
+</HTML>
diff --git a/dynare++/sylv/matlab/Makefile b/dynare++/sylv/matlab/Makefile
new file mode 100755
index 0000000000000000000000000000000000000000..0cad0930828e22eed773b0d06400830e14b733aa
--- /dev/null
+++ b/dynare++/sylv/matlab/Makefile
@@ -0,0 +1,35 @@
+# $Header: /var/lib/cvs/dynare_cpp/sylv/matlab/Makefile,v 1.1.1.1 2004/06/04 13:01:13 kamenik Exp $
+
+# Tag $Name:  $
+
+# set directory to sylvester code
+sylv=../cc
+
+LD_LIBS := -llapack -lcblas -lblas -latlas
+CC_FLAGS := -Wall -I../cc
+CC_DEFS := -I/usr/local/matlab73/extern/include -DMATLAB #-DUSE_MEMORY_POOL
+MEX := mex.bat
+
+objects := $(patsubst %.cpp,%.o,$(wildcard ../cc/*.cpp)) 
+headers := $(wildcard ../cc/*.h)
+
+# set mex file suffix
+mex_suffix=dll
+ifeq ($(OSTYPE),linux-gnu)
+	mex_suffix=mexglx
+endif
+
+aa_gensylv.$(mex_suffix): gensylv.cpp ../cc/sylvester.a
+	$(MEX) -I../cc -DMATLAB gensylv.cpp ../cc/sylvester.a c:/matlab6p5/extern/lib/win32/microsoft/msvc60/libmwlapack.lib
+	mv gensylv.$(mex_suffix) aa_gensylv.$(mex_suffix)
+
+../cc/sylvester.a : $(objects)
+	make -C ../cc sylvester.a
+
+../cc/%.o: ../cc/%.cpp $(headers)
+	make EXTERN_DEFS="$(CC_DEFS)" -C ../cc $*.o
+
+clear:
+	make -C ../cc clear
+	rm -f *.dll
+	rm -f *.mexglx
diff --git a/dynare++/sylv/matlab/dummy.h b/dynare++/sylv/matlab/dummy.h
new file mode 100755
index 0000000000000000000000000000000000000000..9ce95e82b593b840c5382b66d3b64eea85266f7e
--- /dev/null
+++ b/dynare++/sylv/matlab/dummy.h
@@ -0,0 +1,3 @@
+/* This is only a dummy header file so that MATLAB's loadlibrary can load the MEX file. */
+void mexFunction(int nlhs, mxArray* plhs[],
+		 int nrhs, const mxArray* prhs[]);
diff --git a/dynare++/sylv/matlab/gensylv.cpp b/dynare++/sylv/matlab/gensylv.cpp
new file mode 100755
index 0000000000000000000000000000000000000000..9cfad430024f087ebb5a3febda3e13989d100414
--- /dev/null
+++ b/dynare++/sylv/matlab/gensylv.cpp
@@ -0,0 +1,100 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/matlab/gensylv.cpp,v 1.1.1.1 2004/06/04 13:01:13 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+
+#include "mex.h"
+
+#include "GeneralSylvester.h"
+#include "SylvException.h"
+
+
+void gen_sylv_solve(int order, int n, int m, int zero_cols,
+					const double* A, const double* B,
+					const double* C, double* X)
+{
+	try {
+		GeneralSylvester s(order, n, m, zero_cols, A, B, C, X, false);
+		s.solve();
+	} catch (const SylvException& e) {
+		char mes[1000];
+		e.printMessage(mes, 999);
+		mexErrMsgTxt(mes);
+	}
+}
+
+void gen_sylv_solve_and_check(int order, int n, int m, int zero_cols,
+							  const double* A, const double* B,
+							  const double* C, const double* D,
+							  double* X, mxArray*& p)
+{
+	try {
+		GeneralSylvester s(order, n, m, zero_cols, A, B, C, X, true);
+		s.solve();
+		s.check(D);
+		p = s.getParams().createStructArray();
+	} catch (const SylvException& e) {
+		char mes[1000];
+		e.printMessage(mes, 999);
+		mexErrMsgTxt(mes);
+	}
+}
+
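+// Check that the inputs conform to the equation A*X + [0 B]*X*kron(C,...,C) = D
+// solved by this MEX file (see gensylv.m for the meaning of the dimensions).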
+void checkDimensions(const mwSize* const Adims, const mwSize* const Bdims,
+					 const mwSize* const Cdims, const mwSize* const Ddims,
+					 int order)
+{
+	if (Adims[0] != Adims[1])
+		mexErrMsgTxt("Matrix A must be a square matrix.");
+	if (Adims[0] != Bdims[0])
+		mexErrMsgTxt("Matrix A and matrix B must have the same number of rows.");
+	if (Adims[0] != Ddims[0])
+		mexErrMsgTxt("Matrix A and matrix D must have the same number of rows.");
+	if (Cdims[0] != Cdims[1])
+		mexErrMsgTxt("Matrix C must be square.");
+	if (Bdims[0] < Bdims[1])
+		mexErrMsgTxt("Matrix B must not have more columns than rows.");
+	if (Ddims[1] != power(Cdims[0], order))
+		mexErrMsgTxt("Matrix D has wrong number of columns.");
+}
+
+extern "C" {
+	void mexFunction(int nhls, mxArray* plhs[],
+					 int nhrs, const mxArray* prhs[])
+	{
+		if (nhrs != 5)
+			mexErrMsgTxt("Must have exactly 5 input args.");
+		if (nhls !=1 && nhls != 2)
+			mexErrMsgTxt("Must have 1 or 2 output args.");
+
+		int order = (int)mxGetScalar(prhs[0]);
+		const mxArray* const A = prhs[1];
+		const mxArray* const B = prhs[2];
+		const mxArray* const C = prhs[3];
+		const mxArray* const D = prhs[4];
+		const mwSize* const Adims = mxGetDimensions(A);
+		const mwSize* const Bdims = mxGetDimensions(B);
+		const mwSize* const Cdims = mxGetDimensions(C);
+		const mwSize* const Ddims = mxGetDimensions(D);
+		checkDimensions(Adims, Bdims, Cdims, Ddims, order);
+		int n = Adims[0];
+		int m = Cdims[0];
+		int zero_cols = Bdims[0] - Bdims[1];
+		mxArray* X = mxCreateDoubleMatrix(Ddims[0], Ddims[1], mxREAL);
+		// copy D to X
+		Vector Xvec((double*)mxGetPr(X), power(m, order)*n);
+		ConstVector Dvec((double*)mxGetPr(D), power(m, order)*n);
+		Xvec = Dvec;
+		// solve (or solve and check)
+		if (nlhs == 1) {
+			gen_sylv_solve(order, n, m, zero_cols,
+						   mxGetPr(A), mxGetPr(B), mxGetPr(C),
+						   mxGetPr(X));
+		} else if (nlhs == 2) {
+			gen_sylv_solve_and_check(order, n, m, zero_cols,
+									 mxGetPr(A), mxGetPr(B), mxGetPr(C),
+									 mxGetPr(D), mxGetPr(X), plhs[1]);
+		}
+		plhs[0] = X;
+	}
+};
diff --git a/dynare++/sylv/matlab/gensylv.m b/dynare++/sylv/matlab/gensylv.m
new file mode 100755
index 0000000000000000000000000000000000000000..b6cb0dccb740dc1fb6cba45a79e66221ea71964a
--- /dev/null
+++ b/dynare++/sylv/matlab/gensylv.m
@@ -0,0 +1,74 @@
+%
+% GENSYLV solves the following matrix equation:
+%           A*X + [0 B]*X*kron(C,..,C) = D,
+%  where
+%       A ...... regular (n,n) matrix,
+%       [0 B] .. (n,n) matrix whose first few
+%                columns are zero
+%       B ...... rectangular (n, nz) matrix with nz<=n
+%       C ...... regular (m,m) matrix, whose spectrum is
+%                within (-1, 1)
+%       kron(C,..,C)
+%         ...... Kronecker power of C of order 'order'
+%       D .....  rectangular (n, m^order) matrix.
+%
+% X = gensylv(order, A, B, C, D)
+%       returns X as the solution, doesn't perform any checks
+%
+% [X, par] = gensylv(order, A, B, C, D)
+%       solves the system and performs checks. 'par' is a struct
+%       containing information about the solution and the error norms
+%       returned by the check. The struct members are listed below;
+%       some of them may be missing in the actual returned
+%       value:
+%       method     = method used for solution recursive/iterative
+%       convergence_tol = convergence tolerance for iter. method
+%       max_num_iter    = max number of steps for iter. method
+%       bs_norm    = Bavely Stewart log10 norm for diagonalization
+%       converged  = convergence status for iterative method
+%       iter_last_norm  = residual norm of the last step of iterations
+%       num_iter   = number of iterations performed
+%       f_err1     = norm 1 of abs. error C-V*F*inv(V)
+%       f_errI     = norm Inf of abs. error C-V*F*inv(V)
+%       viv_err1   = norm 1 of abs. error I-V*inv(V)
+%       viv_errI   = norm Inf of abs. error I-V*inv(V)
+%       ivv_err1   = norm 1 of abs. error I-inv(V)*V
+%       ivv_errI   = norm Inf of abs. error I-inv(V)*V
+%       f_blocks   = number of diagonal blocks of F
+%       f_largest  = size of largest diagonal block in F
+%       f_zeros    = number of off diagonal zeros in F
+%       f_offdiag  = number of all offdiagonal elements in F
+%       rcondA1    = reciprocal cond 1 number of A
+%       rcondAI    = reciprocal cond Inf number of A
+%       eig_min    = minimum eigenvalue of vectorized system
+%       mat_err1   = rel. matrix 1 norm of A*X-[0 B]*X*kron(C,..,C)-D
+%       mat_errI   = rel. matrix Inf norm of       --"--
+%       mat_errF   = rel. matrix Frobenius norm of --"--
+%       vec_err1   = rel. vector 1 norm of         --"--
+%       vec_errI   = rel. vector Inf norm of       --"--
+%       cpu_time   = CPU time needed for solution in CPU seconds
+%
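+% Example (illustrative only; the values below are hypothetical and are
+% chosen merely to satisfy the dimension and spectrum requirements
+% stated above):
+%
+%   n = 3; m = 2; order = 2;
+%   A = eye(n) + 0.1*randn(n);     % regular (n,n)
+%   B = randn(n, 2);               % nz = 2 <= n
+%   C = 0.5*eye(m);                % spectrum inside (-1,1)
+%   D = randn(n, m^order);
+%   X = gensylv(order, A, B, C, D);
+%   % the residual A*X + [zeros(n,1) B]*X*kron(C,C) - D should be small
+%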
+
+% $Header: /var/lib/cvs/dynare_cpp/sylv/matlab/gensylv.m,v 1.1.1.1 2004/06/04 13:01:13 kamenik Exp $
+% Tag $Name:  $
+
+function [X, varargout] = gensylv(order, A, B, C, D)
+
+% On Windows, ensure that aa_gensylv.dll is loaded; this prevents the
+% function from being cleared and a subsequent Matlab crash
+if strcmp('PCWIN', computer)
+  if ~ libisloaded('aa_gensylv') 
+    loadlibrary('aa_gensylv', 'dummy');
+  end
+end
+
+% launch aa_gensylv
+if nargout == 1
+  X = aa_gensylv(order, A, B, C, D);
+elseif nargout == 2
+  [X, par] = aa_gensylv(order, A, B, C, D);
+  varargout(1) = {par};
+else
+  error('Must have 1 or 2 output arguments.');
+end
+  
\ No newline at end of file
diff --git a/dynare++/sylv/matlab/mexopts.bat b/dynare++/sylv/matlab/mexopts.bat
new file mode 100755
index 0000000000000000000000000000000000000000..fe71cb2a0c71504ff6e52ef7ed4c8e30c481084e
--- /dev/null
+++ b/dynare++/sylv/matlab/mexopts.bat
@@ -0,0 +1,59 @@
+@echo off
+rem $Header: /var/lib/cvs/dynare_cpp/sylv/matlab/mexopts.bat,v 1.1.1.1 2004/06/04 13:01:13 kamenik Exp $
+rem Tag $Name:  $
+rem c:\ondra\wk\sylv\matlab\mexopts.bat
+rem Generated by gnumex.m script in c:\fs\gnumex
+rem gnumex version: 1.08
+rem Compile and link options used for building MEX etc files with
+rem the Mingw/Cygwin tools.  Options here are:
+rem Cygwin (cygwin*.dll) linking
+rem Mex file creation
+rem Standard (safe) linking to temporary libraries
+rem Language C / C++
+rem Matlab version 6.5
+rem
+set MATLAB=C:\MATLAB~2
+set GM_PERLPATH=C:\MATLAB~2\sys\perl\win32\bin\perl.exe
+set GM_UTIL_PATH=c:\fs\gnumex
+set PATH=C:\fs\cygwin\bin;%PATH%
+rem
+rem Added libraries for linking
+set GM_ADD_LIBS=
+rem
+rem Type of file to compile (mex or engine)
+set GM_MEXTYPE=mex
+rem
+rem Language for compilation
+set GM_MEXLANG=c
+rem
+rem def files to be converted to libs
+set GM_DEFS2LINK=libmx.def;libmex.def;libmat.def;_libmatlbmx.def;
+rem
+rem dlltool command line
+set GM_DLLTOOL=c:\fs\gnumex\mexdlltool -E --as C:\fs\cygwin\bin\as.exe
+rem
+rem compiler options; add compiler flags to compflags as desired
+set NAME_OBJECT=-o
+set COMPILER=gcc
+set COMPFLAGS=-c -DMATLAB_MEX_FILE 
+set OPTIMFLAGS=-O3 -mcpu=pentium -malign-double
+set DEBUGFLAGS=-g
+set CPPCOMPFLAGS=%COMPFLAGS% -x c++ 
+set CPPOPTIMFLAGS=%OPTIMFLAGS%
+set CPPDEBUGFLAGS=%DEBUGFLAGS%
+rem
+rem NB Library creation commands occur in linker scripts
+rem
+rem Linker parameters
+set LINKER=%GM_PERLPATH% %GM_UTIL_PATH%\linkmex.pl
+set LINKFLAGS=
+set CPPLINKFLAGS= --driver-name c++ 
+set LINKOPTIMFLAGS=-s
+set LINKDEBUGFLAGS=-g
+set LINK_FILE=
+set LINK_LIB=
+set NAME_OUTPUT=-o %OUTDIR%%MEX_NAME%.dll
+rem
+rem Resource compiler parameters
+set RC_COMPILER=%GM_PERLPATH% %GM_UTIL_PATH%\rccompile.pl --unix -o %OUTDIR%mexversion.res
+set RC_LINKER=
diff --git a/dynare++/sylv/sylvester.tex b/dynare++/sylv/sylvester.tex
new file mode 100644
index 0000000000000000000000000000000000000000..a5476183acc31fc59475ab873eb9ec7dfc93fab3
--- /dev/null
+++ b/dynare++/sylv/sylvester.tex
@@ -0,0 +1,541 @@
+\input amstex
+\documentstyle{amsppt}
+\def\vec{\mathop{\hbox{vec}}}
+\def\otimesi{\mathop{\overset{\ssize i}\to{\otimes}}}
+\def\iF{\,^i\!F}
+\def\imF{\,^{i-1}\!F}
+\def\solvi{\bold{solv1}}
+\def\solvii{\bold{solv2}}
+\def\solviip{\bold{solv2p}}
+
+\topmatter
+\title Solution of Specialized Sylvester Equation\endtitle
+\author Ondra Kamenik\endauthor
+\email ondrej.kamenik@cnb.cz\endemail
+\endtopmatter
+
+\document
+Consider the following matrix equation
+$$AX+BX\left(\otimesi C\right)=D,$$ where $A$ is a regular $n\times n$
+matrix, $X$ is an $n\times m^i$ matrix of unknowns, $B$ is a singular
+$n\times n$ matrix, $C$ is an $m\times m$ regular matrix with
+$|\beta(C)|<1$ (i.e. the modulus of the largest eigenvalue is less than one),
+$i$ is the order of the Kronecker product, and finally $D$ is an $n\times m^i$
+matrix.
+
+First we multiply the equation from the left by $A^{-1}$ to obtain:
+$$X+A^{-1}BX\left(\otimesi C\right)=A^{-1}D$$
+Then we find the real Schur decompositions $K=UA^{-1}BU^T$ and
+$F=VCV^T$. The equation can be written as
+$$UX\left(\otimesi V^T\right) +
+KUX\left(\otimesi V^T\right)\left(\otimesi F\right) =
+UA^{-1}D\left(\otimesi V^T\right)$$
+This can be rewritten as
+$$Y + KY\left(\otimesi F\right)=\widehat{D},$$
+and vectorized
+$$\left(I+\otimesi F^T\otimes K\right)\vec(Y)=\vec(\widehat{D})$$
+Let $\iF$ denote $\otimesi F^T$ for the rest of the text.
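+
+(A purely illustrative size check: the vectorized system above has
+dimension $nm^i$, so already for, say, $n=50$, $m=5$ and $i=3$ it is of
+order $50\cdot 5^3=6250$; the algorithm described below therefore works
+with the Kronecker structure rather than forming this matrix explicitly.)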
+
+\proclaim{Lemma 1}
+For any $n\times n$ matrix $A$ and $\beta_1\beta_2>0$, if there is
+exactly one solution of
+$$\left(I_2\otimes I_n +
+\pmatrix\alpha&\beta_1\cr-\beta_2&\alpha\endpmatrix
+\otimes A\right)\pmatrix x_1\cr x_2\endpmatrix = 
+\pmatrix d_1\cr d_2\endpmatrix,$$
+then it can be obtained as solution of
+$$\align
+\left(I_n + 2\alpha A+(\alpha^2+\beta^2)A^2\right)x_1 & = 
+\widehat{d_1}\\
+\left(I_n + 2\alpha A+(\alpha^2+\beta^2)A^2\right)x_2 & = 
+\widehat{d_2}
+ \endalign$$
+where $\beta=\sqrt{\beta_1\beta_2}$, and
+$$
+\pmatrix \widehat{d_1}\cr\widehat{d_2}\endpmatrix =
+\left(I_2\otimes I_n +
+\pmatrix\alpha&-\beta_1\cr\beta_2&\alpha\endpmatrix
+\otimes A\right)\pmatrix d_1\cr d_2\endpmatrix$$
+\endproclaim
+
+\demo{Proof} Since 
+$$
+\pmatrix \alpha&\beta_1\cr-\beta_2&\alpha\endpmatrix
+\pmatrix \alpha&-\beta_1\cr\beta_2&\alpha\endpmatrix
+=
+\pmatrix \alpha&-\beta_1\cr\beta_2&\alpha\endpmatrix
+\pmatrix \alpha&\beta_1\cr-\beta_2&\alpha\endpmatrix
+=
+\pmatrix \alpha^2+\beta^2&0\cr 0&\alpha^2+\beta^2\endpmatrix,
+$$
+it is easy to see that if the equation is multiplied by
+$$I_2\otimes I_n +
+\pmatrix\alpha&-\beta_1\cr\beta_2&\alpha\endpmatrix
+\otimes A$$
+we obtain the result. We only need to prove that the matrix is
+regular. But this is clear because matrix
+$$\pmatrix \alpha&-\beta_1\cr\beta_2&\alpha\endpmatrix$$
+collapses an eigenvalue of $A$ to $-1$ iff the matrix
+$$\pmatrix \alpha&\beta_1\cr-\beta_2&\alpha\endpmatrix$$
+does.\qed
+\enddemo
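+
+As a quick sanity check of Lemma 1, consider the scalar case $n=1$,
+$A=(a)$. The system reads
+$$\pmatrix 1+\alpha a&\beta_1 a\cr -\beta_2 a&1+\alpha a\endpmatrix
+\pmatrix x_1\cr x_2\endpmatrix=\pmatrix d_1\cr d_2\endpmatrix,$$
+and multiplying by the conjugated matrix of the lemma yields
+$(1+\alpha a)^2+\beta_1\beta_2a^2=1+2\alpha a+(\alpha^2+\beta^2)a^2$ on
+the diagonal, which is exactly the scalar form of the two decoupled
+equations above.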
+
+\proclaim{Lemma 2}
+For any $n\times n$ matrix $A$ and $\delta_1\delta_2>0$, if there is
+exactly one solution of
+$$\left(I_2\otimes I_n +
+2\alpha\pmatrix\gamma&\delta_1\cr-\delta_2&\gamma\endpmatrix\otimes A
++(\alpha^2+\beta^2)
+\pmatrix\gamma&\delta_1\cr-\delta_2&\gamma\endpmatrix^2\otimes A^2\right)
+\pmatrix x_1\cr x_2\endpmatrix=
+\pmatrix d_1\cr d_2\endpmatrix
+$$
+it can be obtained as
+$$
+\align
+\left(I_n+2a_1A+(a_1^2+b_1^2)A^2\right)\left(I_n+2a_2A+(a_2^2+b_2^2)A^2\right)
+x_1 & =\widehat{d_1}\\
+\left(I_n+2a_1A+(a_1^2+b_1^2)A^2\right)\left(I_n+2a_2A+(a_2^2+b_2^2)A^2\right)
+x_2 & =\widehat{d_2}
+\endalign$$
+where
+$$
+\pmatrix \widehat{d_1}\cr\widehat{d_2}\endpmatrix =
+\left(I_2\otimes I_n +
+2\alpha\pmatrix\gamma&-\delta_1\cr\delta_2&\gamma\endpmatrix\otimes A
++(\alpha^2+\beta^2)
+\pmatrix\gamma&-\delta_1\cr\delta_2&\gamma\endpmatrix^2\otimes A^2\right)
+\pmatrix d_1\cr d_2\endpmatrix
+$$
+and
+$$
+\align
+a_1 & = \alpha\gamma - \beta\delta\\
+b_1 & = \alpha\delta + \gamma\beta\\
+a_2 & = \alpha\gamma + \beta\delta\\
+b_2 & = \alpha\delta - \gamma\beta\\
+\delta & = \sqrt{\delta_1\delta_2}
+\endalign$$
+\endproclaim
+
+\demo{Proof}
+The matrix can be written as
+$$\left(I_2\otimes I_n+(\alpha+\roman i\beta)
+\pmatrix\gamma&\delta_1\cr-\delta_2&\gamma\endpmatrix\otimes A\right)
+\left(I_2\otimes I_n +(\alpha-\roman i\beta)
+\pmatrix\gamma&\delta_1\cr-\delta_2&\gamma\endpmatrix\otimes A\right).
+$$
+Note that both matrices are regular since their product is
+regular. For the same reason as in the previous proof, the following
+matrix is also regular
+$$\left(I_2\otimes I_n+(\alpha+\roman i\beta)
+\pmatrix\gamma&-\delta_1\cr\delta_2&\gamma\endpmatrix\otimes A\right)
+\left(I_2\otimes I_n +(\alpha-\roman i\beta)
+\pmatrix\gamma&-\delta_1\cr\delta_2&\gamma\endpmatrix\otimes A\right),
+$$
+and we may multiply the equation by this matrix obtaining
+$\widehat{d_1}$ and $\widehat{d_2}$. Note that the four matrices
+commute, that is why we can write the whole product as
+$$
+\align
+\left(I_2\otimes I_n + (\alpha+\roman i\beta)
+\pmatrix\gamma&\delta_1\cr-\delta_2&\gamma\endpmatrix\otimes A\right)\cdot
+\left(I_2\otimes I_n + (\alpha+\roman i\beta)
+\pmatrix\gamma&-\delta_1\cr\delta_2&\gamma\endpmatrix\otimes A\right)&\cdot\\
+\left(I_2\otimes I_n + (\alpha-\roman i\beta)
+\pmatrix\gamma&\delta_1\cr-\delta_2&\gamma\endpmatrix\otimes A\right)\cdot
+\left(I_2\otimes I_n + (\alpha-\roman i\beta)
+\pmatrix\gamma&-\delta_1\cr\delta_2&\gamma\endpmatrix\otimes A\right)&=\\
+\left(I_2\otimes I_n + 2(\alpha + \roman i\beta)
+\pmatrix\gamma&0\cr 0&\gamma\endpmatrix\otimes A +
+(\alpha + \roman i\beta)^2
+\pmatrix\gamma^2+\delta^2&0\cr 0&\gamma^2+\delta^2\endpmatrix\otimes A^2
+\right)&\cdot\\
+\left(I_2\otimes I_n + 2(\alpha - \roman i\beta)
+\pmatrix\gamma&0\cr 0&\gamma\endpmatrix\otimes A +
+(\alpha - \roman i\beta)^2
+\pmatrix\gamma^2+\delta^2&0\cr 0&\gamma^2+\delta^2\endpmatrix\otimes A^2
+\right)&
+\endalign
+$$
+The product is block diagonal, consisting of two identical $n\times n$ blocks.
+The block can be rewritten as the product:
+$$
+\align
+(I_n+(\alpha+\roman i\beta)(\gamma+\roman i\delta)A)\cdot
+(I_n+(\alpha+\roman i\beta)(\gamma-\roman i\delta)A)&\cdot\\
+(I_n+(\alpha-\roman i\beta)(\gamma+\roman i\delta)A)\cdot
+(I_n+(\alpha-\roman i\beta)(\gamma-\roman i\delta)A)&
+\endalign
+$$
+and after reordering
+$$
+\align
+(I_n+(\alpha+\roman i\beta)(\gamma+\roman i\delta)A)\cdot
+(I_n+(\alpha-\roman i\beta)(\gamma-\roman i\delta)A)&\cdot\\
+(I_n+(\alpha+\roman i\beta)(\gamma-\roman i\delta)A)\cdot
+(I_n+(\alpha-\roman i\beta)(\gamma+\roman i\delta)A)&=\\
+(I_n+2(\alpha\gamma-\beta\delta)A+
+(\alpha^2+\beta^2)(\gamma^2+\delta^2)A^2)&\cdot\\
+(I_n+2(\alpha\gamma+\beta\delta)A+
+(\alpha^2+\beta^2)(\gamma^2+\delta^2)A^2)&
+\endalign
+$$
+Now it suffices to compare $a_1=\alpha\gamma-\beta\delta$ and verify
+that 
+$$
+\align
+b_1^2 & = (\alpha^2+\beta^2)(\gamma^2+\delta^2)-a_1^2 =\cr
+      & = \alpha^2\gamma^2+\beta^2\gamma^2+\alpha^2\delta^2+\beta^2\delta^2-
+          (\alpha\gamma)^2+2\alpha\beta\gamma\delta-(\beta\delta)^2=\cr
+      & = (\beta\gamma)^2 + (\alpha\delta)^2 + 2\alpha\beta\gamma\delta=\cr
+      & = (\beta\gamma +\alpha\delta)^2
+\endalign
+$$
+For $b_2$ it is done in the same way.
+\qed
+\enddemo
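+
+A compact way to read the coefficients of Lemma 2 is
+$$a_1+\roman i b_1=(\alpha+\roman i\beta)(\gamma+\roman i\delta),\qquad
+a_2+\roman i b_2=(\alpha-\roman i\beta)(\gamma+\roman i\delta),$$
+which is precisely how they arise from the complex factorization used
+in the proof.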
+
+\head The Algorithm\endhead
+
+Below we define three functions of which
+$\vec(Y)=\solvi(1,\vec(\widehat{D}),i)$ provides the solution $Y$. $X$
+is then obtained as $X=U^TY\left(\otimesi V\right)$.
+
+\subhead Synopsis\endsubhead
+
+$F^T$ is an $m\times m$ lower quasi-triangular matrix. Let $m_r$ be the
+number of real eigenvalues and $m_c$ the number of complex pairs; thus
+$m=m_r+2m_c$. Let $F_j$ denote the
+$j$-th diagonal block of $F^T$ (a $1\times 1$ or $2\times 2$ matrix) for
+$j=1,\ldots, m_r+m_c$. For a fixed $j$, let $\bar j$ denote the index of the
+first column of $F_j$ in $F^T$. Whenever we write something like
+$(I_{m^i}\otimes I_n+r\iF\otimes K)x = d$, $x$ and $d$ denote column
+vectors of appropriate dimensions, $x_{\bar j}$ is the $\bar j$-th
+partition of $x$, $x_j=(x_{\bar j}\quad x_{\bar j+1})^T$ if the $j$-th
+eigenvalue pair is complex, and $x_j=x_{\bar j}$ if the $j$-th eigenvalue is real.
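+
+For instance, with $m_r=1$ and $m_c=1$ (so $m=3$), $F^T$ has the
+diagonal blocks $F_1=(f_{11})$ and
+$F_2=\pmatrix\alpha&\beta_1\cr-\beta_2&\alpha\endpmatrix$ with
+$\bar 1=1$ and $\bar 2=2$. A vector $x$ of length $m^in$ then splits
+into $m=3$ partitions of length $m^{i-1}n$ each; in the notation above,
+$x_1=x_{\bar 1}$ is the first partition, while
+$x_2=(x_{\bar 2}\quad x_{\bar 2+1})^T$ stacks the second and third
+partitions.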
+
+\subhead Function $\solvi$\endsubhead
+
+The function $x=\solvi(r,d,i)$ solves equation
+$$\left(I_{m^i}\otimes I_n+r\iF\otimes K\right)x = d.$$
+The function proceeds as follows:
+
+If $i=0$, the equation is solved directly; $K$ is an upper
+quasi-triangular matrix, so this is easy.
+
+If $i>0$, then we go through diagonal blocks $F_j$ for
+$j=1,\ldots, m_r+m_c$ and perform:
+\roster
+\item if $F_j=(f_{\bar j\bar j}) = (f)$, then we return
+$x_j=\solvi(rf,d_{\bar j}, i-1)$. Then we precalculate $y=d_j-x_j$ and
+eliminate the entries below $F_j$. That is, for each $k=\bar j+1,\ldots, m$, we
+put
+$$d_k = d_k-rf_{\bar jk}\left(\imF\otimes K\right)x_{\bar j}=
+d_k - \frac{f_{\bar jk}}{f}y$$
+
+\item if $F_j=\pmatrix\alpha&\beta_1\cr -\beta_2&\alpha\endpmatrix$,
+we return $x_j=\solvii(r\alpha, r\beta_1, r\beta_2, d_j, i-1)$. Then
+we precalculate 
+$$y=\left(\pmatrix\alpha&-\beta_1\cr \beta_2&\alpha\endpmatrix
+\otimes I_{m^{i-1}}\otimes I_n\right)
+\pmatrix d_{\bar j} - x_{\bar j}\cr
+         d_{\bar j+1} - x_{\bar j+1}
+\endpmatrix$$
+and eliminate the entries below $F_j$. That is, for each $k=\bar j+2,\ldots, m$
+we put
+$$
+\align
+d_k &= d_k - r(f_{{\bar j}k}\quad f_{{\bar j}+1 k})
+               \otimes\left(\imF\otimes K\right)x_j\\
+    &= d_k - \frac{1}{\alpha^2+\beta_1\beta_2}
+              \left((f_{{\bar j}k}\quad f_{{\bar j}+1 k})
+               \otimes I_{m^{i-1}}\otimes I_n\right)y
+\endalign
+$$
+\endroster
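+
+To see how the recursion bottoms out, take $i=1$ and suppose all
+eigenvalues of $F$ are real. Then $\imF$ is trivial, each pass through
+the loop solves one quasi-triangular $n\times n$ system
+$(I_n+rf_{\bar j\bar j}K)x_{\bar j}=d_{\bar j}$, and the updates of the
+remaining $d_k$ amount to block forward substitution on the lower block
+triangular matrix $I_m\otimes I_n+rF^T\otimes K$.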
+
+\subhead Function $\solvii$\endsubhead
+
+The function $x=\solvii(\alpha, \beta_1, \beta_2, d, i)$ solves
+equation
+$$
+\left(I_2\otimes I_{m^i}\otimes I_n +
+\pmatrix\alpha&\beta_1\cr-\beta_2&\alpha\endpmatrix
+\otimes\iF\otimes K\right)x=d
+$$
+
+According to {\bf Lemma 1} the function returns
+$$
+x=\pmatrix\solviip(\alpha,\beta_1\beta_2,\widehat{d_1},i)\cr
+          \solviip(\alpha,\beta_1\beta_2,\widehat{d_2},i)\endpmatrix,
+$$
+where $\widehat{d_1}$, and $\widehat{d_2}$ are partitions of
+$\widehat{d}$ from the lemma.
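+
+Spelled out, Lemma 1 is applied here with $A$ replaced by
+$\iF\otimes K$ (and $I_n$ by $I_{m^i}\otimes I_n$), so that
+$$\pmatrix\widehat{d_1}\cr\widehat{d_2}\endpmatrix=
+\left(I_2\otimes I_{m^i}\otimes I_n+
+\pmatrix\alpha&-\beta_1\cr\beta_2&\alpha\endpmatrix
+\otimes\iF\otimes K\right)d.$$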
+
+\subhead Function $\solviip$\endsubhead
+
+The function $x=\solviip(\alpha,\beta^2,d,i)$ solves equation
+$$
+\left(I_{m^i}\otimes I_n + 2\alpha\iF\otimes K +
+(\alpha^2+\beta^2)\iF^2\otimes K^2\right)x = d
+$$ 
+The function proceeds as follows:
+
+If $i=0$, the matrix $I_n+2\alpha K+(\alpha^2+\beta^2)K^2$ is
+calculated and the solution is obtained directly.
+
+Now note that the diagonal blocks of $F^{2T}$ are of the form $F_j^2$,
+since, if $F^T$ is partitioned according to its diagonal blocks,
+it is lower block triangular.
+
+If $i>0$, then we go through diagonal blocks $F_j$ for $j=1,\ldots, m_r+m_c$ 
+and perform:
+\roster
+\item if $F_j=(f_{\bar j\bar j})=(f)$ then $j$-th diagonal block of 
+$$
+I_{m^i}\otimes I_n + 2\alpha\iF\otimes K +
+(\alpha^2+\beta^2)\iF^2\otimes K^2
+$$
+takes the form
+$$
+I_{m^{i-1}}\otimes I_n +2\alpha f\imF\otimes K +
+(\alpha^2+\beta^2)f^2\imF^2\otimes K^2
+$$
+and we can put $x_j = \solviip(f\alpha,f^2\beta^2,d_j,i-1)$.
+
+Then we need to eliminate the entries below $F_j$. Note that $|f^2|<|f|$
+(since $|f|<1$ by the assumption on the spectrum of $C$), therefore we
+precalculate $y_2=(\alpha^2+\beta^2)f^2(\imF^2\otimes K^2)x_j$,
+and then precalculate
+$$y_1 = 2\alpha f(\imF\otimes K)x_j = d_j-x_j-y_2.$$
+Let $g_{pq}$ denote element of $F^{2T}$ at position $(q,p)$. 
+The elimination is done by going through $k=\bar j+1,\ldots, m$ and
+putting
+$$
+\align
+d_k &= d_k - \left(2\alpha f_{\bar j k}\imF\otimes K +
+(\alpha^2+\beta^2)g_{\bar j k}\imF^2\otimes K^2\right)x_j\\
+    &= d_k - \frac{f_{\bar j k}}{f}y_1 -
+       \frac{g_{\bar j k}}{f^2}y_2
+\endalign
+$$
+
+\item if $F_j=\pmatrix\gamma&\delta_1\cr -\delta_2&\gamma\endpmatrix$,
+then $j$-th diagonal block of
+$$
+I_{m^i}\otimes I_n + 2\alpha\iF\otimes K +
+(\alpha^2+\beta^2)\iF^2\otimes K^2
+$$
+takes the form
+$$
+I_{m^{i-1}}\otimes I_n +2\alpha
+\pmatrix\gamma&\delta_1\cr -\delta_2&\gamma\endpmatrix\imF\otimes K
++(\alpha^2+\beta^2)
+\pmatrix\gamma&\delta_1\cr -\delta_2&\gamma\endpmatrix^2\imF^2\otimes K^2
+$$
+According to {\bf Lemma 2}, we need to calculate
+$\widehat{d}_{\bar j}$, and $\widehat{d}_{\bar j+1}$, and $a_1$,
+$b_1$, $a_2$, $b_2$. Then we obtain
+$$
+\align
+x_{\bar j}&=
+ \solviip(a_1,b_1^2,\solviip(a_2,b_2^2,\widehat{d}_{\bar j},i-1),i-1)\\
+x_{\bar j+1}&=
+ \solviip(a_1,b_1^2,\solviip(a_2,b_2^2,\widehat{d}_{\bar j+1},i-1),i-1)
+\endalign
+$$
+
+Now we need to eliminate the entries below $F_j$. Since $\Vert F^2_j\Vert <
+\Vert F_j\Vert$, we precalculate 
+$$
+\align
+y_2&=(\alpha^2+\beta^2)(\gamma^2+\delta^2)
+\left(I_2\otimes\imF^2\otimes K^2\right)x_j\\
+y_1&=2\alpha(\gamma^2+\delta^2)\left(I_2\otimes\imF\otimes
+K\right)x_j\\
+   &=(\gamma^2+\delta^2)
+      \left(F_j^{-1}
+         \otimes I_{m^{i-1}n}\right)
+      \left(d_j-x_j-\frac{1}{(\gamma^2+\delta^2)}
+        \left(
+         F_j^2
+         \otimes I_{m^{i-1}n}\right)y_2\right)\\
+   &=\left(\pmatrix\gamma&-\delta_1\cr\delta_2&\gamma\endpmatrix
+           \otimes I_{m^{i-1}n}\right)(d_j-x_j)
+     -\left(\pmatrix\gamma&\delta_1\cr-\delta_2&\gamma\endpmatrix
+           \otimes I_{m^{i-1}n}\right)y_2
+\endalign
+$$
+Then we go through all $k=\bar j+2,\ldots, m$. For clearer formulas, let
+$\bold f_k$ denote the pair of $F^T$ elements in the $k$-th row below $F_j$,
+that is $\bold f_k=(f_{\bar jk}\quad f_{\bar j+1k})$, and let $\bold g_k$
+denote the same for $F^{2T}$, that is $\bold g_k=(g_{\bar jk}\quad
+g_{\bar j+1k})$. For each $k$ we put
+$$
+\align
+d_k &= d_k - \left(2\alpha\bold f_k\otimes
+                   \imF\otimes K +
+                   (\alpha^2+\beta^2)\bold g_k\otimes
+                   \imF^2\otimes K^2\right)x_j\\
+    &= d_k - \frac{1}{\gamma^2+\delta^2}
+             \left(\bold f_k\otimes
+                   I_{m^{i-1}n}\right)y_1
+           - \frac{1}{\gamma^2+\delta^2}
+             \left(\bold g_k\otimes
+                   I_{m^{i-1}n}\right)y_2
+\endalign
+$$
+
+\endroster
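+
+(Incidentally, the recursion parameters $(f\alpha,f^2\beta^2)$ used in the
+real-eigenvalue case above are exactly what brings the $j$-th diagonal
+block back to the form required by $\solviip$: its linear coefficient
+is $2(f\alpha)$ and, since $(\alpha^2+\beta^2)f^2=(f\alpha)^2+(f\beta)^2$,
+its quadratic coefficient is $(f\alpha)^2+(f\beta)^2$.)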
+
+\head Final Notes\endhead
+
+\subhead Numerical Issues of $A^{-1}B$\endsubhead
+
+We began the solution of the Sylvester equation with multiplication by
+$A^{-1}$. This can introduce numerical errors, and we need a more
+numerically stable supplement. Its aim is to make $A$ and $B$
+commute, that is, we need to find a regular matrix $P$ such that
+$(PA)(PB)=(PB)(PA)$. Recall that this is necessary in the solution of
+$$
+(I_2\otimes I_{m^i}\otimes (PA)+(D+C)\otimes\,\iF\otimes (PB))x=d,
+$$
+since this equation is
+multiplied by $I_2\otimes I_{m^i}\otimes (PA)+(D-C)\otimes\,\iF\otimes PB$,
+and the diagonal
+result 
+$$
+I_2\otimes I_{m^i}\otimes (PAPA) + 2D\otimes\,\iF\otimes (PAPB) +
+(D^2-C^2)\otimes\,\iF^2\otimes (PBPB)
+$$
+is obtained only if
+$(PA)(PB)=(PB)(PA)$.
+
+Finding a regular solution of $(PA)(PB)=(PB)(PA)$ is equivalent to
+finding a regular solution of $APB-BPA=0$. The numerical error of the former
+equation is $P$-times greater than the numerical error of the latter
+equation, and the numerical error of the latter equation also grows
+with the size of $P$. On the other hand, the truncation error in
+multiplication by $P$ decreases as the size of $P$ grows. Intuition suggests,
+and a stability analysis should confirm, that the best choice is some
+orthonormal $P$.
+
+Obviously, since $A$ is regular, the equation $APB-BPA=0$ has a solution
+of the form $P=\alpha A^{-1}$, where $\alpha\neq 0$. There is a vector
+space of all solutions $P$ (including singular ones). In exact
+arithmetic, its dimension is $\sum n^2_i$, where $n_i$ is the number of
+repetitions of the $i$-th eigenvalue of $A^{-1}B$ (which is similar to
+$BA^{-1}$). In floating point arithmetic, without any further
+knowledge about $A$ and $B$, we are only sure about dimension $n$,
+which is implied by the similarity of $A^{-1}B$ and $BA^{-1}$. Now we try
+to find a basis of the vector space of solutions.
+
+Let $L$ denote the following linear operator:
+$$L(X)=(AXB-BXA)^T.$$
+
+Let $\vec(X)$ denote the vector made by stacking all the
+columns of $X$. Let $T_n$ denote the $n^2\times n^2$ matrix representing
+the operator $\vec(X)\mapsto \vec(X^T)$. And
+finally let $M$ denote the $n^2\times n^2$ matrix representing the operator
+$L$. It is not difficult to verify that:
+$$M=T_n(B^T\otimes A - A^T\otimes B)$$
+Now we show that $M$ is skew symmetric. Recalling that $T_n(X\otimes
+Y)=(Y\otimes X)T_n$ and that $T_n$ is symmetric, we have:
+$$M^T=(B^T\otimes A - A^T\otimes B)^TT_n=(B\otimes A^T - A\otimes B^T)T_n=
+T_n(A^T\otimes B - B^T\otimes A) = -M$$
+
+We try to solve $M\vec(X) = T_n(0) = 0$. Since $M$ is
+skew symmetric, there is a real orthonormal matrix $Q$ such that
+$M=Q\widehat{M}Q^T$, where $\widehat{M}$ is a block diagonal matrix
+consisting of $2\times 2$ blocks of the form
+$$\left(\matrix 0&\alpha_i\cr-\alpha_i&0\endmatrix\right),$$
+and of an additional zero if $n^2$ is odd.
+
+Now we solve the equation $\widehat{M}y=0$, where $y=Q^T\vec(X)$. There
+are $n$ zero rows in $\widehat{M}$ coming from the similarity of
+$A^{-1}B$ and $BA^{-1}$ (structural zeros). Note that the additional
+zero for odd $n^2$ is already included in that number, since for odd
+$n^2$ the difference $n^2-n$ is even. Besides those, there are also zeros
+(especially in floating point arithmetic) coming from repeated (or close)
+eigenvalues of $A^{-1}B$. If we are able to select the rows with the
+structural zeros, a solution is obtained by picking arbitrary numbers
+for the same positions in $y$ and putting $\vec(X)=Qy$.
+
+The following questions need to be answered:
+\roster
+\item How to recognize the structural rows?
+\item Is $A^{-1}$ generated by a $y$ which has non-zero elements only
+on structural rows? Note that $A$ can have repeated eigenvalues. A
+positive answer to the question implies that in each $n$-partition of
+$y$ there is exactly one structural row.
+\item And a very difficult one: how to pick $y$ so that $X$ would be
+regular, or even close to orthonormal (pure orthonormality
+overdetermines $y$)?
+\endroster
+
+\subhead Making Zeros in $F$\endsubhead
+
+It is clear that the numerical complexity of the proposed algorithm
+strongly depends on the number of non-zero elements in the Schur factor
+$F$. If we were able to find $P$ such that $PFP^{-1}$ has
+substantially more zeros than $F$, then the computation would be
+substantially faster. However, it seems that we have to pay a price for
+any additional zero in terms of less numerical stability of the $PFP^{-1}$
+multiplication. Consider $P$ and $F$ in the form
+$$P=\pmatrix I &X\cr 0&I\endpmatrix,
+\qquad F=\pmatrix A&C\cr 0&B\endpmatrix$$
+we obtain
+$$PFP^{-1}=\pmatrix A& C + XB - AX\cr 0&B \endpmatrix$$
+
+Thus, we need to solve $C = AX - XB$. It is clear that the numerical
+stability of the operator $Y\mapsto PYP^{-1}$ and its inverse $Y\mapsto
+P^{-1}YP$ deteriorates with growing norm $\Vert X\Vert$. The norm can be
+as large as $\Vert F\Vert/\delta$, where $\delta$ is the distance of the
+eigenspectra of $A$ and $B$. Also, the numerical error of the solution is
+proportional to $\Vert C\Vert/\delta$.
+
+Although these difficulties cannot be overcome completely, we may
+introduce an algorithm which works on $F$ with ordered eigenvalues on the
+diagonal and seeks a partitioning that maximizes $\delta$ and
+minimizes $C$. If such a partitioning is found, the algorithm finds $P$
+and is then run for the $A$ and $B$ blocks. It stops when further
+partitioning is not possible without breaking some user-given limit
+on numerical errors. We have to keep in mind that the numerical
+errors accumulate in the product of all $P$'s over the steps.
+
+\subhead Exploiting constant rows in $F$\endsubhead
+
+If some of $F$'s rows consist of the same numbers, or the number of
+distinct values within a row is small, then this structure can be
+easily exploited in the algorithm. Recall that in both functions
+$\solvi$ and $\solviip$ we eliminate the entries below a diagonal element (or
+block) of $F^T$ by multiplying the solution of the diagonal block and
+cancelling it from the right-hand side. If the elements below the diagonal
+block are the same, we save one vector multiplication. Note that in
+$\solviip$ we still need to multiply by the elements below the diagonal of the
+matrix $F^{2T}$, which obviously does not have this property. However, the
+heaviest elimination is done at the very top level, in the first call
+to $\solvi$.
+
+Another way of exploiting the property is to perform all
+calculations in complex numbers. In that case, only $\solvi$ is run.
+
+How can the structure be introduced into the matrix? Following the
+same notation as in the previous section, we solve $C = AX - XB$ in order
+to obtain zeros in place of $C$. If this is not possible, we may relax
+the equation by solving $C - R = AX - XB$, where $R$ is a suitable
+matrix with constant rows. The matrix $R$ minimizes $\Vert C-R\Vert$
+in order to minimize $\Vert X\Vert$ for given $A$ and $B$. Now, in
+the next step we need to introduce zeros (or constant rows) into the matrix
+$A$, so we seek a regular matrix $P$ doing the
+job. If found, the product looks like:
+$$\pmatrix P&0\cr 0&I\endpmatrix\pmatrix A&R\cr 0&B\endpmatrix
+\pmatrix P^{-1}&0\cr 0&I\endpmatrix=
+\pmatrix PAP^{-1}&PR\cr 0&B\endpmatrix$$
+Now note that the matrix $PR$ also has constant rows. Thus,
+preconditioning of the matrix in the upper left corner doesn't affect the
+property. However, preconditioning of the matrix in the lower right
+corner breaks the property, since we would obtain $RP^{-1}$.
+
+
+\enddocument
diff --git a/dynare++/sylv/testing/MMMatrix.cpp b/dynare++/sylv/testing/MMMatrix.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b3c699da913f99345445bb36694b7f3046d9fb6d
--- /dev/null
+++ b/dynare++/sylv/testing/MMMatrix.cpp
@@ -0,0 +1,71 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/testing/MMMatrix.cpp,v 1.1.1.1 2004/06/04 13:01:13 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#include "MMMatrix.h"
+
+#include <stdio.h>
+#include <string.h>
+
+MMMatrixIn::MMMatrixIn(const char* fname)
+{
+	FILE* fd;
+	if (NULL==(fd = fopen(fname,"r")))
+		throw MMException(string("Cannot open file ")+fname+" for reading\n");
+
+	char buffer[1000];
+	// jump over initial comments
+	while (fgets(buffer, 1000, fd) && strncmp(buffer, "%%", 2)) {}
+	// read in number of rows and cols
+	if (!fgets(buffer, 1000, fd))
+		throw MMException(string("Cannot read rows and cols while reading ")+fname+"\n");
+	if (2 != sscanf(buffer, "%d %d", &rows, &cols))
+		throw MMException("Couldn't parse rows and cols\n");
+	// read in data
+	data = (double*) operator new[](rows*cols*sizeof(double));
+	int len = rows*cols;
+	int i = 0;
+	while (fgets(buffer, 1000, fd) && i < len) {
+		if (1 != sscanf(buffer, "%lf", &data[i]))
+			throw MMException(string("Couldn't parse float number ")+buffer+"\n");
+		i++;
+	}
+	if (i < len) {
+		char mes[1000];
+		sprintf(mes,"Couldn't read all %d lines, read %d so far\n",len,i);
+		throw MMException(mes);
+	}
+	fclose(fd);
+}
+
+MMMatrixIn::~MMMatrixIn()
+{
+	operator delete [](data);
+}
+
+
+void MMMatrixOut::write(const char* fname, int rows, int cols, const double* data)
+{
+	FILE* fd;
+	if (NULL==(fd = fopen(fname,"w")))
+		throw MMException(string("Cannot open file ")+fname+" for writing\n");
+
+	if (0 > fprintf(fd, "%%%%MatrixMarket matrix array real general\n"))
+		throw MMException(string("Output error when writing file ")+fname);
+	if (0 > fprintf(fd, "%d %d\n", rows, cols))
+		throw MMException(string("Output error when writing file ")+fname);
+	int running = 0;
+	for (int i = 0; i < cols; i++) {
+		for (int j = 0 ; j < rows; j++) {
+			if (0 > fprintf(fd, "%40.35g\n", data[running]))
+				throw MMException(string("Output error when writing file ")+fname);
+			running++;
+		}
+	}
+	fclose(fd);
+}
+
+void MMMatrixOut::write(const char* fname, const GeneralMatrix& m)
+{
+	write(fname, m.numRows(), m.numCols(), m.base());
+}
diff --git a/dynare++/sylv/testing/MMMatrix.h b/dynare++/sylv/testing/MMMatrix.h
new file mode 100644
index 0000000000000000000000000000000000000000..e30842caf544d40816ac9671f2e08a9196502075
--- /dev/null
+++ b/dynare++/sylv/testing/MMMatrix.h
@@ -0,0 +1,46 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/testing/MMMatrix.h,v 1.1.1.1 2004/06/04 13:01:13 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#ifndef MM_MATRIX_H
+#define MM_MATRIX_H
+
+#include "GeneralMatrix.h"
+#include "SylvMemory.h"
+
+#include <string>
+
+using namespace std;
+
+class MMException : public MallocAllocator {
+	string message;
+public:
+	MMException(string mes) : message(mes) {}
+	MMException(const char* mes) : message(mes) {}
+	const char* getMessage() const {return message.c_str();}
+};
+
+class MMMatrixIn : public MallocAllocator {
+	double* data;
+	int rows;
+	int cols;
+public:
+	MMMatrixIn(const char* fname);
+	~MMMatrixIn();
+	const double* getData() const {return data;}
+	int size() const {return rows*cols;}
+	int row() const {return rows;}
+	int col() const {return cols;}
+};
+
+class MMMatrixOut : public MallocAllocator {
+public:
+	static void write(const char* fname, int rows, int cols, const double* data);
+	static void write(const char* fname, const GeneralMatrix& m);
+};
+
+#endif /* MM_MATRIX_H */
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/sylv/testing/Makefile b/dynare++/sylv/testing/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..de2b601e544e011db28a4d147f382ba209cca9a7
--- /dev/null
+++ b/dynare++/sylv/testing/Makefile
@@ -0,0 +1,36 @@
+# $Header: /var/lib/cvs/dynare_cpp/sylv/testing/Makefile,v 1.2 2004/09/28 16:16:43 kamenik Exp $
+
+# Tag $Name:  $
+
+LD_LIBS := -llapack -lcblas -lf77blas -latlas -lg2c
+CC_FLAGS := -Wall -I../cc
+CC_DEFS := #-DUSE_MEMORY_POOL
+ifeq ($(DEBUG),yes)
+	CC_FLAGS := $(CC_FLAGS) -g
+	CC_DEFS := $(CC_DEFS) -DDEBUG
+else
+	CC_FLAGS := $(CC_FLAGS) -O2
+endif
+ 
+objects := $(patsubst %.cpp,%.o,$(wildcard ../cc/*.cpp)) 
+headers := $(wildcard ../cc/*.h)
+ 
+clear:
+	make -C ../cc clear
+	rm -f tests.exe
+	rm -f tests
+	rm -f *.o
+
+../cc/%.o : ../cc/%.cpp $(headers)
+	make EXTERN_DEFS="$(CC_DEFS)" -C ../cc $*.o
+
+%.o : %.cpp $(headers)
+	$(CC) $(CC_FLAGS) $(CC_DEFS) -c $*.cpp 
+
+# untar testing data
+tdata.done : tdata.tgz
+	tar -xzf tdata.tgz
+	touch tdata.done
+
+test: $(objects) tests.o MMMatrix.o tdata.done
+	$(CC) $(CC_FLAGS) -o test $(objects) tests.o MMMatrix.o $(LD_LIBS)
diff --git a/dynare++/sylv/testing/tdata.tgz b/dynare++/sylv/testing/tdata.tgz
new file mode 100644
index 0000000000000000000000000000000000000000..e659354f2d439d5dbe9064724793d8715e91ed45
Binary files /dev/null and b/dynare++/sylv/testing/tdata.tgz differ
diff --git a/dynare++/sylv/testing/tests.cpp b/dynare++/sylv/testing/tests.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..27a3f01202962f331a56e8b8ce9c82988faaac77
--- /dev/null
+++ b/dynare++/sylv/testing/tests.cpp
@@ -0,0 +1,1022 @@
+/* $Header: /var/lib/cvs/dynare_cpp/sylv/testing/tests.cpp,v 1.2 2004/07/05 19:55:48 kamenik Exp $ */
+
+/* Tag $Name:  $ */
+
+#include "SylvException.h"
+#include "QuasiTriangular.h"
+#include "QuasiTriangularZero.h"
+#include "Vector.h"
+#include "KronVector.h"
+#include "KronUtils.h"
+#include "TriangularSylvester.h"
+#include "GeneralSylvester.h"
+#include "SylvMemory.h"
+#include "SchurDecompEig.h"
+#include "SimilarityDecomp.h"
+#include "IterativeSylvester.h"
+#include "SylvMatrix.h"
+
+#include "MMMatrix.h"
+
+#include <stdio.h>
+#include <string.h>
+#include <time.h>
+
+#include <cmath>
+
+class TestRunnable : public MallocAllocator {
+	char name[100];
+	static double eps_norm;
+public:
+	TestRunnable(const char* n){strncpy(name, n, 100);}
+	bool test() const;
+	virtual bool run() const =0;
+	const char* getName() const {return name;}
+protected:
+	// declaration of auxiliary static methods
+	static bool quasi_solve(bool trans, const char* mname, const char* vname);
+	static bool mult_kron(bool trans, const char* mname, const char* vname,
+						  const char* cname, int m, int n, int depth);
+	static bool level_kron(bool trans, const char* mname, const char* vname,
+						   const char* cname, int level, int m, int n, int depth);
+	static bool kron_power(const char* m1name, const char* m2name, const char* vname,
+						   const char* cname, int m, int n, int depth);
+	static bool lin_eval(const char* m1name, const char* m2name, const char* vname,
+						 const char* cname, int m, int n, int depth,
+						 double alpha, double beta1, double beta2);
+	static bool qua_eval(const char* m1name, const char* m2name, const char* vname,
+						 const char* cname, int m, int n, int depth,
+						 double alpha, double betas, double gamma,
+						 double delta1, double delta2);
+	static bool tri_sylv(const char* m1name, const char* m2name, const char* vname,
+						 int m, int n, int depth);
+	static bool gen_sylv(const char* aname, const char* bname, const char* cname,
+						 const char* dname, int m, int n, int order);
+	static bool eig_bubble(const char* aname, int from, int to);
+	static bool block_diag(const char* aname, double log10norm = 3.0);
+	static bool iter_sylv(const char* m1name, const char* m2name, const char* vname,
+						  int m, int n, int depth);
+};
+
+double TestRunnable::eps_norm = 1.0e-10;
+
+bool TestRunnable::test() const
+{
+	printf("Running test <%s>\n",name);
+	clock_t start = clock();
+	bool passed = run();
+	clock_t end = clock();
+	printf("CPU time %8.4g (CPU seconds)..................",
+		   ((double)(end-start))/CLOCKS_PER_SEC);
+	if (passed) {
+		printf("passed\n\n");
+		return passed;
+	} else {
+		printf("FAILED\n\n");
+		return passed;
+	}
+}
+
+/**********************************************************/
+/*   auxiliary methods                                    */
+/**********************************************************/
+
+bool TestRunnable::quasi_solve(bool trans, const char* mname, const char* vname)
+{
+	MMMatrixIn mmt(mname);
+	MMMatrixIn mmv(vname);
+
+	SylvMemoryDriver memdriver(1, mmt.row(), mmt.row(), 1);
+	QuasiTriangular* t;
+	QuasiTriangular* tsave;
+	if (mmt.row()==mmt.col()) {
+		t = new QuasiTriangular(mmt.getData(), mmt.row());
+		tsave = new QuasiTriangular(*t);
+	} else if (mmt.row()>mmt.col()) {
+		t = new QuasiTriangularZero(mmt.row()-mmt.col(), mmt.getData(), mmt.col());
+		tsave = new QuasiTriangularZero((const QuasiTriangularZero&)*t);
+	} else {
+		printf("  Wrong quasi triangular dimensions, rows must be >= cols.\n");
+		return false;
+	}
+	ConstVector v(mmv.getData(), mmv.row());
+	Vector x(v.length());
+	double eig_min = 1.0e20;
+	if (trans)
+		t->solveTrans(x, v, eig_min);
+	else
+		t->solve(x, v, eig_min);
+	printf("eig_min = %8.4g\n", eig_min);
+	Vector xx(v.length());
+	if (trans)
+		tsave->multVecTrans(xx, ConstVector(x));
+	else
+		tsave->multVec(xx, ConstVector(x));
+	delete tsave;
+	delete t;
+	xx.add(-1.0, v);
+	xx.add(1.0, x);
+	double norm = xx.getNorm();
+	printf("\terror norm = %8.4g\n",norm);
+	return (norm < eps_norm);
+}
+
+bool TestRunnable::mult_kron(bool trans, const char* mname, const char* vname,
+							 const char* cname, int m, int n, int depth)
+{
+	MMMatrixIn mmt(mname);
+	MMMatrixIn mmv(vname);
+	MMMatrixIn mmc(cname);
+
+	int length = power(m,depth)*n;
+	if (mmt.row() != m ||
+		mmv.row() != length ||
+		mmc.row() != length) {
+		printf("  Incompatible sizes for kron mult action, len=%d, matrow=%d, m=%d, vrow=%d, crow=%d \n",length,mmt.row(), m, mmv.row(), mmc.row());
+		return false;
+	}
+
+	SylvMemoryDriver memdriver(1, m, n, depth);
+	QuasiTriangular t(mmt.getData(), mmt.row());
+	Vector vraw(mmv.getData(), mmv.row());
+	KronVector v(vraw, m, n, depth);
+	Vector craw(mmc.getData(), mmc.row());
+	KronVector c(craw, m, n, depth);
+	if (trans)
+		t.multKronTrans(v);
+	else
+		t.multKron(v);
+	c.add(-1.0, v);
+	double norm = c.getNorm();
+	printf("\terror norm = %8.4g\n",norm);
+	return (norm < eps_norm);
+}
+
+bool TestRunnable::level_kron(bool trans, const char* mname, const char* vname,
+							  const char* cname, int level, int m, int n, int depth)
+{
+	MMMatrixIn mmt(mname);
+	MMMatrixIn mmv(vname);
+	MMMatrixIn mmc(cname);
+
+	int length = power(m,depth)*n;
+	if ((level > 0 && mmt.row() != m) ||
+		(level == 0 && mmt.row() != n) ||
+		mmv.row() != length ||
+		mmc.row() != length) {
+		printf("  Incompatible sizes for kron mult action, len=%d, matrow=%d, m=%d, n=%d, vrow=%d, crow=%d \n",length, mmt.row(), m, n, mmv.row(), mmc.row());
+		return false;
+	}
+
+	SylvMemoryDriver memdriver(1, m, n, depth);
+	QuasiTriangular t(mmt.getData(), mmt.row());
+	Vector vraw(mmv.getData(), mmv.row());
+	ConstKronVector v(vraw, m, n, depth);
+	Vector craw(mmc.getData(), mmc.row());
+	KronVector c(craw, m, n, depth);
+	KronVector x(v);
+	if (trans)
+		KronUtils::multAtLevelTrans(level, t, x);
+	else
+		KronUtils::multAtLevel(level, t, x);
+	x.add(-1, c);
+	double norm = x.getNorm();
+	printf("\terror norm = %8.4g\n",norm);
+	return (norm < eps_norm);
+}
+
+bool TestRunnable::kron_power(const char* m1name, const char* m2name, const char* vname,
+							  const char* cname, int m, int n, int depth)
+{
+	MMMatrixIn mmt1(m1name);
+	MMMatrixIn mmt2(m2name);
+	MMMatrixIn mmv(vname);
+	MMMatrixIn mmc(cname);
+
+	int length = power(m,depth)*n;
+	if (mmt1.row() != m ||
+		mmt2.row() != n ||
+		mmv.row() != length ||
+		mmc.row() != length) {
+		printf("  Incompatible sizes for kron power mult action, len=%d, row1=%d, row2=%d, m=%d, n=%d, vrow=%d, crow=%d \n",length,mmt1.row(), mmt2.row(), m, n, mmv.row(), mmc.row());
+		return false;
+	}
+
+	SylvMemoryDriver memdriver(2, m, n, depth);
+	QuasiTriangular t1(mmt1.getData(), mmt1.row());
+	QuasiTriangular t2(mmt2.getData(), mmt2.row());
+	Vector vraw(mmv.getData(), mmv.row());
+	ConstKronVector v(vraw, m, n, depth);
+	Vector craw(mmc.getData(), mmc.row());
+	KronVector c(craw, m, n, depth);
+	KronVector x(v);
+	memdriver.setStackMode(true);
+	KronUtils::multKron(t1, t2, x);
+	memdriver.setStackMode(false);
+	x.add(-1, c);
+	double norm = x.getNorm();
+	printf("\terror norm = %8.4g\n",norm);
+	return (norm < eps_norm);
+}
+
+bool TestRunnable::lin_eval(const char* m1name, const char* m2name, const char* vname,
+							const char* cname, int m, int n, int depth,
+							double alpha, double beta1, double beta2)
+{
+	MMMatrixIn mmt1(m1name);
+	MMMatrixIn mmt2(m2name);
+	MMMatrixIn mmv(vname);
+	MMMatrixIn mmc(cname);
+
+	int length = power(m,depth)*n;
+	if (mmt1.row() != m ||
+		mmt2.row() != n ||
+		mmv.row() != 2*length ||
+		mmc.row() != 2*length) {
+		printf("  Incompatible sizes for lin eval action, len=%d, row1=%d, row2=%d, m=%d, n=%d, vrow=%d, crow=%d \n",length,mmt1.row(), mmt2.row(), m, n, mmv.row(), mmc.row());
+		return false;
+	}
+
+	SylvMemoryDriver memdriver(1, m, n, depth);
+	QuasiTriangular t1(mmt1.getData(), mmt1.row());
+	QuasiTriangular t2(mmt2.getData(), mmt2.row());
+	TriangularSylvester ts(t2, t1);
+	Vector vraw1(mmv.getData(), length);
+	ConstKronVector v1(vraw1, m, n, depth);
+	Vector vraw2(mmv.getData()+length, length);
+	ConstKronVector v2(vraw2, m, n, depth);
+	Vector craw1(mmc.getData(), length);
+	KronVector c1(craw1, m, n, depth);
+	Vector craw2(mmc.getData()+length, length);
+	KronVector c2(craw2, m, n, depth);
+	KronVector x1(m, n, depth);
+	KronVector x2(m, n, depth);
+	memdriver.setStackMode(true);
+	ts.linEval(alpha, beta1, beta2, x1, x2, v1, v2);
+	memdriver.setStackMode(false);
+	x1.add(-1, c1);
+	x2.add(-1, c2);
+	double norm1 = x1.getNorm();
+	double norm2 = x2.getNorm();
+	printf("\terror norm1 = %8.4g\n\terror norm2 = %8.4g\n",norm1,norm2);
+	return (norm1*norm1+norm2*norm2 < eps_norm*eps_norm);
+}
+
+
+bool TestRunnable::qua_eval(const char* m1name, const char* m2name, const char* vname,
+							const char* cname, int m, int n, int depth,
+							double alpha, double betas, double gamma,
+							double delta1, double delta2)
+{
+	MMMatrixIn mmt1(m1name);
+	MMMatrixIn mmt2(m2name);
+	MMMatrixIn mmv(vname);
+	MMMatrixIn mmc(cname);
+
+	int length = power(m,depth)*n;
+	if (mmt1.row() != m ||
+		mmt2.row() != n ||
+		mmv.row() != 2*length ||
+		mmc.row() != 2*length) {
+		printf("  Incompatible sizes for qua eval action, len=%d, row1=%d, row2=%d, m=%d, n=%d, vrow=%d, crow=%d \n",length,mmt1.row(), mmt2.row(), m, n, mmv.row(), mmc.row());
+		return false;
+	}
+
+	SylvMemoryDriver memdriver(3, m, n, depth);
+	QuasiTriangular t1(mmt1.getData(), mmt1.row());
+	QuasiTriangular t2(mmt2.getData(), mmt2.row());
+	TriangularSylvester ts(t2, t1);
+	Vector vraw1(mmv.getData(), length);
+	ConstKronVector v1(vraw1, m, n, depth);
+	Vector vraw2(mmv.getData()+length, length);
+	ConstKronVector v2(vraw2, m, n, depth);
+	Vector craw1(mmc.getData(), length);
+	KronVector c1(craw1, m, n, depth);
+	Vector craw2(mmc.getData()+length, length);
+	KronVector c2(craw2, m, n, depth);
+	KronVector x1(m, n, depth);
+	KronVector x2(m, n, depth);
+	memdriver.setStackMode(true);
+	ts.quaEval(alpha, betas, gamma, delta1, delta2, x1, x2, v1, v2);
+	memdriver.setStackMode(false);
+	x1.add(-1, c1);
+	x2.add(-1, c2);
+	double norm1 = x1.getNorm();
+	double norm2 = x2.getNorm();
+	printf("\terror norm1 = %8.4g\n\terror norm2 = %8.4g\n",norm1,norm2);
+	return (norm1*norm1+norm2*norm2 < 100*eps_norm*eps_norm); // relax norm
+}
+
+bool TestRunnable::tri_sylv(const char* m1name, const char* m2name, const char* vname,
+							int m, int n, int depth)
+{
+	MMMatrixIn mmt1(m1name);
+	MMMatrixIn mmt2(m2name);
+	MMMatrixIn mmv(vname);
+
+	int length = power(m,depth)*n;
+	if (mmt1.row() != m ||
+		mmt2.row() != n ||
+		mmv.row() != length) {
+		printf("  Incompatible sizes for triangular sylvester action, len=%d, row1=%d, row2=%d, m=%d, n=%d, vrow=%d\n",length,mmt1.row(), mmt2.row(), m, n, mmv.row());
+		return false;
+	}
+
+	SylvMemoryDriver memdriver(4, m, n, depth); // need extra 2 for checks done via KronUtils::multKron
+	memdriver.setStackMode(true);
+	QuasiTriangular t1(mmt1.getData(), mmt1.row());
+	QuasiTriangular t2(mmt2.getData(), mmt2.row());
+	TriangularSylvester ts(t2, t1);
+	Vector vraw(mmv.getData(), length);
+	ConstKronVector v(vraw, m, n, depth);
+	KronVector d(v); // copy of v
+	SylvParams pars;
+	ts.solve(pars, d);
+	pars.print("\t");
+	KronVector dcheck((const KronVector&)d);
+	KronUtils::multKron(t1, t2, dcheck);
+	dcheck.add(1.0, d);
+	dcheck.add(-1.0, v);
+	double norm = dcheck.getNorm();
+	double xnorm = v.getNorm();
+	printf("\trel. error norm = %8.4g\n",norm/xnorm);
+	double max = dcheck.getMax();
+	double xmax = v.getMax();
+	printf("\trel. error max = %8.4g\n", max/xmax);
+	memdriver.setStackMode(false);
+	return (norm < xnorm*eps_norm);
+}
+
+bool TestRunnable::gen_sylv(const char* aname, const char* bname, const char* cname,
+							const char* dname, int m, int n, int order)
+{
+	MMMatrixIn mma(aname);
+	MMMatrixIn mmb(bname);
+	MMMatrixIn mmc(cname);
+	MMMatrixIn mmd(dname);
+
+	if (m != mmc.row() || m != mmc.col() ||
+		n != mma.row() || n != mma.col() ||
+		n != mmb.row() || n <  mmb.col() ||
+		n != mmd.row() || power(m, order) != mmd.col()) {
+		printf("  Incompatible sizes for gen_sylv.\n");
+		return false;
+	}
+
+	SylvParams ps(true);
+	GeneralSylvester gs(order, n, m, n-mmb.col(),
+						mma.getData(), mmb.getData(),
+						mmc.getData(), mmd.getData(),
+						ps);
+	gs.solve();
+	gs.check(mmd.getData());
+	const SylvParams& pars = gs.getParams();
+	pars.print("\t");
+	return (*(pars.mat_err1) < eps_norm && *(pars.mat_errI) < eps_norm &&
+			*(pars.mat_errF) < eps_norm && *(pars.vec_err1) < eps_norm &&
+			*(pars.vec_errI) < eps_norm);
+}
+
+bool TestRunnable::eig_bubble(const char* aname, int from, int to)
+{
+	MMMatrixIn mma(aname);
+
+	if (mma.row() != mma.col()) {
+		printf("  Matrix is not square\n");
+		return false;
+	}
+
+	int n = mma.row();
+	SylvMemoryDriver memdriver(3, n, n, 2);
+	QuasiTriangular orig(mma.getData(), n);
+	SchurDecompEig dec((const QuasiTriangular&)orig);
+	QuasiTriangular::diag_iter itf = dec.getT().diag_begin();
+	QuasiTriangular::diag_iter itt = dec.getT().diag_begin();
+	for (int i = 0; i < from; i++)
+		++itf;
+	for (int i = 0; i < to; i++)
+		++itt;
+	itt = dec.bubbleEigen(itf, itt);
+	SqSylvMatrix check(dec.getQ(), dec.getT());
+	check.multRightTrans(dec.getQ());
+	check.add(-1, orig);
+	double norm1 = check.getNorm1();
+	double normInf = check.getNormInf();
+	double onorm1 = orig.getNorm1();
+	double onormInf = orig.getNormInf();
+	printf("\tabs. error1 = %8.4g\n", norm1);
+	printf("\tabs. errorI = %8.4g\n", normInf);
+	printf("\trel. error1 = %8.4g\n", norm1/onorm1);
+	printf("\trel. errorI = %8.4g\n", normInf/onormInf);
+	return (norm1 < eps_norm*onorm1 && normInf < eps_norm*onormInf);
+}
+
+bool TestRunnable::block_diag(const char* aname, double log10norm)
+{
+	MMMatrixIn mma(aname);
+
+	if (mma.row() != mma.col()) {
+		printf("  Matrix is not square\n");
+		return false;
+	}
+
+	int n = mma.row();
+	SylvMemoryDriver memdriver(3, n, n, 2);
+	SqSylvMatrix orig(mma.getData(), n);
+	SimilarityDecomp dec(orig.base(), orig.numRows(), log10norm);
+	dec.getB().printInfo();
+	SqSylvMatrix check(dec.getQ(), dec.getB());
+	check.multRight(dec.getInvQ());
+	check.add(-1, orig);
+	double norm1 = check.getNorm1();
+	double normInf = check.getNormInf();
+	double onorm1 = orig.getNorm1();
+	double onormInf = orig.getNormInf();
+	printf("\terror Q*B*invQ:\n");
+	printf("\tabs. error1 = %8.4g\n", norm1);
+	printf("\tabs. errorI = %8.4g\n", normInf);
+	printf("\trel. error1 = %8.4g\n", norm1/onorm1);
+	printf("\trel. errorI = %8.4g\n", normInf/onormInf);
+	SqSylvMatrix check2(dec.getQ(), dec.getInvQ());
+	SqSylvMatrix in(n);
+	in.setUnit();
+	check2.add(-1, in);
+	double nor1 = check2.getNorm1();
+	double norInf = check2.getNormInf();
+	printf("\terror Q*invQ:\n");
+	printf("\tabs. error1 = %8.4g\n", nor1);
+	printf("\tabs. errorI = %8.4g\n", norInf);
+	return (norm1 < eps_norm*pow(10, log10norm)*onorm1);
+}
+
+bool TestRunnable::iter_sylv(const char* m1name, const char* m2name, const char* vname,
+							 int m, int n, int depth)
+{
+	MMMatrixIn mmt1(m1name);
+	MMMatrixIn mmt2(m2name);
+	MMMatrixIn mmv(vname);
+
+	int length = power(m,depth)*n;
+	if (mmt1.row() != m ||
+		mmt2.row() != n ||
+		mmv.row() != length) {
+		printf("  Incompatible sizes for triangular sylvester iteration, len=%d, row1=%d, row2=%d, m=%d, n=%d, vrow=%d\n",length,mmt1.row(), mmt2.row(), m, n, mmv.row());
+		return false;
+	}
+
+	SylvMemoryDriver memdriver(4, m, n, depth); // need extra 2 for checks done via KronUtils::multKron
+	memdriver.setStackMode(true);
+	QuasiTriangular t1(mmt1.getData(), mmt1.row());
+	QuasiTriangular t2(mmt2.getData(), mmt2.row());
+	IterativeSylvester is(t2, t1);
+	Vector vraw(mmv.getData(), length);
+	ConstKronVector v(vraw, m, n, depth);
+	KronVector d(v); // copy of v
+	SylvParams pars;
+	pars.method = SylvParams::iter;
+	is.solve(pars, d);
+	pars.print("\t");
+	KronVector dcheck((const KronVector&)d);
+	KronUtils::multKron(t1, t2, dcheck);
+	dcheck.add(1.0, d);
+	dcheck.add(-1.0, v);
+	double cnorm = dcheck.getNorm();
+	double xnorm = v.getNorm();
+	printf("\trel. error norm = %8.4g\n",cnorm/xnorm);
+	double max = dcheck.getMax();
+	double xmax = v.getMax();
+	printf("\trel. error max = %8.4g\n", max/xmax);
+	memdriver.setStackMode(false);
+	return (cnorm < xnorm*eps_norm);
+}
+
+/**********************************************************/
+/*   sub classes declarations                             */
+/**********************************************************/
+
+class PureTriangTest : public TestRunnable {
+public:
+	PureTriangTest() : TestRunnable("pure triangular solve (5)") {}
+	bool run() const;
+};
+
+class PureTriangTransTest : public TestRunnable {
+public:
+	PureTriangTransTest() : TestRunnable("pure triangular solve trans (5)") {}
+	bool run() const;
+};
+
+class PureTrLargeTest : public TestRunnable {
+public:
+	PureTrLargeTest() : TestRunnable("pure triangular large solve (300)") {}
+	bool run() const;
+};
+
+class PureTrLargeTransTest : public TestRunnable {
+public:
+	PureTrLargeTransTest() : TestRunnable("pure triangular large solve trans (300)") {}
+	bool run() const;
+};
+
+class QuasiTriangTest : public TestRunnable {
+public:
+	QuasiTriangTest() : TestRunnable("quasi triangular solve (7)") {}
+	bool run() const;
+};
+
+class QuasiTriangTransTest : public TestRunnable {
+public:
+	QuasiTriangTransTest() : TestRunnable("quasi triangular solve trans (7)") {}
+	bool run() const;
+};
+
+class QuasiTrLargeTest : public TestRunnable {
+public:
+	QuasiTrLargeTest() : TestRunnable("quasi triangular solve large (250)") {}
+	bool run() const;
+};
+
+class QuasiTrLargeTransTest : public TestRunnable {
+public:
+	QuasiTrLargeTransTest() : TestRunnable("quasi triangular solve large trans (250)") {}
+	bool run() const;
+};
+
+class QuasiZeroSmallTest : public TestRunnable {
+public:
+	QuasiZeroSmallTest() : TestRunnable("quasi tr. zero small test (2x1)") {}
+	bool run() const;
+};
+
+class MultKronSmallTest : public TestRunnable {
+public:
+	MultKronSmallTest() : TestRunnable("kronecker small mult (2=2x1)") {}
+	bool run() const;
+};
+
+class MultKronTest : public TestRunnable {
+public:
+	MultKronTest() : TestRunnable("kronecker mult (245=7x7x5)") {}
+	bool run() const;
+};
+
+class MultKronSmallTransTest : public TestRunnable {
+public:
+	MultKronSmallTransTest() : TestRunnable("kronecker small trans mult (2=2x1)") {}
+	bool run() const;
+};
+
+class MultKronTransTest : public TestRunnable {
+public:
+	MultKronTransTest() : TestRunnable("kronecker trans mult (245=7x7x5)") {}
+	bool run() const;
+};
+
+class LevelKronTest : public TestRunnable {
+public:
+	LevelKronTest() : TestRunnable("kronecker level mult (1715=7x[7]x7x5)") {}
+	bool run() const;
+};
+
+class LevelKronTransTest : public TestRunnable {
+public:
+	LevelKronTransTest() : TestRunnable("kronecker level trans mult (1715=7x[7]x7x5)") {}
+	bool run() const;
+};
+
+class LevelZeroKronTest : public TestRunnable {
+public:
+	LevelZeroKronTest() : TestRunnable("kronecker level mult (1715=7x7x7x[5])") {}
+	bool run() const;
+};
+
+class LevelZeroKronTransTest : public TestRunnable {
+public:
+	LevelZeroKronTransTest() : TestRunnable("kronecker level trans mult (1715=7x7x7x[5])") {}
+	bool run() const;
+};
+
+class KronPowerTest : public TestRunnable {
+public:
+	KronPowerTest() : TestRunnable("kronecker power mult (1715=7x7x7x5)") {}
+	bool run() const;
+};
+
+class SmallLinEvalTest : public TestRunnable {
+public:
+	SmallLinEvalTest() : TestRunnable("lin eval (24=2 x 2x2x3)") {}
+	bool run() const;
+};
+
+class LinEvalTest : public TestRunnable {
+public:
+	LinEvalTest() : TestRunnable("lin eval (490=2 x 7x7x5)") {}
+	bool run() const;
+};
+
+class SmallQuaEvalTest : public TestRunnable {
+public:
+	SmallQuaEvalTest() : TestRunnable("qua eval (24=2 x 2x2x3)") {}
+	bool run() const;
+};
+
+class QuaEvalTest : public TestRunnable {
+public:
+	QuaEvalTest() : TestRunnable("qua eval (490=2 x 7x7x5)") {}
+	bool run() const;
+};
+
+class TriSylvSmallRealTest : public TestRunnable {
+public:
+	TriSylvSmallRealTest() : TestRunnable("triangular sylvester small real solve (12=2x2x3)") {}
+	bool run() const;
+};
+
+class TriSylvSmallComplexTest : public TestRunnable {
+public:
+	TriSylvSmallComplexTest() : TestRunnable("triangular sylvester small complx solve (12=2x2x3)") {}
+	bool run() const;
+};
+
+class TriSylvTest : public TestRunnable {
+public:
+	TriSylvTest() : TestRunnable("triangular sylvester solve (245=7x7x5)") {}
+	bool run() const;
+};
+
+class TriSylvBigTest : public TestRunnable {
+public:
+	TriSylvBigTest() : TestRunnable("triangular sylvester big solve (48000=40x40x30)") {}
+	bool run() const;
+};
+
+class TriSylvLargeTest : public TestRunnable {
+public:
+	TriSylvLargeTest() : TestRunnable("triangular sylvester large solve (1920000=40x40x40x30)") {}
+	bool run() const;
+};
+
+class IterSylvTest : public TestRunnable {
+public:
+	IterSylvTest() : TestRunnable("iterative sylvester solve (245=7x7x5)") {}
+	bool run() const;
+};
+
+class IterSylvLargeTest : public TestRunnable {
+public:
+	IterSylvLargeTest() : TestRunnable("iterative sylvester large solve (1920000=40x40x40x30)") {}
+	bool run() const;
+};
+
+class GenSylvSmallTest : public TestRunnable {
+public:
+	GenSylvSmallTest() : TestRunnable("general sylvester small solve (18=3x3x2)") {}
+	bool run() const;
+};
+
+class GenSylvTest : public TestRunnable {
+public:
+	GenSylvTest() : TestRunnable("general sylvester solve (12000=20x20x30)") {}
+	bool run() const;
+};
+
+class GenSylvSingTest : public TestRunnable {
+public:
+	GenSylvSingTest() : TestRunnable("general sylvester solve for sing. C (2500000=50x50x50x20)") {}
+	bool run() const;
+};
+
+class GenSylvLargeTest : public TestRunnable {
+public:
+	GenSylvLargeTest() : TestRunnable("general sylvester solve (2500000=50x50x50x20)") {}
+	bool run() const;
+};
+
+class EigBubFrankTest : public TestRunnable {
+public:
+	EigBubFrankTest() : TestRunnable("eig. bubble frank test (12x12)") {}
+	bool run() const;
+};
+
+class EigBubSplitTest : public TestRunnable {
+// a complex eigenvalue is split by swapping it with a real one
+public:
+	EigBubSplitTest() : TestRunnable("eig. bubble complex split test (3x3)") {}
+	bool run() const;
+};
+
+class EigBubSameTest : public TestRunnable {
+// a complex eigenvalue bypasses the same complex eigenvalue
+public:
+	EigBubSameTest() : TestRunnable("eig. bubble same test (5x5)") {}
+	bool run() const;
+};
+
+class BlockDiagSmallTest : public TestRunnable {
+public:
+	BlockDiagSmallTest() : TestRunnable("block diagonalization small test (7x7)") {}
+	bool run() const;
+};
+
+class BlockDiagFrankTest : public TestRunnable {
+public:
+	BlockDiagFrankTest() : TestRunnable("block diagonalization of frank (12x12)") {}
+	bool run() const;
+};
+
+class BlockDiagIllCondTest : public TestRunnable {
+public:
+	BlockDiagIllCondTest() : TestRunnable("block diagonalization of ill conditioned (15x15)") {}
+	bool run() const;
+};
+
+class BlockDiagBigTest : public TestRunnable {
+public:
+	BlockDiagBigTest() : TestRunnable("block diagonalization big test (50x50)") {}
+	bool run() const;
+};
+
+/**********************************************************/
+/*   run methods of sub classes                           */
+/**********************************************************/
+
+bool PureTriangTest::run() const
+{
+	return quasi_solve(false, "tr5x5.mm", "v5.mm");
+}
+
+bool PureTriangTransTest::run() const
+{
+	return quasi_solve(true, "tr5x5.mm", "v5.mm");
+}
+
+bool PureTrLargeTest::run() const
+{
+	return quasi_solve(false, "tr300x300.mm", "v300.mm");
+}
+
+bool PureTrLargeTransTest::run() const
+{
+	return quasi_solve(true, "tr300x300.mm", "v300.mm");
+}
+
+bool QuasiTriangTest::run() const
+{
+	return quasi_solve(false, "qt7x7.mm", "v7.mm");
+}
+
+bool QuasiTriangTransTest::run() const
+{
+	return quasi_solve(true, "qt7x7.mm", "v7.mm");
+}
+
+bool QuasiTrLargeTest::run() const
+{
+	return quasi_solve(false, "qt250x250.mm", "v250.mm");
+}
+
+bool QuasiTrLargeTransTest::run() const
+{
+	return quasi_solve(true, "qt250x250.mm", "v250.mm");
+}
+
+bool QuasiZeroSmallTest::run() const
+{
+	return quasi_solve(false, "b2x1.mm", "v2.mm");
+}
+
+bool MultKronSmallTest::run() const
+{
+	return mult_kron(false, "tr2x2.mm", "v2.mm", "vcheck2.mm", 2, 1, 1);
+}
+
+bool MultKronTest::run() const
+{
+	return mult_kron(false, "qt7x7.mm", "v245.mm", "vcheck245.mm", 7, 5, 2);
+}
+
+bool MultKronSmallTransTest::run() const
+{
+	return mult_kron(true, "tr2x2.mm", "v2.mm", "vcheck2a.mm", 2, 1, 1);
+}
+
+bool MultKronTransTest::run() const
+{
+	return mult_kron(true, "qt7x7.mm", "v245.mm", "vcheck245a.mm", 7, 5, 2);
+}
+
+bool LevelKronTest::run() const
+{
+	return level_kron(false, "qt7x7.mm", "v1715.mm", "vcheck1715.mm", 2, 7, 5, 3);
+}
+
+bool LevelKronTransTest::run() const
+{
+	return level_kron(true, "qt7x7.mm", "v1715.mm", "vcheck1715a.mm", 2, 7, 5, 3);
+}
+
+bool LevelZeroKronTest::run() const
+{
+	return level_kron(false, "tr5x5.mm", "v1715.mm", "vcheck1715b.mm", 0, 7, 5, 3);
+}
+
+bool LevelZeroKronTransTest::run() const
+{
+	return level_kron(true, "tr5x5.mm", "v1715.mm", "vcheck1715c.mm", 0, 7, 5, 3);
+}
+
+bool KronPowerTest::run() const
+{
+	return kron_power("qt7x7.mm", "tr5x5.mm", "v1715.mm", "vcheck1715d.mm", 7, 5, 3);
+}
+
+bool SmallLinEvalTest::run() const
+{
+	return lin_eval("qt2x2.mm", "qt3x3.mm", "v24.mm", "vcheck24.mm", 2, 3, 2,
+					 2, 1, 3);
+}
+
+bool LinEvalTest::run() const
+{
+	return lin_eval("qt7x7.mm", "tr5x5.mm", "v490.mm", "vcheck490.mm", 7, 5, 2,
+					 2, 1, 3);
+}
+
+bool SmallQuaEvalTest::run() const
+{
+	return qua_eval("qt2x2.mm", "qt3x3.mm", "v24.mm", "vcheck24q.mm", 2, 3, 2,
+					 -0.5, 3, 2, 1, 3);
+}
+
+bool QuaEvalTest::run() const
+{
+	return qua_eval("qt7x7.mm", "tr5x5.mm", "v490.mm", "vcheck490q.mm", 7, 5, 2,
+					 -0.5, 3, 2, 1, 3);
+}
+
+bool TriSylvSmallRealTest::run() const
+{
+	return tri_sylv("tr2x2.mm", "qt3x3.mm", "v12r.mm", 2, 3, 2);
+}
+
+bool TriSylvSmallComplexTest::run() const
+{
+	return tri_sylv("qt2x2.mm", "qt3x3.mm", "v12r.mm", 2, 3, 2);
+}
+
+bool TriSylvTest::run() const
+{
+	return tri_sylv("qt7x7eig06-09.mm", "tr5x5.mm", "v245r.mm", 7, 5, 2);
+}
+
+bool TriSylvBigTest::run() const
+{
+	return tri_sylv("qt40x40.mm", "qt30x30eig011-095.mm", "v48000.mm", 40, 30, 2);
+}
+
+bool TriSylvLargeTest::run() const
+{
+	return tri_sylv("qt40x40.mm", "qt30x30eig011-095.mm", "v1920000.mm", 40, 30, 3);
+}
+
+bool IterSylvTest::run() const
+{
+	return iter_sylv("qt7x7eig06-09.mm", "qt5x5.mm", "v245r.mm", 7, 5, 2);
+}
+
+bool IterSylvLargeTest::run() const
+{
+	return iter_sylv("qt40x40.mm", "qt30x30eig011-095.mm", "v1920000.mm", 40, 30, 3);
+}
+
+bool GenSylvSmallTest::run() const
+{
+	return gen_sylv("a2x2.mm", "b2x1.mm", "c3x3.mm", "d2x9.mm", 3, 2, 2);
+}
+
+bool GenSylvTest::run() const
+{
+	return gen_sylv("a30x30.mm", "b30x25.mm", "c20x20.mm", "d30x400.mm", 20, 30, 2);
+}
+
+bool GenSylvSingTest::run() const
+{
+	return gen_sylv("a20x20.mm", "b20x4.mm", "c50x50sing.mm", "d20x125000.mm", 50, 20, 3);
+}
+
+bool GenSylvLargeTest::run() const
+{
+	return gen_sylv("a20x20.mm", "b20x15.mm", "c50x50.mm", "d20x125000.mm", 50, 20, 3);
+}
+
+bool EigBubFrankTest::run() const
+{
+	return eig_bubble("qt_frank12x12.mm", 8, 0);
+}
+
+bool EigBubSplitTest::run() const
+{
+	return eig_bubble("qt_eps3x3.mm",1,0);
+}
+
+bool EigBubSameTest::run() const
+{
+	return eig_bubble("qt5x5.mm",2,0);
+}
+
+bool BlockDiagSmallTest::run() const
+{
+	return block_diag("qt7x7.mm", 0.1);
+}
+
+bool BlockDiagFrankTest::run() const
+{
+	return block_diag("qt_frank12x12.mm", 5);
+}
+
+bool BlockDiagIllCondTest::run() const
+{
+	return block_diag("ill_cond15x15.mm", 4.14);
+}
+
+bool BlockDiagBigTest::run() const
+{
+	return block_diag("c50x50.mm", 1.3);
+}
+
+/**********************************************************/
+/*   main                                                 */
+/**********************************************************/
+
+int main()
+{
+	TestRunnable* all_tests[50];
+	// fill in vector of all tests
+	int num_tests = 0;
+	all_tests[num_tests++] = new PureTriangTest();
+	all_tests[num_tests++] = new PureTriangTransTest();
+	all_tests[num_tests++] = new PureTrLargeTest();
+	all_tests[num_tests++] = new PureTrLargeTransTest();
+	all_tests[num_tests++] = new QuasiTriangTest();
+	all_tests[num_tests++] = new QuasiTriangTransTest();
+	all_tests[num_tests++] = new QuasiTrLargeTest();
+	all_tests[num_tests++] = new QuasiTrLargeTransTest();
+	all_tests[num_tests++] = new QuasiZeroSmallTest();
+	all_tests[num_tests++] = new MultKronSmallTest();
+	all_tests[num_tests++] = new MultKronTest();
+	all_tests[num_tests++] = new MultKronSmallTransTest();
+	all_tests[num_tests++] = new MultKronTransTest();
+	all_tests[num_tests++] = new LevelKronTest();
+	all_tests[num_tests++] = new LevelKronTransTest();
+	all_tests[num_tests++] = new LevelZeroKronTest();
+	all_tests[num_tests++] = new LevelZeroKronTransTest();
+	all_tests[num_tests++] = new KronPowerTest();
+	all_tests[num_tests++] = new SmallLinEvalTest();
+	all_tests[num_tests++] = new LinEvalTest();
+	all_tests[num_tests++] = new SmallQuaEvalTest();
+	all_tests[num_tests++] = new QuaEvalTest();
+	all_tests[num_tests++] = new EigBubFrankTest();
+	all_tests[num_tests++] = new EigBubSplitTest();
+	all_tests[num_tests++] = new EigBubSameTest();
+	all_tests[num_tests++] = new BlockDiagSmallTest();
+	all_tests[num_tests++] = new BlockDiagFrankTest();
+	all_tests[num_tests++] = new BlockDiagIllCondTest();
+	all_tests[num_tests++] = new BlockDiagBigTest();
+	all_tests[num_tests++] = new TriSylvSmallRealTest();
+	all_tests[num_tests++] = new TriSylvSmallComplexTest();
+	all_tests[num_tests++] = new TriSylvTest();
+	all_tests[num_tests++] = new TriSylvBigTest();
+	all_tests[num_tests++] = new TriSylvLargeTest();
+	all_tests[num_tests++] = new IterSylvTest();
+	all_tests[num_tests++] = new IterSylvLargeTest();
+	all_tests[num_tests++] = new GenSylvSmallTest();
+	all_tests[num_tests++] = new GenSylvTest();
+	all_tests[num_tests++] = new GenSylvSingTest();
+	all_tests[num_tests++] = new GenSylvLargeTest();
+
+	// launch the tests
+	int success = 0;
+	for (int i = 0; i < num_tests; i++) {
+		try {
+			if (all_tests[i]->test())
+				success++;
+		} catch (const MMException& e) {
+			printf("Caught MM exception in <%s>:\n%s", all_tests[i]->getName(),
+				   e.getMessage());
+		} catch (SylvException& e) {
+			printf("Caught Sylv exception in %s:\n", all_tests[i]->getName());
+			e.printMessage();
+		}
+	}
+
+	printf("There were %d tests that failed out of %d tests run.\n",
+		   num_tests - success, num_tests);
+
+	// destroy
+	for (int i = 0; i < num_tests; i++) {
+		delete all_tests[i];
+	}
+
+	return 0;
+}
+
diff --git a/dynare++/tests/asset.mod b/dynare++/tests/asset.mod
new file mode 100644
index 0000000000000000000000000000000000000000..c9a5b9e2877ac6ec4b5ad3de84e35513582c315a
--- /dev/null
+++ b/dynare++/tests/asset.mod
@@ -0,0 +1,28 @@
+var y, x;
+varexo e;
+
+parameters theta, rho, bet, xbar;
+
+xbar = 0.0179;
+rho = -0.139;
+theta = -10;
+bet = 0.95;
+
+model;
+y = bet*exp(theta*x(+1))*(1+y(+1));
+x = (1-rho)*xbar + rho*x(-1) + e;
+end;
+
+initval;
+x = 0.0179;
+y = 0.3;
+e = 0;
+end;
+
+vcov = [ 0.0012110];
+
+order = 6;
+
+
+
+
diff --git a/dynare++/tests/c20.dyn b/dynare++/tests/c20.dyn
new file mode 100644
index 0000000000000000000000000000000000000000..a3b7bd1fabfa3dd28c68daaa7a51bac15898811f
--- /dev/null
+++ b/dynare++/tests/c20.dyn
@@ -0,0 +1,497 @@
+var C10_PIE C10_RR C10_RS C10_Y C11_PIE C11_RR C11_RS C11_Y C12_PIE C12_RR C12_RS C12_Y C13_PIE C13_RR C13_RS C13_Y C14_PIE C14_RR C14_RS C14_Y C15_PIE C15_RR C15_RS C15_Y C16_PIE C16_RR C16_RS C16_Y C17_PIE C17_RR C17_RS C17_Y C18_PIE C18_RR C18_RS C18_Y C19_PIE C19_RR C19_RS C19_Y C1_PIE C1_RR C1_RS C1_Y C20_PIE C20_RR C20_RS C20_Y C2_PIE C2_RR C2_RS C2_Y C3_PIE C3_RR C3_RS C3_Y C4_PIE C4_RR C4_RS C4_Y C5_PIE C5_RR C5_RS C5_Y C6_PIE C6_RR C6_RS C6_Y C7_PIE C7_RR C7_RS C7_Y C8_PIE C8_RR C8_RS C8_Y C9_PIE C9_RR C9_RS C9_Y;
+ 
+varexo C1_EPIE C1_EY C1_ERS C2_EPIE C2_EY C2_ERS C3_EPIE C3_EY C3_ERS C4_EPIE C4_EY C4_ERS C5_EPIE C5_EY C5_ERS C6_EPIE C6_EY C6_ERS C7_EPIE C7_EY C7_ERS C8_EPIE C8_EY C8_ERS C9_EPIE C9_EY C9_ERS C10_EPIE C10_EY C10_ERS C11_EPIE C11_EY C11_ERS C12_EPIE C12_EY C12_ERS C13_EPIE C13_EY C13_ERS C14_EPIE C14_EY C14_ERS C15_EPIE C15_EY C15_ERS C16_EPIE C16_EY C16_ERS C17_EPIE C17_EY C17_ERS C18_EPIE C18_EY C18_ERS C19_EPIE C19_EY C19_ERS C20_EPIE C20_EY C20_ERS;
+ 
+parameters C10_CALFA1 C10_CALFA2 C10_CALFA3 C10_CALFA4 C10_CALFA5 C10_CALFA6 C10_CALFA7 C10_CALFA8 C10_CALFA9 C11_CALFA1 C11_CALFA2 C11_CALFA3 C11_CALFA4 C11_CALFA5 C11_CALFA6 C11_CALFA7 C11_CALFA8 C11_CALFA9 C12_CALFA1 C12_CALFA2 C12_CALFA3 C12_CALFA4 C12_CALFA5 C12_CALFA6 C12_CALFA7 C12_CALFA8 C12_CALFA9 C13_CALFA1 C13_CALFA2 C13_CALFA3 C13_CALFA4 C13_CALFA5 C13_CALFA6 C13_CALFA7 C13_CALFA8 C13_CALFA9 C14_CALFA1 C14_CALFA2 C14_CALFA3 C14_CALFA4 C14_CALFA5 C14_CALFA6 C14_CALFA7 C14_CALFA8 C14_CALFA9 C15_CALFA1 C15_CALFA2 C15_CALFA3 C15_CALFA4 C15_CALFA5 C15_CALFA6 C15_CALFA7 C15_CALFA8 C15_CALFA9 C16_CALFA1 C16_CALFA2 C16_CALFA3 C16_CALFA4 C16_CALFA5 C16_CALFA6 C16_CALFA7 C16_CALFA8 C16_CALFA9 C17_CALFA1 C17_CALFA2 C17_CALFA3 C17_CALFA4 C17_CALFA5 C17_CALFA6 C17_CALFA7 C17_CALFA8 C17_CALFA9 C18_CALFA1 C18_CALFA2 C18_CALFA3 C18_CALFA4 C18_CALFA5 C18_CALFA6 C18_CALFA7 C18_CALFA8 C18_CALFA9 C19_CALFA1 C19_CALFA2 C19_CALFA3 C19_CALFA4 C19_CALFA5 C19_CALFA6 C19_CALFA7 C19_CALFA8 C19_CALFA9 C1_CALFA1 C1_CALFA2 C1_CALFA3 C1_CALFA4 C1_CALFA5 C1_CALFA6 C1_CALFA7 C1_CALFA8 C1_CALFA9 C20_CALFA1 C20_CALFA2 C20_CALFA3 C20_CALFA4 C20_CALFA5 C20_CALFA6 C20_CALFA7 C20_CALFA8 C20_CALFA9 C2_CALFA1 C2_CALFA2 C2_CALFA3 C2_CALFA4 C2_CALFA5 C2_CALFA6 C2_CALFA7 C2_CALFA8 C2_CALFA9 C3_CALFA1 C3_CALFA2 C3_CALFA3 C3_CALFA4 C3_CALFA5 C3_CALFA6 C3_CALFA7 C3_CALFA8 C3_CALFA9 C4_CALFA1 C4_CALFA2 C4_CALFA3 C4_CALFA4 C4_CALFA5 C4_CALFA6 C4_CALFA7 C4_CALFA8 C4_CALFA9 C5_CALFA1 C5_CALFA2 C5_CALFA3 C5_CALFA4 C5_CALFA5 C5_CALFA6 C5_CALFA7 C5_CALFA8 C5_CALFA9 C6_CALFA1 C6_CALFA2 C6_CALFA3 C6_CALFA4 C6_CALFA5 C6_CALFA6 C6_CALFA7 C6_CALFA8 C6_CALFA9 C7_CALFA1 C7_CALFA2 C7_CALFA3 C7_CALFA4 C7_CALFA5 C7_CALFA6 C7_CALFA7 C7_CALFA8 C7_CALFA9 C8_CALFA1 C8_CALFA2 C8_CALFA3 C8_CALFA4 C8_CALFA5 C8_CALFA6 C8_CALFA7 C8_CALFA8 C8_CALFA9 C9_CALFA1 C9_CALFA2 C9_CALFA3 C9_CALFA4 C9_CALFA5 C9_CALFA6 C9_CALFA7 C9_CALFA8 C9_CALFA9 C10_PIESTAR C11_PIESTAR C12_PIESTAR C13_PIESTAR C14_PIESTAR C15_PIESTAR C16_PIESTAR C17_PIESTAR C18_PIESTAR C19_PIESTAR C1_PIESTAR C20_PIESTAR C2_PIESTAR C3_PIESTAR C4_PIESTAR C5_PIESTAR C6_PIESTAR C7_PIESTAR C8_PIESTAR C9_PIESTAR;
+C10_CALFA1=0.5;
+C10_CALFA2=0.5;
+C10_CALFA3=0.5;
+C10_CALFA4=0;
+C10_CALFA5=0.75;
+C10_CALFA6=-0.25;
+C10_CALFA7=0.1;
+C10_CALFA8=0.5;
+C10_CALFA9=0.5;
+C11_CALFA1=0.5;
+C11_CALFA2=0.5;
+C11_CALFA3=0.5;
+C11_CALFA4=0;
+C11_CALFA5=0.75;
+C11_CALFA6=-0.25;
+C11_CALFA7=0.1;
+C11_CALFA8=0.5;
+C11_CALFA9=0.5;
+C12_CALFA1=0.5;
+C12_CALFA2=0.5;
+C12_CALFA3=0.5;
+C12_CALFA4=0;
+C12_CALFA5=0.75;
+C12_CALFA6=-0.25;
+C12_CALFA7=0.1;
+C12_CALFA8=0.5;
+C12_CALFA9=0.5;
+C13_CALFA1=0.5;
+C13_CALFA2=0.5;
+C13_CALFA3=0.5;
+C13_CALFA4=0;
+C13_CALFA5=0.75;
+C13_CALFA6=-0.25;
+C13_CALFA7=0.1;
+C13_CALFA8=0.5;
+C13_CALFA9=0.5;
+C14_CALFA1=0.5;
+C14_CALFA2=0.5;
+C14_CALFA3=0.5;
+C14_CALFA4=0;
+C14_CALFA5=0.75;
+C14_CALFA6=-0.25;
+C14_CALFA7=0.1;
+C14_CALFA8=0.5;
+C14_CALFA9=0.5;
+C15_CALFA1=0.5;
+C15_CALFA2=0.5;
+C15_CALFA3=0.5;
+C15_CALFA4=0;
+C15_CALFA5=0.75;
+C15_CALFA6=-0.25;
+C15_CALFA7=0.1;
+C15_CALFA8=0.5;
+C15_CALFA9=0.5;
+C16_CALFA1=0.5;
+C16_CALFA2=0.5;
+C16_CALFA3=0.5;
+C16_CALFA4=0;
+C16_CALFA5=0.75;
+C16_CALFA6=-0.25;
+C16_CALFA7=0.1;
+C16_CALFA8=0.5;
+C16_CALFA9=0.5;
+C17_CALFA1=0.5;
+C17_CALFA2=0.5;
+C17_CALFA3=0.5;
+C17_CALFA4=0;
+C17_CALFA5=0.75;
+C17_CALFA6=-0.25;
+C17_CALFA7=0.1;
+C17_CALFA8=0.5;
+C17_CALFA9=0.5;
+C18_CALFA1=0.5;
+C18_CALFA2=0.5;
+C18_CALFA3=0.5;
+C18_CALFA4=0;
+C18_CALFA5=0.75;
+C18_CALFA6=-0.25;
+C18_CALFA7=0.1;
+C18_CALFA8=0.5;
+C18_CALFA9=0.5;
+C19_CALFA1=0.5;
+C19_CALFA2=0.5;
+C19_CALFA3=0.5;
+C19_CALFA4=0;
+C19_CALFA5=0.75;
+C19_CALFA6=-0.25;
+C19_CALFA7=0.1;
+C19_CALFA8=0.5;
+C19_CALFA9=0.5;
+C1_CALFA1=0.5;
+C1_CALFA2=0.5;
+C1_CALFA3=0.5;
+C1_CALFA4=0;
+C1_CALFA5=0.75;
+C1_CALFA6=-0.25;
+C1_CALFA7=0.1;
+C1_CALFA8=0.5;
+C1_CALFA9=0.5;
+C20_CALFA1=0.5;
+C20_CALFA2=0.5;
+C20_CALFA3=0.5;
+C20_CALFA4=0;
+C20_CALFA5=0.75;
+C20_CALFA6=-0.25;
+C20_CALFA7=0.1;
+C20_CALFA8=0.5;
+C20_CALFA9=0.5;
+C2_CALFA1=0.5;
+C2_CALFA2=0.5;
+C2_CALFA3=0.5;
+C2_CALFA4=0;
+C2_CALFA5=0.75;
+C2_CALFA6=-0.25;
+C2_CALFA7=0.1;
+C2_CALFA8=0.5;
+C2_CALFA9=0.5;
+C3_CALFA1=0.5;
+C3_CALFA2=0.5;
+C3_CALFA3=0.5;
+C3_CALFA4=0;
+C3_CALFA5=0.75;
+C3_CALFA6=-0.25;
+C3_CALFA7=0.1;
+C3_CALFA8=0.5;
+C3_CALFA9=0.5;
+C4_CALFA1=0.5;
+C4_CALFA2=0.5;
+C4_CALFA3=0.5;
+C4_CALFA4=0;
+C4_CALFA5=0.75;
+C4_CALFA6=-0.25;
+C4_CALFA7=0.1;
+C4_CALFA8=0.5;
+C4_CALFA9=0.5;
+C5_CALFA1=0.5;
+C5_CALFA2=0.5;
+C5_CALFA3=0.5;
+C5_CALFA4=0;
+C5_CALFA5=0.75;
+C5_CALFA6=-0.25;
+C5_CALFA7=0.1;
+C5_CALFA8=0.5;
+C5_CALFA9=0.5;
+C6_CALFA1=0.5;
+C6_CALFA2=0.5;
+C6_CALFA3=0.5;
+C6_CALFA4=0;
+C6_CALFA5=0.75;
+C6_CALFA6=-0.25;
+C6_CALFA7=0.1;
+C6_CALFA8=0.5;
+C6_CALFA9=0.5;
+C7_CALFA1=0.5;
+C7_CALFA2=0.5;
+C7_CALFA3=0.5;
+C7_CALFA4=0;
+C7_CALFA5=0.75;
+C7_CALFA6=-0.25;
+C7_CALFA7=0.1;
+C7_CALFA8=0.5;
+C7_CALFA9=0.5;
+C8_CALFA1=0.5;
+C8_CALFA2=0.5;
+C8_CALFA3=0.5;
+C8_CALFA4=0;
+C8_CALFA5=0.75;
+C8_CALFA6=-0.25;
+C8_CALFA7=0.1;
+C8_CALFA8=0.5;
+C8_CALFA9=0.5;
+C9_CALFA1=0.5;
+C9_CALFA2=0.5;
+C9_CALFA3=0.5;
+C9_CALFA4=0;
+C9_CALFA5=0.75;
+C9_CALFA6=-0.25;
+C9_CALFA7=0.1;
+C9_CALFA8=0.5;
+C9_CALFA9=0.5;
+C10_PIESTAR=2.5;
+C11_PIESTAR=2.5;
+C12_PIESTAR=2.5;
+C13_PIESTAR=2.5;
+C14_PIESTAR=2.5;
+C15_PIESTAR=2.5;
+C16_PIESTAR=2.5;
+C17_PIESTAR=2.5;
+C18_PIESTAR=2.5;
+C19_PIESTAR=2.5;
+C1_PIESTAR=2.5;
+C20_PIESTAR=2.5;
+C2_PIESTAR=2.5;
+C3_PIESTAR=2.5;
+C4_PIESTAR=2.5;
+C5_PIESTAR=2.5;
+C6_PIESTAR=2.5;
+C7_PIESTAR=2.5;
+C8_PIESTAR=2.5;
+C9_PIESTAR=2.5;
+ 
+model; 
+          C1_PIE = C1_CALFA1*C1_PIE(1)+(1-C1_CALFA1)*C1_PIE(-1)+C1_CALFA2*(C1_Y+0.1)+C1_CALFA3*(C1_Y+0.1)^2/2+C1_EPIE ;
+          C1_Y = C1_CALFA4*C1_Y(1)+C1_CALFA5*C1_Y(-1)+C1_CALFA6*C1_RR+C1_EY+C1_CALFA7*(0+C2_Y+C3_Y+C4_Y+C5_Y+C6_Y+C7_Y+C8_Y+C9_Y+C10_Y+C11_Y+C12_Y+C13_Y+C14_Y+C15_Y+C16_Y+C17_Y+C18_Y+C19_Y+C20_Y)/19 ;
+          C1_RR = C1_RS-C1_PIE(1) ;
+          C1_RS = C1_PIE(1)+C1_CALFA8*(C1_PIE-C1_PIESTAR)+C1_CALFA9*C1_Y+C1_ERS ;
+          C2_PIE = C2_CALFA1*C2_PIE(1)+(1-C2_CALFA1)*C2_PIE(-1)+C2_CALFA2*(C2_Y+0.1)+C2_CALFA3*(C2_Y+0.1)^2/2+C2_EPIE ;
+          C2_Y = C2_CALFA4*C2_Y(1)+C2_CALFA5*C2_Y(-1)+C2_CALFA6*C2_RR+C2_EY+C2_CALFA7*(0+C1_Y+C3_Y+C4_Y+C5_Y+C6_Y+C7_Y+C8_Y+C9_Y+C10_Y+C11_Y+C12_Y+C13_Y+C14_Y+C15_Y+C16_Y+C17_Y+C18_Y+C19_Y+C20_Y)/19 ;
+          C2_RR = C2_RS-C2_PIE(1) ;
+          C2_RS = C2_PIE(1)+C2_CALFA8*(C2_PIE-C2_PIESTAR)+C2_CALFA9*C2_Y+C2_ERS ;
+          C3_PIE = C3_CALFA1*C3_PIE(1)+(1-C3_CALFA1)*C3_PIE(-1)+C3_CALFA2*(C3_Y+0.1)+C3_CALFA3*(C3_Y+0.1)^2/2+C3_EPIE ;
+          C3_Y = C3_CALFA4*C3_Y(1)+C3_CALFA5*C3_Y(-1)+C3_CALFA6*C3_RR+C3_EY+C3_CALFA7*(0+C1_Y+C2_Y+C4_Y+C5_Y+C6_Y+C7_Y+C8_Y+C9_Y+C10_Y+C11_Y+C12_Y+C13_Y+C14_Y+C15_Y+C16_Y+C17_Y+C18_Y+C19_Y+C20_Y)/19 ;
+          C3_RR = C3_RS-C3_PIE(1) ;
+          C3_RS = C3_PIE(1)+C3_CALFA8*(C3_PIE-C3_PIESTAR)+C3_CALFA9*C3_Y+C3_ERS ;
+          C4_PIE = C4_CALFA1*C4_PIE(1)+(1-C4_CALFA1)*C4_PIE(-1)+C4_CALFA2*(C4_Y+0.1)+C4_CALFA3*(C4_Y+0.1)^2/2+C4_EPIE ;
+          C4_Y = C4_CALFA4*C4_Y(1)+C4_CALFA5*C4_Y(-1)+C4_CALFA6*C4_RR+C4_EY+C4_CALFA7*(0+C1_Y+C2_Y+C3_Y+C5_Y+C6_Y+C7_Y+C8_Y+C9_Y+C10_Y+C11_Y+C12_Y+C13_Y+C14_Y+C15_Y+C16_Y+C17_Y+C18_Y+C19_Y+C20_Y)/19 ;
+          C4_RR = C4_RS-C4_PIE(1) ;
+          C4_RS = C4_PIE(1)+C4_CALFA8*(C4_PIE-C4_PIESTAR)+C4_CALFA9*C4_Y+C4_ERS ;
+          C5_PIE = C5_CALFA1*C5_PIE(1)+(1-C5_CALFA1)*C5_PIE(-1)+C5_CALFA2*(C5_Y+0.1)+C5_CALFA3*(C5_Y+0.1)^2/2+C5_EPIE ;
+          C5_Y = C5_CALFA4*C5_Y(1)+C5_CALFA5*C5_Y(-1)+C5_CALFA6*C5_RR+C5_EY+C5_CALFA7*(0+C1_Y+C2_Y+C3_Y+C4_Y+C6_Y+C7_Y+C8_Y+C9_Y+C10_Y+C11_Y+C12_Y+C13_Y+C14_Y+C15_Y+C16_Y+C17_Y+C18_Y+C19_Y+C20_Y)/19 ;
+          C5_RR = C5_RS-C5_PIE(1) ;
+          C5_RS = C5_PIE(1)+C5_CALFA8*(C5_PIE-C5_PIESTAR)+C5_CALFA9*C5_Y+C5_ERS ;
+          C6_PIE = C6_CALFA1*C6_PIE(1)+(1-C6_CALFA1)*C6_PIE(-1)+C6_CALFA2*(C6_Y+0.1)+C6_CALFA3*(C6_Y+0.1)^2/2+C6_EPIE ;
+          C6_Y = C6_CALFA4*C6_Y(1)+C6_CALFA5*C6_Y(-1)+C6_CALFA6*C6_RR+C6_EY+C6_CALFA7*(0+C1_Y+C2_Y+C3_Y+C4_Y+C5_Y+C7_Y+C8_Y+C9_Y+C10_Y+C11_Y+C12_Y+C13_Y+C14_Y+C15_Y+C16_Y+C17_Y+C18_Y+C19_Y+C20_Y)/19 ;
+          C6_RR = C6_RS-C6_PIE(1) ;
+          C6_RS = C6_PIE(1)+C6_CALFA8*(C6_PIE-C6_PIESTAR)+C6_CALFA9*C6_Y+C6_ERS ;
+          C7_PIE = C7_CALFA1*C7_PIE(1)+(1-C7_CALFA1)*C7_PIE(-1)+C7_CALFA2*(C7_Y+0.1)+C7_CALFA3*(C7_Y+0.1)^2/2+C7_EPIE ;
+          C7_Y = C7_CALFA4*C7_Y(1)+C7_CALFA5*C7_Y(-1)+C7_CALFA6*C7_RR+C7_EY+C7_CALFA7*(0+C1_Y+C2_Y+C3_Y+C4_Y+C5_Y+C6_Y+C8_Y+C9_Y+C10_Y+C11_Y+C12_Y+C13_Y+C14_Y+C15_Y+C16_Y+C17_Y+C18_Y+C19_Y+C20_Y)/19 ;
+          C7_RR = C7_RS-C7_PIE(1) ;
+          C7_RS = C7_PIE(1)+C7_CALFA8*(C7_PIE-C7_PIESTAR)+C7_CALFA9*C7_Y+C7_ERS ;
+          C8_PIE = C8_CALFA1*C8_PIE(1)+(1-C8_CALFA1)*C8_PIE(-1)+C8_CALFA2*(C8_Y+0.1)+C8_CALFA3*(C8_Y+0.1)^2/2+C8_EPIE ;
+          C8_Y = C8_CALFA4*C8_Y(1)+C8_CALFA5*C8_Y(-1)+C8_CALFA6*C8_RR+C8_EY+C8_CALFA7*(0+C1_Y+C2_Y+C3_Y+C4_Y+C5_Y+C6_Y+C7_Y+C9_Y+C10_Y+C11_Y+C12_Y+C13_Y+C14_Y+C15_Y+C16_Y+C17_Y+C18_Y+C19_Y+C20_Y)/19 ;
+          C8_RR = C8_RS-C8_PIE(1) ;
+          C8_RS = C8_PIE(1)+C8_CALFA8*(C8_PIE-C8_PIESTAR)+C8_CALFA9*C8_Y+C8_ERS ;
+          C9_PIE = C9_CALFA1*C9_PIE(1)+(1-C9_CALFA1)*C9_PIE(-1)+C9_CALFA2*(C9_Y+0.1)+C9_CALFA3*(C9_Y+0.1)^2/2+C9_EPIE ;
+          C9_Y = C9_CALFA4*C9_Y(1)+C9_CALFA5*C9_Y(-1)+C9_CALFA6*C9_RR+C9_EY+C9_CALFA7*(0+C1_Y+C2_Y+C3_Y+C4_Y+C5_Y+C6_Y+C7_Y+C8_Y+C10_Y+C11_Y+C12_Y+C13_Y+C14_Y+C15_Y+C16_Y+C17_Y+C18_Y+C19_Y+C20_Y)/19 ;
+          C9_RR = C9_RS-C9_PIE(1) ;
+          C9_RS = C9_PIE(1)+C9_CALFA8*(C9_PIE-C9_PIESTAR)+C9_CALFA9*C9_Y+C9_ERS ;
+          C10_PIE = C10_CALFA1*C10_PIE(1)+(1-C10_CALFA1)*C10_PIE(-1)+C10_CALFA2*(C10_Y+0.1)+C10_CALFA3*(C10_Y+0.1)^2/2+C10_EPIE ;
+          C10_Y = C10_CALFA4*C10_Y(1)+C10_CALFA5*C10_Y(-1)+C10_CALFA6*C10_RR+C10_EY+C10_CALFA7*(0+C1_Y+C2_Y+C3_Y+C4_Y+C5_Y+C6_Y+C7_Y+C8_Y+C9_Y+C11_Y+C12_Y+C13_Y+C14_Y+C15_Y+C16_Y+C17_Y+C18_Y+C19_Y+C20_Y)/19 ;
+          C10_RR = C10_RS-C10_PIE(1) ;
+          C10_RS = C10_PIE(1)+C10_CALFA8*(C10_PIE-C10_PIESTAR)+C10_CALFA9*C10_Y+C10_ERS ;
+          C11_PIE = C11_CALFA1*C11_PIE(1)+(1-C11_CALFA1)*C11_PIE(-1)+C11_CALFA2*(C11_Y+0.1)+C11_CALFA3*(C11_Y+0.1)^2/2+C11_EPIE ;
+          C11_Y = C11_CALFA4*C11_Y(1)+C11_CALFA5*C11_Y(-1)+C11_CALFA6*C11_RR+C11_EY+C11_CALFA7*(0+C1_Y+C2_Y+C3_Y+C4_Y+C5_Y+C6_Y+C7_Y+C8_Y+C9_Y+C10_Y+C12_Y+C13_Y+C14_Y+C15_Y+C16_Y+C17_Y+C18_Y+C19_Y+C20_Y)/19 ;
+          C11_RR = C11_RS-C11_PIE(1) ;
+          C11_RS = C11_PIE(1)+C11_CALFA8*(C11_PIE-C11_PIESTAR)+C11_CALFA9*C11_Y+C11_ERS ;
+          C12_PIE = C12_CALFA1*C12_PIE(1)+(1-C12_CALFA1)*C12_PIE(-1)+C12_CALFA2*(C12_Y+0.1)+C12_CALFA3*(C12_Y+0.1)^2/2+C12_EPIE ;
+          C12_Y = C12_CALFA4*C12_Y(1)+C12_CALFA5*C12_Y(-1)+C12_CALFA6*C12_RR+C12_EY+C12_CALFA7*(0+C1_Y+C2_Y+C3_Y+C4_Y+C5_Y+C6_Y+C7_Y+C8_Y+C9_Y+C10_Y+C11_Y+C13_Y+C14_Y+C15_Y+C16_Y+C17_Y+C18_Y+C19_Y+C20_Y)/19 ;
+          C12_RR = C12_RS-C12_PIE(1) ;
+          C12_RS = C12_PIE(1)+C12_CALFA8*(C12_PIE-C12_PIESTAR)+C12_CALFA9*C12_Y+C12_ERS ;
+          C13_PIE = C13_CALFA1*C13_PIE(1)+(1-C13_CALFA1)*C13_PIE(-1)+C13_CALFA2*(C13_Y+0.1)+C13_CALFA3*(C13_Y+0.1)^2/2+C13_EPIE ;
+          C13_Y = C13_CALFA4*C13_Y(1)+C13_CALFA5*C13_Y(-1)+C13_CALFA6*C13_RR+C13_EY+C13_CALFA7*(0+C1_Y+C2_Y+C3_Y+C4_Y+C5_Y+C6_Y+C7_Y+C8_Y+C9_Y+C10_Y+C11_Y+C12_Y+C14_Y+C15_Y+C16_Y+C17_Y+C18_Y+C19_Y+C20_Y)/19 ;
+          C13_RR = C13_RS-C13_PIE(1) ;
+          C13_RS = C13_PIE(1)+C13_CALFA8*(C13_PIE-C13_PIESTAR)+C13_CALFA9*C13_Y+C13_ERS ;
+          C14_PIE = C14_CALFA1*C14_PIE(1)+(1-C14_CALFA1)*C14_PIE(-1)+C14_CALFA2*(C14_Y+0.1)+C14_CALFA3*(C14_Y+0.1)^2/2+C14_EPIE ;
+          C14_Y = C14_CALFA4*C14_Y(1)+C14_CALFA5*C14_Y(-1)+C14_CALFA6*C14_RR+C14_EY+C14_CALFA7*(0+C1_Y+C2_Y+C3_Y+C4_Y+C5_Y+C6_Y+C7_Y+C8_Y+C9_Y+C10_Y+C11_Y+C12_Y+C13_Y+C15_Y+C16_Y+C17_Y+C18_Y+C19_Y+C20_Y)/19 ;
+          C14_RR = C14_RS-C14_PIE(1) ;
+          C14_RS = C14_PIE(1)+C14_CALFA8*(C14_PIE-C14_PIESTAR)+C14_CALFA9*C14_Y+C14_ERS ;
+          C15_PIE = C15_CALFA1*C15_PIE(1)+(1-C15_CALFA1)*C15_PIE(-1)+C15_CALFA2*(C15_Y+0.1)+C15_CALFA3*(C15_Y+0.1)^2/2+C15_EPIE ;
+          C15_Y = C15_CALFA4*C15_Y(1)+C15_CALFA5*C15_Y(-1)+C15_CALFA6*C15_RR+C15_EY+C15_CALFA7*(0+C1_Y+C2_Y+C3_Y+C4_Y+C5_Y+C6_Y+C7_Y+C8_Y+C9_Y+C10_Y+C11_Y+C12_Y+C13_Y+C14_Y+C16_Y+C17_Y+C18_Y+C19_Y+C20_Y)/19 ;
+          C15_RR = C15_RS-C15_PIE(1) ;
+          C15_RS = C15_PIE(1)+C15_CALFA8*(C15_PIE-C15_PIESTAR)+C15_CALFA9*C15_Y+C15_ERS ;
+          C16_PIE = C16_CALFA1*C16_PIE(1)+(1-C16_CALFA1)*C16_PIE(-1)+C16_CALFA2*(C16_Y+0.1)+C16_CALFA3*(C16_Y+0.1)^2/2+C16_EPIE ;
+          C16_Y = C16_CALFA4*C16_Y(1)+C16_CALFA5*C16_Y(-1)+C16_CALFA6*C16_RR+C16_EY+C16_CALFA7*(0+C1_Y+C2_Y+C3_Y+C4_Y+C5_Y+C6_Y+C7_Y+C8_Y+C9_Y+C10_Y+C11_Y+C12_Y+C13_Y+C14_Y+C15_Y+C17_Y+C18_Y+C19_Y+C20_Y)/19 ;
+          C16_RR = C16_RS-C16_PIE(1) ;
+          C16_RS = C16_PIE(1)+C16_CALFA8*(C16_PIE-C16_PIESTAR)+C16_CALFA9*C16_Y+C16_ERS ;
+          C17_PIE = C17_CALFA1*C17_PIE(1)+(1-C17_CALFA1)*C17_PIE(-1)+C17_CALFA2*(C17_Y+0.1)+C17_CALFA3*(C17_Y+0.1)^2/2+C17_EPIE ;
+          C17_Y = C17_CALFA4*C17_Y(1)+C17_CALFA5*C17_Y(-1)+C17_CALFA6*C17_RR+C17_EY+C17_CALFA7*(0+C1_Y+C2_Y+C3_Y+C4_Y+C5_Y+C6_Y+C7_Y+C8_Y+C9_Y+C10_Y+C11_Y+C12_Y+C13_Y+C14_Y+C15_Y+C16_Y+C18_Y+C19_Y+C20_Y)/19 ;
+          C17_RR = C17_RS-C17_PIE(1) ;
+          C17_RS = C17_PIE(1)+C17_CALFA8*(C17_PIE-C17_PIESTAR)+C17_CALFA9*C17_Y+C17_ERS ;
+          C18_PIE = C18_CALFA1*C18_PIE(1)+(1-C18_CALFA1)*C18_PIE(-1)+C18_CALFA2*(C18_Y+0.1)+C18_CALFA3*(C18_Y+0.1)^2/2+C18_EPIE ;
+          C18_Y = C18_CALFA4*C18_Y(1)+C18_CALFA5*C18_Y(-1)+C18_CALFA6*C18_RR+C18_EY+C18_CALFA7*(0+C1_Y+C2_Y+C3_Y+C4_Y+C5_Y+C6_Y+C7_Y+C8_Y+C9_Y+C10_Y+C11_Y+C12_Y+C13_Y+C14_Y+C15_Y+C16_Y+C17_Y+C19_Y+C20_Y)/19 ;
+          C18_RR = C18_RS-C18_PIE(1) ;
+          C18_RS = C18_PIE(1)+C18_CALFA8*(C18_PIE-C18_PIESTAR)+C18_CALFA9*C18_Y+C18_ERS ;
+          C19_PIE = C19_CALFA1*C19_PIE(1)+(1-C19_CALFA1)*C19_PIE(-1)+C19_CALFA2*(C19_Y+0.1)+C19_CALFA3*(C19_Y+0.1)^2/2+C19_EPIE ;
+          C19_Y = C19_CALFA4*C19_Y(1)+C19_CALFA5*C19_Y(-1)+C19_CALFA6*C19_RR+C19_EY+C19_CALFA7*(0+C1_Y+C2_Y+C3_Y+C4_Y+C5_Y+C6_Y+C7_Y+C8_Y+C9_Y+C10_Y+C11_Y+C12_Y+C13_Y+C14_Y+C15_Y+C16_Y+C17_Y+C18_Y+C20_Y)/19 ;
+          C19_RR = C19_RS-C19_PIE(1) ;
+          C19_RS = C19_PIE(1)+C19_CALFA8*(C19_PIE-C19_PIESTAR)+C19_CALFA9*C19_Y+C19_ERS ;
+          C20_PIE = C20_CALFA1*C20_PIE(1)+(1-C20_CALFA1)*C20_PIE(-1)+C20_CALFA2*(C20_Y+0.1)+C20_CALFA3*(C20_Y+0.1)^2/2+C20_EPIE ;
+          C20_Y = C20_CALFA4*C20_Y(1)+C20_CALFA5*C20_Y(-1)+C20_CALFA6*C20_RR+C20_EY+C20_CALFA7*(0+C1_Y+C2_Y+C3_Y+C4_Y+C5_Y+C6_Y+C7_Y+C8_Y+C9_Y+C10_Y+C11_Y+C12_Y+C13_Y+C14_Y+C15_Y+C16_Y+C17_Y+C18_Y+C19_Y)/19 ;
+          C20_RR = C20_RS-C20_PIE(1) ;
+          C20_RS = C20_PIE(1)+C20_CALFA8*(C20_PIE-C20_PIESTAR)+C20_CALFA9*C20_Y+C20_ERS ;
+end; 
+ 
+initval; 
+C10_PIE=2.5;
+C10_RR=0;
+C10_RS=2.5;
+C10_Y=0;
+C11_PIE=2.5;
+C11_RR=0;
+C11_RS=2.5;
+C11_Y=0;
+C12_PIE=2.5;
+C12_RR=0;
+C12_RS=2.5;
+C12_Y=0;
+C13_PIE=2.5;
+C13_RR=0;
+C13_RS=2.5;
+C13_Y=0;
+C14_PIE=2.5;
+C14_RR=0;
+C14_RS=2.5;
+C14_Y=0;
+C15_PIE=2.5;
+C15_RR=0;
+C15_RS=2.5;
+C15_Y=0;
+C16_PIE=2.5;
+C16_RR=0;
+C16_RS=2.5;
+C16_Y=0;
+C17_PIE=2.5;
+C17_RR=0;
+C17_RS=2.5;
+C17_Y=0;
+C18_PIE=2.5;
+C18_RR=0;
+C18_RS=2.5;
+C18_Y=0;
+C19_PIE=2.5;
+C19_RR=0;
+C19_RS=2.5;
+C19_Y=0;
+C1_PIE=2.5;
+C1_RR=0;
+C1_RS=2.5;
+C1_Y=0;
+C20_PIE=2.5;
+C20_RR=0;
+C20_RS=2.5;
+C20_Y=0;
+C2_PIE=2.5;
+C2_RR=0;
+C2_RS=2.5;
+C2_Y=0;
+C3_PIE=2.5;
+C3_RR=0;
+C3_RS=2.5;
+C3_Y=0;
+C4_PIE=2.5;
+C4_RR=0;
+C4_RS=2.5;
+C4_Y=0;
+C5_PIE=2.5;
+C5_RR=0;
+C5_RS=2.5;
+C5_Y=0;
+C6_PIE=2.5;
+C6_RR=0;
+C6_RS=2.5;
+C6_Y=0;
+C7_PIE=2.5;
+C7_RR=0;
+C7_RS=2.5;
+C7_Y=0;
+C8_PIE=2.5;
+C8_RR=0;
+C8_RS=2.5;
+C8_Y=0;
+C9_PIE=2.5;
+C9_RR=0;
+C9_RS=2.5;
+C9_Y=0;
+C1_EPIE=0;
+C1_EY=0;
+C1_ERS=0;
+C2_EPIE=0;
+C2_EY=0;
+C2_ERS=0;
+C3_EPIE=0;
+C3_EY=0;
+C3_ERS=0;
+C4_EPIE=0;
+C4_EY=0;
+C4_ERS=0;
+C5_EPIE=0;
+C5_EY=0;
+C5_ERS=0;
+C6_EPIE=0;
+C6_EY=0;
+C6_ERS=0;
+C7_EPIE=0;
+C7_EY=0;
+C7_ERS=0;
+C8_EPIE=0;
+C8_EY=0;
+C8_ERS=0;
+C9_EPIE=0;
+C9_EY=0;
+C9_ERS=0;
+C10_EPIE=0;
+C10_EY=0;
+C10_ERS=0;
+C11_EPIE=0;
+C11_EY=0;
+C11_ERS=0;
+C12_EPIE=0;
+C12_EY=0;
+C12_ERS=0;
+C13_EPIE=0;
+C13_EY=0;
+C13_ERS=0;
+C14_EPIE=0;
+C14_EY=0;
+C14_ERS=0;
+C15_EPIE=0;
+C15_EY=0;
+C15_ERS=0;
+C16_EPIE=0;
+C16_EY=0;
+C16_ERS=0;
+C17_EPIE=0;
+C17_EY=0;
+C17_ERS=0;
+C18_EPIE=0;
+C18_EY=0;
+C18_ERS=0;
+C19_EPIE=0;
+C19_EY=0;
+C19_ERS=0;
+C20_EPIE=0;
+C20_EY=0;
+C20_ERS=0;
+end; 
+
+vcov = [
+1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0; 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 
+];
+
+order = 1;
+
diff --git a/dynare++/tests/czech2.mod b/dynare++/tests/czech2.mod
new file mode 100644
index 0000000000000000000000000000000000000000..2e7adeb785fc5b17f19408c2226f34b086cec925
--- /dev/null
+++ b/dynare++/tests/czech2.mod
@@ -0,0 +1,712 @@
+var PIE4EU ZZ_PIE4EU ZZ_RNOMEU GDPGAPEU ZZ_DRNOMEU AF AH BIGGAMF BIGGAMH BIGGAMIMPF BIGGAMIMPH BIGGAMMF BIGGAMMH BIGGAMNF BIGGAMNH BIGGAMQF BIGGAMQH BIGGAM_MONF BIGGAM_MONH BIGGAM_MOTF BIGGAM_MOTH BIGGAM_O_NF BIGGAM_O_NH BIGGAM_O_TF BIGGAM_O_TH CAPAF CAPAH CF CF_NAT CH CH_NAT CURBALF_RAT CURBALH_RAT DEEF DEEH DEPEX EXPORTSF EXPORTSF_NAT EXPORTSH EXPORTSH_NAT EYEF EYEH GAF GAH GAMMAF GAMMAH GDPF GDPF_NAT GDPGAPF GDPGAPH GDPH GDPH_NAT GF_NAT GH_NAT GNF GNH HF HH IMPORTSF IMPORTSF_NAT IMPORTSH IMPORTSH_NAT KF KH KNF KNF_RAT KNH KNH_RAT KTF KTF_RAT KTH KTH_RAT K_OF K_OF_RAT K_OH K_OH_RAT LANDF LANDH LF LH LNF LNH LTF LTH L_OF L_OH MARGUTF MARGUTH MF MF_NAT MH MH_NAT M_ONF M_ONH M_OTF M_OTH NF NH NNF NNH O_NF O_NH O_TF O_TH PIE4F PIE4H PIE4TARF PIE4TARH PIEBARMF PIEBARMH PIEBARQF PIEBARQH PIEF PIEH PIENF PIENH PIEWF PIEWH PSIF PSIH PSIPRIMEF PSIPRIMEH QF QH Q_ONF Q_ONH Q_OTF Q_OTH REALBF REALBH REALEX REALEXF REALEXH REALFINH REALMCNF REALMCNH REALMCTF REALMCTH REALMONEYF REALMONEYH REALPBARMF REALPBARMH REALPBARQF REALPBARQH REALPLANDF REALPLANDH REALPMF REALPMH REALPNF REALPNH REALPQF REALPQH REALPXF REALPXH REALP_MOF REALP_MOH REALP_ONF REALP_ONH REALP_OTF REALP_OTH REALP_QOF REALP_QOH REALRF REALRH REALTBALF REALTBALH REALWF REALWH RNOMF RNOMH SHOPF SHOPH SHOPPRIMEF SHOPPRIMEH TF TH T_OF T_OH VELOF VELOH VPRIMEF VPRIMEH XF_NAT XH_NAT XI ZBF ZBH ZEYEF ZEYEH ZNF ZNH ZTF ZTH ZUF ZUH ZZ_CF ZZ_CH ZZ_CURBALF_RAT ZZ_CURBALH_RAT ZZ_C_SHARF ZZ_C_SHARH ZZ_DPIE4F ZZ_DPIE4H ZZ_DRNOMF ZZ_DRNOMH ZZ_EXPORTSF ZZ_EXPORTSH ZZ_EYEF ZZ_EYEH ZZ_EYE_SHARF ZZ_EYE_SHARH ZZ_GDPF ZZ_GDPH ZZ_GDP_PPP_RATH ZZ_GF ZZ_GH ZZ_IMPORTSF ZZ_IMPORTSH ZZ_M_ON_SHARF ZZ_M_ON_SHARH ZZ_M_OT_SHARF ZZ_M_OT_SHARH ZZ_M_O_SHARF ZZ_M_O_SHARH ZZ_M_SHARF ZZ_M_SHARH ZZ_N_SHARF ZZ_N_SHARH ZZ_PIE4F ZZ_PIE4H ZZ_REALEX ZZ_RNOMF ZZ_RNOMH ZZ_UTILITYF ZZ_UTILITYH ZZ_XBALF_TOT_RAT ZZ_XBALH_TOT_RAT Z_OF Z_OH PIEH_1 PIEH_2 PIEF_1 PIEF_2 REALBH_1 VALUE VALUE2;
+ 
+varexo E_ZBH E_ZUH E_ZUF E_ZEYEH E_ZEYEF E_GAMMAH E_GAMMAF E_LANDH E_LANDF E_GAH E_GAF E_CAPAH E_CAPAF;
+ 
+parameters COSTLF COSTLH EPSF EPSH EPSQMF EPSQMH GLAMBDAF GLAMBDAH SIGMAF SIGMAH SSF SSH XR1F XR1H XR2F XR2H XR3F XR3H XR4F XR4H ALPHANF ALPHANH ALPHATF ALPHATH ALPHA_OF ALPHA_OH A_ONEF A_ONEH A_ZEROF A_ZEROH B0F B0H B1F B1H B2F B2H BET CAPAF_SS CAPAH_SS CHI0 CHI1 CHI2 CHI3 COSTF COSTH COST_MONF COST_MONH COST_MOTF COST_MOTH COST_O_NF COST_O_NH COST_O_TF COST_O_TH DELTAF DELTAH EPS_ONF EPS_ONH EPS_OTF EPS_OTH ETAF ETAH E_PIE4TARF E_PIE4TARH E_ZBF GAMA_NF GAMA_NH GAMA_TF GAMA_TH GAMMAF_SS GAMMAH_SS GAMMA_LANDF GAMMA_LANDH GA_RATF GA_RATH GDPF_EXOG GDPH_EXOG GN_RATF GN_RATH LANDF_SS LANDH_SS LILCAPPA1F LILCAPPA1H LILCAPPA2F LILCAPPA2H LILCAPPA3F LILCAPPA3H LILCAPPA4F LILCAPPA4H LILCAPPA5F LILCAPPA5H LILCAPPA6F LILCAPPA6H LILCAPPA7F LILCAPPA7H LILCAPPA8F LILCAPPA8H NYF NYH NY_NF NY_NH NY_TF NY_TH OMEGA0F OMEGA0H OMEGAF OMEGAH PHIF PHIH PIE4TARF_SS PIE4TARH_SS REALEX_EXOG REALPBARMF_EXOG REALPBARMH_EXOG REALPNF_EXOG REALPNH_EXOG REALP_MOF_EXOG REALP_MOH_EXOG RNOM_EXOGF RNOM_EXOGH THETAF THETAH XDUMF XDUMH XIXI_NF XIXI_NH XIXI_OF XIXI_OH XIXI_TF XIXI_TH ZBF_SS ZBH_SS ZEDF ZEDH ZEYEF_SS ZEYEH_SS ZNF_SS ZNH_SS ZTF_SS ZTH_SS ZUF_SS ZUH_SS Z_OF_SS Z_OH_SS;
+
+COSTLF=5;
+COSTLH=5;
+EPSF=1.1;
+EPSH=1.1;
+EPSQMF=4;
+EPSQMH=1.1;
+GLAMBDAF=0;
+GLAMBDAH=0;
+SIGMAF=0.333333333333333;
+SIGMAH=0.333333333333333;
+SSF=0.95;
+SSH=0.05;
+XR1F=1;
+XR1H=1;
+XR2F=0;
+XR2H=0;
+XR3F=0.5;
+XR3H=0.5;
+XR4F=0;
+XR4H=0;
+ALPHANF=0.33;
+ALPHANH=0.4;
+ALPHATF=0.33;
+ALPHATH=0.4;
+ALPHA_OF=0.2;
+ALPHA_OH=0.2;
+A_ONEF=0.075;
+A_ONEH=0.075;
+A_ZEROF=0.011;
+A_ZEROH=0.011;
+B0F=1;
+B0H=1;
+B1F=0.95;
+B1H=0.95;
+B2F=1;
+B2H=1;
+BET=0.99263753614514;
+CAPAF_SS=11;
+CAPAH_SS=11;
+CHI0=0.05;
+CHI1=.1;
+CHI2=0;
+CHI3=0;
+COSTF=5;
+COSTH=5;
+COST_MONF=5;
+COST_MONH=5;
+COST_MOTF=5;
+COST_MOTH=5;
+COST_O_NF=5;
+COST_O_NH=5;
+COST_O_TF=5;
+COST_O_TH=5;
+DELTAF=0.025;
+DELTAH=0.025;
+EPS_ONF=3;
+EPS_ONH=3;
+EPS_OTF=3;
+EPS_OTH=3;
+ETAF=0.35;
+ETAH=0.2;
+E_PIE4TARF=0;
+E_PIE4TARH=0;
+E_ZBF=0;
+GAMA_NF=0.3;
+GAMA_NH=0.1;
+GAMA_TF=0.3;
+GAMA_TH=0.5;
+GAMMAF_SS=0.5;
+GAMMAH_SS=0.25;
+GAMMA_LANDF=0.1;
+GAMMA_LANDH=0.1;
+GA_RATF=0.05;
+GA_RATH=0.05;
+GDPF_EXOG=2.41500497257461;
+GDPH_EXOG=2.90512477822209;
+GN_RATF=0.1;
+GN_RATH=0.1;
+LANDF_SS=0.1;
+LANDH_SS=0.1;
+LILCAPPA1F=0;
+LILCAPPA1H=0;
+LILCAPPA2F=800;
+LILCAPPA2H=400;
+LILCAPPA3F=0;
+LILCAPPA3H=0;
+LILCAPPA4F=800;
+LILCAPPA4H=400;
+LILCAPPA5F=0;
+LILCAPPA5H=0;
+LILCAPPA6F=800;
+LILCAPPA6H=400;
+LILCAPPA7F=0;
+LILCAPPA7H=0;
+LILCAPPA8F=0;
+LILCAPPA8H=0;
+NYF=0.98;
+NYH=0.0499999999999999;
+NY_NF=0.98;
+NY_NH=0.0499999999999999;
+NY_TF=0.98;
+NY_TH=0.02;
+OMEGA0F=60;
+OMEGA0H=60;
+OMEGAF=5;
+OMEGAH=5;
+PHIF=3;
+PHIH=3;
+PIE4TARF_SS=1.125;
+PIE4TARH_SS=1.125;
+REALEX_EXOG=1.3734519289908;
+REALPBARMF_EXOG=0.87146958398196;
+REALPBARMH_EXOG=1.19072687148694;
+REALPNF_EXOG=0.840675522925242;
+REALPNH_EXOG=0.902486321747893;
+REALP_MOF_EXOG=0.966533486000563;
+REALP_MOH_EXOG=1.63690883121281;
+RNOM_EXOGF=1.00741707177773;
+RNOM_EXOGH=1.00741707177773;
+THETAF=6;
+THETAH=6;
+XDUMF=1;
+XDUMH=1;
+XIXI_NF=0.75;
+XIXI_NH=0.75;
+XIXI_OF=0.75;
+XIXI_OH=0.75;
+XIXI_TF=0.75;
+XIXI_TH=0.75;
+ZBF_SS=0;
+ZBH_SS=0;
+ZEDF=2.5;
+ZEDH=2.5;
+ZEYEF_SS=0;
+ZEYEH_SS=0;
+ZNF_SS=1;
+ZNH_SS=1;
+ZTF_SS=1;
+ZTH_SS=0.6;
+ZUF_SS=1;
+ZUH_SS=1;
+Z_OF_SS=1;
+Z_OH_SS=1;
+ 
+model; 
+PIE4EU = SSH*PIE4H+(1-SSH)*PIE4F;
+ZZ_PIE4EU = SSH*ZZ_PIE4H+(1-SSH)*ZZ_PIE4F;
+ZZ_RNOMEU = SSH*ZZ_RNOMH+(1-SSH)*ZZ_RNOMF;
+
+ZZ_DRNOMEU = SSH*ZZ_DRNOMH+(1-SSH)*ZZ_DRNOMF;
+GDPGAPEU = SSH*GDPGAPH+(1-SSH)*GDPGAPF;
+
+          ZZ_UTILITYH = (ZUH*(CH-HH)^(1-SIGMAH)-1)/(1-SIGMAH)-CAPAH*LH^(1+ZEDH)/(1+ZEDH) ;
+          ZZ_GH = 100*log(GH_NAT) ;
+          ZZ_CURBALH_RAT = CURBALH_RAT*100 ;
+          ZZ_M_SHARH = REALPBARMH*MH/AH ;
+          ZZ_M_O_SHARH = (REALP_MOH*M_ONH+REALP_MOH*M_OTH)/AH ;
+          ZZ_M_ON_SHARH = REALP_MOH*M_ONH/AH ;
+          ZZ_M_OT_SHARH = REALP_MOH*M_OTH/AH ;
+          ZZ_N_SHARH = NH*REALPNH/AH ;
+          ZZ_EYE_SHARH = EYEH/GDPH ;
+          ZZ_C_SHARH = CH/GDPH ;
+          ZZ_GDPH = 100*log(GDPH_NAT) ;
+          ZZ_CH = 100*log(CH_NAT) ;
+          ZZ_EYEH = 100*log(EYEH) ;
+          ZZ_EXPORTSH = 100*log(EXPORTSH_NAT) ;
+          ZZ_IMPORTSH = 100*log(IMPORTSH_NAT) ;
+          ZZ_XBALH_TOT_RAT = 100*(EXPORTSH_NAT-IMPORTSH_NAT)/GDPH_NAT ;
+          ZZ_PIE4H = 100*(PIE4H-1) ;
+          ZZ_DPIE4H = ZZ_PIE4H-ZZ_PIE4H(-1) ;
+          ZZ_RNOMH = 100*(RNOMH^4-1) ;
+          ZZ_DRNOMH = ZZ_RNOMH-ZZ_RNOMH(-1) ;
+          100*(PIE4TARH-1) = 1*100*(PIE4TARH_SS-1)+(1-1)*100*(PIE4TARH(-1)-1)+E_PIE4TARH ;
+          log(ZUH) = 0.3*log(ZUH_SS)+0.7*log(ZUH(-1))+E_ZUH ;
+          ZBH = 0.3*ZBH_SS+0.7*ZBH(-1)+E_ZBH ;
+          log(LANDH) = 0.05*log(LANDH_SS)+0.95*log(LANDH(-1))+E_LANDH ;
+          log(ZTH) = 0.05*log(ZTH_SS)+0.95*log(ZTH(-1))+E_LANDH ;
+          log(ZNH) = 0.05*log(ZNH_SS)+0.95*log(ZNH(-1))+E_LANDH ;
+          log(Z_OH) = 0.05*log(Z_OH_SS)+0.95*log(Z_OH(-1))+E_LANDH ;
+          ZEYEH = 0.05*ZEYEH_SS+0.95*ZEYEH(-1)+E_ZEYEH ;
+          CAPAH = 0.05*CAPAH_SS+0.95*CAPAH(-1)+E_CAPAH ;
+          log(GAMMAH) = 0.05*log(GAMMAH_SS)+0.95*log(GAMMAH(-1))+E_GAMMAH ;
+          BIGGAM_O_NH = 1-COST_O_NH/2*(O_NH/NH/(O_NH(-1)/NH(-1))-1)^2 ;
+          BIGGAM_O_TH = 1-COST_O_TH/2*(O_TH/TH/(O_TH(-1)/TH(-1))-1)^2 ;
+          O_NH = GAMA_NH*NH/ZNH*(REALP_ONH/(REALMCNH*ZNH))^(-XIXI_NH)*(BIGGAM_O_NH-COST_O_NH*(O_NH/NH/(O_NH(-1)/NH(-1))-1)*O_NH/NH/(O_NH(-1)/NH(-1)))^XIXI_NH/BIGGAM_O_NH ;
+          O_TH = GAMA_TH*TH/ZTH*(REALP_OTH/(REALMCTH*ZTH))^(-XIXI_TH)*(BIGGAM_O_TH-COST_O_TH*(O_TH/TH/(O_TH(-1)/TH(-1))-1)*O_TH/TH/(O_TH(-1)/TH(-1)))^XIXI_NH/BIGGAM_O_TH ;
+          NH = ZNH*((1-ALPHANH-GAMA_NH)^(1/XIXI_NH)*LNH^(1-1/XIXI_NH)+ALPHANH^(1/XIXI_NH)*KNH^(1-1/XIXI_NH)+GAMA_NH^(1/XIXI_NH)*(BIGGAM_O_NH*O_NH)^(1-1/XIXI_NH))^(XIXI_NH/(XIXI_NH-1)) ;
+          TH = ZTH*((1-ALPHATH-GAMA_TH)^(1/XIXI_TH)*LTH^(1-1/XIXI_TH)+ALPHATH^(1/XIXI_TH)*KTH^(1-1/XIXI_TH)+GAMA_TH^(1/XIXI_TH)*(BIGGAM_O_TH*O_TH)^(1-1/XIXI_TH))^(XIXI_TH/(XIXI_TH-1)) ;
+          REALMCNH = 1/ZNH*((1-ALPHANH-GAMA_NH)*REALWH^(1-XIXI_NH)+ALPHANH*REALRH^(1-XIXI_NH)+GAMA_NH*REALP_ONH^(1-XIXI_NH)*(BIGGAM_O_NH-COST_O_NH*(O_NH/NH/(O_NH(-1)/NH(-1))-1)*O_NH/NH/(O_NH(-1)/NH(-1)))^(XIXI_NH-1))^(1/(1-XIXI_NH)) ;
+          REALMCTH = 1/ZTH*((1-ALPHATH-GAMA_TH)*REALWH^(1-XIXI_TH)+ALPHATH*REALRH^(1-XIXI_TH)+GAMA_TH*REALP_OTH^(1-XIXI_TH)*(BIGGAM_O_TH-COST_O_TH*(O_TH/TH/(O_TH(-1)/TH(-1))-1)*O_TH/TH/(O_TH(-1)/TH(-1)))^(XIXI_TH-1))^(1/(1-XIXI_TH)) ;
+          MARGUTH = (CH-B2H*HH)^(-SIGMAH)*ZUH ;
+          HH = (1-B0H)*HH(-1)+B0H*B1H*CH(-1) ;
+          VPRIMEH = CAPAH*LH^ZEDH ;
+          AH^(1-1/EPSH) = GAMMAH^(1/EPSH)*(NYH^(1/EPSQMH)*QH^(1-1/EPSQMH)+(1-NYH)^(1/EPSQMH)*(MH*BIGGAMIMPH)^(1-1/EPSQMH))^(EPSQMH/(EPSQMH-1)*(1-1/EPSH))+(1-GAMMAH)^(1/EPSH)*NNH^(1-1/EPSH) ;
+          QH = GAMMAH*NYH*REALPQH^(-EPSQMH)*AH*REALPXH^(EPSQMH-EPSH) ;
+          MH = GAMMAH*(1-NYH)*REALPMH^(-EPSQMH)*AH*REALPXH^(EPSQMH-EPSH)*1/BIGGAMIMPH*(BIGGAMIMPH-COSTH*(MH/AH/(MH(-1)/AH(-1))-1)*MH/AH/(MH(-1)/AH(-1)))^EPSQMH ;
+          REALPXH = (NYH*REALPQH^(1-EPSQMH)+(1-NYH)*REALPMH^(1-EPSQMH)*(BIGGAMIMPH-COSTH*(MH/AH/(MH(-1)/AH(-1))-1)*MH/AH/(MH(-1)/AH(-1)))^(EPSQMH-1))^(1/(1-EPSQMH)) ;
+          BIGGAMIMPH = 1-COSTH/2*(MH/AH/(MH(-1)/AH(-1))-1)^2 ;
+          NNH = (1-GAMMAH)*REALPNH^(-EPSH)*AH ;
+          NH = NNH+ETAH*MH+ETAH*QH+GNH ;
+          PIENH = REALPNH/REALPNH(-1)*PIEH ;
+          BIGGAMNH = LILCAPPA3H/2*(PIENH/PIE4TARH^0.25-1)^2+LILCAPPA4H/2*(PIENH/PIENH(-1)-1)^2 ;
+          -(1-BIGGAMNH)*(REALPNH*(1-THETAH)+THETAH*REALMCNH) = -(REALPNH-REALMCNH)*(LILCAPPA3H*PIENH/PIE4TARH^0.25*(PIENH/PIE4TARH^0.25-1)+LILCAPPA4H*PIENH/PIENH(-1)*(PIENH/PIENH(-1)-1))+DEEH*PIEH(+1)*(REALPNH(+1)-REALMCNH(+1))*NH(+1)/NH*(LILCAPPA3H*PIENH(+1)/PIE4TARH^0.25*(PIENH(+1)/PIE4TARH^0.25-1)+LILCAPPA4H*PIENH(+1)/PIENH*(PIENH(+1)/PIENH-1)) ;
+          PIEBARQH = PIEH*REALPBARQH/REALPBARQH(-1) ;
+          BIGGAMQH = LILCAPPA5H/2*(PIEBARQH/PIE4TARH^0.25-1)^2+LILCAPPA6H/2*(PIEBARQH/PIEBARQH(-1)-1)^2 ;
+          -(1-BIGGAMQH)*REALPBARQH/REALPQH*(REALPBARQH*(1-THETAH)+ETAH*REALPNH+THETAH*REALMCTH) = -(REALPBARQH-REALMCTH)*(LILCAPPA5H*PIEBARQH/PIE4TARH^0.25*(PIEBARQH/PIE4TARH^0.25-1)+LILCAPPA6H*PIEBARQH/PIEBARQH(-1)*(PIEBARQH/PIEBARQH(-1)-1))+DEEH*PIEH(+1)*(REALPBARQH(+1)-REALMCTH(+1))*QH(+1)/QH*(LILCAPPA5H*PIEBARQH(+1)/PIE4TARH^0.25*(PIEBARQH(+1)/PIE4TARH^0.25-1)+LILCAPPA6H*PIEBARQH(+1)/PIEBARQH*(PIEBARQH(+1)/PIEBARQH-1)) ;
+          REALPQH = REALPBARQH+ETAH*REALPNH ;
+          KH = KH(-1)*(1-DELTAH)+PSIH(-1)*KH(-1) ;
+          PSIH = EYEH/KH-OMEGAH/2*(EYEH/KH-DELTAH*(1+ZEYEH))^2-OMEGA0H/2*(EYEH/KH-EYEH(-1)/KH(-1))^2 ;
+          PSIPRIMEH = 1-OMEGAH*(EYEH/KH-DELTAH*(1+ZEYEH))-OMEGA0H*(EYEH/KH-EYEH(-1)/KH(-1)) ;
+          1/PSIPRIMEH = DEEH*PIEH(+1)*(REALRH(+1)+1/PSIPRIMEH(+1)*(1-DELTAH+PSIH(+1)*(1-PSIPRIMEH(+1)*EYEH(+1)/(PSIH(+1)*KH(+1))))) ;
+          BIGGAMH = LILCAPPA1H/2*(PIEWH/PIE4TARH^0.25-1)^2+LILCAPPA2H/2*(PIEWH/PIEWH(-1)-1)^2 ;
+          PIEH*REALWH/REALWH(-1) = PIEWH ;
+          REALWH = PHIH*VPRIMEH/MARGUTH*((PHIH-1)*(1-BIGGAMH)+PIEWH*LILCAPPA1H/PIE4TARH^0.25*(PIEWH/PIE4TARH^0.25-1)+PIEWH/PIEWH(-1)*LILCAPPA2H*(PIEWH/PIEWH(-1)-1)-DEEH*PIEWH(+1)*LH(+1)/LH*LILCAPPA1H*PIEWH(+1)/PIE4TARH^0.25*(PIEWH(+1)/PIE4TARH^0.25-1)-DEEH*PIEWH(+1)*LH(+1)/LH*LILCAPPA2H*PIEWH(+1)/(REALWH/REALWH(-1))*(PIEWH(+1)/PIEWH-1))^(-1) ;
+          DEEH = BET*MARGUTH(+1)/PIEH(+1)/MARGUTH*(1+SHOPH+SHOPPRIMEH*VELOH)/(1+SHOPH(+1)+SHOPPRIMEH(+1)*VELOH(+1)) ;
+          SHOPH = A_ZEROH*VELOH+A_ONEH/VELOH-2*(A_ZEROH*A_ONEH)^0.5 ;
+          SHOPPRIMEH = A_ZEROH-A_ONEH*VELOH^(-2) ;
+          VELOH = CH/REALMONEYH ;
+          DEEH = 1-SHOPPRIMEH*VELOH^2 ;
+          1 = RNOMH*DEEH ;
+/*
+          100*(RNOMH^4-1) = (1-XDUMH)*100*(RNOM_EXOGH^4-1)+XDUMH*(XR3H*100*(RNOMH(-1)^4-1)+(1-XR3H)*(100*((1/BET*PIE4H^0.25)^4-1))+XR1H*(100*(PIE4H-1)-100*(PIE4TARH-1))+XR4H*100*(DEPEX^4-1)+XR2H*GDPGAPH) ;
+*/
+ 100*(RNOMH^4-1) = 100*(RNOMH(-1)^4-1)+1000*100*(DEPEX^4-1);
+         GDPGAPH = 100*(GDPH_NAT-GDPH_EXOG)/GDPH_EXOG ;
+          PIE4H = PIEH*PIEH(-1)*PIEH_1(-1)*PIEH_2(-1) ;
+          AH = CH*(1+SHOPH)+EYEH+GAH ;
+          GAH = .05*(GA_RATH*(GLAMBDAH*GDPH_NAT+(1-GLAMBDAH)*GDPH_EXOG))+.95*GAH(-1)+E_GAH;
+          GNH = GN_RATH*(GLAMBDAH*GDPH_NAT+(1-GLAMBDAH)*GDPH_EXOG)/REALPNH_EXOG ;
+          PIEBARMH = PIEH*REALPBARMH/REALPBARMH(-1) ;
+          BIGGAMMH = LILCAPPA7H/2*(PIEBARMH/PIE4TARH^0.25-1)^2+LILCAPPA8H/2*(PIEBARMH/PIEBARMH(-1)-1)^2 ;
+          REALPMH = REALPBARMH+ETAH*REALPNH ;
+          KNH_RAT = ALPHANH/(1-ALPHANH-GAMA_NH)*(REALWH/REALRH)^XIXI_NH ;
+          KTH_RAT = ALPHATH/(1-ALPHATH-GAMA_TH)*(REALWH/REALRH)^XIXI_TH ;
+          KNH_RAT = KNH/LNH ;
+          KTH_RAT = KTH/LTH ;
+          KH = KTH+KNH+K_OH ;
+          LH = (LNH+LTH+L_OH)*(1-COSTLH/2*(LNH/(LTH+L_OH)/(LNH(-1)/(LTH(-1)+L_OH(-1)))-1)^2) ;
+          T_OH = Z_OH*((1-ALPHA_OH-GAMMA_LANDH)^(1/XIXI_OH)*L_OH^(1-1/XIXI_OH)+ALPHA_OH^(1/XIXI_OH)*K_OH^(1-1/XIXI_OH)+GAMMA_LANDH^(1/XIXI_OH)*LANDH^(1-1/XIXI_OH))^(XIXI_OH/(XIXI_OH-1)) ;
+          Q_ONH = NY_NH*(REALP_QOH/REALP_ONH)^(-EPS_ONH)*O_NH ;
+          Q_OTH = NY_TH*(REALP_QOH/REALP_OTH)^(-EPS_OTH)*O_TH ;
+          M_ONH = (1-NY_NH)*(REALP_MOH/REALP_ONH)^(-EPS_ONH)*O_NH*(BIGGAM_MONH-COST_MONH*(M_ONH/O_NH/(M_ONH(-1)/O_NH(-1))-1)*M_ONH/O_NH/(M_ONH(-1)/O_NH(-1)))^EPS_ONH/BIGGAM_MONH ;
+          M_OTH = (1-NY_TH)*(REALP_MOH/REALP_OTH)^(-EPS_OTH)*O_TH*(BIGGAM_MOTH-COST_MOTH*(M_OTH/O_TH/(M_OTH(-1)/O_TH(-1))-1)*M_OTH/O_TH/(M_OTH(-1)/O_TH(-1)))^EPS_OTH/BIGGAM_MOTH ;
+          BIGGAM_MONH = 1-COST_MONH/2*(M_ONH/O_NH/(M_ONH(-1)/O_NH(-1))-1)^2 ;
+          BIGGAM_MOTH = 1-COST_MOTH/2*(M_OTH/O_TH/(M_OTH(-1)/O_TH(-1))-1)^2 ;
+          K_OH_RAT = ALPHA_OH/(1-ALPHA_OH-GAMMA_LANDH)*(REALWH/REALRH)^XIXI_OH ;
+          K_OH_RAT = K_OH/L_OH ;
+          REALP_QOH = 1/Z_OH*((1-ALPHA_OH-GAMMA_LANDH)*REALWH^(1-XIXI_OH)+ALPHA_OH*REALRH^(1-XIXI_OH)+GAMMA_LANDH*REALPLANDH^(1-XIXI_OH))^(1/(1-XIXI_OH)) ;
+          LANDH = GAMMA_LANDH*(REALPLANDH/(REALP_QOH*Z_OH))^(-XIXI_OH)*T_OH/Z_OH ;
+          REALP_ONH = (NY_NH*REALP_QOH^(1-EPS_ONH)+(1-NY_NH)*REALP_MOH^(1-EPS_ONH)*(BIGGAM_MONH-COST_MONH*(M_ONH/O_NH/(M_ONH(-1)/O_NH(-1))-1)*M_ONH/O_NH/(M_ONH(-1)/O_NH(-1)))^(EPS_ONH-1))^(1/(1-EPS_ONH)) ;
+          REALP_OTH = (NY_TH*REALP_QOH^(1-EPS_OTH)+(1-NY_TH)*REALP_MOH^(1-EPS_OTH)*(BIGGAM_MOTH-COST_MOTH*(M_OTH/O_TH/(M_OTH(-1)/O_TH(-1))-1)*M_OTH/O_TH/(M_OTH(-1)/O_TH(-1)))^(EPS_OTH-1))^(1/(1-EPS_OTH)) ;
+          SSH*TH = SSH*QH+SSF*MF ;
+          SSH*T_OH = SSH*Q_ONH+SSH*Q_OTH+SSF*M_ONF+SSF*M_OTF ;
+          REALP_MOH = REALP_QOF*REALEXH ;
+          ZZ_GDP_PPP_RATH = GDPH/REALEX/GDPF ;
+          XI = CHI0*(exp(CHI1*REALEX*REALBH)+CHI2*(REALEX*(REALBH-REALBH(-1)/PIEF)/GDPH)^2+CHI3*(REALEX*(REALBH-REALBH(-1)/PIEF)/GDPH-REALEX(-1)*(REALBH(-1)-REALBH_1(-1)/PIEF(-1))/GDPH(-1))^2-1)/(exp(CHI1*REALEX*REALBH)+CHI2*(REALEX*(REALBH-REALBH(-1)/PIEF)/GDPH)^2+CHI3*(REALEX*(REALBH-REALBH(-1)/PIEF)/GDPH-REALEX(-1)*(REALBH(-1)-REALBH_1(-1)/PIEF(-1))/GDPH(-1))^2+1)+ZBH ;
+          1 = RNOMF*(1-XI)*DEEH*DEPEX(+1) ;
+          DEPEX = PIEH/PIEF*REALEX/REALEX(-1) ;
+          REALFINH = RNOMF(-1)*(1-XI(-1))*REALEX*REALBH(-1)/PIEF ;
+          SSH*DEEH*PIEH(+1)*REALFINH(+1) = SSH*REALFINH+SSH*RNOMF(-1)*XI(-1)*REALEX*REALBH(-1)/PIEF+REALTBALH ;
+          REALEXH = REALEX ;
+          REALEXF = 1/REALEXH ;
+          ZZ_REALEX = 100*log(REALEX) ;
+          -(1-BIGGAMMH)*REALPBARMH/REALPMH*(REALPBARMH/REALEX*(1-THETAF)+ETAH*REALPNH/REALEX+THETAF*REALMCTF) = -(REALPBARMH/REALEX-REALMCTF)*(LILCAPPA7H*PIEBARMH/PIE4TARH^0.25*(PIEBARMH/PIE4TARH^0.25-1)+LILCAPPA8H*PIEBARMH/PIEBARMH(-1)*(PIEBARMH/PIEBARMH(-1)-1))+DEEF*PIEF(+1)*(REALPBARMH(+1)/REALEX(+1)-REALMCTF(+1))*MH(+1)/MH*(LILCAPPA7H*PIEBARMH(+1)/PIE4TARH^0.25*(PIEBARMH(+1)/PIE4TARH^0.25-1)+LILCAPPA8H*PIEBARMH(+1)/PIEBARMH*(PIEBARMH(+1)/PIEBARMH-1)) ;
+          GDPH = AH+REALPNH*GNH+EXPORTSH-IMPORTSH+(RNOMF(-1)-1)*REALEX*REALBH(-1)/PIEF ;
+          GDPH_NAT = AH+REALPNH_EXOG*GNH+EXPORTSH_NAT-IMPORTSH_NAT ;
+          CH_NAT = CH*(1+SHOPH) ;
+          GH_NAT = GAH+REALPNH_EXOG*GNH ;
+          XH_NAT = SSF/SSH*REALEX_EXOG*REALPBARMF_EXOG*MF ;
+          MH_NAT = REALPBARMH_EXOG*MH ;
+          CURBALH_RAT = REALEX*(REALBH-REALBH(-1)/PIEF)/GDPH ;
+          REALTBALH = SSF*(REALPBARMF*MF+REALP_MOF*M_ONF+REALP_MOF*M_OTF)*REALEX-SSH*(REALPBARMH*MH+REALP_MOH*M_ONH+REALP_MOH*M_OTH) ;
+          EXPORTSH = SSF/SSH*(REALPBARMF*MF+REALP_MOF*M_ONF+REALP_MOF*M_OTF)*REALEX ;
+          IMPORTSH = REALPBARMH*MH+REALP_MOH*M_ONH+REALP_MOH*M_OTH ;
+          EXPORTSH_NAT = SSF/SSH*(REALPBARMF_EXOG*MF+REALP_MOF_EXOG*M_ONF+REALP_MOF_EXOG*M_OTF)*REALEX_EXOG ;
+          IMPORTSH_NAT = REALPBARMH_EXOG*MH+REALP_MOH_EXOG*M_ONH+REALP_MOH_EXOG*M_OTH ;
+          ZZ_UTILITYF = (ZUF*(CF-HF)^(1-SIGMAF)-1)/(1-SIGMAF)-CAPAF*LF^(1+ZEDF)/(1+ZEDF) ;
+          ZZ_GF = 100*log(GF_NAT) ;
+          ZZ_CURBALF_RAT = CURBALF_RAT*100 ;
+          ZZ_M_SHARF = REALPBARMF*MF/AF ;
+          ZZ_M_O_SHARF = (REALP_MOF*M_ONF+REALP_MOF*M_OTF)/AF ;
+          ZZ_M_ON_SHARF = REALP_MOF*M_ONF/AF ;
+          ZZ_M_OT_SHARF = REALP_MOF*M_OTF/AF ;
+          ZZ_N_SHARF = NF*REALPNF/AF ;
+          ZZ_EYE_SHARF = EYEF/GDPF ;
+          ZZ_C_SHARF = CF/GDPF ;
+          ZZ_GDPF = 100*log(GDPF_NAT) ;
+          ZZ_CF = 100*log(CF_NAT) ;
+          ZZ_EYEF = 100*log(EYEF) ;
+          ZZ_EXPORTSF = 100*log(EXPORTSF_NAT) ;
+          ZZ_IMPORTSF = 100*log(IMPORTSF_NAT) ;
+          ZZ_XBALF_TOT_RAT = 100*(EXPORTSF_NAT-IMPORTSF_NAT)/GDPF_NAT ;
+          ZZ_PIE4F = 100*(PIE4F-1) ;
+          ZZ_DPIE4F = ZZ_PIE4F-ZZ_PIE4F(-1) ;
+          ZZ_RNOMF = 100*(RNOMF^4-1) ;
+          ZZ_DRNOMF = ZZ_RNOMF-ZZ_RNOMF(-1) ;
+          100*(PIE4TARF-1) = 1*100*(PIE4TARF_SS-1)+(1-1)*100*(PIE4TARF(-1)-1)+E_PIE4TARF ;
+          log(ZUF) = 0.3*log(ZUF_SS)+0.7*log(ZUF(-1))+E_ZUF ;
+          ZBF = 0.3*ZBF_SS+0.7*ZBF(-1)+E_ZBF ;
+          log(LANDF) = 0.05*log(LANDF_SS)+0.95*log(LANDF(-1))+E_LANDF ;
+          log(ZTF) = 0.05*log(ZTF_SS)+0.95*log(ZTF(-1))+E_LANDF ;
+          log(ZNF) = 0.05*log(ZNF_SS)+0.95*log(ZNF(-1))+E_LANDF ;
+          log(Z_OF) = 0.05*log(Z_OF_SS)+0.95*log(Z_OF(-1))+E_LANDF ;
+          ZEYEF = 0.05*ZEYEF_SS+0.95*ZEYEF(-1)+E_ZEYEF ;
+          CAPAF = 0.05*CAPAF_SS+0.95*CAPAF(-1)+E_CAPAF ;
+          log(GAMMAF) = 0.05*log(GAMMAF_SS)+0.95*log(GAMMAF(-1))+E_GAMMAF ;
+          BIGGAM_O_NF = 1-COST_O_NF/2*(O_NF/NF/(O_NF(-1)/NF(-1))-1)^2 ;
+          BIGGAM_O_TF = 1-COST_O_TF/2*(O_TF/TF/(O_TF(-1)/TF(-1))-1)^2 ;
+          O_NF = GAMA_NF*NF/ZNF*(REALP_ONF/(REALMCNF*ZNF))^(-XIXI_NF)*(BIGGAM_O_NF-COST_O_NF*(O_NF/NF/(O_NF(-1)/NF(-1))-1)*O_NF/NF/(O_NF(-1)/NF(-1)))^XIXI_NF/BIGGAM_O_NF ;
+          O_TF = GAMA_TF*TF/ZTF*(REALP_OTF/(REALMCTF*ZTF))^(-XIXI_TF)*(BIGGAM_O_TF-COST_O_TF*(O_TF/TF/(O_TF(-1)/TF(-1))-1)*O_TF/TF/(O_TF(-1)/TF(-1)))^XIXI_NF/BIGGAM_O_TF ;
+          NF = ZNF*((1-ALPHANF-GAMA_NF)^(1/XIXI_NF)*LNF^(1-1/XIXI_NF)+ALPHANF^(1/XIXI_NF)*KNF^(1-1/XIXI_NF)+GAMA_NF^(1/XIXI_NF)*(BIGGAM_O_NF*O_NF)^(1-1/XIXI_NF))^(XIXI_NF/(XIXI_NF-1)) ;
+          TF = ZTF*((1-ALPHATF-GAMA_TF)^(1/XIXI_TF)*LTF^(1-1/XIXI_TF)+ALPHATF^(1/XIXI_TF)*KTF^(1-1/XIXI_TF)+GAMA_TF^(1/XIXI_TF)*(BIGGAM_O_TF*O_TF)^(1-1/XIXI_TF))^(XIXI_TF/(XIXI_TF-1)) ;
+          REALMCNF = 1/ZNF*((1-ALPHANF-GAMA_NF)*REALWF^(1-XIXI_NF)+ALPHANF*REALRF^(1-XIXI_NF)+GAMA_NF*REALP_ONF^(1-XIXI_NF)*(BIGGAM_O_NF-COST_O_NF*(O_NF/NF/(O_NF(-1)/NF(-1))-1)*O_NF/NF/(O_NF(-1)/NF(-1)))^(XIXI_NF-1))^(1/(1-XIXI_NF)) ;
+          REALMCTF = 1/ZTF*((1-ALPHATF-GAMA_TF)*REALWF^(1-XIXI_TF)+ALPHATF*REALRF^(1-XIXI_TF)+GAMA_TF*REALP_OTF^(1-XIXI_TF)*(BIGGAM_O_TF-COST_O_TF*(O_TF/TF/(O_TF(-1)/TF(-1))-1)*O_TF/TF/(O_TF(-1)/TF(-1)))^(XIXI_TF-1))^(1/(1-XIXI_TF)) ;
+          MARGUTF = (CF-B2F*HF)^(-SIGMAF)*ZUF ;
+          HF = (1-B0F)*HF(-1)+B0F*B1F*CF(-1) ;
+          VPRIMEF = CAPAF*LF^ZEDF ;
+          AF^(1-1/EPSF) = GAMMAF^(1/EPSF)*(NYF^(1/EPSQMF)*QF^(1-1/EPSQMF)+(1-NYF)^(1/EPSQMF)*(MF*BIGGAMIMPF)^(1-1/EPSQMF))^(EPSQMF/(EPSQMF-1)*(1-1/EPSF))+(1-GAMMAF)^(1/EPSF)*NNF^(1-1/EPSF) ;
+          QF = GAMMAF*NYF*REALPQF^(-EPSQMF)*AF*REALPXF^(EPSQMF-EPSF) ;
+          MF = GAMMAF*(1-NYF)*REALPMF^(-EPSQMF)*AF*REALPXF^(EPSQMF-EPSF)*1/BIGGAMIMPF*(BIGGAMIMPF-COSTF*(MF/AF/(MF(-1)/AF(-1))-1)*MF/AF/(MF(-1)/AF(-1)))^EPSQMF ;
+          REALPXF = (NYF*REALPQF^(1-EPSQMF)+(1-NYF)*REALPMF^(1-EPSQMF)*(BIGGAMIMPF-COSTF*(MF/AF/(MF(-1)/AF(-1))-1)*MF/AF/(MF(-1)/AF(-1)))^(EPSQMF-1))^(1/(1-EPSQMF)) ;
+          BIGGAMIMPF = 1-COSTF/2*(MF/AF/(MF(-1)/AF(-1))-1)^2 ;
+          NNF = (1-GAMMAF)*REALPNF^(-EPSF)*AF ;
+          NF = NNF+ETAF*MF+ETAF*QF+GNF ;
+          PIENF = REALPNF/REALPNF(-1)*PIEF ;
+          BIGGAMNF = LILCAPPA3F/2*(PIENF/PIE4TARF^0.25-1)^2+LILCAPPA4F/2*(PIENF/PIENF(-1)-1)^2 ;
+          -(1-BIGGAMNF)*(REALPNF*(1-THETAF)+THETAF*REALMCNF) = -(REALPNF-REALMCNF)*(LILCAPPA3F*PIENF/PIE4TARF^0.25*(PIENF/PIE4TARF^0.25-1)+LILCAPPA4F*PIENF/PIENF(-1)*(PIENF/PIENF(-1)-1))+DEEF*PIEF(+1)*(REALPNF(+1)-REALMCNF(+1))*NF(+1)/NF*(LILCAPPA3F*PIENF(+1)/PIE4TARF^0.25*(PIENF(+1)/PIE4TARF^0.25-1)+LILCAPPA4F*PIENF(+1)/PIENF*(PIENF(+1)/PIENF-1)) ;
+          PIEBARQF = PIEF*REALPBARQF/REALPBARQF(-1) ;
+          BIGGAMQF = LILCAPPA5F/2*(PIEBARQF/PIE4TARF^0.25-1)^2+LILCAPPA6F/2*(PIEBARQF/PIEBARQF(-1)-1)^2 ;
+          -(1-BIGGAMQF)*REALPBARQF/REALPQF*(REALPBARQF*(1-THETAF)+ETAF*REALPNF+THETAF*REALMCTF) = -(REALPBARQF-REALMCTF)*(LILCAPPA5F*PIEBARQF/PIE4TARF^0.25*(PIEBARQF/PIE4TARF^0.25-1)+LILCAPPA6F*PIEBARQF/PIEBARQF(-1)*(PIEBARQF/PIEBARQF(-1)-1))+DEEF*PIEF(+1)*(REALPBARQF(+1)-REALMCTF(+1))*QF(+1)/QF*(LILCAPPA5F*PIEBARQF(+1)/PIE4TARF^0.25*(PIEBARQF(+1)/PIE4TARF^0.25-1)+LILCAPPA6F*PIEBARQF(+1)/PIEBARQF*(PIEBARQF(+1)/PIEBARQF-1)) ;
+          REALPQF = REALPBARQF+ETAF*REALPNF ;
+          KF = KF(-1)*(1-DELTAF)+PSIF(-1)*KF(-1) ;
+          PSIF = EYEF/KF-OMEGAF/2*(EYEF/KF-DELTAF*(1+ZEYEF))^2-OMEGA0F/2*(EYEF/KF-EYEF(-1)/KF(-1))^2 ;
+          PSIPRIMEF = 1-OMEGAF*(EYEF/KF-DELTAF*(1+ZEYEF))-OMEGA0F*(EYEF/KF-EYEF(-1)/KF(-1)) ;
+          1/PSIPRIMEF = DEEF*PIEF(+1)*(REALRF(+1)+1/PSIPRIMEF(+1)*(1-DELTAF+PSIF(+1)*(1-PSIPRIMEF(+1)*EYEF(+1)/(PSIF(+1)*KF(+1))))) ;
+          BIGGAMF = LILCAPPA1F/2*(PIEWF/PIE4TARF^0.25-1)^2+LILCAPPA2F/2*(PIEWF/PIEWF(-1)-1)^2 ;
+          PIEF*REALWF/REALWF(-1) = PIEWF ;
+          REALWF = PHIF*VPRIMEF/MARGUTF*((PHIF-1)*(1-BIGGAMF)+PIEWF*LILCAPPA1F/PIE4TARF^0.25*(PIEWF/PIE4TARF^0.25-1)+PIEWF/PIEWF(-1)*LILCAPPA2F*(PIEWF/PIEWF(-1)-1)-DEEF*PIEWF(+1)*LF(+1)/LF*LILCAPPA1F*PIEWF(+1)/PIE4TARF^0.25*(PIEWF(+1)/PIE4TARF^0.25-1)-DEEF*PIEWF(+1)*LF(+1)/LF*LILCAPPA2F*PIEWF(+1)/(REALWF/REALWF(-1))*(PIEWF(+1)/PIEWF-1))^(-1) ;
+          DEEF = BET*MARGUTF(+1)/PIEF(+1)/MARGUTF*(1+SHOPF+SHOPPRIMEF*VELOF)/(1+SHOPF(+1)+SHOPPRIMEF(+1)*VELOF(+1)) ;
+          SHOPF = A_ZEROF*VELOF+A_ONEF/VELOF-2*(A_ZEROF*A_ONEF)^0.5 ;
+          SHOPPRIMEF = A_ZEROF-A_ONEF*VELOF^(-2) ;
+          VELOF = CF/REALMONEYF ;
+          DEEF = 1-SHOPPRIMEF*VELOF^2 ;
+          1 = RNOMF*DEEF ;
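+          // Added note: the Foreign policy rule below responds to the area-wide aggregates
+          // PIE4EU and GDPGAPEU rather than to the country-specific PIE4F and GDPGAPF.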
+          100*(RNOMF^4-1) = (1-XDUMF)*100*(RNOM_EXOGF^4-1)+XDUMF*(XR3F*100*(RNOMF(-1)^4-1)+(1-XR3F)*(100*((1/BET*PIE4EU^0.25)^4-1))+XR1F*(100*(PIE4EU-1)-100*(PIE4TARF-1))+XR4F*100*(DEPEX^4-1)+XR2F*GDPGAPEU) ;
+          GDPGAPF = 100*(GDPF_NAT-GDPF_EXOG)/GDPF_EXOG ;
+          PIE4F = PIEF*PIEF(-1)*PIEF_1(-1)*PIEF_2(-1) ;
+          AF = CF*(1+SHOPF)+EYEF+GAF ;
+          GAF = .05*(GA_RATF*(GLAMBDAF*GDPF_NAT+(1-GLAMBDAF)*GDPF_EXOG))+.95*GAF(-1)+E_GAF; 
+          GNF = GN_RATF*(GLAMBDAF*GDPF_NAT+(1-GLAMBDAF)*GDPF_EXOG)/REALPNF_EXOG ;
+          PIEBARMF = PIEF*REALPBARMF/REALPBARMF(-1) ;
+          BIGGAMMF = LILCAPPA7F/2*(PIEBARMF/PIE4TARF^0.25-1)^2+LILCAPPA8F/2*(PIEBARMF/PIEBARMF(-1)-1)^2 ;
+          REALPMF = REALPBARMF+ETAF*REALPNF ;
+          KNF_RAT = ALPHANF/(1-ALPHANF-GAMA_NF)*(REALWF/REALRF)^XIXI_NF ;
+          KTF_RAT = ALPHATF/(1-ALPHATF-GAMA_TF)*(REALWF/REALRF)^XIXI_TF ;
+          KNF_RAT = KNF/LNF ;
+          KTF_RAT = KTF/LTF ;
+          KF = KTF+KNF+K_OF ;
+          LF = (LNF+LTF+L_OF)*(1-COSTLF/2*(LNF/(LTF+L_OF)/(LNF(-1)/(LTF(-1)+L_OF(-1)))-1)^2) ;
+          T_OF = Z_OF*((1-ALPHA_OF-GAMMA_LANDF)^(1/XIXI_OF)*L_OF^(1-1/XIXI_OF)+ALPHA_OF^(1/XIXI_OF)*K_OF^(1-1/XIXI_OF)+GAMMA_LANDF^(1/XIXI_OF)*LANDF^(1-1/XIXI_OF))^(XIXI_OF/(XIXI_OF-1)) ;
+          Q_ONF = NY_NF*(REALP_QOF/REALP_ONF)^(-EPS_ONF)*O_NF ;
+          Q_OTF = NY_TF*(REALP_QOF/REALP_OTF)^(-EPS_OTF)*O_TF ;
+          M_ONF = (1-NY_NF)*(REALP_MOF/REALP_ONF)^(-EPS_ONF)*O_NF*(BIGGAM_MONF-COST_MONF*(M_ONF/O_NF/(M_ONF(-1)/O_NF(-1))-1)*M_ONF/O_NF/(M_ONF(-1)/O_NF(-1)))^EPS_ONF/BIGGAM_MONF ;
+          M_OTF = (1-NY_TF)*(REALP_MOF/REALP_OTF)^(-EPS_OTF)*O_TF*(BIGGAM_MOTF-COST_MOTF*(M_OTF/O_TF/(M_OTF(-1)/O_TF(-1))-1)*M_OTF/O_TF/(M_OTF(-1)/O_TF(-1)))^EPS_OTF/BIGGAM_MOTF ;
+          BIGGAM_MONF = 1-COST_MONF/2*(M_ONF/O_NF/(M_ONF(-1)/O_NF(-1))-1)^2 ;
+          BIGGAM_MOTF = 1-COST_MOTF/2*(M_OTF/O_TF/(M_OTF(-1)/O_TF(-1))-1)^2 ;
+          K_OF_RAT = ALPHA_OF/(1-ALPHA_OF-GAMMA_LANDF)*(REALWF/REALRF)^XIXI_OF ;
+          K_OF_RAT = K_OF/L_OF ;
+          REALP_QOF = 1/Z_OF*((1-ALPHA_OF-GAMMA_LANDF)*REALWF^(1-XIXI_OF)+ALPHA_OF*REALRF^(1-XIXI_OF)+GAMMA_LANDF*REALPLANDF^(1-XIXI_OF))^(1/(1-XIXI_OF)) ;
+          LANDF = GAMMA_LANDF*(REALPLANDF/(REALP_QOF*Z_OF))^(-XIXI_OF)*T_OF/Z_OF ;
+          REALP_ONF = (NY_NF*REALP_QOF^(1-EPS_ONF)+(1-NY_NF)*REALP_MOF^(1-EPS_ONF)*(BIGGAM_MONF-COST_MONF*(M_ONF/O_NF/(M_ONF(-1)/O_NF(-1))-1)*M_ONF/O_NF/(M_ONF(-1)/O_NF(-1)))^(EPS_ONF-1))^(1/(1-EPS_ONF)) ;
+          REALP_OTF = (NY_TF*REALP_QOF^(1-EPS_OTF)+(1-NY_TF)*REALP_MOF^(1-EPS_OTF)*(BIGGAM_MOTF-COST_MOTF*(M_OTF/O_TF/(M_OTF(-1)/O_TF(-1))-1)*M_OTF/O_TF/(M_OTF(-1)/O_TF(-1)))^(EPS_OTF-1))^(1/(1-EPS_OTF)) ;
+          SSF*TF = SSF*QF+SSH*MH ;
+          SSF*T_OF = SSF*Q_ONF+SSF*Q_OTF+SSH*M_ONH+SSH*M_OTH ;
+          REALP_MOF = REALP_QOH*REALEXF ;
+          SSH*REALBH+SSF*REALBF = 0 ;
+          REALTBALF = SSF*(REALPBARMF*MF+REALP_MOF*M_ONF+REALP_MOF*M_OTF)-SSH*(REALPBARMH*MH+REALP_MOH*M_ONH+REALP_MOH*M_OTH)*1/REALEX ;
+          EXPORTSF = SSH/SSF*(REALPBARMH*MH+REALP_MOH*M_ONH+REALP_MOH*M_OTH)*1/REALEX ;
+          IMPORTSF = REALPBARMF*MF+REALP_MOF*M_ONF+REALP_MOF*M_OTF ;
+          EXPORTSF_NAT = SSH/SSF*(REALPBARMH_EXOG*MH+REALP_MOH_EXOG*M_ONH+REALP_MOH_EXOG*M_OTH)*1/REALEX_EXOG ;
+          IMPORTSF_NAT = REALPBARMF_EXOG*MF+REALP_MOF_EXOG*M_ONF+REALP_MOF_EXOG*M_OTF ;
+          -(1-BIGGAMMF)*REALPBARMF/REALPMF*(REALPBARMF*REALEX*(1-THETAH)+ETAF*REALPNF*REALEX+THETAH*REALMCTH) = -(REALPBARMF*REALEX-REALMCTH)*(LILCAPPA7F*PIEBARMF/PIE4TARF^0.25*(PIEBARMF/PIE4TARF^0.25-1)+LILCAPPA8F*PIEBARMF/PIEBARMF(-1)*(PIEBARMF/PIEBARMF(-1)-1))+DEEH*PIEH(+1)*(REALPBARMF(+1)*REALEX(+1)-REALMCTH(+1))*MF(+1)/MF*(LILCAPPA7F*PIEBARMF(+1)/PIE4TARF^0.25*(PIEBARMF(+1)/PIE4TARF^0.25-1)+LILCAPPA8F*PIEBARMF(+1)/PIEBARMF*(PIEBARMF(+1)/PIEBARMF-1)) ;
+          GDPF = AF+REALPNF*GNF+EXPORTSF-IMPORTSF+(RNOMF(-1)-1)*REALBF(-1)/PIEF ;
+          GDPF_NAT = AF+REALPNF_EXOG*GNF+EXPORTSF_NAT-IMPORTSF_NAT ;
+          CF_NAT = CF*(1+SHOPF) ;
+          GF_NAT = GAF+REALPNF_EXOG*GNF ;
+          XF_NAT = SSH/SSF*1/REALEX_EXOG*REALPBARMH_EXOG*MH ;
+          MF_NAT = REALPBARMF_EXOG*MF ;
+          CURBALF_RAT = -(REALTBALH/REALEX/SSF/GDPF)+(RNOMF(-1)-1)*REALBF(-1)/PIEF/GDPF ;
+PIEH_1 = PIEH(-1);
+PIEH_2 = PIEH_1(-1);
+PIEF_1 = PIEF(-1);
+PIEF_2 = PIEF_1(-1);
+REALBH_1 = REALBH(-1);
+
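+// Welfare recursions: VALUE accumulates the ZZ_UTILITYH flow (with habit HH), while VALUE2
+// accumulates a CRRA utility flow in CH without habit.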
+VALUE = ZZ_UTILITYH + BET*VALUE(+1);
+VALUE2 = ZUH*CH^(1-SIGMAH)/(1-SIGMAH)-CAPAH*LH^(1+ZEDH)/(1+ZEDH) + BET*VALUE2(+1);
+end;
+ 
+initval; 
+AF=2.17350447531715;
+AH=2.61461230039988;
+BIGGAMF=0;
+BIGGAMH=0;
+BIGGAMIMPF=1;
+BIGGAMIMPH=1;
+BIGGAMMF=0;
+BIGGAMMH=0;
+BIGGAMNF=0;
+BIGGAMNH=0;
+BIGGAMQF=0;
+BIGGAMQH=0;
+BIGGAM_MONF=1;
+BIGGAM_MONH=1;
+BIGGAM_MOTF=1;
+BIGGAM_MOTH=1;
+BIGGAM_O_NF=1;
+BIGGAM_O_NH=1;
+BIGGAM_O_TF=1;
+BIGGAM_O_TH=1;
+CAPAF=11;
+CAPAH=11;
+CF=1.77599320017707;
+CF_NAT=1.77797456682707;
+CH=2.10139281352027;
+CH_NAT=2.10373720855446;
+CURBALF_RAT=2.20209042676066e-018;
+CURBALH_RAT=0;
+DEEF=0.963834712172592;
+DEEH=0.963834712172592;
+DEPEX=1;
+EXPORTSF=0.0374229290542059;
+EXPORTSF_NAT=0.0374229290542059;
+EXPORTSH=0.976573287861717;
+EXPORTSH_NAT=0.976573287861717;
+EYEF=0.27477965986135;
+EYEH=0.365618852934316;
+GAF=0.12075024862873;
+GAH=0.145256238911104;
+GAMMAF=0.5;
+GAMMAH=0.25;
+GDPF=2.41500497257461;
+GDPF_NAT=2.41500497257461;
+GDPGAPF=0;
+GDPGAPH=0;
+GDPH=2.90512477822209;
+GDPH_NAT=2.90512477822209;
+GF_NAT=0.362250745886191;
+GH_NAT=0.435768716733313;
+GNF=0.287269571519256;
+GNH=0.321902361090147;
+HF=1.68719354016822;
+HH=1.99632317284426;
+IMPORTSF=0.0374229290542059;
+IMPORTSF_NAT=0.0374229290542059;
+IMPORTSH=0.976573287861718;
+IMPORTSH_NAT=0.976573287861718;
+KF=10.991186394454;
+KH=14.6247541173726;
+KNF=6.33686501417153;
+KNF_RAT=22.6981730731029;
+KNH=11.034700665508;
+KNH_RAT=22.8755992006951;
+KTF=2.97137434524903;
+KTF_RAT=22.6981730731029;
+KTH=2.23720856941572;
+KTH_RAT=114.377996003476;
+K_OF=1.68294703503345;
+K_OF_RAT=7.27127622255245;
+K_OH=1.35284488244891;
+K_OH_RAT=8.16985685739111;
+LANDF=0.1;
+LANDH=0.1;
+LF=0.64153899810027;
+LH=0.667528221502678;
+LNF=0.279179517830034;
+LNH=0.482378650224502;
+LTF=0.130908083909629;
+LTH=0.019559781143112;
+L_OF=0.231451396360608;
+L_OH=0.165589790135064;
+MARGUTF=2.24145263303312;
+MARGUTH=2.11921125101343;
+MF=0.0196445696804563;
+MF_NAT=0.0171196449669319;
+MH=0.438784845846124;
+MH_NAT=0.522472906750236;
+M_ONF=0.0143006671963624;
+M_ONH=0.134410532365428;
+M_OTF=0.00670562423725087;
+M_OTH=0.143002828997546;
+NF=1.91582345366461;
+NH=2.609674642079;
+NNF=1.31534385473198;
+NNH=2.19524942542191;
+O_NF=0.387338325509274;
+O_NH=0.147043832240678;
+O_TF=0.18162406186278;
+O_TH=0.148205762233076;
+PIE4F=1.125;
+PIE4H=1.125;
+PIE4TARF=1.125;
+PIE4TARH=1.125;
+PIEBARMF=1.02988357195356;
+PIEBARMH=1.02988357195356;
+PIEBARQF=1.02988357195356;
+PIEBARQH=1.02988357195356;
+PIEF=1.02988357195356;
+PIEF_1=1.02988357195356;
+PIEF_2=1.02988357195356;
+PIEH=1.02988357195356;
+PIEH_1=1.02988357195356;
+PIEH_2=1.02988357195356;
+PIENF=1.02988357195356;
+PIENH=1.02988357195356;
+PIEWF=1.02988357195356;
+PIEWH=1.02988357195356;
+PSIF=0.025;
+PSIH=0.025;
+PSIPRIMEF=1;
+PSIPRIMEH=1;
+QF=0.875241222929181;
+QH=0.0238294319885835;
+Q_ONF=0.373740369418894;
+Q_ONH=0.0132636199615755;
+Q_OTF=0.175247940896905;
+Q_OTH=0.00547180886242481;
+REALBF=0;
+REALBH=0;
+REALBH_1=0;
+REALEX=1.3734519289908;
+REALEXF=0.728092464608345;
+REALEXH=1.3734519289908;
+REALFINH=0;
+REALMCNF=0.700562935771035;
+REALMCNH=0.752071934789911;
+REALMCTF=0.700562935771035;
+REALMCTH=0.930081384894704;
+REALMONEYF=0.558667031035572;
+REALMONEYH=0.661026677383566;
+REALPBARMF=0.87146958398196;
+REALPBARMH=1.19072687148694;
+REALPBARQF=0.899522809530009;
+REALPBARQH=1.15219711474356;
+REALPLANDF=0.554831427212494;
+REALPLANDH=0.414697221827051;
+REALPMF=1.16570601700579;
+REALPMH=1.37122413583652;
+REALPNF=0.840675522925242;
+REALPNH=0.902486321747893;
+REALPQF=1.19375924255384;
+REALPQH=1.33269437909314;
+REALPXF=1.19317131724075;
+REALPXH=1.36926881180313;
+REALP_MOF=0.966533486000563;
+REALP_MOH=1.63690883121281;
+REALP_ONF=1.18566549908199;
+REALP_ONH=1.61601524261254;
+REALP_OTF=1.18566549908199;
+REALP_OTH=1.62845456685201;
+REALP_QOF=1.1918209852569;
+REALP_QOH=1.32748728078168;
+REALRF=0.0324170717777328;
+REALRH=0.0324170717777329;
+REALTBALF=-6.93889390390723e-018;
+REALTBALH=-6.93889390390723e-018;
+REALWF=2.42667732699502;
+REALWH=2.83454771236558;
+RNOMF=1.03752229232945;
+RNOMH=1.03752229232945;
+SHOPF=0.00111563864647424;
+SHOPH=0.00111563864647424;
+SHOPPRIMEF=0.00357861859467432;
+SHOPPRIMEH=0.00357861859467432;
+TF=0.89833516218424;
+TH=0.397076255917254;
+T_OF=0.563589013545429;
+T_OH=0.417854966062653;
+VALUE=-2.621110285550203e+02;
+VALUE2=2.305114066037580e+02;
+VELOF=3.17898336847443;
+VELOH=3.17898336847443;
+VPRIMEF=3.62618818940983;
+VPRIMEH=4.00467026905301;
+XF_NAT=0.0200215045456245;
+XH_NAT=0.446747178665936;
+XI=0;
+ZBF=0;
+ZBH=0;
+ZEYEF=0;
+ZEYEH=0;
+ZNF=1;
+ZNH=1;
+ZTF=1;
+ZTH=0.6;
+ZUF=1;
+ZUH=1;
+ZZ_CF=57.5474832617676;
+ZZ_CH=74.3715386197541;
+ZZ_CURBALF_RAT=2.20209042676066e-016;
+ZZ_CURBALH_RAT=0;
+ZZ_C_SHARF=0.735399396831762;
+ZZ_C_SHARH=0.723339950584259;
+ZZ_DPIE4F=0;
+ZZ_DPIE4H=0;
+ZZ_DRNOMF=0;
+ZZ_DRNOMH=0;
+ZZ_EXPORTSF=-328.547168610049;
+ZZ_EXPORTSH=-2.37054799079326;
+ZZ_EYEF=-129.17857393452;
+ZZ_EYEH=-100.616387362469;
+ZZ_EYE_SHARF=0.113780163180538;
+ZZ_EYE_SHARH=0.12585306341233;
+ZZ_GDPF=88.1701346139521;
+ZZ_GDPH=106.647634229781;
+ZZ_GDP_PPP_RATH=0.875857186130553;
+ZZ_GF=-101.541863874636;
+ZZ_GH=-83.0643642588075;
+ZZ_IMPORTSF=-328.547168610049;
+ZZ_IMPORTSH=-2.37054799079323;
+ZZ_M_ON_SHARF=0.0063593490946998;
+ZZ_M_ON_SHARH=0.084149297164759;
+ZZ_M_OT_SHARF=0.00298191719568198;
+ZZ_M_OT_SHARH=0.0895286056899133;
+ZZ_M_O_SHARF=0.00934126629038178;
+ZZ_M_O_SHARH=0.173677902854672;
+ZZ_M_SHARF=0.00787651700806085;
+ZZ_M_SHARH=0.19982806118916;
+ZZ_N_SHARF=0.741008772713445;
+ZZ_N_SHARH=0.90078198910348;
+ZZ_PIE4F=12.5;
+ZZ_PIE4H=12.5;
+ZZ_REALEX=31.7327227026121;
+ZZ_RNOMF=15.8749999999999;
+ZZ_RNOMH=15.8749999999999;
+ZZ_UTILITYF=-1.86610854895021;
+ZZ_UTILITYH=-1.9297829736965;
+ZZ_XBALF_TOT_RAT=0;
+ZZ_XBALH_TOT_RAT=-7.6432037132987e-015;
+Z_OF=1;
+Z_OH=1;
+
+E_ZBH=0;
+
+E_ZUH=0;
+E_ZUF=0;
+
+E_ZEYEH=0;
+E_ZEYEF=0;
+
+E_GAMMAH=0;
+E_GAMMAF=0;
+
+E_LANDH=0;
+E_LANDF=0;
+
+E_GAH = 0;
+E_GAF = 0;
+
+E_CAPAH=0;
+E_CAPAF=0;
+
+ZZ_PIE4EU= 12.5;
+ZZ_RNOMEU=15.8749999999999;
+GDPGAPEU= 0;
+ZZ_DRNOMEU=0;
+PIE4EU = 1.125;
+end;    
+
+vcov = [
+/* E_ZBH 0.000289 */
+ 0.000289 0 0 0 0 0 0 0 0 0 0 0 0;
+/* E_ZUH */
+0 0.000016 0 0 0 0 0 0 0 0 0 0 0;
+/* E_ZUF */
+0 0 0.000001 0 0 0 0 0 0 0 0 0 0;
+/* E_ZEYEH */
+0 0 0 0.0049 0 0 0 0 0 0 0 0 0;
+/* E_ZEYEF */
+0 0 0 0 0.000025 0 0 0 0 0 0 0 0;
+/* E_GAMMAH */
+0 0 0 0 0 0.0004 0 0 0 0 0 0 0;
+/* E_GAMMAF */
+0 0 0 0 0 0 0.000324 0 0 0 0 0 0;
+/* E_LANDH */
+0 0 0 0 0 0 0 0.000004 0 0 0 0 0;
+/* E_LANDF */
+0 0 0 0 0 0 0 0 0.00000001 0 0 0 0;
+/* E_GAH */ 
+0 0 0 0 0 0 0 0 0 0.00001225 0 0 0;
+/* E_GAF */
+0 0 0 0 0 0 0 0 0 0 0.0000005625 0 0;
+/* E_CAPAH */
+0 0 0 0 0 0 0 0 0 0 0 0.1 0;
+/* E_CAPAF */
+0 0 0 0 0 0 0 0 0 0 0 0 0.0001
+];
+
+order = 2;
\ No newline at end of file
diff --git a/dynare++/tests/dm7.mod b/dynare++/tests/dm7.mod
new file mode 100644
index 0000000000000000000000000000000000000000..0a31faf4b466af0f07cca1cbaefe6759f46cc103
--- /dev/null
+++ b/dynare++/tests/dm7.mod
@@ -0,0 +1,40 @@
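+// Small RBC test model with a distortionary income tax: the household chooses C, N and K,
+// factor prices r and w are scaled by (1-tau), and tau follows an AR(1) process with
+// persistence rho and innovation e. The decision rule is approximated at order 7.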
+var C K r w N tau I;
+varexo e;
+
+parameters alph bet delt thet tau_m rho;
+alph = 0.3;
+bet = 0.96;
+thet = 0.3;
+delt = 0.05;
+tau_m = 0.35;
+rho = 0.8;
+
+model;
+C = C(+1)/(bet*(r(+1)+1-delt));
+I = K(-1)^alph*N^(1-alph)-C;
+K = I+(1-delt)*K(-1);
+N = 1-(1-thet)*C/(thet*w);
+r = (1-tau)*alph*(K(-1)/N)^(alph-1);
+w = (1-tau)*(1-alph)*(K(-1)/N)^alph;
+tau = (1-rho)*tau_m + rho*tau(-1)+e;
+end;
+
+initval;
+C=0.2;
+I=0.02;
+K=0.5;
+N=0.18;
+r=0.09;
+w=0.6;
+tau=0.35;
+e=0;
+end;
+
+vcov = [0.007208];
+
+order=7;
+
+
+
+
+
diff --git a/dynare++/tests/example1.mod b/dynare++/tests/example1.mod
new file mode 100644
index 0000000000000000000000000000000000000000..ca528c0d9a6aaa7652fcb3fb9dc845c00926374f
--- /dev/null
+++ b/dynare++/tests/example1.mod
@@ -0,0 +1,41 @@
+// this is an example from the tutorial
+
+var Y, C, K, A, H, B;
+
+varexo EPS, NU;
+
+parameters rho, beta, alpha, delta, theta, psi, tau;
+alpha = 0.36;
+rho   = 0.95;
+tau   = 0.025;
+beta  = 1/(1.03^0.25);
+delta = 0.025;
+psi   = 0;
+theta = 2.95;
+
+
+model;
+C*theta*H^(1+psi) = (1-alpha)*Y;
+beta*exp(B)*C/exp(B(1))/C(1)*
+  (exp(B(1))*alpha*Y(1)/K(1)+1-delta) = 1;
+Y = exp(A)*K^alpha*H^(1-alpha);
+K = exp(B(-1))*(Y(-1)-C(-1)) + (1-delta)*K(-1);
+A = rho*A(-1) + tau*B(-1) + EPS;
+B = tau*A(-1) + rho*B(-1) + NU;
+end;
+
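+// The initval block supplies closed-form expressions for the non-stochastic steady state
+// (H, Y, K and C evaluated at A = B = 0).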
+initval;
+A = 0;
+B = 0;
+H = ((1-alpha)/(theta*(1-(delta*alpha)/(1/beta-1+delta))))^(1/(1+psi));
+Y = (alpha/(1/beta-1+delta))^(alpha/(1-alpha))*H;
+K = alpha/(1/beta-1+delta)*Y;
+C = Y - delta*K;
+end;
+
+vcov = [
+  0.0002  0.00005;
+  0.00005 0.0001
+];
+
+order = 7;
diff --git a/dynare++/tests/example1_optim.mod b/dynare++/tests/example1_optim.mod
new file mode 100644
index 0000000000000000000000000000000000000000..0bd57477c7b3dbfb2f36058406daba905729a9d2
--- /dev/null
+++ b/dynare++/tests/example1_optim.mod
@@ -0,0 +1,43 @@
+// this is a file trying to replicate example1.mod as optimization of a social planner
+// it serves also as an example combining +2 lead and optimal policy
+
+var Y, C, K, A, H, B;
+
+varexo EPS, NU;
+
+parameters rho, beta, alpha, delta, theta, psi, tau;
+alpha = 0.36;
+rho   = 0.95;
+tau   = 0.025;
+beta  = 1/(1.03^0.25);
+delta = 0.025;
+psi   = 0;
+theta = 2.95;
+
+planner_objective log(C)-theta*H^(1+psi)/(1+psi);
+
+planner_discount beta;
+
+model;
+//Y = exp(A)*K^alpha*H^(1-alpha);
+Y = exp(A)*exp(A(+1))*exp(A(+2))*K^alpha*H^(1-alpha);
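+// (the exp(A(+1))*exp(A(+2)) factors only serve to introduce +2 leads, as noted in the header)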
+K = exp(B(-1))*(Y(-1)-C(-1)) + (1-delta)*K(-1);
+A = rho*A(-1) + tau*B(-1) + EPS;
+B = tau*A(-1) + rho*B(-1) + NU;
+end;
+
+initval;
+A = 0;
+B = 0;
+H = ((1-alpha)/(theta*(1-(delta*alpha)/(1/beta-1+delta))))^(1/(1+psi));
+Y = (alpha/(1/beta-1+delta))^(alpha/(1-alpha))*H;
+K = alpha/(1/beta-1+delta)*Y;
+C = Y - delta*K;
+end;
+
+vcov = [
+  0.0002  0.00005;
+  0.00005 0.0001
+];
+
+order = 2;
diff --git a/dynare++/tests/gentay1a.dyn b/dynare++/tests/gentay1a.dyn
new file mode 100644
index 0000000000000000000000000000000000000000..8c2a9566d6104daba72f1a09268667ff889056c2
--- /dev/null
+++ b/dynare++/tests/gentay1a.dyn
@@ -0,0 +1,671 @@
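+// Two-country model: variables ending in H belong to the Home block, those ending in F to the
+// Foreign block. PIEH1, PIEH2, PIEF1, PIEF2 and REALBH1 are auxiliary variables carrying the
+// extra lags needed for year-on-year inflation (PIE4H, PIE4F) and for the lagged bond position
+// entering the risk-premium term XI.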
+var AF AH BIGGAMF BIGGAMH BIGGAMIMPF BIGGAMIMPH BIGGAMMF BIGGAMMH BIGGAMNF BIGGAMNH BIGGAMQF BIGGAMQH BIGGAM_MONF BIGGAM_MONH BIGGAM_MOTF BIGGAM_MOTH BIGGAM_O_NF BIGGAM_O_NH BIGGAM_O_TF BIGGAM_O_TH CAPAF CAPAH CF CF_NAT CH CH_NAT CURBALF_RAT CURBALH_RAT DEEF DEEH DEPEX EXPORTSF EXPORTSF_NAT EXPORTSH EXPORTSH_NAT EYEF EYEH GAF GAH GAMMAF GAMMAH GDPF GDPF_NAT GDPGAPF GDPGAPH GDPH GDPH_NAT GF_NAT GH_NAT GNF GNH HF HH IMPORTSF IMPORTSF_NAT IMPORTSH IMPORTSH_NAT KF KH KNF KNF_RAT KNH KNH_RAT KTF KTF_RAT KTH KTH_RAT K_OF K_OF_RAT K_OH K_OH_RAT LANDF LANDH LF LH LNF LNH LTF LTH L_OF L_OH MARGUTF MARGUTH MF MF_NAT MH MH_NAT M_ONF M_ONH M_OTF M_OTH NF NH NNF NNH O_NF O_NH O_TF O_TH PIE4F PIE4H PIE4TARF PIE4TARH PIEBARMF PIEBARMH PIEBARQF PIEBARQH PIEF PIEH PIENF PIENH PIEWF PIEWH PSIF PSIH PSIPRIMEF PSIPRIMEH QF QH Q_ONF Q_ONH Q_OTF Q_OTH REALBF REALBH REALEX REALEXF REALEXH REALFINH REALMCNF REALMCNH REALMCTF REALMCTH REALMONEYF REALMONEYH REALPBARMF REALPBARMH REALPBARQF REALPBARQH REALPLANDF REALPLANDH REALPMF REALPMH REALPNF REALPNH REALPQF REALPQH REALPXF REALPXH REALP_MOF REALP_MOH REALP_ONF REALP_ONH REALP_OTF REALP_OTH REALP_QOF REALP_QOH REALRF REALRH REALTBALF REALTBALH REALWF REALWH RNOMF RNOMH SHOPF SHOPH SHOPPRIMEF SHOPPRIMEH TF TH T_OF T_OH VELOF VELOH VPRIMEF VPRIMEH XF_NAT XH_NAT XI ZBF ZBH ZEYEF ZEYEH ZNF ZNH ZTF ZTH ZUF ZUH ZZ_CF ZZ_CH ZZ_CURBALF_RAT ZZ_CURBALH_RAT ZZ_C_SHARF ZZ_C_SHARH ZZ_DPIE4F ZZ_DPIE4H ZZ_DRNOMF ZZ_DRNOMH ZZ_EXPORTSF ZZ_EXPORTSH ZZ_EYEF ZZ_EYEH ZZ_EYE_SHARF ZZ_EYE_SHARH ZZ_GDPF ZZ_GDPH ZZ_GDP_PPP_RATH ZZ_GF ZZ_GH ZZ_IMPORTSF ZZ_IMPORTSH ZZ_M_ON_SHARF ZZ_M_ON_SHARH ZZ_M_OT_SHARF ZZ_M_OT_SHARH ZZ_M_O_SHARF ZZ_M_O_SHARH ZZ_M_SHARF ZZ_M_SHARH ZZ_N_SHARF ZZ_N_SHARH ZZ_PIE4F ZZ_PIE4H ZZ_REALEX ZZ_RNOMF ZZ_RNOMH ZZ_UTILITYF ZZ_UTILITYH ZZ_XBALF_TOT_RAT ZZ_XBALH_TOT_RAT Z_OF Z_OH PIEF1 PIEF2 PIEH1 PIEH2 REALBH1;
+ 
+varexo E_ZBH E_ZUH E_ZUF E_ZEYEH E_ZEYEF E_GAMMAH E_GAMMAF E_LANDH E_LANDF E_GAH E_GAF E_CAPAH E_CAPAF;
+ 
+parameters COSTLF COSTLH EPSF EPSH EPSQMF EPSQMH GLAMBDAF GLAMBDAH SIGMAF SIGMAH SSF SSH XR1F XR1H XR2F XR2H XR3F XR3H XR4F XR4H ALPHANF ALPHANH ALPHATF ALPHATH ALPHA_OF ALPHA_OH A_ONEF A_ONEH A_ZEROF A_ZEROH B0F B0H B1F B1H B2F B2H BET CAPAF_SS CAPAH_SS CHI0 CHI1 CHI2 CHI3 COSTF COSTH COST_MONF COST_MONH COST_MOTF COST_MOTH COST_O_NF COST_O_NH COST_O_TF COST_O_TH DELTAF DELTAH EPS_ONF EPS_ONH EPS_OTF EPS_OTH ETAF ETAH E_PIE4TARF E_PIE4TARH E_ZBF GAMA_NF GAMA_NH GAMA_TF GAMA_TH GAMMAF_SS GAMMAH_SS GAMMA_LANDF GAMMA_LANDH GA_RATF GA_RATH GDPF_EXOG GDPH_EXOG GN_RATF GN_RATH LANDF_SS LANDH_SS LILCAPPA1F LILCAPPA1H LILCAPPA2F LILCAPPA2H LILCAPPA3F LILCAPPA3H LILCAPPA4F LILCAPPA4H LILCAPPA5F LILCAPPA5H LILCAPPA6F LILCAPPA6H LILCAPPA7F LILCAPPA7H LILCAPPA8F LILCAPPA8H NYF NYH NY_NF NY_NH NY_TF NY_TH OMEGA0F OMEGA0H OMEGAF OMEGAH PHIF PHIH PIE4TARF_SS PIE4TARH_SS REALEX_EXOG REALPBARMF_EXOG REALPBARMH_EXOG REALPNF_EXOG REALPNH_EXOG REALP_MOF_EXOG REALP_MOH_EXOG RNOM_EXOGF RNOM_EXOGH THETAF THETAH XDUMF XDUMH XIXI_NF XIXI_NH XIXI_OF XIXI_OH XIXI_TF XIXI_TH ZBF_SS ZBH_SS ZEDF ZEDH ZEYEF_SS ZEYEH_SS ZNF_SS ZNH_SS ZTF_SS ZTH_SS ZUF_SS ZUH_SS Z_OF_SS Z_OH_SS;
+
+COSTLF=5;
+COSTLH=5;
+EPSF=1.1;
+EPSH=1.1;
+EPSQMF=4;
+EPSQMH=1.1;
+GLAMBDAF=0;
+GLAMBDAH=0;
+SIGMAF=0.333333333333333;
+SIGMAH=0.333333333333333;
+SSF=0.95;
+SSH=0.05;
+XR1F=1;
+XR1H=0.40;
+XR2F=0;
+XR2H=0.1;
+XR3F=0.5;
+XR3H=0.84;
+XR4F=0;
+XR4H=0;
+ALPHANF=0.33;
+ALPHANH=0.4;
+ALPHATF=0.33;
+ALPHATH=0.4;
+ALPHA_OF=0.2;
+ALPHA_OH=0.2;
+A_ONEF=0.075;
+A_ONEH=0.075;
+A_ZEROF=0.011;
+A_ZEROH=0.011;
+B0F=1;
+B0H=1;
+B1F=0.95;
+B1H=0.95;
+B2F=1;
+B2H=1;
+BET=0.99263753614514;
+CAPAF_SS=11;
+CAPAH_SS=11;
+CHI0=0.05;
+CHI1=.1;
+CHI2=0;
+CHI3=0;
+COSTF=5;
+COSTH=5;
+COST_MONF=5;
+COST_MONH=5;
+COST_MOTF=5;
+COST_MOTH=5;
+COST_O_NF=5;
+COST_O_NH=5;
+COST_O_TF=5;
+COST_O_TH=5;
+DELTAF=0.025;
+DELTAH=0.025;
+EPS_ONF=3;
+EPS_ONH=3;
+EPS_OTF=3;
+EPS_OTH=3;
+ETAF=0.35;
+ETAH=0.2;
+E_PIE4TARF=0;
+E_PIE4TARH=0;
+E_ZBF=0;
+GAMA_NF=0.3;
+GAMA_NH=0.1;
+GAMA_TF=0.3;
+GAMA_TH=0.5;
+GAMMAF_SS=0.5;
+GAMMAH_SS=0.25;
+GAMMA_LANDF=0.1;
+GAMMA_LANDH=0.1;
+GA_RATF=0.05;
+GA_RATH=0.05;
+GDPF_EXOG=2.41500497257461;
+GDPH_EXOG=2.90512477822209;
+GN_RATF=0.1;
+GN_RATH=0.1;
+LANDF_SS=0.1;
+LANDH_SS=0.1;
+LILCAPPA1F=0;
+LILCAPPA1H=0;
+LILCAPPA2F=800;
+LILCAPPA2H=400;
+LILCAPPA3F=0;
+LILCAPPA3H=0;
+LILCAPPA4F=800;
+LILCAPPA4H=400;
+LILCAPPA5F=0;
+LILCAPPA5H=0;
+LILCAPPA6F=800;
+LILCAPPA6H=400;
+LILCAPPA7F=0;
+LILCAPPA7H=0;
+LILCAPPA8F=0;
+LILCAPPA8H=0;
+NYF=0.98;
+NYH=0.0499999999999999;
+NY_NF=0.98;
+NY_NH=0.0499999999999999;
+NY_TF=0.98;
+NY_TH=0.02;
+OMEGA0F=60;
+OMEGA0H=60;
+OMEGAF=5;
+OMEGAH=5;
+PHIF=3;
+PHIH=3;
+PIE4TARF_SS=1.125;
+PIE4TARH_SS=1.125;
+REALEX_EXOG=1.3734519289908;
+REALPBARMF_EXOG=0.87146958398196;
+REALPBARMH_EXOG=1.19072687148694;
+REALPNF_EXOG=0.840675522925242;
+REALPNH_EXOG=0.902486321747893;
+REALP_MOF_EXOG=0.966533486000563;
+REALP_MOH_EXOG=1.63690883121281;
+RNOM_EXOGF=1.00741707177773;
+RNOM_EXOGH=1.00741707177773;
+THETAF=6;
+THETAH=6;
+XDUMF=1;
+XDUMH=1;
+XIXI_NF=0.75;
+XIXI_NH=0.75;
+XIXI_OF=0.75;
+XIXI_OH=0.75;
+XIXI_TF=0.75;
+XIXI_TH=0.75;
+ZBF_SS=0;
+ZBH_SS=0;
+ZEDF=2.5;
+ZEDH=2.5;
+ZEYEF_SS=0;
+ZEYEH_SS=0;
+ZNF_SS=1;
+ZNH_SS=1;
+ZTF_SS=1;
+ZTH_SS=0.6;
+ZUF_SS=1;
+ZUH_SS=1;
+Z_OF_SS=1;
+Z_OH_SS=1;
+ 
+model; 
+          ZZ_UTILITYH = (ZUH*(CH-HH)^(1-SIGMAH)-1)/(1-SIGMAH)-CAPAH*LH^(1+ZEDH)/(1+ZEDH) ;
+          ZZ_GH = 100*log(GH_NAT) ;
+          ZZ_CURBALH_RAT = CURBALH_RAT*100 ;
+          ZZ_M_SHARH = REALPBARMH*MH/AH ;
+          ZZ_M_O_SHARH = (REALP_MOH*M_ONH+REALP_MOH*M_OTH)/AH ;
+          ZZ_M_ON_SHARH = REALP_MOH*M_ONH/AH ;
+          ZZ_M_OT_SHARH = REALP_MOH*M_OTH/AH ;
+          ZZ_N_SHARH = NH*REALPNH/AH ;
+          ZZ_EYE_SHARH = EYEH/GDPH ;
+          ZZ_C_SHARH = CH/GDPH ;
+          ZZ_GDPH = 100*log(GDPH_NAT) ;
+          ZZ_CH = 100*log(CH_NAT) ;
+          ZZ_EYEH = 100*log(EYEH) ;
+          ZZ_EXPORTSH = 100*log(EXPORTSH_NAT) ;
+          ZZ_IMPORTSH = 100*log(IMPORTSH_NAT) ;
+          ZZ_XBALH_TOT_RAT = 100*(EXPORTSH_NAT-IMPORTSH_NAT)/GDPH_NAT ;
+          ZZ_PIE4H = 100*(PIE4H-1) ;
+          ZZ_DPIE4H = ZZ_PIE4H-ZZ_PIE4H(-1) ;
+          ZZ_RNOMH = 100*(RNOMH^4-1) ;
+          ZZ_DRNOMH = ZZ_RNOMH-ZZ_RNOMH(-1) ;
+          100*(PIE4TARH-1) = 1*100*(PIE4TARH_SS-1)+(1-1)*100*(PIE4TARH(-1)-1)+E_PIE4TARH ;
+          log(ZUH) = 0.3*log(ZUH_SS)+0.7*log(ZUH(-1))+E_ZUH ;
+          ZBH = 0.3*ZBH_SS+0.7*ZBH(-1)+E_ZBH ;
+          log(LANDH) = 0.05*log(LANDH_SS)+0.95*log(LANDH(-1))+E_LANDH ;
+          log(ZTH) = 0.05*log(ZTH_SS)+0.95*log(ZTH(-1))+E_LANDH ;
+          log(ZNH) = 0.05*log(ZNH_SS)+0.95*log(ZNH(-1))+E_LANDH ;
+          log(Z_OH) = 0.05*log(Z_OH_SS)+0.95*log(Z_OH(-1))+E_LANDH ;
+          ZEYEH = 0.05*ZEYEH_SS+0.95*ZEYEH(-1)+E_ZEYEH ;
+          CAPAH = 0.05*CAPAH_SS+0.95*CAPAH(-1)+E_CAPAH ;
+          log(GAMMAH) = 0.05*log(GAMMAH_SS)+0.95*log(GAMMAH(-1))+E_GAMMAH ;
+          BIGGAM_O_NH = 1-COST_O_NH/2*(O_NH/NH/(O_NH(-1)/NH(-1))-1)^2 ;
+          BIGGAM_O_TH = 1-COST_O_TH/2*(O_TH/TH/(O_TH(-1)/TH(-1))-1)^2 ;
+          O_NH = GAMA_NH*NH/ZNH*(REALP_ONH/(REALMCNH*ZNH))^(-XIXI_NH)*(BIGGAM_O_NH-COST_O_NH*(O_NH/NH/(O_NH(-1)/NH(-1))-1)*O_NH/NH/(O_NH(-1)/NH(-1)))^XIXI_NH/BIGGAM_O_NH ;
+          O_TH = GAMA_TH*TH/ZTH*(REALP_OTH/(REALMCTH*ZTH))^(-XIXI_TH)*(BIGGAM_O_TH-COST_O_TH*(O_TH/TH/(O_TH(-1)/TH(-1))-1)*O_TH/TH/(O_TH(-1)/TH(-1)))^XIXI_NH/BIGGAM_O_TH ;
+          NH = ZNH*((1-ALPHANH-GAMA_NH)^(1/XIXI_NH)*LNH^(1-1/XIXI_NH)+ALPHANH^(1/XIXI_NH)*KNH^(1-1/XIXI_NH)+GAMA_NH^(1/XIXI_NH)*(BIGGAM_O_NH*O_NH)^(1-1/XIXI_NH))^(XIXI_NH/(XIXI_NH-1)) ;
+          TH = ZTH*((1-ALPHATH-GAMA_TH)^(1/XIXI_TH)*LTH^(1-1/XIXI_TH)+ALPHATH^(1/XIXI_TH)*KTH^(1-1/XIXI_TH)+GAMA_TH^(1/XIXI_TH)*(BIGGAM_O_TH*O_TH)^(1-1/XIXI_TH))^(XIXI_TH/(XIXI_TH-1)) ;
+          REALMCNH = 1/ZNH*((1-ALPHANH-GAMA_NH)*REALWH^(1-XIXI_NH)+ALPHANH*REALRH^(1-XIXI_NH)+GAMA_NH*REALP_ONH^(1-XIXI_NH)*(BIGGAM_O_NH-COST_O_NH*(O_NH/NH/(O_NH(-1)/NH(-1))-1)*O_NH/NH/(O_NH(-1)/NH(-1)))^(XIXI_NH-1))^(1/(1-XIXI_NH)) ;
+          REALMCTH = 1/ZTH*((1-ALPHATH-GAMA_TH)*REALWH^(1-XIXI_TH)+ALPHATH*REALRH^(1-XIXI_TH)+GAMA_TH*REALP_OTH^(1-XIXI_TH)*(BIGGAM_O_TH-COST_O_TH*(O_TH/TH/(O_TH(-1)/TH(-1))-1)*O_TH/TH/(O_TH(-1)/TH(-1)))^(XIXI_TH-1))^(1/(1-XIXI_TH)) ;
+          MARGUTH = (CH-B2H*HH)^(-SIGMAH)*ZUH ;
+          HH = (1-B0H)*HH(-1)+B0H*B1H*CH(-1) ;
+          VPRIMEH = CAPAH*LH^ZEDH ;
+          AH^(1-1/EPSH) = GAMMAH^(1/EPSH)*(NYH^(1/EPSQMH)*QH^(1-1/EPSQMH)+(1-NYH)^(1/EPSQMH)*(MH*BIGGAMIMPH)^(1-1/EPSQMH))^(EPSQMH/(EPSQMH-1)*(1-1/EPSH))+(1-GAMMAH)^(1/EPSH)*NNH^(1-1/EPSH) ;
+          QH = GAMMAH*NYH*REALPQH^(-EPSQMH)*AH*REALPXH^(EPSQMH-EPSH) ;
+          MH = GAMMAH*(1-NYH)*REALPMH^(-EPSQMH)*AH*REALPXH^(EPSQMH-EPSH)*1/BIGGAMIMPH*(BIGGAMIMPH-COSTH*(MH/AH/(MH(-1)/AH(-1))-1)*MH/AH/(MH(-1)/AH(-1)))^EPSQMH ;
+          REALPXH = (NYH*REALPQH^(1-EPSQMH)+(1-NYH)*REALPMH^(1-EPSQMH)*(BIGGAMIMPH-COSTH*(MH/AH/(MH(-1)/AH(-1))-1)*MH/AH/(MH(-1)/AH(-1)))^(EPSQMH-1))^(1/(1-EPSQMH)) ;
+          BIGGAMIMPH = 1-COSTH/2*(MH/AH/(MH(-1)/AH(-1))-1)^2 ;
+          NNH = (1-GAMMAH)*REALPNH^(-EPSH)*AH ;
+          NH = NNH+ETAH*MH+ETAH*QH+GNH ;
+          PIENH = REALPNH/REALPNH(-1)*PIEH ;
+          BIGGAMNH = LILCAPPA3H/2*(PIENH/PIE4TARH^0.25-1)^2+LILCAPPA4H/2*(PIENH/PIENH(-1)-1)^2 ;
+          -(1-BIGGAMNH)*(REALPNH*(1-THETAH)+THETAH*REALMCNH) = -(REALPNH-REALMCNH)*(LILCAPPA3H*PIENH/PIE4TARH^0.25*(PIENH/PIE4TARH^0.25-1)+LILCAPPA4H*PIENH/PIENH(-1)*(PIENH/PIENH(-1)-1))+DEEH*PIEH(+1)*(REALPNH(+1)-REALMCNH(+1))*NH(+1)/NH*(LILCAPPA3H*PIENH(+1)/PIE4TARH^0.25*(PIENH(+1)/PIE4TARH^0.25-1)+LILCAPPA4H*PIENH(+1)/PIENH*(PIENH(+1)/PIENH-1)) ;
+          PIEBARQH = PIEH*REALPBARQH/REALPBARQH(-1) ;
+          BIGGAMQH = LILCAPPA5H/2*(PIEBARQH/PIE4TARH^0.25-1)^2+LILCAPPA6H/2*(PIEBARQH/PIEBARQH(-1)-1)^2 ;
+          -(1-BIGGAMQH)*REALPBARQH/REALPQH*(REALPBARQH*(1-THETAH)+ETAH*REALPNH+THETAH*REALMCTH) = -(REALPBARQH-REALMCTH)*(LILCAPPA5H*PIEBARQH/PIE4TARH^0.25*(PIEBARQH/PIE4TARH^0.25-1)+LILCAPPA6H*PIEBARQH/PIEBARQH(-1)*(PIEBARQH/PIEBARQH(-1)-1))+DEEH*PIEH(+1)*(REALPBARQH(+1)-REALMCTH(+1))*QH(+1)/QH*(LILCAPPA5H*PIEBARQH(+1)/PIE4TARH^0.25*(PIEBARQH(+1)/PIE4TARH^0.25-1)+LILCAPPA6H*PIEBARQH(+1)/PIEBARQH*(PIEBARQH(+1)/PIEBARQH-1)) ;
+          REALPQH = REALPBARQH+ETAH*REALPNH ;
+          KH = KH(-1)*(1-DELTAH)+PSIH(-1)*KH(-1) ;
+          PSIH = EYEH/KH-OMEGAH/2*(EYEH/KH-DELTAH*(1+ZEYEH))^2-OMEGA0H/2*(EYEH/KH-EYEH(-1)/KH(-1))^2 ;
+          PSIPRIMEH = 1-OMEGAH*(EYEH/KH-DELTAH*(1+ZEYEH))-OMEGA0H*(EYEH/KH-EYEH(-1)/KH(-1)) ;
+          1/PSIPRIMEH = DEEH*PIEH(+1)*(REALRH(+1)+1/PSIPRIMEH(+1)*(1-DELTAH+PSIH(+1)*(1-PSIPRIMEH(+1)*EYEH(+1)/(PSIH(+1)*KH(+1))))) ;
+          BIGGAMH = LILCAPPA1H/2*(PIEWH/PIE4TARH^0.25-1)^2+LILCAPPA2H/2*(PIEWH/PIEWH(-1)-1)^2 ;
+          PIEH*REALWH/REALWH(-1) = PIEWH ;
+          REALWH = PHIH*VPRIMEH/MARGUTH*((PHIH-1)*(1-BIGGAMH)+PIEWH*LILCAPPA1H/PIE4TARH^0.25*(PIEWH/PIE4TARH^0.25-1)+PIEWH/PIEWH(-1)*LILCAPPA2H*(PIEWH/PIEWH(-1)-1)-DEEH*PIEWH(+1)*LH(+1)/LH*LILCAPPA1H*PIEWH(+1)/PIE4TARH^0.25*(PIEWH(+1)/PIE4TARH^0.25-1)-DEEH*PIEWH(+1)*LH(+1)/LH*LILCAPPA2H*PIEWH(+1)/(REALWH/REALWH(-1))*(PIEWH(+1)/PIEWH-1))^(-1) ;
+          DEEH = BET*MARGUTH(+1)/PIEH(+1)/MARGUTH*(1+SHOPH+SHOPPRIMEH*VELOH)/(1+SHOPH(+1)+SHOPPRIMEH(+1)*VELOH(+1)) ;
+          SHOPH = A_ZEROH*VELOH+A_ONEH/VELOH-2*(A_ZEROH*A_ONEH)^0.5 ;
+          SHOPPRIMEH = A_ZEROH-A_ONEH*VELOH^(-2) ;
+          VELOH = CH/REALMONEYH ;
+          DEEH = 1-SHOPPRIMEH*VELOH^2 ;
+          1 = RNOMH*DEEH ;
+          100*(RNOMH^4-1) = (1-XDUMH)*100*(RNOM_EXOGH^4-1)+XDUMH*(XR3H*100*(RNOMH(-1)^4-1)+(1-XR3H)*(100*((1/BET*PIE4H^0.25)^4-1))+XR1H*(100*(PIE4H-1)-100*(PIE4TARH-1))+XR4H*100*(DEPEX^4-1)+XR2H*GDPGAPH) ;
+          GDPGAPH = 100*(GDPH_NAT-GDPH_EXOG)/GDPH_EXOG ;
+          PIE4H = PIEH*PIEH(-1)*PIEH1(-1)*PIEH2(-1) ;
+          AH = CH*(1+SHOPH)+EYEH+GAH ;
+          GAH = .05*(GA_RATH*(GLAMBDAH*GDPH_NAT+(1-GLAMBDAH)*GDPH_EXOG))+.95*GAH(-1)+E_GAH;
+          GNH = GN_RATH*(GLAMBDAH*GDPH_NAT+(1-GLAMBDAH)*GDPH_EXOG)/REALPNH_EXOG ;
+          PIEBARMH = PIEH*REALPBARMH/REALPBARMH(-1) ;
+          BIGGAMMH = LILCAPPA7H/2*(PIEBARMH/PIE4TARH^0.25-1)^2+LILCAPPA8H/2*(PIEBARMH/PIEBARMH(-1)-1)^2 ;
+          REALPMH = REALPBARMH+ETAH*REALPNH ;
+          KNH_RAT = ALPHANH/(1-ALPHANH-GAMA_NH)*(REALWH/REALRH)^XIXI_NH ;
+          KTH_RAT = ALPHATH/(1-ALPHATH-GAMA_TH)*(REALWH/REALRH)^XIXI_TH ;
+          KNH_RAT = KNH/LNH ;
+          KTH_RAT = KTH/LTH ;
+          KH = KTH+KNH+K_OH ;
+          LH = (LNH+LTH+L_OH)*(1-COSTLH/2*(LNH/(LTH+L_OH)/(LNH(-1)/(LTH(-1)+L_OH(-1)))-1)^2) ;
+          T_OH = Z_OH*((1-ALPHA_OH-GAMMA_LANDH)^(1/XIXI_OH)*L_OH^(1-1/XIXI_OH)+ALPHA_OH^(1/XIXI_OH)*K_OH^(1-1/XIXI_OH)+GAMMA_LANDH^(1/XIXI_OH)*LANDH^(1-1/XIXI_OH))^(XIXI_OH/(XIXI_OH-1)) ;
+          Q_ONH = NY_NH*(REALP_QOH/REALP_ONH)^(-EPS_ONH)*O_NH ;
+          Q_OTH = NY_TH*(REALP_QOH/REALP_OTH)^(-EPS_OTH)*O_TH ;
+          M_ONH = (1-NY_NH)*(REALP_MOH/REALP_ONH)^(-EPS_ONH)*O_NH*(BIGGAM_MONH-COST_MONH*(M_ONH/O_NH/(M_ONH(-1)/O_NH(-1))-1)*M_ONH/O_NH/(M_ONH(-1)/O_NH(-1)))^EPS_ONH/BIGGAM_MONH ;
+          M_OTH = (1-NY_TH)*(REALP_MOH/REALP_OTH)^(-EPS_OTH)*O_TH*(BIGGAM_MOTH-COST_MOTH*(M_OTH/O_TH/(M_OTH(-1)/O_TH(-1))-1)*M_OTH/O_TH/(M_OTH(-1)/O_TH(-1)))^EPS_OTH/BIGGAM_MOTH ;
+          BIGGAM_MONH = 1-COST_MONH/2*(M_ONH/O_NH/(M_ONH(-1)/O_NH(-1))-1)^2 ;
+          BIGGAM_MOTH = 1-COST_MOTH/2*(M_OTH/O_TH/(M_OTH(-1)/O_TH(-1))-1)^2 ;
+          K_OH_RAT = ALPHA_OH/(1-ALPHA_OH-GAMMA_LANDH)*(REALWH/REALRH)^XIXI_OH ;
+          K_OH_RAT = K_OH/L_OH ;
+          REALP_QOH = 1/Z_OH*((1-ALPHA_OH-GAMMA_LANDH)*REALWH^(1-XIXI_OH)+ALPHA_OH*REALRH^(1-XIXI_OH)+GAMMA_LANDH*REALPLANDH^(1-XIXI_OH))^(1/(1-XIXI_OH)) ;
+          LANDH = GAMMA_LANDH*(REALPLANDH/(REALP_QOH*Z_OH))^(-XIXI_OH)*T_OH/Z_OH ;
+          REALP_ONH = (NY_NH*REALP_QOH^(1-EPS_ONH)+(1-NY_NH)*REALP_MOH^(1-EPS_ONH)*(BIGGAM_MONH-COST_MONH*(M_ONH/O_NH/(M_ONH(-1)/O_NH(-1))-1)*M_ONH/O_NH/(M_ONH(-1)/O_NH(-1)))^(EPS_ONH-1))^(1/(1-EPS_ONH)) ;
+          REALP_OTH = (NY_TH*REALP_QOH^(1-EPS_OTH)+(1-NY_TH)*REALP_MOH^(1-EPS_OTH)*(BIGGAM_MOTH-COST_MOTH*(M_OTH/O_TH/(M_OTH(-1)/O_TH(-1))-1)*M_OTH/O_TH/(M_OTH(-1)/O_TH(-1)))^(EPS_OTH-1))^(1/(1-EPS_OTH)) ;
+          SSH*TH = SSH*QH+SSF*MF ;
+          SSH*T_OH = SSH*Q_ONH+SSH*Q_OTH+SSF*M_ONF+SSF*M_OTF ;
+          REALP_MOH = REALP_QOF*REALEXH ;
+          ZZ_GDP_PPP_RATH = GDPH/REALEX/GDPF ;
+          XI = CHI0*(exp(CHI1*REALEX*REALBH)+CHI2*(REALEX*(REALBH-REALBH(-1)/PIEF)/GDPH)^2+CHI3*(REALEX*(REALBH-REALBH(-1)/PIEF)/GDPH-REALEX(-1)*(REALBH(-1)-REALBH1(-1)/PIEF(-1))/GDPH(-1))^2-1)/(exp(CHI1*REALEX*REALBH)+CHI2*(REALEX*(REALBH-REALBH(-1)/PIEF)/GDPH)^2+CHI3*(REALEX*(REALBH-REALBH(-1)/PIEF)/GDPH-REALEX(-1)*(REALBH(-1)-REALBH1(-1)/PIEF(-1))/GDPH(-1))^2+1)+ZBH ;
+          1 = RNOMF*(1-XI)*DEEH*DEPEX(+1) ;
+          DEPEX = PIEH/PIEF*REALEX/REALEX(-1) ;
+          REALFINH = RNOMF(-1)*(1-XI(-1))*REALEX*REALBH(-1)/PIEF ;
+          SSH*DEEH*PIEH(+1)*REALFINH(+1) = SSH*REALFINH+SSH*RNOMF(-1)*XI(-1)*REALEX*REALBH(-1)/PIEF+REALTBALH ;
+          REALEXH = REALEX ;
+          REALEXF = 1/REALEXH ;
+          ZZ_REALEX = 100*log(REALEX) ;
+          -(1-BIGGAMMH)*REALPBARMH/REALPMH*(REALPBARMH/REALEX*(1-THETAF)+ETAH*REALPNH/REALEX+THETAF*REALMCTF) = -(REALPBARMH/REALEX-REALMCTF)*(LILCAPPA7H*PIEBARMH/PIE4TARH^0.25*(PIEBARMH/PIE4TARH^0.25-1)+LILCAPPA8H*PIEBARMH/PIEBARMH(-1)*(PIEBARMH/PIEBARMH(-1)-1))+DEEF*PIEF(+1)*(REALPBARMH(+1)/REALEX(+1)-REALMCTF(+1))*MH(+1)/MH*(LILCAPPA7H*PIEBARMH(+1)/PIE4TARH^0.25*(PIEBARMH(+1)/PIE4TARH^0.25-1)+LILCAPPA8H*PIEBARMH(+1)/PIEBARMH*(PIEBARMH(+1)/PIEBARMH-1)) ;
+          GDPH = AH+REALPNH*GNH+EXPORTSH-IMPORTSH+(RNOMF(-1)-1)*REALEX*REALBH(-1)/PIEF ;
+          GDPH_NAT = AH+REALPNH_EXOG*GNH+EXPORTSH_NAT-IMPORTSH_NAT ;
+          CH_NAT = CH*(1+SHOPH) ;
+          GH_NAT = GAH+REALPNH_EXOG*GNH ;
+          XH_NAT = SSF/SSH*REALEX_EXOG*REALPBARMF_EXOG*MF ;
+          MH_NAT = REALPBARMH_EXOG*MH ;
+          CURBALH_RAT = REALEX*(REALBH-REALBH(-1)/PIEF)/GDPH ;
+          REALTBALH = SSF*(REALPBARMF*MF+REALP_MOF*M_ONF+REALP_MOF*M_OTF)*REALEX-SSH*(REALPBARMH*MH+REALP_MOH*M_ONH+REALP_MOH*M_OTH) ;
+          EXPORTSH = SSF/SSH*(REALPBARMF*MF+REALP_MOF*M_ONF+REALP_MOF*M_OTF)*REALEX ;
+          IMPORTSH = REALPBARMH*MH+REALP_MOH*M_ONH+REALP_MOH*M_OTH ;
+          EXPORTSH_NAT = SSF/SSH*(REALPBARMF_EXOG*MF+REALP_MOF_EXOG*M_ONF+REALP_MOF_EXOG*M_OTF)*REALEX_EXOG ;
+          IMPORTSH_NAT = REALPBARMH_EXOG*MH+REALP_MOH_EXOG*M_ONH+REALP_MOH_EXOG*M_OTH ;
+          ZZ_UTILITYF = (ZUF*(CF-HF)^(1-SIGMAF)-1)/(1-SIGMAF)-CAPAF*LF^(1+ZEDF)/(1+ZEDF) ;
+          ZZ_GF = 100*log(GF_NAT) ;
+          ZZ_CURBALF_RAT = CURBALF_RAT*100 ;
+          ZZ_M_SHARF = REALPBARMF*MF/AF ;
+          ZZ_M_O_SHARF = (REALP_MOF*M_ONF+REALP_MOF*M_OTF)/AF ;
+          ZZ_M_ON_SHARF = REALP_MOF*M_ONF/AF ;
+          ZZ_M_OT_SHARF = REALP_MOF*M_OTF/AF ;
+          ZZ_N_SHARF = NF*REALPNF/AF ;
+          ZZ_EYE_SHARF = EYEF/GDPF ;
+          ZZ_C_SHARF = CF/GDPF ;
+          ZZ_GDPF = 100*log(GDPF_NAT) ;
+          ZZ_CF = 100*log(CF_NAT) ;
+          ZZ_EYEF = 100*log(EYEF) ;
+          ZZ_EXPORTSF = 100*log(EXPORTSF_NAT) ;
+          ZZ_IMPORTSF = 100*log(IMPORTSF_NAT) ;
+          ZZ_XBALF_TOT_RAT = 100*(EXPORTSF_NAT-IMPORTSF_NAT)/GDPF_NAT ;
+          ZZ_PIE4F = 100*(PIE4F-1) ;
+          ZZ_DPIE4F = ZZ_PIE4F-ZZ_PIE4F(-1) ;
+          ZZ_RNOMF = 100*(RNOMF^4-1) ;
+          ZZ_DRNOMF = ZZ_RNOMF-ZZ_RNOMF(-1) ;
+          100*(PIE4TARF-1) = 1*100*(PIE4TARF_SS-1)+(1-1)*100*(PIE4TARF(-1)-1)+E_PIE4TARF ;
+          log(ZUF) = 0.3*log(ZUF_SS)+0.7*log(ZUF(-1))+E_ZUF ;
+          ZBF = 0.3*ZBF_SS+0.7*ZBF(-1)+E_ZBF ;
+          log(LANDF) = 0.05*log(LANDF_SS)+0.95*log(LANDF(-1))+E_LANDF ;
+          log(ZTF) = 0.05*log(ZTF_SS)+0.95*log(ZTF(-1))+E_LANDF ;
+          log(ZNF) = 0.05*log(ZNF_SS)+0.95*log(ZNF(-1))+E_LANDF ;
+          log(Z_OF) = 0.05*log(Z_OF_SS)+0.95*log(Z_OF(-1))+E_LANDF ;
+          ZEYEF = 0.05*ZEYEF_SS+0.95*ZEYEF(-1)+E_ZEYEF ;
+          CAPAF = 0.05*CAPAF_SS+0.95*CAPAF(-1)+E_CAPAF ;
+          log(GAMMAF) = 0.05*log(GAMMAF_SS)+0.95*log(GAMMAF(-1))+E_GAMMAF ;
+          BIGGAM_O_NF = 1-COST_O_NF/2*(O_NF/NF/(O_NF(-1)/NF(-1))-1)^2 ;
+          BIGGAM_O_TF = 1-COST_O_TF/2*(O_TF/TF/(O_TF(-1)/TF(-1))-1)^2 ;
+          O_NF = GAMA_NF*NF/ZNF*(REALP_ONF/(REALMCNF*ZNF))^(-XIXI_NF)*(BIGGAM_O_NF-COST_O_NF*(O_NF/NF/(O_NF(-1)/NF(-1))-1)*O_NF/NF/(O_NF(-1)/NF(-1)))^XIXI_NF/BIGGAM_O_NF ;
+          O_TF = GAMA_TF*TF/ZTF*(REALP_OTF/(REALMCTF*ZTF))^(-XIXI_TF)*(BIGGAM_O_TF-COST_O_TF*(O_TF/TF/(O_TF(-1)/TF(-1))-1)*O_TF/TF/(O_TF(-1)/TF(-1)))^XIXI_NF/BIGGAM_O_TF ;
+          NF = ZNF*((1-ALPHANF-GAMA_NF)^(1/XIXI_NF)*LNF^(1-1/XIXI_NF)+ALPHANF^(1/XIXI_NF)*KNF^(1-1/XIXI_NF)+GAMA_NF^(1/XIXI_NF)*(BIGGAM_O_NF*O_NF)^(1-1/XIXI_NF))^(XIXI_NF/(XIXI_NF-1)) ;
+          TF = ZTF*((1-ALPHATF-GAMA_TF)^(1/XIXI_TF)*LTF^(1-1/XIXI_TF)+ALPHATF^(1/XIXI_TF)*KTF^(1-1/XIXI_TF)+GAMA_TF^(1/XIXI_TF)*(BIGGAM_O_TF*O_TF)^(1-1/XIXI_TF))^(XIXI_TF/(XIXI_TF-1)) ;
+          REALMCNF = 1/ZNF*((1-ALPHANF-GAMA_NF)*REALWF^(1-XIXI_NF)+ALPHANF*REALRF^(1-XIXI_NF)+GAMA_NF*REALP_ONF^(1-XIXI_NF)*(BIGGAM_O_NF-COST_O_NF*(O_NF/NF/(O_NF(-1)/NF(-1))-1)*O_NF/NF/(O_NF(-1)/NF(-1)))^(XIXI_NF-1))^(1/(1-XIXI_NF)) ;
+          REALMCTF = 1/ZTF*((1-ALPHATF-GAMA_TF)*REALWF^(1-XIXI_TF)+ALPHATF*REALRF^(1-XIXI_TF)+GAMA_TF*REALP_OTF^(1-XIXI_TF)*(BIGGAM_O_TF-COST_O_TF*(O_TF/TF/(O_TF(-1)/TF(-1))-1)*O_TF/TF/(O_TF(-1)/TF(-1)))^(XIXI_TF-1))^(1/(1-XIXI_TF)) ;
+          MARGUTF = (CF-B2F*HF)^(-SIGMAF)*ZUF ;
+          HF = (1-B0F)*HF(-1)+B0F*B1F*CF(-1) ;
+          VPRIMEF = CAPAF*LF^ZEDF ;
+          AF^(1-1/EPSF) = GAMMAF^(1/EPSF)*(NYF^(1/EPSQMF)*QF^(1-1/EPSQMF)+(1-NYF)^(1/EPSQMF)*(MF*BIGGAMIMPF)^(1-1/EPSQMF))^(EPSQMF/(EPSQMF-1)*(1-1/EPSF))+(1-GAMMAF)^(1/EPSF)*NNF^(1-1/EPSF) ;
+          QF = GAMMAF*NYF*REALPQF^(-EPSQMF)*AF*REALPXF^(EPSQMF-EPSF) ;
+          MF = GAMMAF*(1-NYF)*REALPMF^(-EPSQMF)*AF*REALPXF^(EPSQMF-EPSF)*1/BIGGAMIMPF*(BIGGAMIMPF-COSTF*(MF/AF/(MF(-1)/AF(-1))-1)*MF/AF/(MF(-1)/AF(-1)))^EPSQMF ;
+          REALPXF = (NYF*REALPQF^(1-EPSQMF)+(1-NYF)*REALPMF^(1-EPSQMF)*(BIGGAMIMPF-COSTF*(MF/AF/(MF(-1)/AF(-1))-1)*MF/AF/(MF(-1)/AF(-1)))^(EPSQMF-1))^(1/(1-EPSQMF)) ;
+          BIGGAMIMPF = 1-COSTF/2*(MF/AF/(MF(-1)/AF(-1))-1)^2 ;
+          NNF = (1-GAMMAF)*REALPNF^(-EPSF)*AF ;
+          NF = NNF+ETAF*MF+ETAF*QF+GNF ;
+          PIENF = REALPNF/REALPNF(-1)*PIEF ;
+          BIGGAMNF = LILCAPPA3F/2*(PIENF/PIE4TARF^0.25-1)^2+LILCAPPA4F/2*(PIENF/PIENF(-1)-1)^2 ;
+          -(1-BIGGAMNF)*(REALPNF*(1-THETAF)+THETAF*REALMCNF) = -(REALPNF-REALMCNF)*(LILCAPPA3F*PIENF/PIE4TARF^0.25*(PIENF/PIE4TARF^0.25-1)+LILCAPPA4F*PIENF/PIENF(-1)*(PIENF/PIENF(-1)-1))+DEEF*PIEF(+1)*(REALPNF(+1)-REALMCNF(+1))*NF(+1)/NF*(LILCAPPA3F*PIENF(+1)/PIE4TARF^0.25*(PIENF(+1)/PIE4TARF^0.25-1)+LILCAPPA4F*PIENF(+1)/PIENF*(PIENF(+1)/PIENF-1)) ;
+          PIEBARQF = PIEF*REALPBARQF/REALPBARQF(-1) ;
+          BIGGAMQF = LILCAPPA5F/2*(PIEBARQF/PIE4TARF^0.25-1)^2+LILCAPPA6F/2*(PIEBARQF/PIEBARQF(-1)-1)^2 ;
+          -(1-BIGGAMQF)*REALPBARQF/REALPQF*(REALPBARQF*(1-THETAF)+ETAF*REALPNF+THETAF*REALMCTF) = -(REALPBARQF-REALMCTF)*(LILCAPPA5F*PIEBARQF/PIE4TARF^0.25*(PIEBARQF/PIE4TARF^0.25-1)+LILCAPPA6F*PIEBARQF/PIEBARQF(-1)*(PIEBARQF/PIEBARQF(-1)-1))+DEEF*PIEF(+1)*(REALPBARQF(+1)-REALMCTF(+1))*QF(+1)/QF*(LILCAPPA5F*PIEBARQF(+1)/PIE4TARF^0.25*(PIEBARQF(+1)/PIE4TARF^0.25-1)+LILCAPPA6F*PIEBARQF(+1)/PIEBARQF*(PIEBARQF(+1)/PIEBARQF-1)) ;
+          REALPQF = REALPBARQF+ETAF*REALPNF ;
+          KF = KF(-1)*(1-DELTAF)+PSIF(-1)*KF(-1) ;
+          PSIF = EYEF/KF-OMEGAF/2*(EYEF/KF-DELTAF*(1+ZEYEF))^2-OMEGA0F/2*(EYEF/KF-EYEF(-1)/KF(-1))^2 ;
+          PSIPRIMEF = 1-OMEGAF*(EYEF/KF-DELTAF*(1+ZEYEF))-OMEGA0F*(EYEF/KF-EYEF(-1)/KF(-1)) ;
+          1/PSIPRIMEF = DEEF*PIEF(+1)*(REALRF(+1)+1/PSIPRIMEF(+1)*(1-DELTAF+PSIF(+1)*(1-PSIPRIMEF(+1)*EYEF(+1)/(PSIF(+1)*KF(+1))))) ;
+          BIGGAMF = LILCAPPA1F/2*(PIEWF/PIE4TARF^0.25-1)^2+LILCAPPA2F/2*(PIEWF/PIEWF(-1)-1)^2 ;
+          PIEF*REALWF/REALWF(-1) = PIEWF ;
+          REALWF = PHIF*VPRIMEF/MARGUTF*((PHIF-1)*(1-BIGGAMF)+PIEWF*LILCAPPA1F/PIE4TARF^0.25*(PIEWF/PIE4TARF^0.25-1)+PIEWF/PIEWF(-1)*LILCAPPA2F*(PIEWF/PIEWF(-1)-1)-DEEF*PIEWF(+1)*LF(+1)/LF*LILCAPPA1F*PIEWF(+1)/PIE4TARF^0.25*(PIEWF(+1)/PIE4TARF^0.25-1)-DEEF*PIEWF(+1)*LF(+1)/LF*LILCAPPA2F*PIEWF(+1)/(REALWF/REALWF(-1))*(PIEWF(+1)/PIEWF-1))^(-1) ;
+          DEEF = BET*MARGUTF(+1)/PIEF(+1)/MARGUTF*(1+SHOPF+SHOPPRIMEF*VELOF)/(1+SHOPF(+1)+SHOPPRIMEF(+1)*VELOF(+1)) ;
+          SHOPF = A_ZEROF*VELOF+A_ONEF/VELOF-2*(A_ZEROF*A_ONEF)^0.5 ;
+          SHOPPRIMEF = A_ZEROF-A_ONEF*VELOF^(-2) ;
+          VELOF = CF/REALMONEYF ;
+          DEEF = 1-SHOPPRIMEF*VELOF^2 ;
+          1 = RNOMF*DEEF ;
+          100*(RNOMF^4-1) = (1-XDUMF)*100*(RNOM_EXOGF^4-1)+XDUMF*(XR3F*100*(RNOMF(-1)^4-1)+(1-XR3F)*(100*((1/BET*PIE4F^0.25)^4-1)+XR1F*(100*(PIE4F-1)-100*(PIE4TARF-1))+XR4F*100*(DEPEX^4-1)+XR2F*GDPGAPF)) ;
+          GDPGAPF = 100*(GDPF_NAT-GDPF_EXOG)/GDPF_EXOG ;
+          PIE4F = PIEF*PIEF(-1)*PIEF1(-1)*PIEF2(-1) ;
+          AF = CF*(1+SHOPF)+EYEF+GAF ;
+          GAF = .05*(GA_RATF*(GLAMBDAF*GDPF_NAT+(1-GLAMBDAF)*GDPF_EXOG))+.95*GAF(-1)+E_GAF; 
+          GNF = GN_RATF*(GLAMBDAF*GDPF_NAT+(1-GLAMBDAF)*GDPF_EXOG)/REALPNF_EXOG ;
+          PIEBARMF = PIEF*REALPBARMF/REALPBARMF(-1) ;
+          BIGGAMMF = LILCAPPA7F/2*(PIEBARMF/PIE4TARF^0.25-1)^2+LILCAPPA8F/2*(PIEBARMF/PIEBARMF(-1)-1)^2 ;
+          REALPMF = REALPBARMF+ETAF*REALPNF ;
+          KNF_RAT = ALPHANF/(1-ALPHANF-GAMA_NF)*(REALWF/REALRF)^XIXI_NF ;
+          KTF_RAT = ALPHATF/(1-ALPHATF-GAMA_TF)*(REALWF/REALRF)^XIXI_TF ;
+          KNF_RAT = KNF/LNF ;
+          KTF_RAT = KTF/LTF ;
+          KF = KTF+KNF+K_OF ;
+          LF = (LNF+LTF+L_OF)*(1-COSTLF/2*(LNF/(LTF+L_OF)/(LNF(-1)/(LTF(-1)+L_OF(-1)))-1)^2) ;
+          T_OF = Z_OF*((1-ALPHA_OF-GAMMA_LANDF)^(1/XIXI_OF)*L_OF^(1-1/XIXI_OF)+ALPHA_OF^(1/XIXI_OF)*K_OF^(1-1/XIXI_OF)+GAMMA_LANDF^(1/XIXI_OF)*LANDF^(1-1/XIXI_OF))^(XIXI_OF/(XIXI_OF-1)) ;
+          Q_ONF = NY_NF*(REALP_QOF/REALP_ONF)^(-EPS_ONF)*O_NF ;
+          Q_OTF = NY_TF*(REALP_QOF/REALP_OTF)^(-EPS_OTF)*O_TF ;
+          M_ONF = (1-NY_NF)*(REALP_MOF/REALP_ONF)^(-EPS_ONF)*O_NF*(BIGGAM_MONF-COST_MONF*(M_ONF/O_NF/(M_ONF(-1)/O_NF(-1))-1)*M_ONF/O_NF/(M_ONF(-1)/O_NF(-1)))^EPS_ONF/BIGGAM_MONF ;
+          M_OTF = (1-NY_TF)*(REALP_MOF/REALP_OTF)^(-EPS_OTF)*O_TF*(BIGGAM_MOTF-COST_MOTF*(M_OTF/O_TF/(M_OTF(-1)/O_TF(-1))-1)*M_OTF/O_TF/(M_OTF(-1)/O_TF(-1)))^EPS_OTF/BIGGAM_MOTF ;
+          BIGGAM_MONF = 1-COST_MONF/2*(M_ONF/O_NF/(M_ONF(-1)/O_NF(-1))-1)^2 ;
+          BIGGAM_MOTF = 1-COST_MOTF/2*(M_OTF/O_TF/(M_OTF(-1)/O_TF(-1))-1)^2 ;
+          K_OF_RAT = ALPHA_OF/(1-ALPHA_OF-GAMMA_LANDF)*(REALWF/REALRF)^XIXI_OF ;
+          K_OF_RAT = K_OF/L_OF ;
+          REALP_QOF = 1/Z_OF*((1-ALPHA_OF-GAMMA_LANDF)*REALWF^(1-XIXI_OF)+ALPHA_OF*REALRF^(1-XIXI_OF)+GAMMA_LANDF*REALPLANDF^(1-XIXI_OF))^(1/(1-XIXI_OF)) ;
+          LANDF = GAMMA_LANDF*(REALPLANDF/(REALP_QOF*Z_OF))^(-XIXI_OF)*T_OF/Z_OF ;
+          REALP_ONF = (NY_NF*REALP_QOF^(1-EPS_ONF)+(1-NY_NF)*REALP_MOF^(1-EPS_ONF)*(BIGGAM_MONF-COST_MONF*(M_ONF/O_NF/(M_ONF(-1)/O_NF(-1))-1)*M_ONF/O_NF/(M_ONF(-1)/O_NF(-1)))^(EPS_ONF-1))^(1/(1-EPS_ONF)) ;
+          REALP_OTF = (NY_TF*REALP_QOF^(1-EPS_OTF)+(1-NY_TF)*REALP_MOF^(1-EPS_OTF)*(BIGGAM_MOTF-COST_MOTF*(M_OTF/O_TF/(M_OTF(-1)/O_TF(-1))-1)*M_OTF/O_TF/(M_OTF(-1)/O_TF(-1)))^(EPS_OTF-1))^(1/(1-EPS_OTF)) ;
+          SSF*TF = SSF*QF+SSH*MH ;
+          SSF*T_OF = SSF*Q_ONF+SSF*Q_OTF+SSH*M_ONH+SSH*M_OTH ;
+          REALP_MOF = REALP_QOH*REALEXF ;
+          SSH*REALBH+SSF*REALBF = 0 ;
+          REALTBALF = SSF*(REALPBARMF*MF+REALP_MOF*M_ONF+REALP_MOF*M_OTF)-SSH*(REALPBARMH*MH+REALP_MOH*M_ONH+REALP_MOH*M_OTH)*1/REALEX ;
+          EXPORTSF = SSH/SSF*(REALPBARMH*MH+REALP_MOH*M_ONH+REALP_MOH*M_OTH)*1/REALEX ;
+          IMPORTSF = REALPBARMF*MF+REALP_MOF*M_ONF+REALP_MOF*M_OTF ;
+          EXPORTSF_NAT = SSH/SSF*(REALPBARMH_EXOG*MH+REALP_MOH_EXOG*M_ONH+REALP_MOH_EXOG*M_OTH)*1/REALEX_EXOG ;
+          IMPORTSF_NAT = REALPBARMF_EXOG*MF+REALP_MOF_EXOG*M_ONF+REALP_MOF_EXOG*M_OTF ;
+          -(1-BIGGAMMF)*REALPBARMF/REALPMF*(REALPBARMF*REALEX*(1-THETAH)+ETAF*REALPNF*REALEX+THETAH*REALMCTH) = -(REALPBARMF*REALEX-REALMCTH)*(LILCAPPA7F*PIEBARMF/PIE4TARF^0.25*(PIEBARMF/PIE4TARF^0.25-1)+LILCAPPA8F*PIEBARMF/PIEBARMF(-1)*(PIEBARMF/PIEBARMF(-1)-1))+DEEH*PIEH(+1)*(REALPBARMF(+1)*REALEX(+1)-REALMCTH(+1))*MF(+1)/MF*(LILCAPPA7F*PIEBARMF(+1)/PIE4TARF^0.25*(PIEBARMF(+1)/PIE4TARF^0.25-1)+LILCAPPA8F*PIEBARMF(+1)/PIEBARMF*(PIEBARMF(+1)/PIEBARMF-1)) ;
+          GDPF = AF+REALPNF*GNF+EXPORTSF-IMPORTSF+(RNOMF(-1)-1)*REALBF(-1)/PIEF ;
+          GDPF_NAT = AF+REALPNF_EXOG*GNF+EXPORTSF_NAT-IMPORTSF_NAT ;
+          CF_NAT = CF*(1+SHOPF) ;
+          GF_NAT = GAF+REALPNF_EXOG*GNF ;
+          XF_NAT = SSH/SSF*1/REALEX_EXOG*REALPBARMH_EXOG*MH ;
+          MF_NAT = REALPBARMF_EXOG*MF ;
+          CURBALF_RAT = -(REALTBALH/REALEX/SSF/GDPF)+(RNOMF(-1)-1)*REALBF(-1)/PIEF/GDPF ;
+	  PIEF1 = PIEF(-1);
+	  PIEF2 = PIEF1(-1);
+	  PIEH1 = PIEH(-1);
+	  PIEH2 = PIEH1(-1);
+	  REALBH1 = REALBH(-1);
+end; 
+ 
+initval; 
+AF=2.17350447531715;
+AH=2.61461230039988;
+BIGGAMF=0;
+BIGGAMH=0;
+BIGGAMIMPF=1;
+BIGGAMIMPH=1;
+BIGGAMMF=0;
+BIGGAMMH=0;
+BIGGAMNF=0;
+BIGGAMNH=0;
+BIGGAMQF=0;
+BIGGAMQH=0;
+BIGGAM_MONF=1;
+BIGGAM_MONH=1;
+BIGGAM_MOTF=1;
+BIGGAM_MOTH=1;
+BIGGAM_O_NF=1;
+BIGGAM_O_NH=1;
+BIGGAM_O_TF=1;
+BIGGAM_O_TH=1;
+CAPAF=11;
+CAPAH=11;
+CF=1.77599320017707;
+CF_NAT=1.77797456682707;
+CH=2.10139281352027;
+CH_NAT=2.10373720855446;
+CURBALF_RAT=2.20209042676066e-018;
+CURBALH_RAT=0;
+DEEF=0.963834712172592;
+DEEH=0.963834712172592;
+DEPEX=1;
+EXPORTSF=0.0374229290542059;
+EXPORTSF_NAT=0.0374229290542059;
+EXPORTSH=0.976573287861717;
+EXPORTSH_NAT=0.976573287861717;
+EYEF=0.27477965986135;
+EYEH=0.365618852934316;
+GAF=0.12075024862873;
+GAH=0.145256238911104;
+GAMMAF=0.5;
+GAMMAH=0.25;
+GDPF=2.41500497257461;
+GDPF_NAT=2.41500497257461;
+GDPGAPF=0;
+GDPGAPH=0;
+GDPH=2.90512477822209;
+GDPH_NAT=2.90512477822209;
+GF_NAT=0.362250745886191;
+GH_NAT=0.435768716733313;
+GNF=0.287269571519256;
+GNH=0.321902361090147;
+HF=1.68719354016822;
+HH=1.99632317284426;
+IMPORTSF=0.0374229290542059;
+IMPORTSF_NAT=0.0374229290542059;
+IMPORTSH=0.976573287861718;
+IMPORTSH_NAT=0.976573287861718;
+KF=10.991186394454;
+KH=14.6247541173726;
+KNF=6.33686501417153;
+KNF_RAT=22.6981730731029;
+KNH=11.034700665508;
+KNH_RAT=22.8755992006951;
+KTF=2.97137434524903;
+KTF_RAT=22.6981730731029;
+KTH=2.23720856941572;
+KTH_RAT=114.377996003476;
+K_OF=1.68294703503345;
+K_OF_RAT=7.27127622255245;
+K_OH=1.35284488244891;
+K_OH_RAT=8.16985685739111;
+LANDF=0.1;
+LANDH=0.1;
+LF=0.64153899810027;
+LH=0.667528221502678;
+LNF=0.279179517830034;
+LNH=0.482378650224502;
+LTF=0.130908083909629;
+LTH=0.019559781143112;
+L_OF=0.231451396360608;
+L_OH=0.165589790135064;
+MARGUTF=2.24145263303312;
+MARGUTH=2.11921125101343;
+MF=0.0196445696804563;
+MF_NAT=0.0171196449669319;
+MH=0.438784845846124;
+MH_NAT=0.522472906750236;
+M_ONF=0.0143006671963624;
+M_ONH=0.134410532365428;
+M_OTF=0.00670562423725087;
+M_OTH=0.143002828997546;
+NF=1.91582345366461;
+NH=2.609674642079;
+NNF=1.31534385473198;
+NNH=2.19524942542191;
+O_NF=0.387338325509274;
+O_NH=0.147043832240678;
+O_TF=0.18162406186278;
+O_TH=0.148205762233076;
+PIE4F=1.125;
+PIE4H=1.125;
+PIE4TARF=1.125;
+PIE4TARH=1.125;
+PIEBARMF=1.02988357195356;
+PIEBARMH=1.02988357195356;
+PIEBARQF=1.02988357195356;
+PIEBARQH=1.02988357195356;
+PIEF=1.02988357195356;
+PIEF1=1.02988357195356;
+PIEF2=1.02988357195356;
+PIEH=1.02988357195356;
+PIEH1=1.02988357195356;
+PIEH2=1.02988357195356;
+PIENF=1.02988357195356;
+PIENH=1.02988357195356;
+PIEWF=1.02988357195356;
+PIEWH=1.02988357195356;
+PSIF=0.025;
+PSIH=0.025;
+PSIPRIMEF=1;
+PSIPRIMEH=1;
+QF=0.875241222929181;
+QH=0.0238294319885835;
+Q_ONF=0.373740369418894;
+Q_ONH=0.0132636199615755;
+Q_OTF=0.175247940896905;
+Q_OTH=0.00547180886242481;
+REALBF=0;
+REALBH=0;
+REALBH1=0;
+REALEX=1.3734519289908;
+REALEXF=0.728092464608345;
+REALEXH=1.3734519289908;
+REALFINH=0;
+REALMCNF=0.700562935771035;
+REALMCNH=0.752071934789911;
+REALMCTF=0.700562935771035;
+REALMCTH=0.930081384894704;
+REALMONEYF=0.558667031035572;
+REALMONEYH=0.661026677383566;
+REALPBARMF=0.87146958398196;
+REALPBARMH=1.19072687148694;
+REALPBARQF=0.899522809530009;
+REALPBARQH=1.15219711474356;
+REALPLANDF=0.554831427212494;
+REALPLANDH=0.414697221827051;
+REALPMF=1.16570601700579;
+REALPMH=1.37122413583652;
+REALPNF=0.840675522925242;
+REALPNH=0.902486321747893;
+REALPQF=1.19375924255384;
+REALPQH=1.33269437909314;
+REALPXF=1.19317131724075;
+REALPXH=1.36926881180313;
+REALP_MOF=0.966533486000563;
+REALP_MOH=1.63690883121281;
+REALP_ONF=1.18566549908199;
+REALP_ONH=1.61601524261254;
+REALP_OTF=1.18566549908199;
+REALP_OTH=1.62845456685201;
+REALP_QOF=1.1918209852569;
+REALP_QOH=1.32748728078168;
+REALRF=0.0324170717777328;
+REALRH=0.0324170717777329;
+REALTBALF=-6.93889390390723e-018;
+REALTBALH=-6.93889390390723e-018;
+REALWF=2.42667732699502;
+REALWH=2.83454771236558;
+RNOMF=1.03752229232945;
+RNOMH=1.03752229232945;
+SHOPF=0.00111563864647424;
+SHOPH=0.00111563864647424;
+SHOPPRIMEF=0.00357861859467432;
+SHOPPRIMEH=0.00357861859467432;
+TF=0.89833516218424;
+TH=0.397076255917254;
+T_OF=0.563589013545429;
+T_OH=0.417854966062653;
+VELOF=3.17898336847443;
+VELOH=3.17898336847443;
+VPRIMEF=3.62618818940983;
+VPRIMEH=4.00467026905301;
+XF_NAT=0.0200215045456245;
+XH_NAT=0.446747178665936;
+XI=0;
+ZBF=0;
+ZBH=0;
+ZEYEF=0;
+ZEYEH=0;
+ZNF=1;
+ZNH=1;
+ZTF=1;
+ZTH=0.6;
+ZUF=1;
+ZUH=1;
+ZZ_CF=57.5474832617676;
+ZZ_CH=74.3715386197541;
+ZZ_CURBALF_RAT=2.20209042676066e-016;
+ZZ_CURBALH_RAT=0;
+ZZ_C_SHARF=0.735399396831762;
+ZZ_C_SHARH=0.723339950584259;
+ZZ_DPIE4F=0;
+ZZ_DPIE4H=0;
+ZZ_DRNOMF=0;
+ZZ_DRNOMH=0;
+ZZ_EXPORTSF=-328.547168610049;
+ZZ_EXPORTSH=-2.37054799079326;
+ZZ_EYEF=-129.17857393452;
+ZZ_EYEH=-100.616387362469;
+ZZ_EYE_SHARF=0.113780163180538;
+ZZ_EYE_SHARH=0.12585306341233;
+ZZ_GDPF=88.1701346139521;
+ZZ_GDPH=106.647634229781;
+ZZ_GDP_PPP_RATH=0.875857186130553;
+ZZ_GF=-101.541863874636;
+ZZ_GH=-83.0643642588075;
+ZZ_IMPORTSF=-328.547168610049;
+ZZ_IMPORTSH=-2.37054799079323;
+ZZ_M_ON_SHARF=0.0063593490946998;
+ZZ_M_ON_SHARH=0.084149297164759;
+ZZ_M_OT_SHARF=0.00298191719568198;
+ZZ_M_OT_SHARH=0.0895286056899133;
+ZZ_M_O_SHARF=0.00934126629038178;
+ZZ_M_O_SHARH=0.173677902854672;
+ZZ_M_SHARF=0.00787651700806085;
+ZZ_M_SHARH=0.19982806118916;
+ZZ_N_SHARF=0.741008772713445;
+ZZ_N_SHARH=0.90078198910348;
+ZZ_PIE4F=12.5;
+ZZ_PIE4H=12.5;
+ZZ_REALEX=31.7327227026121;
+ZZ_RNOMF=15.8749999999999;
+ZZ_RNOMH=15.8749999999999;
+ZZ_UTILITYF=-1.86610854895021;
+ZZ_UTILITYH=-1.9297829736965;
+ZZ_XBALF_TOT_RAT=0;
+ZZ_XBALH_TOT_RAT=-7.6432037132987e-015;
+Z_OF=1;
+Z_OH=1;
+
+E_ZBH=0;
+
+E_ZUH=0;
+E_ZUF=0;
+
+E_ZEYEH=0;
+E_ZEYEF=0;
+
+E_GAMMAH=0;
+E_GAMMAF=0;
+
+E_LANDH=0;
+E_LANDF=0;
+
+E_GAH = 0;
+E_GAF = 0;
+
+E_CAPAH=0;
+E_CAPAF=0;
+end;    
+
+vcov = [
+0.000324 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0.0004 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0.00000001 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0.000004 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0.000289 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0.000025 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0.0049 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0.000001 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0.000016 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0.00001225 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0.0000005625 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0.01 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0.0001
+];
+
+order=2;
\ No newline at end of file
diff --git a/dynare++/tests/judd.mod b/dynare++/tests/judd.mod
new file mode 100644
index 0000000000000000000000000000000000000000..7acfb1218c13241bc31235bd23956faa1e940b9f
--- /dev/null
+++ b/dynare++/tests/judd.mod
@@ -0,0 +1,50 @@
+var C K1 K2 L S1 S2 THETA V V1 V2;
+
+varexo KSI;
+
+parameters theta_ss lambda delta1 delta2 alpha1 alpha2 eta beta gamma depr1 depr2;
+
+theta_ss=1;
+lambda=0.8;
+delta1=0.1;
+delta2=0.05;
+alpha1=0.3;
+alpha2=0.15;
+eta=3;
+beta=0.95;
+gamma=0.5;
+depr1=0.1;
+depr2=0.05;
+
+model;
+C = THETA*K1^alpha1*K2^alpha2*L^(1-alpha1-alpha2)-S1*K1-S2*K2;
+K1 = (1-depr1+(1-0.5*delta1*S1)*S1)*K1(-1);
+K2 = (1-depr2+(1-0.5*delta2*S2)*S2)*K2(-1);
+THETA = THETA(-1)^lambda*theta_ss^(1-lambda)*exp(KSI);
+/*
+THETA = THETA(-1)*lambda+theta_ss*(1-lambda)+KSI;
+*/
+C^(-gamma)*THETA*K1^alpha1*K2^alpha2*L^(-alpha1-alpha2)*(1-alpha1-alpha2) = L^eta;
+C^(-gamma) = beta*V1(+1)*(1-delta1*S1); 
+C^(-gamma) = beta*V2(+1)*(1-delta2*S2);
+V1 = C^(-gamma)*(alpha1*THETA*K1^(alpha1-1)*K2^alpha2*L^(1-alpha1-alpha2)-S1)+beta*V1(+1)*(1-depr1+(1-0.5*delta1*S1)*S1); 
+V2 = C^(-gamma)*(alpha2*THETA*K1^alpha1*K2^(alpha2-1)*L^(1-alpha1-alpha2)-S2)+beta*V2(+1)*(1-depr2+(1-0.5*delta2*S2)*S2);
+V = (C^(1-gamma)/(1-gamma)-L^(1+eta)/(1+eta)) + beta*V(+1);
+end;
+
+initval;
+C=   1.33341818203972;
+K1=   3.80023995548668;
+K2=   3.80023995563911;
+L=   0.85120255261552;
+S1=                  0;
+S2=                  0;
+THETA=   1.00000000000000;
+V1=   0.59202988402399;
+V2=   0.59202988402399;
+V=   -17.6239;
+end;
+
+vcov = [ 0.001 ];
+
+order = 6;
diff --git a/dynare++/tests/judd_norm.mod b/dynare++/tests/judd_norm.mod
new file mode 100644
index 0000000000000000000000000000000000000000..30a74af3e6c4a8c2ae99c24d89cc2c99a5d83cc3
--- /dev/null
+++ b/dynare++/tests/judd_norm.mod
@@ -0,0 +1,50 @@
+var C K1 K2 L S1 S2 THETA V V1 V2;
+
+varexo KSI;
+
+parameters theta_ss lambda delta1 delta2 alpha1 alpha2 eta beta gamma depr1 depr2;
+
+theta_ss=1;
+lambda=0.5;
+delta1=0.05;
+delta2=0.2;
+alpha1=0.3;
+alpha2=0.3;
+eta=3;
+beta=0.95;
+gamma=0.5;
+depr1=0.1;
+depr2=0.05;
+
+model;
+1 = (THETA*K1^alpha1*K2^alpha2*L^(1-alpha1-alpha2)-S1*K1-S2*K2)/C;
+1 = (1-depr1+(1-0.5*delta1*S1)*S1)*K1(-1)/K1;
+1 = (1-depr2+(1-0.5*delta2*S2)*S2)*K2(-1)/K2;
+1 = THETA(-1)^lambda/THETA*theta_ss^(1-lambda)*exp(KSI);
+/*
+1 = (THETA(-1)*lambda+theta_ss*(1-lambda)+KSI)/THETA;
+*/
+C^(-gamma)*THETA*K1^alpha1*K2^alpha2*L^(-alpha1-alpha2)*(1-alpha1-alpha2)*L^(-eta)=1;
+1 = beta*V1(+1)*(1-delta1*S1)*C^gamma; 
+1 = beta*V2(+1)*(1-delta2*S2)*C^gamma;
+1 = (C^(-gamma)*(alpha1*THETA*K1^(alpha1-1)*K2^alpha2*L^(1-alpha1-alpha2)-S1)+beta*V1(+1)*(1-depr1+(1-0.5*delta1*S1)*S1))/V1; 
+1 = (C^(-gamma)*(alpha2*THETA*K1^alpha1*K2^(alpha2-1)*L^(1-alpha1-alpha2)-S2)+beta*V2(+1)*(1-depr2+(1-0.5*delta2*S2)*S2))/V2;
+1 = (C^(1-gamma)/(1-gamma)-L^(1+eta)/(1+eta) + beta*V(+1))/V;
+end;
+
+initval;
+C      =1.0997055 ;
+L      =0.9425540 ;
+S1     =0.1005051 ;
+S2     =0.0500627 ;
+K1     =2.9378521 ;
+K2     =2.1952681 ;
+THETA  =1.        ;
+V      =38.000392 ;
+V1     =1.0139701 ;
+V2     =1.0062981 ;
+end;
+
+vcov = [ 0.05 ];
+
+order = 5;
diff --git a/dynare++/tests/kp1980_1.dyn b/dynare++/tests/kp1980_1.dyn
new file mode 100644
index 0000000000000000000000000000000000000000..8083544a1b34753dcc8bdf69c9699c29d522a769
--- /dev/null
+++ b/dynare++/tests/kp1980_1.dyn
@@ -0,0 +1,41 @@
+// Model from Kydland & Prescott JEDC 1980
+
+// case 1: optimal policy (in fact, optimal control)
+
+var C G K TAU Z;
+
+varexo EPS;
+
+parameters eta beta alpha delta phi a rho; 
+
+eta = 2;
+beta = 0.99;
+alpha = 0.3;
+delta = 0.10;
+phi = 2.5;
+a = 0.1;
+rho = 0.7;
+
+planner_objective C^(1-eta)/(1-eta) + a*G^(1-phi)/(1-phi);
+
+planner_discount beta;
+
+model;
+K = (1-delta)*K(-1) + (exp(Z(-1))*K(-1)^alpha - C(-1) - G(-1));
+G = TAU*alpha*K^alpha;
+Z = rho*Z(-1) + EPS;
+end;
+
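+// The guesses below come from the deterministic steady state: K solves the
+// modified golden rule alpha*K^(alpha-1) = 1/beta - 1 + delta, and C then
+// follows from the resource constraint given G = TAU*alpha*K^alpha.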
+initval;
+TAU = 0.70;
+K = ((delta+1/beta-1)/alpha)^(1/(alpha-1));
+G = TAU*alpha*K^alpha;
+C =  K^alpha - delta*K - G;
+Z = 0;
+end;
+
+order = 4;
+
+vcov = [
+	0.01
+];
diff --git a/dynare++/tests/kp1980_2.dyn b/dynare++/tests/kp1980_2.dyn
new file mode 100644
index 0000000000000000000000000000000000000000..6d9d58f73396f30744ea9dde4d043195a0ae84c9
--- /dev/null
+++ b/dynare++/tests/kp1980_2.dyn
@@ -0,0 +1,42 @@
+// Model from Kydland & Prescott JEDC 1980
+
+// case 2: time-inconsistent optimal policy, with the policymaker's objective differing from the consumers'
+
+var C G K TAU Z;
+
+varexo EPS;
+
+parameters eta beta alpha delta phi a rho; 
+
+eta = 2;
+beta = 0.99;
+alpha = 0.3;
+delta = 0.10;
+phi = 2.5;
+a = 0.1;
+rho = 0.7;
+
+planner_objective C^(1-eta)/(1-eta) + a*G^(1-phi)/(1-phi);
+
+planner_discount beta;
+
+model;
+K = (1-delta)*K(-1) + (exp(Z(-1))*K(-1)^alpha - C(-1) - G(-1));
+G = TAU*alpha*K^alpha;
+Z = rho*Z(-1) + EPS;
+C^(-eta) = beta*C(+1)^(-eta)*(1-delta+exp(Z(+1))*alpha*K(+1)^(alpha-1)*(1-alpha*TAU(+1)));
+end;
+
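+// The K guess below is the steady-state version of the consumer Euler equation
+// above: alpha*K^(alpha-1)*(1-alpha*TAU) = 1/beta - 1 + delta.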
+initval;
+TAU = 0.70;
+K = ((delta+1/beta-1)/(alpha*(1-alpha*TAU)))^(1/(alpha-1));
+G = TAU*alpha*K^alpha;
+C =  K^alpha - delta*K - G;
+Z = 0;
+end;
+
+order = 4;
+
+vcov = [
+	0.01
+];
diff --git a/dynare++/tests/kp1980_3.dyn b/dynare++/tests/kp1980_3.dyn
new file mode 100644
index 0000000000000000000000000000000000000000..9bb36f12680c72698c289166966747c8dde9a43f
--- /dev/null
+++ b/dynare++/tests/kp1980_3.dyn
@@ -0,0 +1,42 @@
+// Model from Kydland & Prescott JEDC 1980
+
+// case 3: optimal policy with consistent objective (equivalent to kp1980_1.dyn)
+
+var C G K TAU Z;
+
+varexo EPS;
+
+parameters eta beta alpha delta phi a rho; 
+
+eta = 2;
+beta = 0.99;
+alpha = 0.3;
+delta = 0.10;
+phi = 2.5;
+a = 0.1;
+rho = 0.7;
+
+planner_objective C^(1-eta)/(1-eta) + a*G^(1-phi)/(1-phi);
+
+planner_discount beta;
+
+model;
+K = (1-delta)*K(-1) + (exp(Z(-1))*K(-1)^alpha - C(-1) - G(-1));
+G = TAU*alpha*K^alpha;
+Z = rho*Z(-1) + EPS;
+C^(-eta) = beta*C(+1)^(-eta)*(1-delta+exp(Z(+1))*alpha*K(+1)^(alpha-1)*(1-alpha*TAU(+1))) + beta*a*G(+1)^(-phi)*TAU(+1)*exp(Z(+1))*alpha^2*K(+1)^(alpha-1);
+end;
+
+initval;
+TAU = 0.70;
+K = ((delta+1/beta-1)/alpha)^(1/(alpha-1));
+G = TAU*alpha*K^alpha;
+C =  K^alpha - delta*K - G;
+Z = 0;
+end;
+
+order = 4;
+
+vcov = [
+	0.01
+];
diff --git a/dynare++/tests/lucas78.mod b/dynare++/tests/lucas78.mod
new file mode 100644
index 0000000000000000000000000000000000000000..c4c1a8ae1836db2bbeaed62a50ff76ac9c67dbaf
--- /dev/null
+++ b/dynare++/tests/lucas78.mod
@@ -0,0 +1,26 @@
+var Y P;
+
+varexo EXO_Y;
+
+parameters beta gamma rho y_ss;
+
+beta = 0.95;
+gamma= 0.5;
+rho  = 0.9;
+y_ss = 2;
+
+model;
+Y-y_ss = rho*(Y(-1)-y_ss) + EXO_Y;
+Y^(-gamma)*P = beta*Y(+1)^(-gamma)*(P(+1) + Y(+1));
+end;
+
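+// Deterministic steady state (a quick check): with Y = y_ss the pricing equation
+// reduces to P = beta*(P + Y), i.e. P = beta*y_ss/(1-beta) = 0.95*2/0.05 = 38,
+// which is the value used for P below.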
+initval;
+Y = 2;
+P = 38;
+end;
+
+vcov = [
+10
+];
+
+order = 7;
diff --git a/dynare++/tests/m_0_3_0_0_0_0_0_0.mod b/dynare++/tests/m_0_3_0_0_0_0_0_0.mod
new file mode 100644
index 0000000000000000000000000000000000000000..1d7c522b441eceb1bc99c7aa561dc7645488287e
--- /dev/null
+++ b/dynare++/tests/m_0_3_0_0_0_0_0_0.mod
@@ -0,0 +1,47 @@
+var lambda a1 a2 k1 k2 i1 i2 c1 c2 l1 l2;
+varexo e  e1 e2;
+parameters beta delta rho sigma phi AA alpha  gamma mu chi b Le tau;
+beta = 0.99;
+delta = 0.025;
+rho = 0.95;
+sigma = 0.001;
+phi = 0.5;
+alpha = 0.36;
+AA = 0.028058361;
+tau = 6.36522e-11;
+gamma = 0.25;
+mu = -0.2;
+chi = 0.83;
+b = 2.16872693993;
+Le = 2.5;
+
+model;
+log(a1) = rho*log(a1(-1))+sigma*(e+e1);
+log(a2) = rho*log(a2(-1))+sigma*(e+e2);
+lambda = tau*c1^(-1/chi)*(c1^(1-1/chi)+b*(Le-l1)^(1-1/chi))^((1-1/gamma)/(1-1/chi)-1);
+lambda = tau*c2^(-1/chi)*(c2^(1-1/chi)+b*(Le-l2)^(1-1/chi))^((1-1/gamma)/(1-1/chi)-1);
+tau*(-b)*(Le-l1)^(-1/chi)*(c1^(1-1/chi)+b*(Le-l1)^(1-1/chi))^((1-1/gamma)/(1-1/chi)-1) = -lambda*a1*AA*(1-alpha)*l1^(mu-1)*(alpha*k1(-1)^mu+(1-alpha)*l1^mu)^(1/mu-1);
+tau*(-b)*(Le-l2)^(-1/chi)*(c2^(1-1/chi)+b*(Le-l2)^(1-1/chi))^((1-1/gamma)/(1-1/chi)-1) = -lambda*a2*AA*(1-alpha)*l2^(mu-1)*(alpha*k2(-1)^mu+(1-alpha)*l2^mu)^(1/mu-1);
+lambda*(1+phi*(i1/k1(-1)-delta)) =beta*lambda(+1)*(1+a1(+1)*AA*alpha*k1^(mu-1)*(alpha*k1^mu+(1-alpha)*l1(+1)^mu)^(1/mu-1)+phi*(1-delta+i1(+1)/k1-0.5*(i1(+1)/k1-delta))*(i1(+1)/k1-delta));
+lambda*(1+phi*(i2/k2(-1)-delta)) =beta*lambda(+1)*(1+a2(+1)*AA*alpha*k2^(mu-1)*(alpha*k2^mu+(1-alpha)*l2(+1)^mu)^(1/mu-1)+phi*(1-delta+i2(+1)/k2-0.5*(i2(+1)/k2-delta))*(i2(+1)/k2-delta));
+k1 = i1 + (1-delta)*k1(-1);
+k2 = i2 + (1-delta)*k2(-1);
+c1+i1-delta*k1(-1) + c2+i2-delta*k2(-1) = a1*AA*(alpha*k1(-1)^mu+(1-alpha)*l1^mu)^(1/mu)-(phi/2)*k1(-1)*(i1/k1(-1)-delta)^2 + a2*AA*(alpha*k2(-1)^mu+(1-alpha)*l2^mu)^(1/mu)-(phi/2)*k2(-1)*(i2/k2(-1)-delta)^2;
+end;
+initval;
+a1 = 1;
+a2 = 1;
+k1 = 1;
+k2 = 1;
+c1 = 0.028058361;
+c2 = 0.028058361;
+i1 =      0.025;
+i2 =      0.025;
+l1 = 1;
+l2 = 1;
+lambda = 1;
+end;
+
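+// Unit shock variances: e, e1 and e2 already enter the model scaled by sigma.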
+vcov = [1 0 0; 0 1 0; 0 0 1];
+
+order = 4;
\ No newline at end of file
diff --git a/dynare++/tests/m_1_3_0_0_0_0_0_0.mod b/dynare++/tests/m_1_3_0_0_0_0_0_0.mod
new file mode 100644
index 0000000000000000000000000000000000000000..a95e8137d9c3ef1d0e7d6665a2ec1c9b6af4efa6
--- /dev/null
+++ b/dynare++/tests/m_1_3_0_0_0_0_0_0.mod
@@ -0,0 +1,53 @@
+var lambda a1 a2 k1 k2 i1 i2 c1 c2 l1 l2;
+varexo e  e1 e2;
+parameters beta delta rho sigma phi AA alpha Le  gamma1 gamma2 mu1 mu2 chi1 chi2 b1 b2 tau1 tau2;
+beta = 0.99;
+delta = 0.025;
+rho = 0.95;
+sigma = 0.01;
+phi = 0.5;
+alpha = 0.36;
+AA = 0.028058361;
+tau1 = 1.0604611e-11;
+tau2 = 2.9305887e-08;
+Le = 2.5;
+gamma1 = 0.2;
+gamma2 = 0.4;
+chi1 = 0.75;
+chi2 = 0.9;
+mu1 = -0.3;
+mu2 = 0.3;
+b1 =  3.6164368;
+b2 =  1.4937381;
+
+model;
+log(a1) = rho*log(a1(-1))+sigma*(e+e1);
+log(a2) = rho*log(a2(-1))+sigma*(e+e2);
+lambda = tau1*c1^(-1/chi1)*(c1^(1-1/chi1)+b1*(Le-l1)^(1-1/chi1))^((1-1/gamma1)/(1-1/chi1)-1);
+lambda = tau2*c2^(-1/chi2)*(c2^(1-1/chi2)+b2*(Le-l2)^(1-1/chi2))^((1-1/gamma2)/(1-1/chi2)-1);
+tau1*(-b1)*(Le-l1)^(-1/chi1)*(c1^(1-1/chi1)+b1*(Le-l1)^(1-1/chi1))^((1-1/gamma1)/(1-1/chi1)-1) = -lambda*a1*AA*(1-alpha)*l1^(mu1-1)*(alpha*k1(-1)^mu1+(1-alpha)*l1^mu1)^(1/mu1-1);
+tau2*(-b2)*(Le-l2)^(-1/chi2)*(c2^(1-1/chi2)+b2*(Le-l2)^(1-1/chi2))^((1-1/gamma2)/(1-1/chi2)-1) = -lambda*a2*AA*(1-alpha)*l2^(mu2-1)*(alpha*k2(-1)^mu2+(1-alpha)*l2^mu2)^(1/mu2-1);
+lambda*(1+phi*(i1/k1(-1)-delta)) =beta*lambda(+1)*(1+a1(+1)*AA*alpha*k1^(mu1-1)*(alpha*k1^mu1+(1-alpha)*l1(+1)^mu1)^(1/mu1-1)+phi*(1-delta+i1(+1)/k1-0.5*(i1(+1)/k1-delta))*(i1(+1)/k1-delta));
+lambda*(1+phi*(i2/k2(-1)-delta)) =beta*lambda(+1)*(1+a2(+1)*AA*alpha*k2^(mu2-1)*(alpha*k2^mu2+(1-alpha)*l2(+1)^mu2)^(1/mu2-1)+phi*(1-delta+i2(+1)/k2-0.5*(i2(+1)/k2-delta))*(i2(+1)/k2-delta));
+k1 = i1 + (1-delta)*k1(-1);
+k2 = i2 + (1-delta)*k2(-1);
+c1+i1-delta*k1(-1) + c2+i2-delta*k2(-1) = a1*AA*(alpha*k1(-1)^mu1+(1-alpha)*l1^mu1)^(1/mu1)-(phi/2)*k1(-1)*(i1/k1(-1)-delta)^2 + a2*AA*(alpha*k2(-1)^mu2+(1-alpha)*l2^mu2)^(1/mu2)-(phi/2)*k2(-1)*(i2/k2(-1)-delta)^2;
+end;
+
+initval;
+a1 = 1;
+a2 = 1;
+k1 = 1;
+k2 = 1;
+c1 = 0.028058361;
+c2 = 0.028058361;
+i1 =      0.025;
+i2 =      0.025;
+l1 = 1;
+l2 = 1;
+lambda = 1;
+end;
+
+vcov = [1 0 0; 0 1 0; 0 0 1];
+
+order = 4;
diff --git a/dynare++/tests/m_1_3_0_0_0_0_0_1.mod b/dynare++/tests/m_1_3_0_0_0_0_0_1.mod
new file mode 100644
index 0000000000000000000000000000000000000000..05e29ffd81f6b96b239692f63fa69323e1361d95
--- /dev/null
+++ b/dynare++/tests/m_1_3_0_0_0_0_0_1.mod
@@ -0,0 +1,82 @@
+var lambda a1 a2 a3 a4 k1 k2 k3 k4 i1 i2 i3 i4 c1 c2 c3 c4 l1 l2 l3 l4;
+varexo e  e1 e2 e3 e4;
+parameters beta delta rho sigma phi AA alpha Le  gamma1 gamma2 gamma3 gamma4 mu1 mu2 mu3 mu4 chi1 chi2 chi3 chi4 b1 b2 b3 b4 tau1 tau2 tau3 tau4;
+beta = 0.99;
+delta = 0.025;
+rho = 0.95;
+sigma = 0.001;
+phi = 0.5;
+alpha = 0.36;
+AA = 0.028058361;
+tau1 = 1.0604611e-11;
+tau2 = 1.8099765e-09;
+tau3 = 2.1096359e-08;
+tau4 = 2.9305887e-08;
+Le = 2.5;
+gamma1 = 0.2;
+gamma2 = 0.266666666667;
+gamma3 = 0.333333333333;
+gamma4 = 0.4;
+chi1 = 0.75;
+chi2 = 0.8;
+chi3 = 0.85;
+chi4 = 0.9;
+mu1 = -0.3;
+mu2 = -0.1;
+mu3 = 0.1;
+mu4 = 0.3;
+b1 =  3.6164368;
+b2 =  2.5958433;
+b3 =  1.9373921;
+b4 =  1.4937381;
+
+model;
+log(a1) = rho*log(a1(-1))+sigma*(e+e1);
+log(a2) = rho*log(a2(-1))+sigma*(e+e2);
+log(a3) = rho*log(a3(-1))+sigma*(e+e3);
+log(a4) = rho*log(a4(-1))+sigma*(e+e4);
+lambda = tau1*c1^(-1/chi1)*(c1^(1-1/chi1)+b1*(Le-l1)^(1-1/chi1))^((1-1/gamma1)/(1-1/chi1)-1);
+lambda = tau2*c2^(-1/chi2)*(c2^(1-1/chi2)+b2*(Le-l2)^(1-1/chi2))^((1-1/gamma2)/(1-1/chi2)-1);
+lambda = tau3*c3^(-1/chi3)*(c3^(1-1/chi3)+b3*(Le-l3)^(1-1/chi3))^((1-1/gamma3)/(1-1/chi3)-1);
+lambda = tau4*c4^(-1/chi4)*(c4^(1-1/chi4)+b4*(Le-l4)^(1-1/chi4))^((1-1/gamma4)/(1-1/chi4)-1);
+tau1*(-b1)*(Le-l1)^(-1/chi1)*(c1^(1-1/chi1)+b1*(Le-l1)^(1-1/chi1))^((1-1/gamma1)/(1-1/chi1)-1) = -lambda*a1*AA*(1-alpha)*l1^(mu1-1)*(alpha*k1(-1)^mu1+(1-alpha)*l1^mu1)^(1/mu1-1);
+tau2*(-b2)*(Le-l2)^(-1/chi2)*(c2^(1-1/chi2)+b2*(Le-l2)^(1-1/chi2))^((1-1/gamma2)/(1-1/chi2)-1) = -lambda*a2*AA*(1-alpha)*l2^(mu2-1)*(alpha*k2(-1)^mu2+(1-alpha)*l2^mu2)^(1/mu2-1);
+tau3*(-b3)*(Le-l3)^(-1/chi3)*(c3^(1-1/chi3)+b3*(Le-l3)^(1-1/chi3))^((1-1/gamma3)/(1-1/chi3)-1) = -lambda*a3*AA*(1-alpha)*l3^(mu3-1)*(alpha*k3(-1)^mu3+(1-alpha)*l3^mu3)^(1/mu3-1);
+tau4*(-b4)*(Le-l4)^(-1/chi4)*(c4^(1-1/chi4)+b4*(Le-l4)^(1-1/chi4))^((1-1/gamma4)/(1-1/chi4)-1) = -lambda*a4*AA*(1-alpha)*l4^(mu4-1)*(alpha*k4(-1)^mu4+(1-alpha)*l4^mu4)^(1/mu4-1);
+lambda*(1+phi*(i1/k1(-1)-delta)) =beta*lambda(+1)*(1+a1(+1)*AA*alpha*k1^(mu1-1)*(alpha*k1^mu1+(1-alpha)*l1(+1)^mu1)^(1/mu1-1)+phi*(1-delta+i1(+1)/k1-0.5*(i1(+1)/k1-delta))*(i1(+1)/k1-delta));
+lambda*(1+phi*(i2/k2(-1)-delta)) =beta*lambda(+1)*(1+a2(+1)*AA*alpha*k2^(mu2-1)*(alpha*k2^mu2+(1-alpha)*l2(+1)^mu2)^(1/mu2-1)+phi*(1-delta+i2(+1)/k2-0.5*(i2(+1)/k2-delta))*(i2(+1)/k2-delta));
+lambda*(1+phi*(i3/k3(-1)-delta)) =beta*lambda(+1)*(1+a3(+1)*AA*alpha*k3^(mu3-1)*(alpha*k3^mu3+(1-alpha)*l3(+1)^mu3)^(1/mu3-1)+phi*(1-delta+i3(+1)/k3-0.5*(i3(+1)/k3-delta))*(i3(+1)/k3-delta));
+lambda*(1+phi*(i4/k4(-1)-delta)) =beta*lambda(+1)*(1+a4(+1)*AA*alpha*k4^(mu4-1)*(alpha*k4^mu4+(1-alpha)*l4(+1)^mu4)^(1/mu4-1)+phi*(1-delta+i4(+1)/k4-0.5*(i4(+1)/k4-delta))*(i4(+1)/k4-delta));
+k1 = i1 + (1-delta)*k1(-1);
+k2 = i2 + (1-delta)*k2(-1);
+k3 = i3 + (1-delta)*k3(-1);
+k4 = i4 + (1-delta)*k4(-1);
+c1+i1-delta*k1(-1) + c2+i2-delta*k2(-1) + c3+i3-delta*k3(-1) + c4+i4-delta*k4(-1) = a1*AA*(alpha*k1(-1)^mu1+(1-alpha)*l1^mu1)^(1/mu1)-(phi/2)*k1(-1)*(i1/k1(-1)-delta)^2 + a2*AA*(alpha*k2(-1)^mu2+(1-alpha)*l2^mu2)^(1/mu2)-(phi/2)*k2(-1)*(i2/k2(-1)-delta)^2 + a3*AA*(alpha*k3(-1)^mu3+(1-alpha)*l3^mu3)^(1/mu3)-(phi/2)*k3(-1)*(i3/k3(-1)-delta)^2 + a4*AA*(alpha*k4(-1)^mu4+(1-alpha)*l4^mu4)^(1/mu4)-(phi/2)*k4(-1)*(i4/k4(-1)-delta)^2;
+end;
+initval;
+a1 = 1;
+a2 = 1;
+a3 = 1;
+a4 = 1;
+k1 = 1;
+k2 = 1;
+k3 = 1;
+k4 = 1;
+c1 = 0.028058361;
+c2 = 0.028058361;
+c3 = 0.028058361;
+c4 = 0.028058361;
+i1 =      0.025;
+i2 =      0.025;
+i3 =      0.025;
+i4 =      0.025;
+l1 = 1;
+l2 = 1;
+l3 = 1;
+l4 = 1;
+lambda = 1;
+end;
+
+vcov = [1.0 0 0 0 0; 0 1.0 0 0 0; 0 0 1.0 0 0; 0 0 0 1.0 0; 0 0 0 0 1.0];
+
+order = 4;
diff --git a/dynare++/tests/or0a.mod b/dynare++/tests/or0a.mod
new file mode 100644
index 0000000000000000000000000000000000000000..20b962e284c71e5743eb4f2595d19e45d8ac9dce
--- /dev/null
+++ b/dynare++/tests/or0a.mod
@@ -0,0 +1,119 @@
+var 
+C
+CF
+CF_STAR
+CH
+CH_STAR
+CN
+CN_STAR
+CT
+CT_STAR
+C_STAR
+E
+KE
+KE_STAR
+L
+L_STAR
+P
+PF
+PF_STAR
+PH
+PH_STAR
+PN
+PN_STAR
+PT
+PT_STAR
+P_STAR
+W
+W_STAR
+Y
+Y_STAR
+;
+
+varexo k k_star m m_star;
+
+parameters epsi chi thet nu phi gam;
+
+epsi = 0.5;
+nu = 3;
+chi = 1.2;
+phi = 4;
+thet = 3;
+gam = 0.5;
+
+model;
+C = (1/chi)*(exp(m)/P)^epsi;
+C_STAR = (1/chi)*(exp(m_star)/P_STAR)^epsi;
+CN = (1-gam)*(P/PN)*C;
+CN_STAR = (1-gam)*(P_STAR/PN_STAR)*C_STAR;
+CT = gam*(P/PT)*C;
+CT_STAR = gam*(P_STAR/PT_STAR)*C_STAR;
+CH = 0.5*(PT/PH)*CT;
+CH_STAR = 0.5*(PT_STAR/PH_STAR)*CT_STAR;
+CF = 0.5*(PT/PF)*CT;
+CF_STAR = 0.5*(PT_STAR/PF_STAR)*CT_STAR;
+P = PT^gam*PN^(1-gam);
+P_STAR = PT_STAR^gam*PN_STAR^(1-gam);
+PT = sqrt(PH*PF);
+PT_STAR = sqrt(PH_STAR*PF_STAR);
+PH = (thet/(thet-1))*W(-1);
+PF_STAR = (thet/(thet-1))*W_STAR(-1);
+PN = PH;
+PN_STAR = PF_STAR;
+L = Y;
+L_STAR = Y_STAR;
+(L(+1)/(P(+1)*C(+1)))*W = (phi/(phi-1))*KE(+1)*L(+1)^nu;
+(L_STAR(+1)/(P_STAR(+1)*C_STAR(+1)))*W_STAR = (phi/(phi-1))*KE_STAR(+1)*L_STAR(+1)^nu;
+P*C = Y*PH;
+P_STAR*C_STAR = Y_STAR*PF_STAR;
+Y = CH + CH_STAR + CN;
+Y_STAR = CF + CF_STAR + CN_STAR;
+PT = E*PT_STAR;
+KE = exp(k);
+KE_STAR = exp(k_star);
+end;
+
+initval;
+C = 1;
+PH = 1;
+P = 1;
+PN = 1;
+PT = 1;
+L = 1;
+Y = 1;
+W = 1;
+CF = 0.25;
+CH = 0.25;
+CT = 0.5;
+CN = 0.5;
+PF = 1;
+C_STAR = 1;
+PH_STAR = 1;
+P_STAR = 1;
+PN_STAR = 1;
+PT_STAR = 1;
+L_STAR = 1;
+Y_STAR = 1;
+W_STAR = 1;
+CF_STAR = 0.25;
+CH_STAR = 0.25;
+CT_STAR = 0.5;
+CN_STAR = 0.5;
+PF_STAR = 1;
+KE = 1;
+KE_STAR = 1;
+E = 1;
+k = 0;
+k_star = 0;
+m = 0;
+m_star = 0;
+end;
+
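+// Assuming rows follow the varexo declaration order (k, k_star, m, m_star), the
+// off-diagonal -0.01 entries make each productivity shock perfectly negatively
+// correlated (correlation -1) with the corresponding money shock.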
+vcov = [
+0.01 0 -0.01 0;
+0 0.01 0 -0.01;
+-0.01 0 0.01 0;
+0 -0.01 0 0.01
+];
+
+order=4;
diff --git a/dynare++/tests/portfolio.mod b/dynare++/tests/portfolio.mod
new file mode 100644
index 0000000000000000000000000000000000000000..5309dce0bf15dad4f40bbf6e0a8abd70ba18e772
--- /dev/null
+++ b/dynare++/tests/portfolio.mod
@@ -0,0 +1,52 @@
+var DOTQ Q1 Q2 X1 X2 C D1 D2;
+
+varexo E_D1 E_D2;
+
+parameters beta, r1, r2, gamma, d, rho1, rho2;
+
+beta = 0.95;
+r1 = 0.2;
+r2 = 0.05;
+
+gamma = 0.78;
+d = 0.10;
+
+rho1 = 0.8;
+rho2 = 0.2;
+
+model;
+C + X1 + X2 = D1*Q1 + D2*Q2;
+Q1+Q2 = 1;
+C^(-gamma)/(1-2*r1*X1) = beta*DOTQ(+1)^(-gamma)*C(+1)^(-gamma)/(1-2*r1*X1(+1))*(D1(+1)*(1-2*r1*X1(+1))+1);
+C^(-gamma)/(1-2*r2*X2) = beta*DOTQ(+1)^(-gamma)*C(+1)^(-gamma)/(1-2*r2*X2(+1))*(D2(+1)*(1-2*r2*X2(+1))+1);
+DOTQ*Q1 = Q1(-1) + X1(-1) - r1*X1(-1)^2;  
+DOTQ*Q2 = Q2(-1) + X2(-1) - r2*X2(-1)^2;
+
+D1/d = D1(-1)^rho1/(d^rho1)*exp(E_D1);
+D2/d = D2(-1)^rho2/(d^rho2)*exp(E_D2);
+
+/*
+D1-d = rho1*(D1(-1)-d) + E_D1;
+D2-d = rho2*(D2(-1)-d) + E_D2;
+*/
+end;
+
+initval;
+C    		 =0.0441234;
+D1   		 =0.1000000000000;
+D2   		 =0.1000000000000;
+
+DOTQ 		 =1.05567;
+Q1   		 =0.333333;
+Q2   		 =0.666667;
+
+X1   		 =0.0186255;
+X2   		 =0.0372511;
+end;
+
+vcov = [
+0.04 0;
+0 0.01
+];
+
+order=5;
diff --git a/dynare++/tests/portfolio4.mod b/dynare++/tests/portfolio4.mod
new file mode 100644
index 0000000000000000000000000000000000000000..4413aa5ed72ab702c97f0c4c3a14e62f081104d3
--- /dev/null
+++ b/dynare++/tests/portfolio4.mod
@@ -0,0 +1,86 @@
+var DOTQ Q1 Q2 Q3 Q4 X1 X2 X3 X4 C D1 D2 D3 D4 V;
+
+varexo E_D1 E_D2 E_D3 E_D4;
+
+parameters beta, r1, r2, r3, r4, gamma, ed1, ed2, ed3, ed4, rho1, rho2, rho3, rho4;
+
+beta = 0.95;
+r1 = 0.2;
+r2 = 0.1;
+r3 = 0.06;
+r4 = 0.03;
+
+gamma = 0.7;
+ed1 = 0.1;
+ed2 = 0.1;
+ed3 = 0.1;
+ed4 = 0.1;
+
+rho1 = 0.3;
+rho2 = 0.01;
+rho3 = 0.6;
+rho4 = 0.6;
+
+model;
+Q1+Q2+Q3+Q4 = 1;
+C + X1 + X2 + X3 + X4 = D1*Q1 + D2*Q2 + D3*Q3 + D4*Q4;
+DOTQ*Q1 = Q1(-1) + X1(-1) - r1*X1(-1)*X1(-1);  
+DOTQ*Q2 = Q2(-1) + X2(-1) - r2*X2(-1)*X2(-1);
+DOTQ*Q3 = Q3(-1) + X3(-1) - r3*X3(-1)*X3(-1);  
+DOTQ*Q4 = Q4(-1) + X4(-1) - r4*X4(-1)*X4(-1);
+C^(-gamma)/(1-2*r1*X1) = beta*DOTQ(+1)^(-gamma)*C(+1)^(-gamma)/(1-2*r1*X1(+1))*(D1(+1)*(1-2*r1*X1(+1))+1);
+C^(-gamma)/(1-2*r2*X2) = beta*DOTQ(+1)^(-gamma)*C(+1)^(-gamma)/(1-2*r2*X2(+1))*(D2(+1)*(1-2*r2*X2(+1))+1);
+C^(-gamma)/(1-2*r3*X3) = beta*DOTQ(+1)^(-gamma)*C(+1)^(-gamma)/(1-2*r3*X3(+1))*(D3(+1)*(1-2*r3*X3(+1))+1);
+C^(-gamma)/(1-2*r4*X4) = beta*DOTQ(+1)^(-gamma)*C(+1)^(-gamma)/(1-2*r4*X4(+1))*(D4(+1)*(1-2*r4*X4(+1))+1);
+
+V = C^(1-gamma)/(1-gamma) + beta*V(+1);
+
+D1/ed1 = D1(-1)^rho1/(ed1^rho1)*exp(E_D1);
+D2/ed2 = D2(-1)^rho2/(ed2^rho2)*exp(E_D2);
+D3/ed3 = D3(-1)^rho3/(ed3^rho3)*exp(E_D3);
+D4/ed4 = D4(-1)^rho4/(ed4^rho4)*exp(E_D4);
+
+/*
+D1-ed1 = rho1*(D1(-1)-ed1) + E_D1;
+D2-ed2 = rho2*(D2(-1)-ed2) + E_D2;
+D3-ed3 = rho3*(D3(-1)-ed3) + E_D3;
+D4-ed4 = rho4*(D4(-1)-ed4) + E_D4;
+*/
+
+end;
+
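+// Given the DOTQ guess, the remaining initial values solve the steady-state
+// versions of the equations above: each X_i satisfies
+// D_i*(1-2*r_i*X_i) + 1 = DOTQ^gamma/beta, and Q_i = (X_i - r_i*X_i^2)/(DOTQ-1).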
+initval;
+D1 = ed1;
+D2 = ed2;
+D3 = ed3;
+D4 = ed4;
+DOTQ = 1.05; // provide a guess not larger than the true value
+X1 = 1/2/r1*(1-1/D1*(1/beta*DOTQ^gamma - 1));
+X2 = 1/2/r2*(1-1/D2*(1/beta*DOTQ^gamma - 1));
+X3 = 1/2/r3*(1-1/D3*(1/beta*DOTQ^gamma - 1));
+X4 = 1/2/r4*(1-1/D4*(1/beta*DOTQ^gamma - 1));
+Q1 = 1/(DOTQ-1)*(X1 - r1*X1*X1);
+Q2 = 1/(DOTQ-1)*(X2 - r2*X2*X2);
+Q3 = 1/(DOTQ-1)*(X3 - r3*X3*X3);
+Q4 = 1/(DOTQ-1)*(X4 - r4*X4*X4);
+C = ed1*Q1 + ed2*Q2 + ed3*Q3 + ed4*Q4 - X1 - X2 - X3 - X4;
+V = 1/(1-beta)*C^(1-gamma)/(1-gamma);
+end;
+
+/*
+vcov = [
+0.0005 0 0 0;
+0 0.00025 0 0;
+0 0 0.0005 0;
+0 0 0 0.00025
+];
+*/
+
+vcov = [
+0.05 0 0 0;
+0 0.025 0 0;
+0 0 0.05 0;
+0 0 0 0.025
+];
+
+order=5;
diff --git a/dynare++/tests/portfolio4_norm.mod b/dynare++/tests/portfolio4_norm.mod
new file mode 100644
index 0000000000000000000000000000000000000000..fdedaaa5a2278b640863e4f71964102f86761692
--- /dev/null
+++ b/dynare++/tests/portfolio4_norm.mod
@@ -0,0 +1,86 @@
+var DOTQ Q1 Q2 Q3 Q4 X1 X2 X3 X4 C D1 D2 D3 D4 V;
+
+varexo E_D1 E_D2 E_D3 E_D4;
+
+parameters beta, r1, r2, r3, r4, gamma, ed1, ed2, ed3, ed4, rho1, rho2, rho3, rho4;
+
+beta = 0.95;
+r1 = 0.2;
+r2 = 0.1;
+r3 = 0.06;
+r4 = 0.03;
+
+gamma = 0.7;
+ed1 = 0.1;
+ed2 = 0.1;
+ed3 = 0.1;
+ed4 = 0.1;
+
+rho1 = 0.3;
+rho2 = 0.01;
+rho3 = 0.6;
+rho4 = 0.6;
+
+model;
+1 = (C + X1 + X2 + X3 + X4)/ (D1*Q1 + D2*Q2 + D3*Q3 + D4*Q4);
+1 = (Q1(-1) + X1(-1) - r1*X1(-1)*X1(-1))/(DOTQ*Q1);  
+1 = (Q2(-1) + X2(-1) - r2*X2(-1)*X2(-1))/(DOTQ*Q2);
+1 = (Q3(-1) + X3(-1) - r3*X3(-1)*X3(-1))/(DOTQ*Q3);  
+1 = (Q4(-1) + X4(-1) - r4*X4(-1)*X4(-1))/(DOTQ*Q4);
+Q1+Q2+Q3+Q4 = 1;
+1 = beta*DOTQ(+1)^(-gamma)*C(+1)^(-gamma)/(1-2*r1*X1(+1))*(D1(+1)*(1-2*r1*X1(+1))+1)/(C^(-gamma)/(1-2*r1*X1));
+1 = beta*DOTQ(+1)^(-gamma)*C(+1)^(-gamma)/(1-2*r2*X2(+1))*(D2(+1)*(1-2*r2*X2(+1))+1)/(C^(-gamma)/(1-2*r2*X2));
+1 = beta*DOTQ(+1)^(-gamma)*C(+1)^(-gamma)/(1-2*r3*X3(+1))*(D3(+1)*(1-2*r3*X3(+1))+1)/(C^(-gamma)/(1-2*r3*X3));
+1 = beta*DOTQ(+1)^(-gamma)*C(+1)^(-gamma)/(1-2*r4*X4(+1))*(D4(+1)*(1-2*r4*X4(+1))+1)/(C^(-gamma)/(1-2*r4*X4));
+
+
+1 = D1(-1)^rho1/D1*ed1^(1-rho1)*exp(E_D1);
+1 = D2(-1)^rho2/D2*ed2^(1-rho2)*exp(E_D2);
+1 = D3(-1)^rho3/D3*ed3^(1-rho3)*exp(E_D3);
+1 = D4(-1)^rho4/D4*ed4^(1-rho4)*exp(E_D4);
+
+/*
+D1-ed1 = rho1*(D1(-1)-ed1) + E_D1;
+D2-ed2 = rho2*(D2(-1)-ed2) + E_D2;
+D3-ed3 = rho3*(D3(-1)-ed3) + E_D3;
+D4-ed4 = rho4*(D4(-1)-ed4) + E_D4;
+*/
+
+V/(C^(1-gamma)/(1-gamma) + beta*V(+1)) = 1;
+end;
+
+initval;
+Q1	=0.0769231;
+Q2	=0.1538462;
+Q3	=0.2564103;
+Q4	=0.5128205;
+X1	=0.0049761;
+X2	=0.0099522;
+X3	=0.0165871;
+X4	=0.0331741;
+D1	=0.1;
+D2	=0.1;
+D3	=0.1;
+D4	=0.1;
+DOTQ	=1.0646251;
+C	=0.0353105;
+V	=24.450057;
+end;
+
+/*
+vcov = [
+0.0005 0 0 0;
+0 0.00025 0 0;
+0 0 0.0005 0;
+0 0 0 0.00025
+];
+*/
+
+vcov = [
+0.05 0 0 0;
+0 0.025 0 0;
+0 0 0.05 0;
+0 0 0 0.025
+];
+
+order=4;
diff --git a/dynare++/tests/psd_exo3.mod b/dynare++/tests/psd_exo3.mod
new file mode 100644
index 0000000000000000000000000000000000000000..ff59dab3604e5d162bb7830ae17481d01902d49e
--- /dev/null
+++ b/dynare++/tests/psd_exo3.mod
@@ -0,0 +1,26 @@
+var c k x;
+varexo rho;
+
+parameters a alph gam bet lamb;
+alph = 0.7;
+bet = 0.95;
+gam = 2;
+a = 1.052632;
+lamb = 0.9;
+
+model;
+c^(-gam) = bet*c(+1)^(-gam)*a*exp(x(+1))*k^(-alph);
+k = a*exp(x)*k(-1)^(1-alph)/(1-alph)-c;
+x = lamb*x(-1)+rho;
+end;
+
+initval;
+k = 1;
+c = 2.508;
+x = 0;
+rho = 0;
+end;
+
+vcov=[0.0001];
+
+order=6;
\ No newline at end of file
diff --git a/dynare++/tests/q3a2.mod b/dynare++/tests/q3a2.mod
new file mode 100644
index 0000000000000000000000000000000000000000..650b27d3f92d2d3ce54612012fc2e2d1b20cc7ab
--- /dev/null
+++ b/dynare++/tests/q3a2.mod
@@ -0,0 +1,33 @@
+var c,a1,k1,a2,k2;
+varexo e,e1,e2;
+parameters beta, gamma, phi, delta, alpha, rho, zeta, sigma, N;
+alpha = 0.36;
+delta = 0.025;
+phi = 2;
+gamma = 1;
+beta = 0.99;
+rho = 0.95;
+zeta = 2;
+sigma = 0.005;
+N = 2;
+model;
+c^(-gamma)*(1+phi*zeta*(k1-k1(-1))^(zeta-1)/(2*k1(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k1-k1(-1))^(zeta-1)/(2*k1(-1))+phi*(k1-k1(-1))^zeta/(2*k1(-1)^2)-delta+alpha*a1(+1)*k1^(alpha-1));
+log(a1) = rho*log(a1(-1))+sigma*(e+e1);
+c^(-gamma)*(1+phi*zeta*(k2-k2(-1))^(zeta-1)/(2*k2(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k2-k2(-1))^(zeta-1)/(2*k2(-1))+phi*(k2-k2(-1))^zeta/(2*k2(-1)^2)-delta+alpha*a2(+1)*k2^(alpha-1));
+log(a2) = rho*log(a2(-1))+sigma*(e+e2);
+N*c+k1+phi*(k1-k1(-1))^zeta/(2*k1)-(1-delta)*k1(-1)+k2+phi*(k2-k2(-1))^zeta/(2*k2)-(1-delta)*k2(-1)= a1*k1(-1)^alpha+a2*k2(-1)^alpha;
+end;
+initval;
+c=1;
+e=0;
+a1=1;
+e1=0;
+k1=10;
+a2=1;
+e2=0;
+k2=10;
+end;
+
+vcov=[1 0 0; 0 1 0; 0 0 1];
+
+order = 4;
\ No newline at end of file
diff --git a/dynare++/tests/q3a50.mod b/dynare++/tests/q3a50.mod
new file mode 100644
index 0000000000000000000000000000000000000000..f573caea23529af74511cb48548177809e0c6676
--- /dev/null
+++ b/dynare++/tests/q3a50.mod
@@ -0,0 +1,325 @@
+periods 5000;
+var c,a1,k1,a2,k2,a3,k3,a4,k4,a5,k5,a6,k6,a7,k7,a8,k8,a9,k9,a10,k10,a11,k11,a12,k12,a13,k13,a14,k14,a15,k15,a16,k16,a17,k17,a18,k18,a19,k19,a20,k20,a21,k21,a22,k22,a23,k23,a24,k24,a25,k25,a26,k26,a27,k27,a28,k28,a29,k29,a30,k30,a31,k31,a32,k32,a33,k33,a34,k34,a35,k35,a36,k36,a37,k37,a38,k38,a39,k39,a40,k40,a41,k41,a42,k42,a43,k43,a44,k44,a45,k45,a46,k46,a47,k47,a48,k48,a49,k49,a50,k50;
+varexo e,e1,e2,e3,e4,e5,e6,e7,e8,e9,e10,e11,e12,e13,e14,e15,e16,e17,e18,e19,e20,e21,e22,e23,e24,e25,e26,e27,e28,e29,e30,e31,e32,e33,e34,e35,e36,e37,e38,e39,e40,e41,e42,e43,e44,e45,e46,e47,e48,e49,e50;
+parameters beta, gamma, phi, delta, alpha, rho, zeta, sigma, N;
+alpha = 0.36;
+delta = 0.1;
+phi = 2;
+gamma = 1;
+beta = 0.99;
+rho = 0.95;
+zeta = 2;
+sigma = 2;
+N = 50;
+model;
+c^(-gamma)*(1+phi*zeta*(k1-k1(-1))^(zeta-1)/(2*k1(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k1-k1(-1))^(zeta-1)/(2*k1(-1))+phi*(k1-k1(-1))^zeta/(2*k1(-1)^2)-delta+alpha*a1(+1)*k1^(alpha-1));
+log(a1) = rho*log(a1(-1))+sigma*(e+e1);
+c^(-gamma)*(1+phi*zeta*(k2-k2(-1))^(zeta-1)/(2*k2(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k2-k2(-1))^(zeta-1)/(2*k2(-1))+phi*(k2-k2(-1))^zeta/(2*k2(-1)^2)-delta+alpha*a2(+1)*k2^(alpha-1));
+log(a2) = rho*log(a2(-1))+sigma*(e+e2);
+c^(-gamma)*(1+phi*zeta*(k3-k3(-1))^(zeta-1)/(2*k3(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k3-k3(-1))^(zeta-1)/(2*k3(-1))+phi*(k3-k3(-1))^zeta/(2*k3(-1)^2)-delta+alpha*a3(+1)*k3^(alpha-1));
+log(a3) = rho*log(a3(-1))+sigma*(e+e3);
+c^(-gamma)*(1+phi*zeta*(k4-k4(-1))^(zeta-1)/(2*k4(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k4-k4(-1))^(zeta-1)/(2*k4(-1))+phi*(k4-k4(-1))^zeta/(2*k4(-1)^2)-delta+alpha*a4(+1)*k4^(alpha-1));
+log(a4) = rho*log(a4(-1))+sigma*(e+e4);
+c^(-gamma)*(1+phi*zeta*(k5-k5(-1))^(zeta-1)/(2*k5(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k5-k5(-1))^(zeta-1)/(2*k5(-1))+phi*(k5-k5(-1))^zeta/(2*k5(-1)^2)-delta+alpha*a5(+1)*k5^(alpha-1));
+log(a5) = rho*log(a5(-1))+sigma*(e+e5);
+c^(-gamma)*(1+phi*zeta*(k6-k6(-1))^(zeta-1)/(2*k6(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k6-k6(-1))^(zeta-1)/(2*k6(-1))+phi*(k6-k6(-1))^zeta/(2*k6(-1)^2)-delta+alpha*a6(+1)*k6^(alpha-1));
+log(a6) = rho*log(a6(-1))+sigma*(e+e6);
+c^(-gamma)*(1+phi*zeta*(k7-k7(-1))^(zeta-1)/(2*k7(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k7-k7(-1))^(zeta-1)/(2*k7(-1))+phi*(k7-k7(-1))^zeta/(2*k7(-1)^2)-delta+alpha*a7(+1)*k7^(alpha-1));
+log(a7) = rho*log(a7(-1))+sigma*(e+e7);
+c^(-gamma)*(1+phi*zeta*(k8-k8(-1))^(zeta-1)/(2*k8(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k8-k8(-1))^(zeta-1)/(2*k8(-1))+phi*(k8-k8(-1))^zeta/(2*k8(-1)^2)-delta+alpha*a8(+1)*k8^(alpha-1));
+log(a8) = rho*log(a8(-1))+sigma*(e+e8);
+c^(-gamma)*(1+phi*zeta*(k9-k9(-1))^(zeta-1)/(2*k9(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k9-k9(-1))^(zeta-1)/(2*k9(-1))+phi*(k9-k9(-1))^zeta/(2*k9(-1)^2)-delta+alpha*a9(+1)*k9^(alpha-1));
+log(a9) = rho*log(a9(-1))+sigma*(e+e9);
+c^(-gamma)*(1+phi*zeta*(k10-k10(-1))^(zeta-1)/(2*k10(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k10-k10(-1))^(zeta-1)/(2*k10(-1))+phi*(k10-k10(-1))^zeta/(2*k10(-1)^2)-delta+alpha*a10(+1)*k10^(alpha-1));
+log(a10) = rho*log(a10(-1))+sigma*(e+e10);
+c^(-gamma)*(1+phi*zeta*(k11-k11(-1))^(zeta-1)/(2*k11(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k11-k11(-1))^(zeta-1)/(2*k11(-1))+phi*(k11-k11(-1))^zeta/(2*k11(-1)^2)-delta+alpha*a11(+1)*k11^(alpha-1));
+log(a11) = rho*log(a11(-1))+sigma*(e+e11);
+c^(-gamma)*(1+phi*zeta*(k12-k12(-1))^(zeta-1)/(2*k12(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k12-k12(-1))^(zeta-1)/(2*k12(-1))+phi*(k12-k12(-1))^zeta/(2*k12(-1)^2)-delta+alpha*a12(+1)*k12^(alpha-1));
+log(a12) = rho*log(a12(-1))+sigma*(e+e12);
+c^(-gamma)*(1+phi*zeta*(k13-k13(-1))^(zeta-1)/(2*k13(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k13-k13(-1))^(zeta-1)/(2*k13(-1))+phi*(k13-k13(-1))^zeta/(2*k13(-1)^2)-delta+alpha*a13(+1)*k13^(alpha-1));
+log(a13) = rho*log(a13(-1))+sigma*(e+e13);
+c^(-gamma)*(1+phi*zeta*(k14-k14(-1))^(zeta-1)/(2*k14(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k14-k14(-1))^(zeta-1)/(2*k14(-1))+phi*(k14-k14(-1))^zeta/(2*k14(-1)^2)-delta+alpha*a14(+1)*k14^(alpha-1));
+log(a14) = rho*log(a14(-1))+sigma*(e+e14);
+c^(-gamma)*(1+phi*zeta*(k15-k15(-1))^(zeta-1)/(2*k15(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k15-k15(-1))^(zeta-1)/(2*k15(-1))+phi*(k15-k15(-1))^zeta/(2*k15(-1)^2)-delta+alpha*a15(+1)*k15^(alpha-1));
+log(a15) = rho*log(a15(-1))+sigma*(e+e15);
+c^(-gamma)*(1+phi*zeta*(k16-k16(-1))^(zeta-1)/(2*k16(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k16-k16(-1))^(zeta-1)/(2*k16(-1))+phi*(k16-k16(-1))^zeta/(2*k16(-1)^2)-delta+alpha*a16(+1)*k16^(alpha-1));
+log(a16) = rho*log(a16(-1))+sigma*(e+e16);
+c^(-gamma)*(1+phi*zeta*(k17-k17(-1))^(zeta-1)/(2*k17(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k17-k17(-1))^(zeta-1)/(2*k17(-1))+phi*(k17-k17(-1))^zeta/(2*k17(-1)^2)-delta+alpha*a17(+1)*k17^(alpha-1));
+log(a17) = rho*log(a17(-1))+sigma*(e+e17);
+c^(-gamma)*(1+phi*zeta*(k18-k18(-1))^(zeta-1)/(2*k18(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k18-k18(-1))^(zeta-1)/(2*k18(-1))+phi*(k18-k18(-1))^zeta/(2*k18(-1)^2)-delta+alpha*a18(+1)*k18^(alpha-1));
+log(a18) = rho*log(a18(-1))+sigma*(e+e18);
+c^(-gamma)*(1+phi*zeta*(k19-k19(-1))^(zeta-1)/(2*k19(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k19-k19(-1))^(zeta-1)/(2*k19(-1))+phi*(k19-k19(-1))^zeta/(2*k19(-1)^2)-delta+alpha*a19(+1)*k19^(alpha-1));
+log(a19) = rho*log(a19(-1))+sigma*(e+e19);
+c^(-gamma)*(1+phi*zeta*(k20-k20(-1))^(zeta-1)/(2*k20(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k20-k20(-1))^(zeta-1)/(2*k20(-1))+phi*(k20-k20(-1))^zeta/(2*k20(-1)^2)-delta+alpha*a20(+1)*k20^(alpha-1));
+log(a20) = rho*log(a20(-1))+sigma*(e+e20);
+c^(-gamma)*(1+phi*zeta*(k21-k21(-1))^(zeta-1)/(2*k21(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k21-k21(-1))^(zeta-1)/(2*k21(-1))+phi*(k21-k21(-1))^zeta/(2*k21(-1)^2)-delta+alpha*a21(+1)*k21^(alpha-1));
+log(a21) = rho*log(a21(-1))+sigma*(e+e21);
+c^(-gamma)*(1+phi*zeta*(k22-k22(-1))^(zeta-1)/(2*k22(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k22-k22(-1))^(zeta-1)/(2*k22(-1))+phi*(k22-k22(-1))^zeta/(2*k22(-1)^2)-delta+alpha*a22(+1)*k22^(alpha-1));
+log(a22) = rho*log(a22(-1))+sigma*(e+e22);
+c^(-gamma)*(1+phi*zeta*(k23-k23(-1))^(zeta-1)/(2*k23(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k23-k23(-1))^(zeta-1)/(2*k23(-1))+phi*(k23-k23(-1))^zeta/(2*k23(-1)^2)-delta+alpha*a23(+1)*k23^(alpha-1));
+log(a23) = rho*log(a23(-1))+sigma*(e+e23);
+c^(-gamma)*(1+phi*zeta*(k24-k24(-1))^(zeta-1)/(2*k24(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k24-k24(-1))^(zeta-1)/(2*k24(-1))+phi*(k24-k24(-1))^zeta/(2*k24(-1)^2)-delta+alpha*a24(+1)*k24^(alpha-1));
+log(a24) = rho*log(a24(-1))+sigma*(e+e24);
+c^(-gamma)*(1+phi*zeta*(k25-k25(-1))^(zeta-1)/(2*k25(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k25-k25(-1))^(zeta-1)/(2*k25(-1))+phi*(k25-k25(-1))^zeta/(2*k25(-1)^2)-delta+alpha*a25(+1)*k25^(alpha-1));
+log(a25) = rho*log(a25(-1))+sigma*(e+e25);
+c^(-gamma)*(1+phi*zeta*(k26-k26(-1))^(zeta-1)/(2*k26(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k26-k26(-1))^(zeta-1)/(2*k26(-1))+phi*(k26-k26(-1))^zeta/(2*k26(-1)^2)-delta+alpha*a26(+1)*k26^(alpha-1));
+log(a26) = rho*log(a26(-1))+sigma*(e+e26);
+c^(-gamma)*(1+phi*zeta*(k27-k27(-1))^(zeta-1)/(2*k27(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k27-k27(-1))^(zeta-1)/(2*k27(-1))+phi*(k27-k27(-1))^zeta/(2*k27(-1)^2)-delta+alpha*a27(+1)*k27^(alpha-1));
+log(a27) = rho*log(a27(-1))+sigma*(e+e27);
+c^(-gamma)*(1+phi*zeta*(k28-k28(-1))^(zeta-1)/(2*k28(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k28-k28(-1))^(zeta-1)/(2*k28(-1))+phi*(k28-k28(-1))^zeta/(2*k28(-1)^2)-delta+alpha*a28(+1)*k28^(alpha-1));
+log(a28) = rho*log(a28(-1))+sigma*(e+e28);
+c^(-gamma)*(1+phi*zeta*(k29-k29(-1))^(zeta-1)/(2*k29(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k29-k29(-1))^(zeta-1)/(2*k29(-1))+phi*(k29-k29(-1))^zeta/(2*k29(-1)^2)-delta+alpha*a29(+1)*k29^(alpha-1));
+log(a29) = rho*log(a29(-1))+sigma*(e+e29);
+c^(-gamma)*(1+phi*zeta*(k30-k30(-1))^(zeta-1)/(2*k30(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k30-k30(-1))^(zeta-1)/(2*k30(-1))+phi*(k30-k30(-1))^zeta/(2*k30(-1)^2)-delta+alpha*a30(+1)*k30^(alpha-1));
+log(a30) = rho*log(a30(-1))+sigma*(e+e30);
+c^(-gamma)*(1+phi*zeta*(k31-k31(-1))^(zeta-1)/(2*k31(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k31-k31(-1))^(zeta-1)/(2*k31(-1))+phi*(k31-k31(-1))^zeta/(2*k31(-1)^2)-delta+alpha*a31(+1)*k31^(alpha-1));
+log(a31) = rho*log(a31(-1))+sigma*(e+e31);
+c^(-gamma)*(1+phi*zeta*(k32-k32(-1))^(zeta-1)/(2*k32(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k32-k32(-1))^(zeta-1)/(2*k32(-1))+phi*(k32-k32(-1))^zeta/(2*k32(-1)^2)-delta+alpha*a32(+1)*k32^(alpha-1));
+log(a32) = rho*log(a32(-1))+sigma*(e+e32);
+c^(-gamma)*(1+phi*zeta*(k33-k33(-1))^(zeta-1)/(2*k33(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k33-k33(-1))^(zeta-1)/(2*k33(-1))+phi*(k33-k33(-1))^zeta/(2*k33(-1)^2)-delta+alpha*a33(+1)*k33^(alpha-1));
+log(a33) = rho*log(a33(-1))+sigma*(e+e33);
+c^(-gamma)*(1+phi*zeta*(k34-k34(-1))^(zeta-1)/(2*k34(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k34-k34(-1))^(zeta-1)/(2*k34(-1))+phi*(k34-k34(-1))^zeta/(2*k34(-1)^2)-delta+alpha*a34(+1)*k34^(alpha-1));
+log(a34) = rho*log(a34(-1))+sigma*(e+e34);
+c^(-gamma)*(1+phi*zeta*(k35-k35(-1))^(zeta-1)/(2*k35(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k35-k35(-1))^(zeta-1)/(2*k35(-1))+phi*(k35-k35(-1))^zeta/(2*k35(-1)^2)-delta+alpha*a35(+1)*k35^(alpha-1));
+log(a35) = rho*log(a35(-1))+sigma*(e+e35);
+c^(-gamma)*(1+phi*zeta*(k36-k36(-1))^(zeta-1)/(2*k36(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k36-k36(-1))^(zeta-1)/(2*k36(-1))+phi*(k36-k36(-1))^zeta/(2*k36(-1)^2)-delta+alpha*a36(+1)*k36^(alpha-1));
+log(a36) = rho*log(a36(-1))+sigma*(e+e36);
+c^(-gamma)*(1+phi*zeta*(k37-k37(-1))^(zeta-1)/(2*k37(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k37-k37(-1))^(zeta-1)/(2*k37(-1))+phi*(k37-k37(-1))^zeta/(2*k37(-1)^2)-delta+alpha*a37(+1)*k37^(alpha-1));
+log(a37) = rho*log(a37(-1))+sigma*(e+e37);
+c^(-gamma)*(1+phi*zeta*(k38-k38(-1))^(zeta-1)/(2*k38(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k38-k38(-1))^(zeta-1)/(2*k38(-1))+phi*(k38-k38(-1))^zeta/(2*k38(-1)^2)-delta+alpha*a38(+1)*k38^(alpha-1));
+log(a38) = rho*log(a38(-1))+sigma*(e+e38);
+c^(-gamma)*(1+phi*zeta*(k39-k39(-1))^(zeta-1)/(2*k39(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k39-k39(-1))^(zeta-1)/(2*k39(-1))+phi*(k39-k39(-1))^zeta/(2*k39(-1)^2)-delta+alpha*a39(+1)*k39^(alpha-1));
+log(a39) = rho*log(a39(-1))+sigma*(e+e39);
+c^(-gamma)*(1+phi*zeta*(k40-k40(-1))^(zeta-1)/(2*k40(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k40-k40(-1))^(zeta-1)/(2*k40(-1))+phi*(k40-k40(-1))^zeta/(2*k40(-1)^2)-delta+alpha*a40(+1)*k40^(alpha-1));
+log(a40) = rho*log(a40(-1))+sigma*(e+e40);
+c^(-gamma)*(1+phi*zeta*(k41-k41(-1))^(zeta-1)/(2*k41(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k41-k41(-1))^(zeta-1)/(2*k41(-1))+phi*(k41-k41(-1))^zeta/(2*k41(-1)^2)-delta+alpha*a41(+1)*k41^(alpha-1));
+log(a41) = rho*log(a41(-1))+sigma*(e+e41);
+c^(-gamma)*(1+phi*zeta*(k42-k42(-1))^(zeta-1)/(2*k42(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k42-k42(-1))^(zeta-1)/(2*k42(-1))+phi*(k42-k42(-1))^zeta/(2*k42(-1)^2)-delta+alpha*a42(+1)*k42^(alpha-1));
+log(a42) = rho*log(a42(-1))+sigma*(e+e42);
+c^(-gamma)*(1+phi*zeta*(k43-k43(-1))^(zeta-1)/(2*k43(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k43-k43(-1))^(zeta-1)/(2*k43(-1))+phi*(k43-k43(-1))^zeta/(2*k43(-1)^2)-delta+alpha*a43(+1)*k43^(alpha-1));
+log(a43) = rho*log(a43(-1))+sigma*(e+e43);
+c^(-gamma)*(1+phi*zeta*(k44-k44(-1))^(zeta-1)/(2*k44(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k44-k44(-1))^(zeta-1)/(2*k44(-1))+phi*(k44-k44(-1))^zeta/(2*k44(-1)^2)-delta+alpha*a44(+1)*k44^(alpha-1));
+log(a44) = rho*log(a44(-1))+sigma*(e+e44);
+c^(-gamma)*(1+phi*zeta*(k45-k45(-1))^(zeta-1)/(2*k45(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k45-k45(-1))^(zeta-1)/(2*k45(-1))+phi*(k45-k45(-1))^zeta/(2*k45(-1)^2)-delta+alpha*a45(+1)*k45^(alpha-1));
+log(a45) = rho*log(a45(-1))+sigma*(e+e45);
+c^(-gamma)*(1+phi*zeta*(k46-k46(-1))^(zeta-1)/(2*k46(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k46-k46(-1))^(zeta-1)/(2*k46(-1))+phi*(k46-k46(-1))^zeta/(2*k46(-1)^2)-delta+alpha*a46(+1)*k46^(alpha-1));
+log(a46) = rho*log(a46(-1))+sigma*(e+e46);
+c^(-gamma)*(1+phi*zeta*(k47-k47(-1))^(zeta-1)/(2*k47(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k47-k47(-1))^(zeta-1)/(2*k47(-1))+phi*(k47-k47(-1))^zeta/(2*k47(-1)^2)-delta+alpha*a47(+1)*k47^(alpha-1));
+log(a47) = rho*log(a47(-1))+sigma*(e+e47);
+c^(-gamma)*(1+phi*zeta*(k48-k48(-1))^(zeta-1)/(2*k48(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k48-k48(-1))^(zeta-1)/(2*k48(-1))+phi*(k48-k48(-1))^zeta/(2*k48(-1)^2)-delta+alpha*a48(+1)*k48^(alpha-1));
+log(a48) = rho*log(a48(-1))+sigma*(e+e48);
+c^(-gamma)*(1+phi*zeta*(k49-k49(-1))^(zeta-1)/(2*k49(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k49-k49(-1))^(zeta-1)/(2*k49(-1))+phi*(k49-k49(-1))^zeta/(2*k49(-1)^2)-delta+alpha*a49(+1)*k49^(alpha-1));
+log(a49) = rho*log(a49(-1))+sigma*(e+e49);
+c^(-gamma)*(1+phi*zeta*(k50-k50(-1))^(zeta-1)/(2*k50(-1))) = beta*c(+1)^(-gamma)*(1+phi*zeta*(k50-k50(-1))^(zeta-1)/(2*k50(-1))+phi*(k50-k50(-1))^zeta/(2*k50(-1)^2)-delta+alpha*a50(+1)*k50^(alpha-1));
+log(a50) = rho*log(a50(-1))+sigma*(e+e50);
+N*c+k1+phi*(k1-k1(-1))^zeta/(2*k1)-(1-delta)*k1(-1)+k2+phi*(k2-k2(-1))^zeta/(2*k2)-(1-delta)*k2(-1)+k3+phi*(k3-k3(-1))^zeta/(2*k3)-(1-delta)*k3(-1)+k4+phi*(k4-k4(-1))^zeta/(2*k4)-(1-delta)*k4(-1)+k5+phi*(k5-k5(-1))^zeta/(2*k5)-(1-delta)*k5(-1)+k6+phi*(k6-k6(-1))^zeta/(2*k6)-(1-delta)*k6(-1)+k7+phi*(k7-k7(-1))^zeta/(2*k7)-(1-delta)*k7(-1)+k8+phi*(k8-k8(-1))^zeta/(2*k8)-(1-delta)*k8(-1)+k9+phi*(k9-k9(-1))^zeta/(2*k9)-(1-delta)*k9(-1)+k10+phi*(k10-k10(-1))^zeta/(2*k10)-(1-delta)*k10(-1)+k11+phi*(k11-k11(-1))^zeta/(2*k11)-(1-delta)*k11(-1)+k12+phi*(k12-k12(-1))^zeta/(2*k12)-(1-delta)*k12(-1)+k13+phi*(k13-k13(-1))^zeta/(2*k13)-(1-delta)*k13(-1)+k14+phi*(k14-k14(-1))^zeta/(2*k14)-(1-delta)*k14(-1)+k15+phi*(k15-k15(-1))^zeta/(2*k15)-(1-delta)*k15(-1)+k16+phi*(k16-k16(-1))^zeta/(2*k16)-(1-delta)*k16(-1)+k17+phi*(k17-k17(-1))^zeta/(2*k17)-(1-delta)*k17(-1)+k18+phi*(k18-k18(-1))^zeta/(2*k18)-(1-delta)*k18(-1)+k19+phi*(k19-k19(-1))^zeta/(2*k19)-(1-delta)*k19(-1)+k20+phi*(k20-k20(-1))^zeta/(2*k20)-(1-delta)*k20(-1)+k21+phi*(k21-k21(-1))^zeta/(2*k21)-(1-delta)*k21(-1)+k22+phi*(k22-k22(-1))^zeta/(2*k22)-(1-delta)*k22(-1)+k23+phi*(k23-k23(-1))^zeta/(2*k23)-(1-delta)*k23(-1)+k24+phi*(k24-k24(-1))^zeta/(2*k24)-(1-delta)*k24(-1)+k25+phi*(k25-k25(-1))^zeta/(2*k25)-(1-delta)*k25(-1)+k26+phi*(k26-k26(-1))^zeta/(2*k26)-(1-delta)*k26(-1)+k27+phi*(k27-k27(-1))^zeta/(2*k27)-(1-delta)*k27(-1)+k28+phi*(k28-k28(-1))^zeta/(2*k28)-(1-delta)*k28(-1)+k29+phi*(k29-k29(-1))^zeta/(2*k29)-(1-delta)*k29(-1)+k30+phi*(k30-k30(-1))^zeta/(2*k30)-(1-delta)*k30(-1)+k31+phi*(k31-k31(-1))^zeta/(2*k31)-(1-delta)*k31(-1)+k32+phi*(k32-k32(-1))^zeta/(2*k32)-(1-delta)*k32(-1)+k33+phi*(k33-k33(-1))^zeta/(2*k33)-(1-delta)*k33(-1)+k34+phi*(k34-k34(-1))^zeta/(2*k34)-(1-delta)*k34(-1)+k35+phi*(k35-k35(-1))^zeta/(2*k35)-(1-delta)*k35(-1)+k36+phi*(k36-k36(-1))^zeta/(2*k36)-(1-delta)*k36(-1)+k37+phi*(k37-k37(-1))^zeta/(2*k37)-(1-delta)*k37(-1)+k38+phi*(k38-k38(-1))^zeta/(2*k38)-(1-delta)*k38(-1)+k39+phi*(k39-k39(-1))^zeta/(2*k39)-(1-delta)*k39(-1)+k40+phi*(k40-k40(-1))^zeta/(2*k40)-(1-delta)*k40(-1)+k41+phi*(k41-k41(-1))^zeta/(2*k41)-(1-delta)*k41(-1)+k42+phi*(k42-k42(-1))^zeta/(2*k42)-(1-delta)*k42(-1)+k43+phi*(k43-k43(-1))^zeta/(2*k43)-(1-delta)*k43(-1)+k44+phi*(k44-k44(-1))^zeta/(2*k44)-(1-delta)*k44(-1)+k45+phi*(k45-k45(-1))^zeta/(2*k45)-(1-delta)*k45(-1)+k46+phi*(k46-k46(-1))^zeta/(2*k46)-(1-delta)*k46(-1)+k47+phi*(k47-k47(-1))^zeta/(2*k47)-(1-delta)*k47(-1)+k48+phi*(k48-k48(-1))^zeta/(2*k48)-(1-delta)*k48(-1)+k49+phi*(k49-k49(-1))^zeta/(2*k49)-(1-delta)*k49(-1)+k50+phi*(k50-k50(-1))^zeta/(2*k50)-(1-delta)*k50(-1)= a1*k1(-1)^alpha+a2*k2(-1)^alpha+a3*k3(-1)^alpha+a4*k4(-1)^alpha+a5*k5(-1)^alpha+a6*k6(-1)^alpha+a7*k7(-1)^alpha+a8*k8(-1)^alpha+a9*k9(-1)^alpha+a10*k10(-1)^alpha+a11*k11(-1)^alpha+a12*k12(-1)^alpha+a13*k13(-1)^alpha+a14*k14(-1)^alpha+a15*k15(-1)^alpha+a16*k16(-1)^alpha+a17*k17(-1)^alpha+a18*k18(-1)^alpha+a19*k19(-1)^alpha+a20*k20(-1)^alpha+a21*k21(-1)^alpha+a22*k22(-1)^alpha+a23*k23(-1)^alpha+a24*k24(-1)^alpha+a25*k25(-1)^alpha+a26*k26(-1)^alpha+a27*k27(-1)^alpha+a28*k28(-1)^alpha+a29*k29(-1)^alpha+a30*k30(-1)^alpha+a31*k31(-1)^alpha+a32*k32(-1)^alpha+a33*k33(-1)^alpha+a34*k34(-1)^alpha+a35*k35(-1)^alpha+a36*k36(-1)^alpha+a37*k37(-1)^alpha+a38*k38(-1)^alpha+a39*k39(-1)^alpha+a40*k40(-1)^alpha+a41*k41(-1)^alpha+a42*k42(-1)^alpha+a43*k43(-1)^alpha+a44*k44(-1)^alpha+a45*k45(-1)^alpha+a46*k46(-1)^alpha+a47*k47(-1)^alpha+a48*k48(-1)^alpha+a49*k49(-1)^alpha+a50*k50(-1)^alpha;
+end;
+initval;
+c=1;
+e=0;
+a1=1;
+e1=0;
+k1=10;
+a2=1;
+e2=0;
+k2=10;
+a3=1;
+e3=0;
+k3=10;
+a4=1;
+e4=0;
+k4=10;
+a5=1;
+e5=0;
+k5=10;
+a6=1;
+e6=0;
+k6=10;
+a7=1;
+e7=0;
+k7=10;
+a8=1;
+e8=0;
+k8=10;
+a9=1;
+e9=0;
+k9=10;
+a10=1;
+e10=0;
+k10=10;
+a11=1;
+e11=0;
+k11=10;
+a12=1;
+e12=0;
+k12=10;
+a13=1;
+e13=0;
+k13=10;
+a14=1;
+e14=0;
+k14=10;
+a15=1;
+e15=0;
+k15=10;
+a16=1;
+e16=0;
+k16=10;
+a17=1;
+e17=0;
+k17=10;
+a18=1;
+e18=0;
+k18=10;
+a19=1;
+e19=0;
+k19=10;
+a20=1;
+e20=0;
+k20=10;
+a21=1;
+e21=0;
+k21=10;
+a22=1;
+e22=0;
+k22=10;
+a23=1;
+e23=0;
+k23=10;
+a24=1;
+e24=0;
+k24=10;
+a25=1;
+e25=0;
+k25=10;
+a26=1;
+e26=0;
+k26=10;
+a27=1;
+e27=0;
+k27=10;
+a28=1;
+e28=0;
+k28=10;
+a29=1;
+e29=0;
+k29=10;
+a30=1;
+e30=0;
+k30=10;
+a31=1;
+e31=0;
+k31=10;
+a32=1;
+e32=0;
+k32=10;
+a33=1;
+e33=0;
+k33=10;
+a34=1;
+e34=0;
+k34=10;
+a35=1;
+e35=0;
+k35=10;
+a36=1;
+e36=0;
+k36=10;
+a37=1;
+e37=0;
+k37=10;
+a38=1;
+e38=0;
+k38=10;
+a39=1;
+e39=0;
+k39=10;
+a40=1;
+e40=0;
+k40=10;
+a41=1;
+e41=0;
+k41=10;
+a42=1;
+e42=0;
+k42=10;
+a43=1;
+e43=0;
+k43=10;
+a44=1;
+e44=0;
+k44=10;
+a45=1;
+e45=0;
+k45=10;
+a46=1;
+e46=0;
+k46=10;
+a47=1;
+e47=0;
+k47=10;
+a48=1;
+e48=0;
+k48=10;
+a49=1;
+e49=0;
+k49=10;
+a50=1;
+e50=0;
+k50=10;
+end;
+
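+// 51x51 identity covariance matrix: the common shock e plus the country-specific
+// shocks e1..e50, all with unit variance (sigma scales them inside the model).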
+vcov = [
+1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0;
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1];
+
+order = 2;
\ No newline at end of file
diff --git a/dynare++/tests/sedmodel1.mod b/dynare++/tests/sedmodel1.mod
new file mode 100644
index 0000000000000000000000000000000000000000..a8ad536980ec01c971da60d570cc4d31c964ddf8
--- /dev/null
+++ b/dynare++/tests/sedmodel1.mod
@@ -0,0 +1,77 @@
+var A, Disp, G, Int, L,
+		   LStar, pi, Welf, WelfStar,  x0,
+		   Y, YGap, YStar, z1, z2, Cbar, Cequiv; 
+varexo eps1 eps2 eps3;
+
+parameters alpha  beta  gamma  rhoa  rhog  rho phi  chi  chi0  theta  xi  
+ABar  GBar  KBar  ZBar  piBar Istar;
+alpha = 0.3;
+  beta = 0.99;
+  gamma = 15;
+  rhoa = 0.8;
+  rhog = 0.7;
+  phi = 1.5;
+  chi = 1.5;
+  chi0 = 1;
+  theta = 0.333333333333;
+  xi = 0.75;
+  ABar = 4.0266;
+  GBar = 0.3163;
+  KBar = 9.489;
+  ZBar = .03;
+  piBar = 1;
+  rho=.8;
+  Istar=1.01010101010101;
+
+
+model;
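+// Equations entered without an '=' sign are residual expressions, i.e. expr = 0.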
+z1 - ((Y-G)^(1-phi) + beta *xi *piBar *pi(+1)^(1/theta) *z1(+1));
+z2 - (Y *chi0 *(1-L-ZBar)^(-chi) / ((1-alpha) *A *KBar^alpha
+	*L^(-alpha)) + beta *xi *pi(+1)^((1+theta)/theta) *z2(+1));
+x0 - (1+theta)*z2 /z1;
+pi^(-1/theta) - ((1-xi) *(x0*pi)^(-1/theta) + xi *piBar^(-1/theta));
+Y - (Disp^(-1) *A *KBar^alpha *L^(1-alpha));
+Disp - ((1-xi) *x0^(-(1+theta)/theta)
+	+ xi *(piBar/pi)^(-(1+theta)/theta) *Disp(-1));
+log(A/ABar) - (rhoa *log(A(-1)/ABar) + eps1);
+log(G/GBar) - (rhog *log(G(-1)/GBar) + eps2);
+(Y-G)^(-phi) - (beta *(Int/pi(+1)) *(Y(+1)-G(+1))^(-phi));
+Welf - ((Y-G)^(1-phi) /(1-phi)
+	+ chi0*(1-L-ZBar)^(1-chi) /(1-chi) + beta *Welf(+1));
+Cequiv = (((1-beta)*Welf-chi0*(1-LStar-ZBar)^(1-chi) /(1-chi))*(1-phi))^(1/(1-phi));
+(1-alpha) *A *KBar^alpha *LStar^(-alpha)
+	- (1+theta) *YStar *(YStar-G)^(phi-1) *chi0
+	*(1-LStar-ZBar)^(-chi);
+YStar - A *KBar^alpha *LStar^(1-alpha);
+YGap - (log(Y/YStar));
+WelfStar - ((YStar-G)^(1-phi) /(1-phi)
+	+ chi0*(1-LStar-ZBar)^(1-chi) /(1-chi) + beta *WelfStar(+1));
+Int = (Int(-1)^rho)*((Istar*(pi/piBar)^gamma)^(1-rho))*exp(eps3);
+Cbar=(1/100)*((1-phi)*((1-beta)*WelfStar-chi0*(1-LStar-ZBar)^(1-chi)/(1-chi)))^(1/(1-phi));
+end;
+
+initval;
+A=            4.022;	       
+Disp=	      1;	       
+G=	      0.3;	       
+Int=	      1.0101;	       
+L=	      0.22;	       
+LStar=	       0.22;	       
+pi=	      1;	       
+Welf=	      -359;	       
+WelfStar=     -359;	 
+x0=	      1;	       
+Y=	       2.8;	       
+YGap=	      0;	       
+YStar=	      2.8;	       
+z1=	      2.5;	       
+z2=	      1.8;             
+Cbar= 0.024;
+Cequiv = 0.024;
+end;
+
+vcov = [0.001 0 0 ; 0 0.001 0; 0 0 0.001];
+
+order=4;
+
+
diff --git a/dynare++/tests/sw_euro.mod b/dynare++/tests/sw_euro.mod
new file mode 100644
index 0000000000000000000000000000000000000000..fc6da21fb7221781489394a8607b86f65370c782
--- /dev/null
+++ b/dynare++/tests/sw_euro.mod
@@ -0,0 +1,98 @@
+var MC EH EF R_KF QF CF IF YF LF PIEF WF RF R_KH QH CH IH YH LH PIEH WH RH EE_A PIE_BAR EE_B EE_G EE_L EE_I KF KH ONE;    
+
+varexo E_A E_B E_G E_L E_I ETA_R E_PIE_BAR ETA_Q ETA_P ETA_W  ;  
+ 
+parameters xi_e lambda_w alpha czcap beta phi_i tau sig_c hab ccs cinvs phi_y gamma_w xi_w gamma_p xi_p sig_l r_dpi r_pie r_dy r_y rho rho_a rho_pb rho_b rho_g rho_l rho_i  ;
+alpha=.30;
+beta=0.99;
+tau=0.025;
+ccs=0.6;
+cinvs=.22;
+lambda_w = 0.5;
+phi_i= 6.771;
+sig_c=   1.353; 
+hab=    0.573;    
+xi_w=   0.737;
+sig_l=    2.400;
+xi_p=   0.908;
+xi_e= 0.599;
+gamma_w=    0.763;
+gamma_p=    0.469;
+czcap=    0.169;
+phi_y=    1.408;
+r_pie=     1.684;
+r_dpi=    0.14;
+rho=      0.961;
+r_y=      0.099;
+r_dy=     0.159;
+rho_a=    0.823;
+rho_b=    0.855;
+rho_g=    0.949;
+rho_l=   0.889;
+rho_i=   0.927;
+rho_pb=  0.924;
+
+
+model; 
+          CF = (1/(1+hab))*(CF(1)+hab*CF(-1))-((1-hab)/((1+hab)*sig_c))*(RF-PIEF(1)-EE_B) ;
+	      0 =  alpha*R_KF+(1-alpha)*WF -EE_A ;
+          PIEF = 0*ONE;
+	      IF = (1/(1+beta))* ((  IF(-1) + beta*(IF(1)))+(1/phi_i)*QF)+0*ETA_Q+EE_I ;
+	      QF = -(RF-PIEF(1))+(1-beta*(1-tau))*((1+czcap)/czcap)*R_KF(1)+beta*(1-tau)*QF(1) +0*EE_I ;
+          KF =  (1-tau)*KF(-1)+tau*IF(-1) ;
+	      YF = (ccs*CF+cinvs*IF)+EE_G  ;
+	      YF = 1*phi_y*( alpha*KF+alpha*(1/czcap)*R_KF+(1-alpha)*LF+EE_A ) ;
+	      WF = (sig_c/(1-hab))*(CF-hab*CF(-1)) + sig_l*LF - EE_L ;
+	      LF = R_KF*((1+czcap)/czcap)-WF+KF ;
+          EF = EF(-1)+EF(1)-EF+(LF-EF)*((1-xi_e)*(1-xi_e*beta)/(xi_e));
+         
+	      CH = (hab/(1+hab))*CH(-1)+(1/(1+hab))*CH(1)-((1-hab)/((1+hab)*sig_c))*(RH-PIEH(1)-EE_B) ;
+	      IH = (1/(1+beta))* ((  IH(-1) + beta*(IH(1)))+(1/phi_i)*QH )+1*ETA_Q+1*EE_I ;
+	      QH = -(RH-PIEH(1))+(1-beta*(1-tau))*((1+czcap)/czcap)*R_KH(1)+beta*(1-tau)*QH(1) +EE_I*0+0*ETA_Q ;
+	      KH =  (1-tau)*KH(-1)+tau*IH(-1) ;
+	      YH = (ccs*CH+cinvs*IH)+ EE_G   ;
+	      YH = phi_y*( alpha*KH+alpha*(1/czcap)*R_KH+(1-alpha)*LH ) +phi_y*EE_A  ;
+	      PIEH = (1/(1+beta*gamma_p))*
+	            ( 
+	            (beta)*(PIEH(1)) +(gamma_p)*(PIEH(-1)) 
+	            +((1-xi_p)*(1-beta*xi_p)/(xi_p))*(MC)
+	            )  + ETA_P ; 
+	            
+	      MC = alpha*R_KH+(1-alpha)*WH -EE_A;
+	      WH =  (1/(1+beta))*(beta*WH(+1)+WH(-1))
+                +(beta/(1+beta))*(PIEH(+1))
+                -((1+beta*gamma_w)/(1+beta))*(PIEH)
+                +(gamma_w/(1+beta))*(PIEH(-1))
+                -(1/(1+beta))*(((1-beta*xi_w)*(1-xi_w))/(((1+(((1+lambda_w)*sig_l)/(lambda_w))))*xi_w))*(WH-sig_l*LH-(sig_c/(1-hab))*(CH-hab*CH(-1))+EE_L)
+                +ETA_W;
+	      LH = R_KH*((1+czcap)/czcap)-WH+KH ;
+	      RH = r_dpi*(PIEH-PIEH(-1))
+              +(1-rho)*(r_pie*(PIEH(-1)-PIE_BAR)+r_y*(YH-YF))
+              +r_dy*(YH-YF-(YH(-1)-YF(-1)))
+              +rho*(RH(-1)-PIE_BAR)
+              +PIE_BAR
+              +ETA_R;
+          EH = EH(-1)+EH(1)-EH+(LH-EH)*((1-xi_e)*(1-xi_e*beta)/(xi_e));
+          
+          
+          EE_A = (rho_a)*EE_A(-1)  + E_A;
+	      PIE_BAR = rho_pb*PIE_BAR(-1)+ E_PIE_BAR ;
+	      EE_B = rho_b*EE_B(-1) + E_B ;
+	      EE_G = rho_g*EE_G(-1) + E_G ;
+	      EE_L = rho_l*EE_L(-1) + E_L ;
+	      EE_I = rho_i*EE_I(-1) + E_I ;
+	      ONE = 0*ONE(-1) ;
+end; 
+
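+// 10x10 shock covariance matrix; rows and columns are assumed to follow the
+// varexo declaration order (E_A E_B E_G E_L E_I ETA_R E_PIE_BAR ETA_Q ETA_P ETA_W),
+// with the shock variances on the diagonal.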
+vcov = [0.357604 0 0 0 0 0 0 0 0 0;
+        0 0.112896 0 0 0 0 0 0 0 0;
+        0 0 0.105625 0 0 0 0 0 0 0;
+        0 0 0 12.39040 0 0 0 0 0 0;
+        0 0 0 0 0.722500 0 0 0 0 0;
+        0 0 0 0 0 0.656100 0 0 0 0;
+        0 0 0 0 0 0 0.000289 0 0 0;
+        0 0 0 0 0 0 0 0.364816 0 0;
+        0 0 0 0 0 0 0 0 0.025600 0;
+        0 0 0 0 0 0 0 0 0 0.083521];
+
+order = 1;
diff --git a/dynare++/tests/swma_pie.dyn b/dynare++/tests/swma_pie.dyn
new file mode 100644
index 0000000000000000000000000000000000000000..bf6183ba6dcb57c623a82749e9074a7676e39060
--- /dev/null
+++ b/dynare++/tests/swma_pie.dyn
@@ -0,0 +1,187 @@
+// This model has sticky wages and investment adjustment costs; the consumer
+// goods sector is perfectly competitive, so MC=1. It includes money with
+// transaction costs based on money velocity, a financial accelerator, and
+// wages indexed to past consumer price inflation.
+
+// LAMBDA Lagrange multiplier on household's budget constraint (divided by price level)
+// PIE inflation of CPI
+// PIETILDE to what inflation new wage setters index (here PIE(-1) but could be PIEW(-1))
+// INT nominal interest rate
+// C real consumption
+// I real investment
+// K real capital
+// R real rental rate of capital
+// W real wage
+// L labour
+// Y real output
+// PIEW nominal wage inflation
+// VW wage front loading term for newly set wages
+// BBD, BBE, BBF, BBG terms in nominator and denominator in wage FOC
+// G government
+// SL process for labor shock
+// SC process for consumption shock
+// SY process for technology shock
+// RM real money balances hold
+// Q real price of capital
+// Q_M1 lagged Q
+// RK nominal return on capital for entrepreneurs
+// OMEGABAR threshold value for idiosyncratic shock
+// N real net worth of borrowers
+// WF lifetime utility
+
+var LAMBDA PIE PIETILDE INT C I K R W L Y PIEW VW BBD BBE BBF BBG G SL SC SY RM
+    Q Q_M1 RK OMEGABAR N ACAL ACALPRIME BCAL BCALPRIME WF;
+
+varexo E_C E_L E_Y E_GOV E_INT;
+
+parameters dep beta gamma eta biga alpha sigmaw phiw deltaw sg pietar h psi nu osigma mu tc1 tc2 ksi1 ksi2 c_weight rho_g rho_l rho_c rho_y;
+dep = 0.025;
+beta = 0.99;
+gamma = 1;
+eta = 2;
+alpha = 0.30;
+biga = alpha^(-alpha)*(1-alpha)^(alpha-1);
+sigmaw = 11;
+phiw = 2;
+deltaw = 0.75;
+sg = 0.18;
+pietar = 1.03^0.25;
+h = 0.8;
+// investment adjustment costs
+psi = 12;
+// entrepreneur saving rate
+nu = 0.94;
+// standard deviation of the entrepreneur's idiosyncratic shock
+osigma = 0.5;
+// monitoring cost for lender
+mu = 0.2;
+// consumption transaction costs
+tc1 = 0.05;
+tc2 = 0.5;
+// Taylor rule
+ksi1 = 0.106;
+ksi2 = 3;
+rho_g = 0.90;
+rho_l = 0.90;
+rho_c = 0.90;
+rho_y = 0.90;
+// weight of consumption utility 
+c_weight = 1;
+
+model;
+// capital accumulation
+K = (1 - dep - psi/2*(I(-1)/K(-1)-dep)^2)*K(-1) + I(-1);
+// FOC bonds
+LAMBDA = beta*INT*LAMBDA(+1)/PIE(+1);
+// FOC consumption (right hand side is equal to LAMBDA*(1+TC+TCPRIME*C/RM))
+SC*c_weight*(C-h*C(-1))^(-eta) = LAMBDA*(1+2*tc1*C/RM-2*sqrt(tc1*tc2));
+// FOC money (right hand side is equal to 1 - TCPRIME*C*C/RM/RM)
+beta*LAMBDA(+1)/LAMBDA/PIE(+1) = 1 - tc1*C*C/RM/RM + tc2;
+// FOC investment removed
+// FOC capital(+1) removed
+// real price of capital
+Q = (1-psi*(I/K-dep))^(-1);
+// nominal return on capital
+RK = PIE*(R + Q*(1 - dep + psi*(I/K-dep)*I/K -psi/2*(I/K-dep)^2))/Q(-1);
+// FOC in optimal contract for K(+1)
+RK(+1)*(BCAL(+1)*ACALPRIME(+1)/BCALPRIME(+1)-ACAL(+1)) = INT(+1)*ACALPRIME(+1)/BCALPRIME(+1);
+// Participation constraint
+//RK(+1)*BCAL(+1) = INT(+1)*(1-N(+1)*PIE(+1)/Q/K(+1));
+RK*BCAL = INT*(1-N*PIE/Q(-1)/K);
+// evolution of net worth (real)
+N*PIE*PIE(-1) = nu*(ACAL(-1)+BCAL(-1))*RK(-1)*Q_M1(-1)*K(-1) - nu*INT(-1)*(Q_M1(-1)*K(-1)-N(-1)*PIE);
+// marginal cost is 1
+1 = biga*(W/SY)^(1-alpha)*R^alpha;
+// labor attaining minimal MC
+L = (1-alpha)/W*Y;
+// capital attaining minimal MC
+K = alpha/R*Y;
+// FOC for newly set wages
+W*VW = sigmaw/(sigmaw-1)*(BBD*VW^(-sigmaw*gamma) + phiw*BBE*VW^(-sigmaw) - phiw*BBF)/BBG;
+// definition of BBD
+BBD = SL*L^(1+gamma) + deltaw*beta*(PIETILDE(+1)/PIEW(+1))^(-sigmaw*(1+gamma))*BBD(+1);
+// definition of BBE
+BBE = LAMBDA*L*W + deltaw*beta*(PIETILDE(+1)/PIEW(+1))^(-2*sigmaw)*BBE(+1);
+// definition of BBF
+BBF = LAMBDA*L*W + deltaw*beta*(PIETILDE(+1)/PIEW(+1))^(-sigmaw)*BBF(+1);
+// definition of BBG
+BBG = LAMBDA*L + deltaw*beta*(PIETILDE(+1)/PIEW(+1))^(-sigmaw)*PIETILDE(+1)/PIE(+1)*BBG(+1);
+// price index
+1 = (1-deltaw)*VW^(1-sigmaw) + deltaw*(PIETILDE/PIEW)^(1-sigmaw);
+// definition of ACAL
+ACAL = 0.5*erfc((log(OMEGABAR) - 0.5*osigma^2)/osigma/sqrt(2.0)) - OMEGABAR/2*erfc((log(OMEGABAR) + 0.5*osigma^2)/osigma/sqrt(2.0));
+// definition of BCAL
+BCAL = OMEGABAR/2*erfc((log(OMEGABAR) + 0.5*osigma^2)/osigma/sqrt(2.0)) + (1-mu)/2*(1+erf((log(OMEGABAR) - 0.5*osigma^2)/osigma/sqrt(2.0)));
+// definition of ACALPRIME
+ACALPRIME = -0.5*erfc((log(OMEGABAR) + 0.5*osigma^2)/osigma/sqrt(2.0));
+// definition of BCALPRIME
+BCALPRIME = -ACALPRIME - mu/osigma/2.506628274631*exp(-((log(OMEGABAR) + 0.5*osigma)^2)/2/osigma/osigma);
+// identity for PIEW
+PIEW = PIE*W/W(-1);
+// welfare identity
+WF = SC*c_weight*(C-h*C(-1))^(1-eta)/(1-eta) - SL*L^(1+gamma)/(1+gamma) + beta*WF(+1);
+// interest rate rule
+INT = INT(-1)^ksi1*((PIE/beta)*(PIE/pietar)^ksi2)^(1-ksi1)*exp(E_INT);
+// aggregate constraint
+Y = C + I + G + (1-ACAL-BCAL)*RK*Q(-1)*K;
+//Y = C + I + G;
+// process for government
+G/Y = (G(-1)/Y(-1))^rho_g*sg^(1-rho_g)*exp(E_GOV/sg);
+// to what do they index (pietar, past inflation, past indexed inflation)
+PIETILDE = PIE(-1);
+//PIETILDE = pietar;
+// exo processes
+SL = SL(-1)^rho_l*exp(E_L);
+SC = SC(-1)^rho_c*exp(E_C);
+SY = SY(-1)^rho_y*exp(E_Y);
+// lagged Q
+Q_M1 = Q(-1);
+end;
+
+initval;
+RM = 0.1;
+INT = pietar/beta;
+PIE = pietar;
+PIEW = pietar;
+PIETILDE = pietar;
+//R = dep/beta;
+R = 0.1;
+W = (1/biga/(R)^alpha)^(1/(1-alpha));
+LAMBDA = ((1-dep*alpha/R-sg)*(1-h)*c_weight/(1-alpha)*W^(1/gamma+1)*((sigmaw-1)/sigmaw)^(1/gamma))^(-1/(1/eta+1/gamma));
+L = (W*LAMBDA*(sigmaw-1)/sigmaw)^(1/gamma);
+Y = W*L/(1-alpha);
+K = alpha/R*Y;
+I = dep*K;
+G = sg*Y;
+VW = 1;
+BBD = L^(1+gamma)/(1-deltaw*beta);
+BBE = LAMBDA*L*W/(1-deltaw*beta);
+BBF = LAMBDA*L*W/(1-deltaw*beta);
+BBG = LAMBDA*L/(1-deltaw*beta);
+Q = 1;
+Q_M1 = Q;
+RK = 1/Q*PIE*(R+(1-dep)*Q);
+OMEGABAR = 0.5;
+ACAL = 0.5*erfc((log(OMEGABAR) - 0.5*osigma^2)/osigma/sqrt(2.0)) - OMEGABAR/2*erfc((log(OMEGABAR) + 0.5*osigma^2)/osigma/sqrt(2.0));
+BCAL = OMEGABAR/2*erfc((log(OMEGABAR) + 0.5*osigma^2)/osigma/sqrt(2.0)) + (1-mu)/2*(1+erf((log(OMEGABAR) - 0.5*osigma^2)/osigma/sqrt(2.0)));
+ACALPRIME = -0.5*erfc((log(OMEGABAR) + 0.5*osigma^2)/osigma/sqrt(2.0));
+BCALPRIME = -ACALPRIME - mu/osigma/2.506628274631*exp(-((log(OMEGABAR) + 0.5*osigma)^2)/2/osigma/osigma);
+N = (nu*(ACAL+BCAL)*RK*Q*K-nu*INT*Q*K)/(PIE*PIE-nu*INT*PIE);
+C = Y - I - G - (1-ACAL-BCAL)*RK*Q*K;
+SL = 1;
+SC = 1;
+SY = 1;
+WF = 1/(1-beta)*(SC*c_weight*((1-h)*C)^(1-eta)/(1-eta) - SL*L^(1+gamma)/(1+gamma));
+end;
+
+vcov = [
+0.0001 0 0 0 0;
+0 0.0001 0 0 0;
+0 0 0.0001 0 0;
+0 0 0 0.0001 0;
+0 0 0 0 0.0001
+];
+
+order = 4;
+
diff --git a/dynare++/tests/test.mod b/dynare++/tests/test.mod
new file mode 100644
index 0000000000000000000000000000000000000000..aa30ff7f349053e519d3ccfa8b28c84a2989d5fe
--- /dev/null
+++ b/dynare++/tests/test.mod
@@ -0,0 +1,39 @@
+var a, b, c, h, k, y;
+varexo e,u;
+
+parameters beta, rho, alpha, delta, theta, psi, tau, phi;
+
+alpha = 0.36;
+rho   = 0.95;
+tau   = 0.025;
+beta  = 0.99;
+delta = 0.025;
+psi   = 0;
+theta = 2.95;
+
+phi   = 0.1;
+
+model;
+c*theta*h^(1+psi)=(1-alpha)*y;
+k = beta*(((exp(b)*c)/(exp(b(+1))*c(+1)))*(exp(b(+1))*alpha*y(+1)+(1-delta)*k));
+y = exp(a)*(k(-1)^alpha)*(h^(1-alpha));
+k = exp(b)*(y-c)+(1-delta)*k(-1);
+a = rho*a(-1)+tau*b(-1) + e;
+b = tau*a(-1)+rho*b(-1) + u;
+end;
+
+initval;
+y = 1;
+c = 0.7;
+h = 0.1;
+k = 11;
+a = 0;
+b = 0;
+e = 0;
+u = 0;
+end;
+
+vcov = [ 0.000081 0.000008;0.000008 0.000081];
+
+order = 2;
+
diff --git a/dynare++/tests/test1.mod b/dynare++/tests/test1.mod
new file mode 100644
index 0000000000000000000000000000000000000000..b61ef76ca6406a13165baf5a20632202fdf8f786
--- /dev/null
+++ b/dynare++/tests/test1.mod
@@ -0,0 +1,42 @@
+var y, c, k, a, h, b;
+varexo e,u;
+
+parameters beta, rho, alpha, delta, theta, psi, tau, phi;
+
+alpha = 0.36;
+rho   = 0.95;
+tau   = 0.025;
+beta  = 0.99;
+delta = 0.025;
+psi   = 0;
+theta = 2.95;
+
+phi   = 0.1;
+
+model;
+c*theta*h^(1+psi)=(1-alpha)*y;
+k = beta*(((exp(b)*c)/(exp(b(+1))*c(+1)))*(exp(b(+1))*alpha*y(+1)+(1-delta)*k));
+y = exp(a)*(k(-1)^alpha)*(h^(1-alpha));
+k = exp(b)*(y-c)+(1-delta)*k(-1);
+a = rho*a(-1)+tau*b(-1) + e;
+b = tau*a(-1)+rho*b(-1) + u;
+end;
+
+initval;
+y = 1;
+c = 0.7;
+h = 0.1;
+k = 11;
+a = 0;
+b = 0;
+e = 0;
+u = 0;
+end;
+
+vcov = [ 0.000081 0.0000081; 0.0000081 0.000081];
+
+order = 1;
+
+
+
+
diff --git a/dynare++/tests/test2.mod b/dynare++/tests/test2.mod
new file mode 100644
index 0000000000000000000000000000000000000000..09e8d3554259b0a13895e12e1e72e4b6d236ac0d
--- /dev/null
+++ b/dynare++/tests/test2.mod
@@ -0,0 +1,39 @@
+var y, c, k, a, h, b;
+varexo e,u;
+
+parameters beta, rho, alpha, delta, theta, psi, tau, phi;
+
+alpha = 0.36;
+rho   = 0.95;
+tau   = 0.025;
+beta  = 0.99;
+delta = 0.025;
+psi   = 0;
+theta = 2.95;
+
+phi   = 0.1;
+
+model;
+c*theta*h^(1+psi)=(1-alpha)*y;
+k = beta*(((exp(b)*c)/(exp(b(+1))*c(+1)))*(exp(b(+1))*alpha*y(+1)+(1-delta)*k));
+y = exp(a)*(k(-1)^alpha)*(h^(1-alpha));
+k = exp(b)*(y-c)+(1-delta)*k(-1);
+a = rho*a(-1)+tau*b(-1) - rho*a(-2) - tau*b(-3) + e;
+b = tau*a(-1)+rho*b(-1) - rho*b(-2) - tau*a(-3) + u;
+end;
+
+initval;
+y = 1.08;
+c = 0.8;
+h = 0.29;
+k = 11.08;
+a = 0;
+b = 0;
+e = 0;
+u = 0;
+end;
+
+vcov = [ 0.01 0.005; 0.005 0.01];
+
+order = 1;
+
diff --git a/dynare++/tests/test2a.mod b/dynare++/tests/test2a.mod
new file mode 100644
index 0000000000000000000000000000000000000000..eb1defce951767afffbad1dff5a18a346ff38661
--- /dev/null
+++ b/dynare++/tests/test2a.mod
@@ -0,0 +1,39 @@
+var y, c, k, a, h, b;
+varexo e,u;
+
+parameters beta, rho, alpha, delta, theta, psi, tau, phi;
+
+alpha = 0.36;
+rho   = 0.95;
+tau   = 0.025;
+beta  = 0.99;
+delta = 0.025;
+psi   = 0;
+theta = 2.95;
+
+phi   = 0.1;
+
+model;
+c*theta*h^(1+psi)=(1-alpha)*y;
+k = beta*(((exp(b)*c)/(exp(b(+1))*c(+1)))*(exp(b(+1))*alpha*y(+1)+(1-delta)*k));
+y = exp(a)*(k(-1)^alpha)*(h^(1-alpha));
+k = exp(b)*(y-c)+(1-delta)*k(-1);
+a = rho*a(-1)+tau*b(-1) - rho*a(-2) - tau*b(-3) + e;
+b = tau*a(-1)+rho*b(-1) - rho*b(-2) - tau*a(-3) + u;
+end;
+
+initval;
+y = 1.08;
+c = 0.8;
+h = 0.29;
+k = 11.08;
+a = 0;
+b = 0;
+e = 0;
+u = 0;
+end;
+
+vcov = [ 0.01 0.005; 0.005 0.01];
+
+order = 2;
+
diff --git a/dynare++/tests/test3.mod b/dynare++/tests/test3.mod
new file mode 100644
index 0000000000000000000000000000000000000000..7f386f2a87fa9aa6a64f931760ac90bb916ca1fe
--- /dev/null
+++ b/dynare++/tests/test3.mod
@@ -0,0 +1,30 @@
+var y,x;
+varexo u,v;
+parameters a, b, c, d, e, f, g, h, j;
+
+a=0.8;
+b=0.9;
+c=0.9;
+d=1;
+e=-0.556875;
+f=-0.172125;
+g=-0.9;
+h=-0.2754;
+j=-1.8;
+
+
+model;
+x=a*x(-1)+u;
+c*y(+1)^2+d*y^2+e*x^2+f*u^2-d*v^2+g+h*x(-1)*u+j*x(-1)*v=0;
+end;
+
+initval;
+x=0;
+y=0.7237469;
+u=0;
+v=0;
+end;
+
+vcov=[1 0; 0 1];
+
+order = 2;
\ No newline at end of file
diff --git a/dynare++/tests/test4.mod b/dynare++/tests/test4.mod
new file mode 100644
index 0000000000000000000000000000000000000000..4250ac3a04f38f525c72f7563c1aefcffd0c76b6
--- /dev/null
+++ b/dynare++/tests/test4.mod
@@ -0,0 +1,27 @@
+var y,x;
+varexo u,v;
+parameters a, b, c, d, e, f, m;
+
+a=0.8;
+b=0.9;
+c=0.9;
+d=1;
+e=1;
+m=50;
+f = 1;
+
+model;
+x = a*x(-1)+u;
+c*y(+1)^2+d*y^2+e*x^2-(c+d)*m^2-(c*b*b*a*a+d*b*b+e*a*a)*x(-1)^2-(c*b*b+e)*u^2-2*(c*m*b*a+d*m*b)*x(-1)-2*c*m*b*u-2*(c*b*b*a+e*a)*x(-1)*u-d*f^2*v^2-2*d*m*f*v-2*d*b*f*x(-1)*v=0;
+end;
+
+initval;
+x=1;
+y=21;
+u=0;
+v=0;
+end;
+
+vcov=[1 0; 0 1];
+
+order = 2;
diff --git a/dynare++/tests/test5.mod b/dynare++/tests/test5.mod
new file mode 100644
index 0000000000000000000000000000000000000000..4e2e5938a31f9307ada72a28af6e61900d116e23
--- /dev/null
+++ b/dynare++/tests/test5.mod
@@ -0,0 +1,27 @@
+var y,x;
+varexo u,v;
+parameters a, b, c, d, e, m, n;
+
+a=-0.8;
+b=0.9;
+c=0.9;
+d=1;
+e=1;
+m=50;
+n=0.2;
+
+model;
+x=b*x(-1)+u;
+a*y(+1)+y-(a*b^2+1)*x(-1)^2-2*a*b*x(-1)*u-a*u^2-a-2*x(-1)*v-v^2;
+end;
+
+initval;
+x=0;
+y=0;
+u=0;
+v=0;
+end;
+
+vcov=[1 0; 0 1];
+
+order = 3;
diff --git a/dynare++/tests/test6.mod b/dynare++/tests/test6.mod
new file mode 100644
index 0000000000000000000000000000000000000000..53f4e826c21b7f4c09e05ad5a41f1cf64494ee80
--- /dev/null
+++ b/dynare++/tests/test6.mod
@@ -0,0 +1,27 @@
+var y,x;
+varexo u,v;
+parameters a, b, c, d, e, m, n;
+
+a=-0.8;
+b=0.9;
+c=0.9;
+d=1;
+e=1;
+m=50;
+n=0.2;
+
+model;
+x=b*x(-1)+u;
+a*y(+1)+y-(a*b^3+1)*x(-1)^3-3*a*b*x(-1)*u^2-3*a*b^2*x(-1)^2*u-a*u^3-a-v^2;
+end;
+
+initval;
+x=0;
+y=0;
+u=0;
+v=0;
+end;
+
+vcov=[1 0; 0 1];
+
+order = 3;
diff --git a/dynare++/tests/test7.mod b/dynare++/tests/test7.mod
new file mode 100644
index 0000000000000000000000000000000000000000..f520b9453622794c5e2971f2be137e2c074cdd3c
--- /dev/null
+++ b/dynare++/tests/test7.mod
@@ -0,0 +1,27 @@
+var y,x;
+varexo u,v;
+parameters a, b, c, d, e, m, n;
+
+a=-0.8;
+b=0.9;
+c=0.9;
+d=1;
+e=1;
+m=50;
+n=0.2;
+
+model;
+x=b*x(-1)+u;
+a*y(+1)+y-(a*b^4+1)*x(-1)^4-4*a*b*x(-1)*u^3-4*a*b^3*x(-1)^3*u-6*a*(b*x(-1)*u)^2-a*u^4-v;
+end;
+
+initval;
+x=0;
+y=0;
+u=0;
+v=0;
+end;
+
+vcov=[1 0; 0 1];
+
+order = 4;
diff --git a/dynare++/tl/cc/Makefile b/dynare++/tl/cc/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..896c8c04f5a4ec642945c6b3f4a8b05eb3722677
--- /dev/null
+++ b/dynare++/tl/cc/Makefile
@@ -0,0 +1,60 @@
+# $Id: Makefile 2344 2009-02-09 20:36:08Z michel $
+# Copyright 2004, Ondra Kamenik
+
+include ../../Makefile.include
+
+#LD_LIBS := -llapack -lcblas -lf77blas -latlas -lg2c
+
+CC_FLAGS := -I../../sylv/cc $(CC_FLAGS)
+
+ifeq ($(DEBUG),yes)
+	CC_FLAGS := $(CC_FLAGS) -g -DTL_DEBUG=2
+else
+	CC_FLAGS := $(CC_FLAGS) -O2 -DPOSIX_THREADS
+endif
+
+ifeq ($(OS),Windows_NT)
+	CC_FLAGS := -mno-cygwin -mthreads $(CC_FLAGS)
+	LD_LIBS := -mno-cygwin -mthreads $(LD_LIBS)  -lpthreadGC1
+else
+	LD_LIBS := $(LD_LIBS) -lpthread
+endif
+
+
+matrix_interface := GeneralMatrix Vector SylvException 
+matobjs := $(patsubst %, ../../sylv/cc/%.o, $(matrix_interface))
+cwebsource := $(wildcard *.cweb)
+cppsource := $(patsubst %.cweb,%.cpp,$(cwebsource)) 
+objects := $(patsubst %.cweb,%.o,$(cwebsource))
+hwebsource := $(wildcard *.hweb)
+hsource := $(patsubst %.hweb,%.h,$(hwebsource))
+
+
+dummy.ch:
+	touch dummy.ch
+
+%.cpp: %.cweb dummy.ch
+	ctangle -bhp $*.cweb dummy.ch $*.cpp
+
+%.h: %.hweb dummy.ch
+	ctangle -bhp $*.hweb dummy.ch $*.h
+
+%.o : %.cpp $(hsource)
+	$(CC) $(CC_FLAGS) $(EXTERN_DEFS) -c $*.cpp
+
+all: $(objects) $(cppsource) $(hsource)
+
+tl.pdf: doc
+
+doc: main.web $(hwebsource) $(cwebsource)
+	cweave -bhp main.web
+	pdftex main
+	mv main.pdf tl.pdf
+
+clear:
+	rm -f $(cppsource)
+	rm -f $(hsource)
+	rm -f *.o
+	rm -f main.{dvi,idx,log,pdf,scn,tex,toc}
+	rm -f dummy.ch
+	rm -f *~
diff --git a/dynare++/tl/cc/equivalence.cweb b/dynare++/tl/cc/equivalence.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..a1a3495142311e0aa776a1fe64416e36cb708cc2
--- /dev/null
+++ b/dynare++/tl/cc/equivalence.cweb
@@ -0,0 +1,477 @@
+@q $Id: equivalence.cweb 148 2005-04-19 15:12:26Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@ Start of {\tt equivalence.cpp} file.
+
+@c
+#include "equivalence.h"
+#include "permutation.h"
+#include "tl_exception.h"
+
+#include <string.h>
+
+@<|OrdSequence| method codes@>;
+@<|Equivalence| method codes@>;
+@<|EquivalenceSet| method codes@>;
+@<|EquivalenceBundle| method codes@>;
+ 
+@ 
+@<|OrdSequence| method codes@>=
+@<|OrdSequence::operator[]| code@>;
+@<|OrdSequence::operator<| code@>;
+@<|OrdSequence::operator==| code@>;
+@<|OrdSequence::add| codes@>;
+@<|OrdSequence::has| code@>;
+@<|OrdSequence::average()| code@>;
+@<|OrdSequence::print| code@>;
+
+@ 
+@<|Equivalence| method codes@>=
+@<|Equivalence| constructors@>;
+@<|Equivalence| copy constructors@>;
+@<|Equivalence::findHaving| codes@>;
+@<|Equivalence::find| codes@>;
+@<|Equivalence::insert| code@>;
+@<|Equivalence::operator=| code@>;
+@<|Equivalence::operator==| code@>;
+@<|Equivalence::trace| code@>;
+@<|Equivalence::trace| permuted code@>;
+@<|Equivalence::print| code@>;
+
+@ 
+@<|EquivalenceSet| method codes@>=
+@<|EquivalenceSet| constructor code@>;
+@<|EquivalenceSet::has| code@>;
+@<|EquivalenceSet::addParents| code@>;
+@<|EquivalenceSet::print| code@>;
+
+@ 
+@<|EquivalenceBundle| method codes@>=
+@<|EquivalenceBundle| constructor code@>;
+@<|EquivalenceBundle| destructor code@>;
+@<|EquivalenceBundle::get| code@>;
+@<|EquivalenceBundle::generateUpTo| code@>;
+
+
+@ 
+@<|OrdSequence::operator[]| code@>=
+int OrdSequence::operator[](int i) const
+{
+	TL_RAISE_IF((i<0 || i>=length()),
+				"Index out of range in OrdSequence::operator[]");
+	return data[i];
+}
+
+@ Here we implement the ordering. It can be changed, or various
+orderings can be used for different problem sizes. We order them
+according to the average, and then according to the first item.
+
+@<|OrdSequence::operator<| code@>=
+bool OrdSequence::operator<(const OrdSequence& s) const
+{
+	double ta = average();
+	double sa = s.average();
+	return (ta < sa || ((ta == sa) && (operator[](0) > s[0])));
+}
+
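+@ For example, the class $\{1,2\}$ (average $1.5$) precedes $\{0,4\}$
+(average $2$); when two classes have the same average, as $\{1,3\}$ and
+$\{0,4\}$ do, the one with the greater first element (here $\{1,3\}$)
+comes first.
+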
+@ 
+@<|OrdSequence::operator==| code@>=
+bool OrdSequence::operator==(const OrdSequence& s) const
+{
+	if (length() != s.length())
+		return false;
+
+	int i = 0;
+	while (i < length() && operator[](i) == s[i])
+		i++;
+
+	return (i == length());
+}
+
+
+@ The first |add| adds a given integer to the class, the second
+iterates through a given sequence and adds all its elements to the
+class.
+
+@<|OrdSequence::add| codes@>=
+void OrdSequence::add(int i)
+{
+	vector<int>::iterator vit = data.begin();
+	while (vit != data.end() && *vit < i)
+		++vit;
+	if (vit != data.end() && *vit == i)
+		return;
+	data.insert(vit, i);
+}
+@#
+void OrdSequence::add(const OrdSequence& s)
+{
+	vector<int>::const_iterator vit = s.data.begin();
+	while (vit != s.data.end()) {
+		add(*vit);
+		++vit;
+	}
+}
+
+@ Answers |true| if a given number is in the class.
+@<|OrdSequence::has| code@>=
+bool OrdSequence::has(int i) const
+{
+	vector<int>::const_iterator vit = data.begin();
+	while (vit != data.end()) {
+		if (*vit == i)
+			return true;
+		++vit;
+	}
+	return false;
+}
+
+@ Return an average of the class. 
+@<|OrdSequence::average()| code@>=
+double OrdSequence::average() const
+{
+	double res = 0;
+	for (unsigned int i = 0; i < data.size(); i++)
+		res += data[i];
+	TL_RAISE_IF(data.size() == 0,
+				"Attempt to take average of empty class in OrdSequence::average");
+	return res/data.size();
+}
+
+@ Debug print.
+@<|OrdSequence::print| code@>=
+void OrdSequence::print(const char* prefix) const
+{
+	printf("%s",prefix);
+	for (unsigned int i = 0; i < data.size(); i++)
+		printf("%d ",data[i]);
+	printf("\n");
+}
+
+@ 
+@<|Equivalence| constructors@>=
+Equivalence::Equivalence(int num)
+	: n(num)
+{
+	for (int i = 0; i < num; i++) {
+		OrdSequence s;
+		s.add(i);
+		classes.push_back(s);
+	}
+}
+@#
+Equivalence::Equivalence(int num, const char* dummy)
+	: n(num)
+{
+	OrdSequence s;
+	for (int i = 0; i < num; i++)
+		s.add(i);
+	classes.push_back(s);
+}
+
+@ Copy constructors. The second also glues a given couple.
+@<|Equivalence| copy constructors@>=
+Equivalence::Equivalence(const Equivalence& e)
+	: n(e.n),
+	  classes(e.classes)
+{
+}
+@#
+Equivalence::Equivalence(const Equivalence& e, int i1, int i2)
+	: n(e.n),
+	  classes(e.classes)
+{
+	seqit s1 = find(i1);
+	seqit s2 = find(i2);
+	if (s1 != s2) {
+		OrdSequence ns(*s1);
+		ns.add(*s2);
+		classes.erase(s1);
+		classes.erase(s2);
+		insert(ns);
+	}
+}
+
+@ 
+@<|Equivalence::operator=| code@>=
+const Equivalence& Equivalence::operator=(const Equivalence& e)
+{
+	classes.clear();
+	n = e.n;
+	classes = e.classes;
+	return *this;
+}
+
+@ 
+@<|Equivalence::operator==| code@>=
+bool Equivalence::operator==(const Equivalence& e) const
+{
+	if (! std::operator==(classes, e.classes))
+		return false;
+
+	if (n != e.n)
+		return false;
+
+	return true;
+}
+
+
+@ Return an iterator pointing to a class having a given integer.
+@<|Equivalence::findHaving| codes@>=
+Equivalence::const_seqit Equivalence::findHaving(int i) const
+{
+	const_seqit si = classes.begin();
+	while (si != classes.end()) {
+		if ((*si).has(i))
+			return si;
+		++si;
+	}
+	TL_RAISE_IF(si == classes.end(),
+				"Couldn't find equivalence class in Equivalence::findHaving");
+	return si;
+}
+@#
+Equivalence::seqit Equivalence::findHaving(int i)
+{
+	seqit si = classes.begin();
+	while (si != classes.end()) {
+		if ((*si).has(i))
+			return si;
+		++si;
+	}
+	TL_RAISE_IF(si == classes.end(),
+				"Couldn't find equivalence class in Equivalence::findHaving");
+	return si;
+}
+
+
+@ Find $j$-th class for a given $j$.
+@<|Equivalence::find| codes@>=
+Equivalence::const_seqit Equivalence::find(int j) const
+{
+	const_seqit si = classes.begin();
+	int i = 0;
+	while (si != classes.end() && i < j) {
+		++si;
+		i++;
+	}
+	TL_RAISE_IF(si == classes.end(),
+				"Couldn't find equivalence class in Equivalence::find");
+	return si;
+}
+@#
+Equivalence::seqit Equivalence::find(int j)
+{
+	seqit si = classes.begin();
+	int i = 0;
+	while (si != classes.end() && i < j) {
+		++si;
+		i++;
+	}
+	TL_RAISE_IF(si == classes.end(),
+				"Couldn't find equivalence class in Equivalence::find");
+	return si;
+}
+
+
+@ Insert a new class while preserving the ordering.
+@<|Equivalence::insert| code@>=
+void Equivalence::insert(const OrdSequence& s)
+{
+	seqit si = classes.begin();
+	while (si != classes.end() && *si < s) 
+		++si;
+	classes.insert(si, s);
+}
+
+@ Trace the equivalence into the integer sequence. The classes are in
+some order (described earlier), and items within classes are ordered,
+which implies that the data can be linearized. This method
+``prints'' them to the sequence. We allow tracing only a given
+number of classes from the beginning.
+
+@<|Equivalence::trace| code@>=
+void Equivalence::trace(IntSequence& out, int num) const
+{
+	int i = 0;
+	int nc = 0;
+	for (const_seqit it = begin(); it != end() && nc < num; ++it, ++nc)
+		for (int j = 0;	j < (*it).length(); j++, i++) {
+			TL_RAISE_IF(i >= out.size(),
+						"Wrong size of output sequence in Equivalence::trace");
+			out[i] = (*it)[j];
+		}
+}
+
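+@ For example, if the classes are stored in the order
+$\{1,2\},\{0,4\},\{3\}$, then |trace| with |num| equal to 2 writes
+$(1,2,0,4)$ into the first four places of |out|, and the full trace
+writes $(1,2,0,4,3)$.
+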
+@ 
+@<|Equivalence::trace| permuted code@>=
+void Equivalence::trace(IntSequence& out, const Permutation& per) const
+{
+	TL_RAISE_IF(out.size() != n,
+				"Wrong size of output sequence in Equivalence::trace");
+	TL_RAISE_IF(per.size() != numClasses(),
+				"Wrong permutation for permuted Equivalence::trace");
+	int i = 0;
+	for (int iclass = 0; iclass < numClasses(); iclass++) {
+		const_seqit itper = find(per.getMap()[iclass]);
+		for (int j = 0; j < (*itper).length(); j++, i++)
+			out[i] = (*itper)[j];
+	}
+}
+
+
+@ Debug print.
+@<|Equivalence::print| code@>=
+void Equivalence::print(const char* prefix) const
+{
+	int i = 0;
+	for (const_seqit it = classes.begin();
+		 it != classes.end();
+		 ++it, i++) {
+		printf("%sclass %d: ",prefix,i);
+		(*it).print("");
+	}
+}
+
+@ Here we construct the set of all equivalences over an $n$-element
+set. The construction proceeds as follows. We maintain a list of added
+equivalences. At each iteration we pop the front of the list and try to
+add all parents of the popped equivalence. This action adds new
+equivalences to the object and also to the added list. We finish the
+iterations when the added list is empty.
+
+In the beginning we start with
+$\{\{0\},\{1\},\ldots,\{n-1\}\}$. Adding parents means that, for a given
+equivalence, we try to glue all possible couples of classes and check
+whether the resulting equivalence is already in the equivalence set. This
+is not efficient, but we do the construction only once.
+
+In this way we breadth-first search the lattice of all equivalences. Note
+that the lattice is modular, which is why the result of the construction
+is a list with the property that between two equivalences with the same
+number of classes there are only equivalences with that number of
+classes. Obviously, the number of classes is weakly decreasing along the
+list (since it is constructed by gluing attempts).
+
+
+@<|EquivalenceSet| constructor code@>=
+EquivalenceSet::EquivalenceSet(int num)
+	: n(num),
+	  equis()
+{
+	list<Equivalence> added;
+	Equivalence first(n);
+	equis.push_back(first);
+	addParents(first, added);
+	while (! added.empty()) {
+		addParents(added.front(), added);
+		added.pop_front();
+	}
+	if (n > 1) {
+		Equivalence last(n, "");
+		equis.push_back(last);
+	}
+}
+
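+@ As an illustration only (a standalone sketch, not part of these
+sources), the following C++ program mirrors the gluing construction
+described above on plain |vector|s; the names |Partition| and |canon|
+are made up for the example, and a |std::set| is used for
+deduplication instead of the ordered |equis| list. For $n=4$ it finds
+all 15 partitions (the Bell number $B_4$).
+
+#include <algorithm>
+#include <cstdio>
+#include <deque>
+#include <set>
+#include <vector>
+
+typedef std::vector<std::vector<int> > Partition; // classes of a partition
+
+// Canonicalize: sort members within classes, then the classes themselves.
+static Partition canon(Partition p)
+{
+	for (size_t i = 0; i < p.size(); i++)
+		std::sort(p[i].begin(), p[i].end());
+	std::sort(p.begin(), p.end());
+	return p;
+}
+
+int main()
+{
+	const int n = 4;
+	Partition finest;                       // {{0},{1},...,{n-1}}
+	for (int i = 0; i < n; i++)
+		finest.push_back(std::vector<int>(1, i));
+
+	std::set<Partition> all;                // all partitions found so far
+	std::deque<Partition> added;            // queue of newly added ones
+	all.insert(canon(finest));
+	added.push_back(finest);
+
+	// Breadth-first gluing: pop the front, glue every couple of classes.
+	while (!added.empty()) {
+		Partition e = added.front();
+		added.pop_front();
+		for (size_t i = 0; i < e.size(); i++)
+			for (size_t j = i + 1; j < e.size(); j++) {
+				Partition ns;
+				std::vector<int> glued = e[i];
+				glued.insert(glued.end(), e[j].begin(), e[j].end());
+				ns.push_back(glued);
+				for (size_t k = 0; k < e.size(); k++)
+					if (k != i && k != j)
+						ns.push_back(e[k]);
+				ns = canon(ns);
+				if (all.insert(ns).second)  // not seen before
+					added.push_back(ns);
+			}
+	}
+	std::printf("%lu partitions of a %d-element set\n",
+	            (unsigned long) all.size(), n);
+	return 0;
+}
+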
+@ This method is used in |addParents| and returns |true| if the object
+already contains the given equivalence. We traverse the list of
+equivalences in reverse order, since equivalences are ordered in the
+list from the most primitive (nothing equivalent) to the maximal one
+(everything equivalent). The |has| method returns |true| much more often
+than |false|, |operator==| between equivalences is quick if the numbers
+of classes differ, and as the construction proceeds we compare mostly
+with equivalences having fewer classes, so it is more efficient to
+traverse the equivalences from fewer classes to more classes; hence the
+reverse order.
+
+@<|EquivalenceSet::has| code@>=
+bool EquivalenceSet::has(const Equivalence& e) const
+{
+	list<Equivalence>::const_reverse_iterator rit = equis.rbegin();
+	while (rit != equis.rend() && *rit != e)
+		++rit;
+	if (rit != equis.rend())
+		return true;
+	return false;
+}
+
+@ The responsibility of this method is to try to glue all possible
+couples of classes within a given equivalence and add the resulting
+equivalences which are not in the list yet. These are also appended to
+the |added| list.
+
+If the number of classes is 2 or 1, we exit, because there is nothing to
+be added here: gluing a two-class equivalence would only yield the
+maximal equivalence, which is appended separately at the end of the
+constructor.
+
+@<|EquivalenceSet::addParents| code@>=
+void EquivalenceSet::addParents(const Equivalence& e,
+								list<Equivalence>& added)
+{
+	if (e.numClasses() == 2 || e.numClasses() == 1)
+		return;
+
+	for (int i1 = 0; i1 < e.numClasses(); i1++)
+		for (int i2 = i1+1; i2 < e.numClasses(); i2++) {
+			Equivalence ns(e, i1, i2);
+			if (! has(ns)) {
+				added.push_back(ns);
+				equis.push_back(ns);
+			}
+		}
+}
+			
+@ Debug print.
+@<|EquivalenceSet::print| code@>=
+void EquivalenceSet::print(const char* prefix) const
+{
+	char tmp[100];
+	strcpy(tmp, prefix);
+	strcat(tmp, "    ");
+	int i = 0;
+	for (list<Equivalence>::const_iterator it = equis.begin();
+		 it != equis.end();
+		 ++it, i++) {
+		printf("%sequivalence %d:(classes %d)\n",prefix,i,(*it).numClasses());
+		(*it).print(tmp);
+	}
+}
+
+@ Construct the bundle. |nmax| is a maximum size of underlying set.
+@<|EquivalenceBundle| constructor code@>=
+EquivalenceBundle::EquivalenceBundle(int nmax)
+{
+	nmax = max(nmax, 1);
+	generateUpTo(nmax);
+}
+
+@ Destruct bundle. Just free all pointers.
+@<|EquivalenceBundle| destructor code@>=
+EquivalenceBundle::~EquivalenceBundle()
+{
+	for (unsigned int i = 0; i < bundle.size(); i++)
+		delete bundle[i];
+}
+
+@ Remember that the first item is |EquivalenceSet(1)|.
+@<|EquivalenceBundle::get| code@>=
+const EquivalenceSet& EquivalenceBundle::get(int n) const
+{
+	if (n > (int)(bundle.size()) || n < 1) {
+		TL_RAISE("Equivalence set not found in EquivalenceBundle::get");
+		return *(bundle[0]);
+	} else {
+		return *(bundle[n-1]);
+	}
+}
+
+@ Get |curmax| which is a maximum size in the bundle, and generate for
+all sizes from |curmax+1| up to |nmax|.
+
+@<|EquivalenceBundle::generateUpTo| code@>=
+void EquivalenceBundle::generateUpTo(int nmax)
+{
+	int curmax = bundle.size();
+	for (int i = curmax+1; i <= nmax; i++)
+		bundle.push_back(new EquivalenceSet(i));
+}
+
+
+@ End of {\tt equivalence.cpp} file.
\ No newline at end of file
diff --git a/dynare++/tl/cc/equivalence.hweb b/dynare++/tl/cc/equivalence.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..f7f2b5c4dd82947b1d2dec63f37998899bd77917
--- /dev/null
+++ b/dynare++/tl/cc/equivalence.hweb
@@ -0,0 +1,203 @@
+@q $Id: equivalence.hweb 148 2005-04-19 15:12:26Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@*2 Equivalences. Start of {\tt equivalence.h} file
+
+Here we define an equivalence on a set of integers $\{0, 1, \ldots,
+k-1\}$. The purpose is clear: in the tensor library we often iterate
+through all equivalences and sum matrices. We need an abstraction for
+an equivalence class, an equivalence, and a set of all equivalences.
+
+The equivalence class (which is basically a set of integers) is here
+implemented as an ordered integer sequence. The ordered sequence is not
+implemented via |IntSequence|, but via |vector<int>|, since we need
+insertions. The equivalence is implemented as an ordered list of
+equivalence classes, and the equivalence set is a list of equivalences.
+
+The ordering of the equivalence classes within an equivalence is very
+important. For instance, if we iterate through equivalences for $k=5$
+and pick some equivalence, say $\{\{0,4\},\{1,2\},\{3\}\}$, we
+then evaluate something like:
+$$\left[B_{y^2u^3}\right]_{\alpha_1\alpha_2\beta_1\beta_2\beta_3}=
+\cdots+\left[g_{y^3}\right]_{\gamma_1\gamma_2\gamma_3}
+\left[g_{yu}\right]^{\gamma_1}_{\alpha_1\beta_3}
+\left[g_{yu}\right]^{\gamma_2}_{\alpha_2\beta_1}
+\left[g_u\right]^{\gamma_3}_{\beta_2}+\cdots
+$$ 
+If the tensors are unfolded, we can evaluate this expression as
+$$g_{y^3}\cdot\left(g_{yu}\otimes g_{yu}\otimes g_{u}\right)\cdot P,$$
+where $P$ is a suitable permutation of columns of the expressions,
+which permutes them so that the index
+$(\alpha_1,\beta_3,\alpha_2,\beta_1,\beta_2)$ would go to
+$(\alpha_1,\alpha_2,\beta_1,\beta_2,\beta_3)$.
+The permutation $P$ can be very inefficient (copying a great amount of
+small chunks of data) if the equivalence class ordering is chosen
+badly. However, we do not provide any heuristic for minimizing the total
+time spent in all permutations. We choose an ordering which orders the
+classes according to their averages, and according to the smallest
+equivalence class element if the averages are the same.
+
+
+
+@s OrdSequence int
+@s Equivalence int
+@s EquivalenceSet int
+
+@c
+#ifndef EQUIVALENCE_H
+#define EQUIVALENCE_H
+
+#include "int_sequence.h"
+
+#include <vector>
+#include <list>
+
+using namespace std;
+
+@<|OrdSequence| class declaration@>;
+@<|Equivalence| class declaration@>;
+@<|EquivalenceSet| class declaration@>;
+@<|EquivalenceBundle| class declaration@>;
+
+#endif
+
+
+@ Here is the abstraction for an equivalence class. We implement it as
+|vector<int>|. We have a constructor for an empty class and a copy
+constructor. What is important here is the ordering operator
+|operator<| and the methods for adding an integer and for adding
+another sequence. We also provide the method |has|, which returns true
+if a given integer is contained.
+
+@<|OrdSequence| class declaration@>=
+class OrdSequence {
+	vector<int> data;
+public:@/
+	OrdSequence() : data()@+ {}
+	OrdSequence(const OrdSequence& s) : data(s.data)@+ {}
+	const OrdSequence& operator=(const OrdSequence& s)
+		{@+ data = s.data;@+ return *this;@+}
+	bool operator==(const OrdSequence& s) const;
+	int operator[](int i) const;
+	bool operator<(const OrdSequence& s) const;
+	const vector<int>& getData() const
+		{@+ return data;@+}
+	int length() const {@+ return data.size();@+}
+	void add(int i);
+	void add(const OrdSequence& s);
+	bool has(int i) const;
+	void print(const char* prefix) const;
+private:@/
+	double average() const;
+};
+
+
+@ Here is the abstraction for the equivalence. It is a list of
+equivalence classes. Also we remember |n|, which is a size of
+underlying set $\{0, 1, \ldots, n-1\}$.
+
+Method |trace| ``prints'' the equivalence into the integer sequence.
+
+@<|Equivalence| class declaration@>=
+class Permutation;
+class Equivalence {
+private:
+	int n;
+	list<OrdSequence> classes;
+public:@;
+	typedef list<OrdSequence>::const_iterator const_seqit;
+	typedef list<OrdSequence>::iterator seqit;
+
+	@<|Equivalence| constructors@>;
+	const Equivalence& operator=(const Equivalence& e);
+	bool operator==(const Equivalence& e) const;
+	bool operator!=(const Equivalence& e) const
+		{@+ return ! operator==(e);@+}
+	int getN() const {@+ return n;@+}
+	int numClasses() const {@+ return classes.size();@+}
+	void trace(IntSequence& out, int n) const;
+	void trace(IntSequence& out) const
+		{@+ trace(out, numClasses()); @+}
+	void trace(IntSequence& out, const Permutation& per) const;
+	void print(const char* prefix) const;
+	@<|Equivalence| begin and end methods@>;
+	const_seqit find(int i) const;
+	seqit find(int i);
+protected:@;
+	@<|Equivalence| protected methods@>;
+};
+
+@ The |EquivalenceSet| is a list of equivalences. The unique
+constructor constructs the set of all equivalences over an $n$-element
+set. The equivalences are sorted in the list so that equivalences with
+fewer classes are at the end.
+
+The two methods |has| and |addParents| are useful in the constructor.
+
+@<|EquivalenceSet| class declaration@>=
+class EquivalenceSet {
+	int n;
+	list<Equivalence> equis;
+public:@;
+	typedef list<Equivalence>::const_iterator const_iterator; 
+	EquivalenceSet(int num);
+	void print(const char* prefix) const;
+	const_iterator begin() const
+		{@+ return equis.begin();@+}
+	const_iterator end() const
+		{@+ return equis.end();@+}
+private:@;
+	bool has(const Equivalence& e) const;
+	void addParents(const Equivalence& e, list<Equivalence>& added);
+};
+
+@ The equivalence bundle class only encapsulates |EquivalenceSet|s
+from 1 up to a given number. It is able to retrieve the equivalence set
+over $n$-element set for a given $n$, and also it can generate some more
+sets on request.
+
+It is fully responsible for storage needed for |EquivalenceSet|s.
+
+@<|EquivalenceBundle| class declaration@>=
+class EquivalenceBundle {
+	vector<EquivalenceSet*> bundle;
+public:@;
+	EquivalenceBundle(int nmax);
+	~EquivalenceBundle();
+	const EquivalenceSet& get(int n) const;
+	void generateUpTo(int nmax);
+};
+
+@ The first constructor constructs $\{\{0\},\{1\},\ldots,\{n-1\}\}$.
+
+The second constructor constructs $\{\{0,1,\ldots,n-1\}\}$.
+
+The third is the copy constructor. And the fourth is the copy
+constructor plus gluing |i1| and |i2| in one class.
+
+@<|Equivalence| constructors@>=
+	Equivalence(int num);
+	Equivalence(int num, const char* dummy);
+	Equivalence(const Equivalence& e);
+	Equivalence(const Equivalence& e, int i1, int i2);
+
+@ 
+@<|Equivalence| begin and end methods@>=
+	seqit begin() {@+ return classes.begin();@+}
+	const_seqit begin() const {@+ return classes.begin();@+}
+	seqit end() {@+ return classes.end();@+}
+	const_seqit end() const {@+ return classes.end();@+}
+
+@ Here we have find methods. We can find an equivalence class having a
+given number or we can find an equivalence class of a given index within
+the ordering.
+
+We have also an |insert| method which inserts a given class
+according to the class ordering.
+
+@<|Equivalence| protected methods@>=
+	const_seqit findHaving(int i) const;
+	seqit findHaving(int i);
+	void insert(const OrdSequence& s);
+
+@ End of {\tt equivalence.h} file.
\ No newline at end of file
diff --git a/dynare++/tl/cc/fine_container.cweb b/dynare++/tl/cc/fine_container.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..8c314c88f9b7e724b6e3e505187db74a64d8a791
--- /dev/null
+++ b/dynare++/tl/cc/fine_container.cweb
@@ -0,0 +1,41 @@
+@q $Id: fine_container.cweb 1833 2008-05-18 20:22:39Z kamenik $ @>
+@q Copyright 2005, Ondra Kamenik @>
+
+@ Start of {\tt fine\_container.cpp} file.
+
+@c
+#include "fine_container.h"
+
+#include <math.h>
+
+@<|SizeRefinement| constructor code@>;
+
+@ Here we construct the vector of new sizes of containers (before
+|nc|) and copy all remaining sizes behind |nc|.
+
+@<|SizeRefinement| constructor code@>=
+SizeRefinement::SizeRefinement(const IntSequence& s, int nc, int max)
+{
+	new_nc = 0;
+	for (int i = 0; i < nc; i++) {
+		int nr = s[i]/max;
+		if (s[i] % max != 0)
+			nr++;
+		int ss = (nr>0) ? (int)round(((double)s[i])/nr) : 0;
+		for (int j = 0; j < nr - 1; j++) {
+			rsizes.push_back(ss);
+			ind_map.push_back(i);
+			new_nc++;
+		}
+		rsizes.push_back(s[i]-(nr-1)*ss);
+		ind_map.push_back(i);
+		new_nc++;
+	}
+
+	for (int i = nc; i < s.size(); i++) {
+		rsizes.push_back(s[i]);
+		ind_map.push_back(i);
+	}
+}
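+
+@ For example, with $s=(7,3)$, |nc| equal to 1 and |max| equal to 3, the
+first size 7 is split into pieces $(2,2,3)$ and the trailing size 3 is
+left untouched, giving refined sizes $(2,2,3,3)$, index map $(0,0,0,1)$
+and |new_nc| equal to 3.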
+
+@ End of {\tt fine\_container.cpp} file.
diff --git a/dynare++/tl/cc/fine_container.hweb b/dynare++/tl/cc/fine_container.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..bec478ff43ef43a96bb6867b947b7b57f686fa03
--- /dev/null
+++ b/dynare++/tl/cc/fine_container.hweb
@@ -0,0 +1,164 @@
+@q $Id: fine_container.hweb 332 2005-07-15 13:41:48Z kamenik $ @>
+@q Copyright 2005, Ondra Kamenik @>
+
+@*2 Refined stack of containers. Start of {\tt fine\_container.h} file.
+
+This file defines a refinement of the stack container. It makes a
+vertical refinement of a given stack container; it refines only matrix
+items, while the items which are always zero or can be identity
+matrices are not refined.
+
+The refinement is done by a simple construction from the stack
+container being refined. A parameter giving the maximum size of each
+stack in the refined container is passed. The resulting object is a
+stack container, so everything works seamlessly.
+
+We define here a class for the refinement of sizes, |SizeRefinement|;
+this is purely an auxiliary class allowing us to write the code more
+concisely. The main class of this file is |FineContainer|, which
+performs the refining. The two further classes |FoldedFineContainer|
+and |UnfoldedFineContainer| are its specializations.
+
+NOTE: This code was implemented with the hope that it would help to cut
+down memory allocations during the Faa Di Bruno formula
+evaluation. However, it seems that this needs to be accompanied by a
+similar treatment of the tensor multidimensional index. Thus, the
+abstraction is not currently used, but it might be useful in the future.
+
+@s SizeRefinement int
+@s FineContainer int
+@s FoldedFineContainer int
+@s UnfoldedFineContainer int
+
+@c
+#ifndef FINE_CONTAINER_H
+#define FINE_CONTAINER_H
+
+#include "stack_container.h"
+
+#include <vector>
+
+@<|SizeRefinement| class declaration@>;
+@<|FineContainer| class declaration@>;
+@<|FoldedFineContainer| class declaration@>;
+@<|UnfoldedFineContainer| class declaration@>;
+
+#endif
+
+@ This class splits the first |nc| elements of the given sequence |s|
+into a sequence having no items greater than the given |max|. The
+remaining elements (those behind |nc|) are left untouched. It also
+remembers the mapping, i.e. for a given index in the new sequence, it
+is able to return the corresponding index in the old sequence.
+
+@<|SizeRefinement| class declaration@>=
+class SizeRefinement {
+	vector<int> rsizes;
+	vector<int> ind_map;
+	int new_nc;
+public:@;
+	SizeRefinement(const IntSequence& s, int nc, int max);
+	int getRefSize(int i) const
+		{@+ return rsizes[i];@+}
+	int numRefinements() const
+		{@+ return rsizes.size();@+}
+	int getOldIndex(int i) const
+		{@+ return ind_map[i];@+}
+	int getNC() const
+		{@+ return new_nc;@+}
+};
+
+
+@ This main class of this file refines a given stack container, and
+inherits from the stack container. It also defines the |getType|
+method, which returns the type for a given stack as the type of the
+corresponding (old) stack of the former stack container.
+
+@<|FineContainer| class declaration@>=
+template <class _Ttype>@;
+class FineContainer : public SizeRefinement, public StackContainer<_Ttype> {
+protected:@;
+	typedef StackContainer<_Ttype> _Stype;
+	typedef typename StackContainerInterface<_Ttype>::_Ctype _Ctype;
+	typedef typename StackContainerInterface<_Ttype>::itype itype;
+	_Ctype** const ref_conts;
+	const _Stype& stack_cont;
+public:@;
+	@<|FineContainer| constructor@>;
+	@<|FineContainer| destructor@>;
+	itype getType(int i, const Symmetry& s) const
+		{@+ return stack_cont.getType(getOldIndex(i), s);@+}
+	
+};
+
+
+@ Here we construct the |SizeRefinement| and allocate space for the
+refined containers. Then the containers are created and put into the
+|conts| array. Note that the containers do not claim any further
+space, since all the tensors of the created containers are in-place
+submatrices.
+
+Here we use a dirty trick of converting |const| pointer to non-|const|
+pointer and passing it to a subtensor container constructor. The
+containers are stored in |ref_conts| and then in |conts| from
+|StackContainer|. However, this is safe since neither |ref_conts| nor
+|conts| are used in non-|const| contexts. For example,
+|StackContainer| has only a |const| method to return a member of
+|conts|.
+
+@<|FineContainer| constructor@>=
+FineContainer(const _Stype& sc, int max)
+	: SizeRefinement(sc.getStackSizes(), sc.numConts(), max),
+	  StackContainer<_Ttype>(numRefinements(), getNC()),
+	  ref_conts(new _Ctype*[getNC()]),
+	  stack_cont(sc)
+{
+	for (int i = 0; i < numRefinements(); i++)
+		_Stype::stack_sizes[i] = getRefSize(i);
+	_Stype::calculateOffsets();
+
+	int last_cont = -1;
+	int last_row = 0;
+	for (int i = 0; i < getNC(); i++) {
+		if (getOldIndex(i) != last_cont) {
+			last_cont = getOldIndex(i);
+			last_row = 0;
+		}
+		union {const _Ctype* c; _Ctype* n;} convert;
+		convert.c = stack_cont.getCont(last_cont);
+		ref_conts[i] = new _Ctype(last_row, _Stype::stack_sizes[i],
+								  *(convert.n));
+		_Stype::conts[i] = ref_conts[i];
+		last_row += _Stype::stack_sizes[i];
+	}
+}
+
+@ Here we deallocate the refined containers, and deallocate the array of refined containers.
+@<|FineContainer| destructor@>=
+virtual ~FineContainer()
+{
+	for (int i = 0; i < _Stype::numConts(); i++)
+		delete ref_conts[i];
+	delete [] ref_conts;
+}
+
+
+
+@ Here is |FineContainer| specialization for folded tensors.
+@<|FoldedFineContainer| class declaration@>=
+class FoldedFineContainer : public FineContainer<FGSTensor>, public FoldedStackContainer {
+public:@;
+	FoldedFineContainer(const StackContainer<FGSTensor>& sc, int max)
+		: FineContainer<FGSTensor>(sc, max) @+ {}
+};
+
+@ Here is |FineContainer| specialization for unfolded tensors.
+@<|UnfoldedFineContainer| class declaration@>=
+class UnfoldedFineContainer : public FineContainer<UGSTensor>, public UnfoldedStackContainer {
+public:@;
+	UnfoldedFineContainer(const StackContainer<UGSTensor>& sc, int max)
+		: FineContainer<UGSTensor>(sc, max) @+ {}
+};
+
+
+@ End of {\tt fine\_container.h} file.
diff --git a/dynare++/tl/cc/fs_tensor.cweb b/dynare++/tl/cc/fs_tensor.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..1635dffcc2f91b888aea0c02a9c6ddac41dc3ff5
--- /dev/null
+++ b/dynare++/tl/cc/fs_tensor.cweb
@@ -0,0 +1,306 @@
+@q $Id: fs_tensor.cweb 280 2005-06-13 09:40:02Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@ Start of {\tt fs\_tensor.cpp} file.
+
+@c
+#include "fs_tensor.h"
+#include "gs_tensor.h"
+#include "sparse_tensor.h"
+#include "rfs_tensor.h"
+#include "tl_exception.h"
+
+@<|FFSTensor| contraction constructor@>;
+@<|FFSTensor::calcMaxOffset| code@>;
+@<|FFSTensor| conversion from sparse@>;
+@<|FFSTensor| conversion from unfolded@>;
+@<|FFSTensor::unfold| code@>;
+@<|FFSTensor::increment| code@>;
+@<|FFSTensor::decrement| code@>;
+@<|FFSTensor::getOffset| code@>;
+@<|FFSTensor::addSubTensor| code@>;
+@<|UFSTensor| contraction constructor@>;
+@<|UFSTensor| conversion from folded@>;
+@<|UFSTensor::fold| code@>;
+@<|UFSTensor| increment and decrement@>;
+@<|UFSTensor::getOffset| code@>;
+@<|UFSTensor::addSubTensor| code@>;
+@<|UFSTensor::unfoldData| code@>;
+
+@ This constructs a fully symmetric tensor as given by the contraction:
+$$\left[g_{y^n}\right]_{\alpha_1\ldots\alpha_n}=
+\left[t_{y^{n+1}}\right]_{\alpha_1\ldots\alpha_n\beta}[x]^\beta$$
+
+We go through all columns of the output tensor $[g]$ and for each column
+we cycle through all variables, inserting the variable into the column
+coordinates to obtain a column of tensor $[t]$. The column is multiplied
+by the appropriate item of |x| and added to the column of the $[g]$ tensor.
+
+@<|FFSTensor| contraction constructor@>=
+FFSTensor::FFSTensor(const FFSTensor& t, const ConstVector& x)
+	: FTensor(along_col, IntSequence(t.dimen()-1, t.nvar()),
+			  t.nrows(), calcMaxOffset(t.nvar(), t.dimen()-1), t.dimen()-1),
+	  nv(t.nvar())
+{
+	TL_RAISE_IF(t.dimen() < 1,
+				"Wrong dimension for tensor contraction of FFSTensor");
+	TL_RAISE_IF(t.nvar() != x.length(),
+				"Wrong number of variables for tensor contraction of FFSTensor");
+
+	zeros();
+
+	for (Tensor::index to = begin(); to != end(); ++to) {
+		for (int i = 0; i < nvar(); i++) {
+			IntSequence from_ind(i, to.getCoor());
+			Tensor::index from(&t, from_ind);
+			addColumn(x[i], t, *from, *to);
+		}
+	}
+}
+
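+@ As a standalone illustration of the contraction above (not part of
+these sources), the following computes
+$[g]_{\alpha}=\sum_\beta[t]_{\alpha\beta}[x]^\beta$ on a dense,
+unfolded single-row tensor stored column by column; the sizes and
+values are made up for the example.
+
+#include <cstdio>
+#include <vector>
+
+int main()
+{
+	const int nvar = 2;                    // two variables, t has dimension 2
+	double t[] = {1.0, 2.0, 3.0, 4.0};     // [t]_{00},[t]_{01},[t]_{10},[t]_{11}
+	double x[] = {10.0, 1.0};
+	std::vector<double> g(nvar, 0.0);      // result has dimension 1, nvar columns
+	for (int col = 0; col < nvar; col++)
+		for (int beta = 0; beta < nvar; beta++)
+			g[col] += t[col*nvar + beta] * x[beta];
+	std::printf("g = (%g, %g)\n", g[0], g[1]);  // prints g = (12, 34)
+	return 0;
+}
+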
+
+@ This returns the number of indices for a folded tensor with full
+symmetry. Let $n$ be the number of variables |nvar| and $d$ the
+dimension |dim|. Then the number of indices is $\pmatrix{n+d-1\cr d}$.
+  
+@<|FFSTensor::calcMaxOffset| code@>=
+int FFSTensor::calcMaxOffset(int nvar, int d)
+{
+	if (nvar == 0 && d == 0)
+		return 1;
+	if (nvar == 0 && d > 0)
+		return 0;
+	return noverk(nvar + d - 1, d);
+}
+
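+@ For instance, with |nvar| equal to 2 and |d| equal to 3 there are
+$\pmatrix{4\cr 3}=4$ folded indices: $(0,0,0)$, $(0,0,1)$, $(0,1,1)$ and
+$(1,1,1)$.
+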
+@ The conversion from the sparse tensor is clear. We go through the
+whole sparse tensor and write what we find into the dense tensor.
+@<|FFSTensor| conversion from sparse@>=
+FFSTensor::FFSTensor(const FSSparseTensor& t)
+	: FTensor(along_col, IntSequence(t.dimen(), t.nvar()),
+			  t.nrows(), calcMaxOffset(t.nvar(), t.dimen()), t.dimen()),
+	  nv(t.nvar())
+{
+	zeros();
+	for (FSSparseTensor::const_iterator it = t.getMap().begin();
+		 it != t.getMap().end(); ++it) {
+		index ind(this, (*it).first);
+		get((*it).second.first, *ind) = (*it).second.second;
+	}
+}
+
+
+@ The conversion from the unfolded tensor copies only the columns of the
+respective coordinates. So we go through all the columns in the folded
+tensor (this), make an index of the unfolded tensor from the
+coordinates, and copy the column.
+ 
+@<|FFSTensor| conversion from unfolded@>=
+FFSTensor::FFSTensor(const UFSTensor& ut)
+	: FTensor(along_col, IntSequence(ut.dimen(), ut.nvar()),
+			  ut.nrows(), calcMaxOffset(ut.nvar(), ut.dimen()), ut.dimen()),
+	  nv(ut.nvar())
+{
+	for (index in = begin(); in != end(); ++in) {
+		index src(&ut, in.getCoor());
+		copyColumn(ut, *src, *in);
+	}
+}
+
+@ Here just make a new instance and return the reference.
+@<|FFSTensor::unfold| code@>=
+UTensor& FFSTensor::unfold() const
+{
+	return *(new UFSTensor(*this));
+}
+
+@ Incrementing is easy. We have to increment by calling the static method
+|UTensor::increment| first. In this way, we obtain the coordinates of the
+unfolded tensor. Then we have to skip to the closest folded index,
+which corresponds to monotonizing the integer sequence.
+
+@<|FFSTensor::increment| code@>=
+void FFSTensor::increment(IntSequence& v) const
+{
+	TL_RAISE_IF(v.size() != dimen(),
+				"Wrong input/output vector size in FFSTensor::increment");
+
+	UTensor::increment(v, nv);
+	v.monotone();
+}
+
+@ Decrement calls static |FTensor::decrement|.
+
+@<|FFSTensor::decrement| code@>=
+void FFSTensor::decrement(IntSequence& v) const
+{
+	TL_RAISE_IF(v.size() != dimen(),
+				"Wrong input/output vector size in FFSTensor::decrement");
+
+	FTensor::decrement(v, nv);
+}
+
+@ 
+@<|FFSTensor::getOffset| code@>=
+int FFSTensor::getOffset(const IntSequence& v) const
+{
+	TL_RAISE_IF(v.size() != dimen(),
+				"Wrong input vector size in FFSTensor::getOffset");
+
+	return FTensor::getOffset(v, nv);
+}
+
+@ Here we add a general symmetry tensor to (a part of) the full symmetry
+tensor, provided that the unique variable of the full symmetry tensor
+is a stack of the variables of the general symmetry tensor.
+
+We check the dimensions and the number of variables. Then we calculate
+the shift of coordinates when going from the general symmetry tensor to
+full symmetry (it corresponds to the shift of coordinates induced by
+stacking the variables). Then we add the appropriate columns by going
+through the columns in general symmetry, adding the shift and sorting.
+
+@<|FFSTensor::addSubTensor| code@>=
+void FFSTensor::addSubTensor(const FGSTensor& t)
+{
+	TL_RAISE_IF(dimen() != t.getDims().dimen(),
+				"Wrong dimensions for FFSTensor::addSubTensor");
+	TL_RAISE_IF(nvar() != t.getDims().getNVS().sum(),
+				"Wrong nvs for FFSTensor::addSubTensor");
+
+	@<set shift for |addSubTensor|@>;
+	for (Tensor::index ind = t.begin(); ind != t.end(); ++ind) {
+		IntSequence c(ind.getCoor());
+		c.add(1, shift);
+		c.sort();
+		Tensor::index tar(this, c);
+		addColumn(t, *ind, *tar);
+	}
+}
+
+@ 
+@<set shift for |addSubTensor|@>=
+	IntSequence shift_pre(t.getSym().num(), 0);
+	for (int i = 1; i < t.getSym().num(); i++)
+		shift_pre[i] = shift_pre[i-1]+t.getDims().getNVS()[i-1];
+	IntSequence shift(t.getSym(), shift_pre);
+
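+@ For instance, for a $\left[g_{y^2u}\right]$ tensor with |getNVS()|
+equal to $(3,2)$ (three $y$'s stacked before two $u$'s), |shift_pre| is
+$(0,3)$; assuming the symmetry constructor of |IntSequence| expands this
+per coordinate to $(0,0,3)$, the column with coordinates $(1,2,0)$ is
+added to the column of $\left[g_{x^3}\right]$ with sorted coordinates
+$(1,2,3)$.
+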
+@ This is a bit more straightforward than |@<|FFSTensor| contraction constructor@>|.
+We do not add column by column but we do it by submatrices due to
+regularity of the unfolded tensor.
+ 
+@<|UFSTensor| contraction constructor@>=
+UFSTensor::UFSTensor(const UFSTensor& t, const ConstVector& x)
+	: UTensor(along_col, IntSequence(t.dimen()-1, t.nvar()),
+			  t.nrows(), calcMaxOffset(t.nvar(), t.dimen()-1), t.dimen()-1),
+	  nv(t.nvar())
+{
+	TL_RAISE_IF(t.dimen() < 1,
+				"Wrong dimension for tensor contraction of UFSTensor");
+	TL_RAISE_IF(t.nvar() != x.length(),
+				"Wrong number of variables for tensor contraction of UFSTensor");
+
+	zeros();
+
+	for (int i = 0; i < ncols(); i++) {
+		ConstTwoDMatrix tpart(t, i*nvar(), nvar());
+		Vector outcol(*this, i);
+		tpart.multaVec(outcol, x);
+	}
+}
+
+@ Here we convert folded full symmetry tensor to unfolded. We copy all
+columns of folded tensor, and then call |unfoldData()|.
+
+@<|UFSTensor| conversion from folded@>=
+UFSTensor::UFSTensor(const FFSTensor& ft)
+	: UTensor(along_col, IntSequence(ft.dimen(), ft.nvar()),
+			  ft.nrows(), calcMaxOffset(ft.nvar(), ft.dimen()), ft.dimen()),
+	  nv(ft.nvar())
+{
+	for (index src = ft.begin(); src != ft.end(); ++src) {
+		index in(this, src.getCoor());
+		copyColumn(ft, *src, *in);
+	}
+	unfoldData();
+}
+
+@ Here we just return a reference to new instance of folded tensor.
+@<|UFSTensor::fold| code@>=
+FTensor& UFSTensor::fold() const
+{
+	return *(new FFSTensor(*this));
+}
+
+@ Here we just call |UTensor| respective static methods.
+@<|UFSTensor| increment and decrement@>=
+void UFSTensor::increment(IntSequence& v) const
+{
+	TL_RAISE_IF(v.size() != dimen(),
+				"Wrong input/output vector size in UFSTensor::increment");
+
+	UTensor::increment(v, nv);
+}
+
+void UFSTensor::decrement(IntSequence& v) const
+{
+	TL_RAISE_IF(v.size() != dimen(),
+				"Wrong input/output vector size in UFSTensor::decrement");
+
+	UTensor::decrement(v, nv);
+}
+
+@ 
+@<|UFSTensor::getOffset| code@>=
+int UFSTensor::getOffset(const IntSequence& v) const
+{
+	TL_RAISE_IF(v.size() != dimen(),
+				"Wrong input vector size in UFSTensor::getOffset");
+
+	return UTensor::getOffset(v, nv);
+}
+
+@ This is very similar to |@<|FFSTensor::addSubTensor| code@>|. The
+only difference is the addition. We go through all columns in the full
+symmetry tensor and cancel the shift. If the coordinates after the
+cancellation are positive, we find the column in the general symmetry
+tensor, and add it.
+
+@<|UFSTensor::addSubTensor| code@>=
+void UFSTensor::addSubTensor(const UGSTensor& t)
+{
+	TL_RAISE_IF(dimen() != t.getDims().dimen(),
+				"Wrong dimensions for UFSTensor::addSubTensor");
+	TL_RAISE_IF(nvar() != t.getDims().getNVS().sum(),
+				"Wrong nvs for UFSTensor::addSubTensor");
+
+	@<set shift for |addSubTensor|@>;
+	for (Tensor::index tar = begin(); tar != end(); ++tar) {
+		IntSequence c(tar.getCoor());
+		c.sort();
+		c.add(-1, shift);
+		if (c.isPositive() && c.less(t.getDims().getNVX())) {
+			Tensor::index from(&t, c);
+			addColumn(t, *from, *tar);
+		}
+	}
+}
+
+
+@ Here we go through all columns, find a column of folded index, and
+then copy the column data. Finding the index is done by sorting the
+integer sequence.
+
+@<|UFSTensor::unfoldData| code@>=
+void UFSTensor::unfoldData()
+{
+	for (index in = begin(); in != end(); ++in) {
+		IntSequence v(in.getCoor());
+		v.sort();
+		index tmp(this, v);
+		copyColumn(*tmp, *in);
+	}
+}
+
+
+@ End of {\tt fs\_tensor.cpp} file.
\ No newline at end of file
diff --git a/dynare++/tl/cc/fs_tensor.hweb b/dynare++/tl/cc/fs_tensor.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..134d0b06621d627e404c59751ce5e8cd9639a39e
--- /dev/null
+++ b/dynare++/tl/cc/fs_tensor.hweb
@@ -0,0 +1,129 @@
+@q $Id: fs_tensor.hweb 741 2006-05-09 11:12:46Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@*2 Full symmetry tensor. Start of {\tt fs\_tensor.h} file.
+
+Here we define folded and unfolded tensors for full symmetry. All
+tensors defined here identify the multidimensional index with
+columns.
+
+@c
+#ifndef FS_TENSOR_H
+#define FS_TENSOR_H
+
+#include "tensor.h"
+#include "symmetry.h"
+
+class FGSTensor;
+class UGSTensor;
+class FRSingleTensor;
+class FSSparseTensor;
+@<|FFSTensor| class declaration@>;
+@<|UFSTensor| class declaration@>;
+
+#endif
+
+
+@ A folded tensor with full symmetry maintains only the information about
+the number of symmetric variables |nv|. Further, we implement what is
+left unimplemented in the superclass |FTensor|.
+
+We implement |getOffset|, which should be used with care because of
+its complexity.
+
+We implement a method adding a given general symmetry tensor to the
+full symmetry tensor supposing the variables of the general symmetry
+tensor are stacked giving only one variable of the full symmetry
+tensor. For instance, if $x=[y^T, u^T]^T$, then we can add tensor
+$\left[g_{y^2u}\right]$ to tensor $g_{x^3}$. This is done in method
+|addSubTensor|. Consult |@<|FGSTensor| class declaration@>| to know
+what is general symmetry tensor.
+
+@<|FFSTensor| class declaration@>=
+class UFSTensor;
+class FFSTensor : public FTensor {
+	int nv;
+public:@;
+    @<|FFSTensor| constructor declaration@>;
+
+	void increment(IntSequence& v) const;
+	void decrement(IntSequence& v) const;
+	UTensor& unfold() const;
+	Symmetry getSym() const
+		{@+ return Symmetry(dimen());@+}
+
+	int getOffset(const IntSequence& v) const;
+	void addSubTensor(const FGSTensor& t);
+	int nvar() const
+		{@+return nv;@+}
+	static int calcMaxOffset(int nvar, int d);
+};
+
+@ Here are the constructors. The second constructor constructs a
+tensor by one-dimensional contraction from the higher dimensional
+tensor |t|. That is, it constructs a tensor
+$$\left[g_{y^n}\right]_{\alpha_1\ldots\alpha_n}=
+\left[t_{y^{n+1}}\right]_{\alpha_1\ldots\alpha_n\beta}[x]^\beta$$
+See implementation |@<|FFSTensor| contraction constructor@>| for details.
+
+The next constructor converts from the sparse tensor (which is fully
+symmetric and folded by nature).
+
+The fourth is the copy constructor, and the fifth constructs the object
+from an unfolded fully symmetric tensor.
+
+The sixth constructs a subtensor of selected rows.
+
+@<|FFSTensor| constructor declaration@>=
+	FFSTensor(int r, int nvar, int d)
+		: FTensor(along_col, IntSequence(d, nvar),
+				  r, calcMaxOffset(nvar, d), d), nv(nvar)@+ {}
+	FFSTensor(const FFSTensor& t, const ConstVector& x);
+	FFSTensor(const FSSparseTensor& t);
+	FFSTensor(const FFSTensor& ft)
+		: FTensor(ft), nv(ft.nv)@+ {}
+	FFSTensor(const UFSTensor& ut);
+	FFSTensor(int first_row, int num, FFSTensor& t)
+		: FTensor(first_row, num, t), nv(t.nv)@+ {}
+
+
+@ The unfolded fully symmetric tensor is almost the same in structure as
+|FFSTensor|, except for the method |unfoldData|. It takes the columns
+which also exist in the folded version and copies them to all their
+symmetric locations. This is useful when constructing the unfolded
+tensor from the folded one.
+
+@<|UFSTensor| class declaration@>=
+class UFSTensor : public UTensor {
+	int nv;
+public:@;
+	@<|UFSTensor| constructor declaration@>;
+
+	void increment(IntSequence& v) const;
+	void decrement(IntSequence& v) const;
+	FTensor& fold() const;
+	Symmetry getSym() const
+		{@+ return Symmetry(dimen());@+}
+
+	int getOffset(const IntSequence& v) const;
+	void addSubTensor(const UGSTensor& t);
+	int nvar() const
+		{@+ return nv;@+}
+	static int calcMaxOffset(int nvar, int d)
+		{@+ return power(nvar, d);@+}
+private:@;
+	void unfoldData();
+};
+
+@ 
+@<|UFSTensor| constructor declaration@>=
+	UFSTensor(int r, int nvar, int d)
+		: UTensor(along_col, IntSequence(d, nvar),
+				  r, calcMaxOffset(nvar, d), d), nv(nvar)@+ {}
+	UFSTensor(const UFSTensor& t, const ConstVector& x);
+	UFSTensor(const UFSTensor& ut)
+		: UTensor(ut), nv(ut.nv)@+ {}
+	UFSTensor(const FFSTensor& ft);
+	UFSTensor(int first_row, int num, UFSTensor& t)
+		: UTensor(first_row, num, t), nv(t.nv)@+ {}
+
+@ End of {\tt fs\_tensor.h} file.
diff --git a/dynare++/tl/cc/gs_tensor.cweb b/dynare++/tl/cc/gs_tensor.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..13e1a2a74997ab05babe5006442625bd41d28d9b
--- /dev/null
+++ b/dynare++/tl/cc/gs_tensor.cweb
@@ -0,0 +1,501 @@
+@q $Id: gs_tensor.cweb 425 2005-08-16 15:18:01Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@ Start of {\tt gs\_tensor.cpp} file.
+
+@c
+#include "gs_tensor.h"
+#include "sparse_tensor.h"
+#include "tl_exception.h"
+#include "kron_prod.h"
+
+@<|TensorDimens| constructor code@>;
+@<|TensorDimens::calcUnfoldMaxOffset| code@>;
+@<|TensorDimens::calcFoldMaxOffset| code@>;
+@<|TensorDimens::calcFoldOffset| code@>;
+@<|TensorDimens::decrement| code@>;
+@<|FGSTensor| conversion from |UGSTensor|@>;
+@<|FGSTensor| slicing from |FSSparseTensor|@>;
+@<|FGSTensor| slicing from |FFSTensor|@>;
+@<|FGSTensor| conversion from |GSSparseTensor|@>;
+@<|FGSTensor::increment| code@>;
+@<|FGSTensor::unfold| code@>;
+@<|FGSTensor::contractAndAdd| code@>;
+@<|UGSTensor| conversion from |FGSTensor|@>;
+@<|UGSTensor| slicing from |FSSparseTensor|@>;
+@<|UGSTensor| slicing from |UFSTensor|@>;
+@<|UGSTensor| increment and decrement codes@>;
+@<|UGSTensor::fold| code@>;
+@<|UGSTensor::getOffset| code@>;
+@<|UGSTensor::unfoldData| code@>;
+@<|UGSTensor::getFirstIndexOf| code@>;
+@<|UGSTensor::contractAndAdd| code@>;
+
+@ This constructs the tensor dimensions for slicing. See
+|@<|TensorDimens| class declaration@>| for details.
+@<|TensorDimens| constructor code@>=
+TensorDimens::TensorDimens(const IntSequence& ss, const IntSequence& coor)
+	: nvs(ss),
+	  sym(ss.size(), ""),
+	  nvmax(coor.size(), 0)
+{
+	TL_RAISE_IF(! coor.isSorted(),
+				"Coordinates not sorted in TensorDimens slicing constructor");
+	TL_RAISE_IF(coor[0] < 0 || coor[coor.size()-1] >= ss.size(),
+				"A coordinate out of stack range in TensorDimens slicing constructor");
+
+	for (int i = 0; i < coor.size(); i++) {
+		sym[coor[i]]++;
+		nvmax[i] = ss[coor[i]];
+	}
+}
+
+
+@ The number of unfolded offsets is the product of all members of |nvmax|.
+@<|TensorDimens::calcUnfoldMaxOffset| code@>=
+int TensorDimens::calcUnfoldMaxOffset() const
+{
+	return nvmax.mult();
+}
+
+@ The number of folded offsets is the product of the numbers of folded
+offsets within the individual classes of the symmetry.
+
+@<|TensorDimens::calcFoldMaxOffset| code@>=
+int TensorDimens::calcFoldMaxOffset() const
+{
+	int res = 1;
+	for (int i = 0; i < nvs.size(); i++) {
+		if (nvs[i] == 0 && sym[i] > 0)
+			return 0;
+		if (sym[i] > 0)
+			res *= Tensor::noverk(nvs[i]+sym[i]-1, sym[i]);
+	}
+	return res;
+}
+
+@ Here we implement the offset calculation for the folded general symmetry
+tensor. The offset of a given sequence is calculated by breaking the
+sequence into subsequences according to the symmetry. The offset is
+orthogonal with respect to the blocks; this means that indexing within
+the blocks is independent. If there are two blocks, for instance, then
+the offset will be the offset within the outer block (the first)
+multiplied by the number of all offsets of the inner block (the last)
+plus the offset within the second block.
+
+Generally, the resulting offset $r$ will be
+$$\sum_{i=1}^s r_i\cdot\left(\prod_{j=i+1}^sn_j\right),$$
+where $s$ is a number of blocks (|getSym().num()|), $r_i$ is an offset
+within $i$-th block, and $n_j$ is a number of all offsets in $j$-th
+block.
+
+In the code, we go from the innermost to the outermost, maintaining the
+product in |pow|.
+
+@<|TensorDimens::calcFoldOffset| code@>=
+int TensorDimens::calcFoldOffset(const IntSequence& v) const
+{
+	TL_RAISE_IF(v.size() != dimen(),
+				"Wrong input vector size in TensorDimens::getFoldOffset");
+
+	int res = 0;
+	int pow = 1;
+	int blstart = v.size();
+	for (int ibl = getSym().num()-1; ibl >= 0; ibl--) {
+		int bldim = getSym()[ibl];
+		if (bldim > 0) {
+			blstart -= bldim;
+			int blnvar = getNVX()[blstart];
+			IntSequence subv(v, blstart, blstart+bldim);
+			res += FTensor::getOffset(subv, blnvar)*pow;
+			pow *= FFSTensor::calcMaxOffset(blnvar, bldim);
+		}
+	}
+	TL_RAISE_IF(blstart != 0,
+				"Error in tracing symmetry in TensorDimens::getFoldOffset");
+	return res;
+}
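+
+@ A tiny standalone illustration of the composition formula above (the
+within-block offsets and block sizes below are hypothetical, not computed by
+the library): with two blocks whose within-block offsets are $r_1=3$, $r_2=7$
+and whose numbers of offsets are $n_1=55$, $n_2=35$, the composite offset is
+$3\cdot35+7=112$. The loop mirrors the code: from the innermost block to the
+outermost, maintaining the product in |pow|.
+
+	#include <cstdio>
+
+	int main()
+	{
+		int r[] = {3, 7};    // hypothetical within-block folded offsets
+		int n[] = {55, 35};  // hypothetical numbers of offsets per block
+		int res = 0, pow = 1;
+		for (int ibl = 1; ibl >= 0; ibl--) {
+			res += r[ibl]*pow;
+			pow *= n[ibl];
+		}
+		std::printf("%d\n", res);  // prints 112
+		return 0;
+	}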
+
+@ In order to find the predecessor of an index within a folded generally
+symmetric tensor, note that a decrease action in the $i$-th partition of
+symmetric indices can happen only if all indices in all subsequent
+partitions are zero. The decrease action of the whole index then
+consists of a decrease action in the first nonzero partition from the
+right, and setting the trailing zero partitions to their maximum
+indices.
+
+So we set |iblock| to the index of the last partition. During the
+execution, |block_first| and |block_last| will point to the first
+element of |iblock| and to the first element of the following block,
+respectively.
+
+Then we check for all trailing zero partitions, set them to their
+maximums, and move |iblock| back to the first non-zero partition
+(or the first partition). Then, for this partition, we decrease the
+index (fully symmetrically within that partition).
+  
+@<|TensorDimens::decrement| code@>=
+void TensorDimens::decrement(IntSequence& v) const
+{
+	TL_RAISE_IF(getNVX().size() != v.size(),
+				"Wrong size of input/output sequence in TensorDimens::decrement");
+
+	int iblock = getSym().num()-1;
+	int block_last = v.size();
+	int block_first = block_last-getSym()[iblock];
+	@<check for zero trailing blocks@>;
+	@<decrease the non-zero block@>;
+}
+
+@ 
+@<check for zero trailing blocks@>=
+	while (iblock > 0 && v[block_last-1] == 0) {
+		for (int i = block_first; i < block_last; i++)
+			v[i] = getNVX(i); // equivalent to |nvs[iblock]|
+		iblock--;
+		block_last = block_first;
+		block_first -= getSym()[iblock];
+	}
+
+@ 
+@<decrease the non-zero block@>=
+	IntSequence vtmp(v, block_first, block_last);
+	FTensor::decrement(vtmp, getNVX(block_first));
+
+
+
+@ Here we go through the columns of the folded tensor, calculate the
+corresponding column of the unfolded tensor, and copy the data.
+
+@<|FGSTensor| conversion from |UGSTensor|@>=
+FGSTensor::FGSTensor(const UGSTensor& ut)
+	: FTensor(along_col, ut.tdims.getNVX(), ut.nrows(),
+			  ut.tdims.calcFoldMaxOffset(), ut.dimen()),
+	  tdims(ut.tdims)
+{
+	for (index ti = begin(); ti != end(); ++ti) {
+		index ui(&ut, ti.getCoor());
+		copyColumn(ut, *ui, *ti);
+	}
+}
+
+@ Here is the code of the slicing constructor from the sparse tensor. We
+first calculate the coordinates of the first and last index of the slice
+within the sparse tensor (these are |lb| and |ub|), and then we
+iterate through all items between them (in the lexicographical ordering of
+the sparse tensor) and check whether an item is between |lb| and |ub|
+in the Cartesian ordering (this corresponds to belonging to the
+slice). If it belongs, then we subtract the lower bound |lb| to
+obtain its coordinates in the |this| tensor and we copy the item.
+
+@<|FGSTensor| slicing from |FSSparseTensor|@>=
+FGSTensor::FGSTensor(const FSSparseTensor& t, const IntSequence& ss,
+					 const IntSequence& coor, const TensorDimens& td)
+	: FTensor(along_col, td.getNVX(), t.nrows(),
+			  td.calcFoldMaxOffset(), td.dimen()),
+	  tdims(td)
+{
+	@<set |lb| and |ub| to lower and upper bounds of indices@>;
+
+	zeros();
+	FSSparseTensor::const_iterator lbi = t.getMap().lower_bound(lb);
+	FSSparseTensor::const_iterator ubi = t.getMap().upper_bound(ub);
+	for (FSSparseTensor::const_iterator run = lbi; run != ubi; ++run) {
+		if (lb.lessEq((*run).first) && (*run).first.lessEq(ub)) {
+			IntSequence c((*run).first);
+			c.add(-1, lb);
+			Tensor::index ind(this, c);
+			TL_RAISE_IF(*ind < 0 || *ind >= ncols(),
+						"Internal error in slicing constructor of FGSTensor");
+			get((*run).second.first, *ind) = (*run).second.second;
+		}
+	}
+}
+
+@ Here we first set |s_offsets| to the offsets of the partitions whose lengths
+are given by |ss|. So |s_offsets| is a cumulative sum of |ss|.
+
+Then we create |lb| to be the coordinates of the first possible index of
+the slice, and |ub| to be the coordinates of the last possible index of the
+slice.
+
+@<set |lb| and |ub| to lower and upper bounds of indices@>=
+	IntSequence s_offsets(ss.size(), 0);
+	for (int i = 1; i < ss.size(); i++)
+		s_offsets[i] = s_offsets[i-1] + ss[i-1];
+
+	IntSequence lb(coor.size());
+	IntSequence ub(coor.size());
+	for (int i = 0; i < coor.size(); i++) {
+		lb[i] = s_offsets[coor[i]];
+		ub[i] = s_offsets[coor[i]] + ss[coor[i]] - 1;
+	}
+
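+@ A standalone sketch of the bound computation above, on hypothetical
+numbers (not library code): with partition lengths $(3,2,4,5)$ and picked
+coordinates $(1,1,3,3,3)$, the cumulative offsets are $(0,3,5,9)$, so
+|lb|$=(3,3,9,9,9)$ and |ub|$=(4,4,13,13,13)$.
+
+	#include <cstdio>
+	#include <vector>
+
+	int main()
+	{
+		std::vector<int> ss = {3, 2, 4, 5};       // hypothetical partition lengths
+		std::vector<int> coor = {1, 1, 3, 3, 3};  // hypothetical picked partitions
+		std::vector<int> s_offsets(ss.size(), 0);
+		for (size_t i = 1; i < ss.size(); i++)
+			s_offsets[i] = s_offsets[i-1] + ss[i-1];
+		std::vector<int> lb(coor.size()), ub(coor.size());
+		for (size_t i = 0; i < coor.size(); i++) {
+			lb[i] = s_offsets[coor[i]];
+			ub[i] = s_offsets[coor[i]] + ss[coor[i]] - 1;
+		}
+		for (size_t i = 0; i < coor.size(); i++)
+			std::printf("(%d,%d) ", lb[i], ub[i]);  // (3,4) (3,4) (9,13) (9,13) (9,13)
+		std::printf("\n");
+		return 0;
+	}
+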
+
+@ The code is similar to |@<|FGSTensor| slicing from |FSSparseTensor|@>|.
+@<|FGSTensor| slicing from |FFSTensor|@>=
+FGSTensor::FGSTensor(const FFSTensor& t, const IntSequence& ss,
+					 const IntSequence& coor, const TensorDimens& td)
+	: FTensor(along_col, td.getNVX(), t.nrows(),
+			  td.calcFoldMaxOffset(), td.dimen()),
+	  tdims(td)
+{
+	if (ncols() == 0)
+		return;
+
+	@<set |lb| and |ub| to lower and upper bounds of indices@>;
+
+	zeros();
+	Tensor::index lbi(&t, lb);
+	Tensor::index ubi(&t, ub);
+	++ubi;
+	for (Tensor::index run = lbi; run != ubi; ++run) {
+		if (lb.lessEq(run.getCoor()) && run.getCoor().lessEq(ub)) {
+			IntSequence c(run.getCoor());
+			c.add(-1, lb);
+			Tensor::index ind(this, c);
+			TL_RAISE_IF(*ind < 0 || *ind >= ncols(),
+						"Internal error in slicing constructor of FGSTensor");
+			copyColumn(t, *run, *ind);
+		}
+	}
+}
+
+@ 
+@<|FGSTensor| conversion from |GSSparseTensor|@>=
+FGSTensor::FGSTensor(const GSSparseTensor& t)
+	: FTensor(along_col, t.getDims().getNVX(), t.nrows(),
+			  t.getDims().calcFoldMaxOffset(), t.dimen()), tdims(t.getDims())
+{
+	zeros();
+	for (FSSparseTensor::const_iterator it = t.getMap().begin();
+		 it != t.getMap().end(); ++it) {
+		index ind(this, (*it).first);
+		get((*it).second.first, *ind) = (*it).second.second;
+	}
+}
+
+@ First we increment the index as if it were unfolded; then we must
+monotonize it within the partitions defined by the symmetry. This is done by
+|IntSequence::pmonotone|.
+
+@<|FGSTensor::increment| code@>=
+void FGSTensor::increment(IntSequence& v) const
+{
+	TL_RAISE_IF(v.size() != dimen(),
+				"Wrong input/output vector size in FGSTensor::increment");
+
+	UTensor::increment(v, tdims.getNVX());
+	v.pmonotone(tdims.getSym());
+}
+
+
+
+
+@ Return unfolded version of the tensor.
+@<|FGSTensor::unfold| code@>=
+UTensor& FGSTensor::unfold() const
+{
+	return *(new UGSTensor(*this));
+}
+
+
+@ Here we implement the contraction
+$$\left[r_{x^iz^k}\right]_{\alpha_1\ldots\alpha_i\gamma_1\ldots\gamma_k}=
+\left[t_{x^iy^jz^k}\right]_{\alpha_1\ldots\alpha_i\beta_1\ldots\beta_j\gamma_1\ldots\gamma_k}
+\left[c\right]^{\beta_1\ldots\beta_j}
+$$
+More generally, $x^i$ and $z^k$ can also represent general symmetries.
+
+The operation can be rewritten as a matrix product
+$$\left[t_{x^iy^jz^k}\right]\cdot\left(I_l\otimes c\otimes I_r\right)$$
+where $l$ is the number of columns of the tensor with the symmetry on the left
+(i.e. $x^i$), and $r$ is the number of columns of the tensor with the symmetry
+on the right (i.e. $z^k$). The code proceeds accordingly: we first
+form the two symmetries |sym_left| and |sym_right|, then calculate the
+numbers of columns |dleft|$=l$ and |dright|$=r$, form the Kronecker
+product, and multiply and add.
+
+The input parameter |i| is the index of the variable being contracted,
+counting from 0.
+
+@<|FGSTensor::contractAndAdd| code@>=
+void FGSTensor::contractAndAdd(int i, FGSTensor& out,
+							   const FRSingleTensor& col) const
+{
+	TL_RAISE_IF(i < 0 || i >= getSym().num(),
+				"Wrong index for FGSTensor::contractAndAdd");
+
+	TL_RAISE_IF(getSym()[i] != col.dimen() || tdims.getNVS()[i] != col.nvar(),
+				"Wrong dimensions for FGSTensor::contractAndAdd");
+
+	@<set |sym_left| and |sym_right| to symmetries around |i|@>;
+	int dleft = TensorDimens(sym_left, tdims.getNVS()).calcFoldMaxOffset();
+	int dright = TensorDimens(sym_right, tdims.getNVS()).calcFoldMaxOffset();
+	KronProdAll kp(3);
+	kp.setUnit(0, dleft);
+	kp.setMat(1, col);
+	kp.setUnit(2, dright);
+	FGSTensor tmp(out.nrows(), out.getDims());
+	kp.mult(*this, tmp);
+	out.add(1.0, tmp);
+}
+
+@ Here we have the symmetry of |this| tensor and we have to set
+|sym_left| to the subsymmetry to the left of the |i|-th variable and
+|sym_right| to the subsymmetry to the right of the |i|-th variable. So we
+first copy the whole symmetry and then put zeros to the left (up to and
+including |i|) for |sym_right| and to the right (from |i| on) for |sym_left|.
+For example, for the symmetry $(2,3,1)$ and $i=1$, we get |sym_left|$=(2,0,0)$
+and |sym_right|$=(0,0,1)$.
+
+@<set |sym_left| and |sym_right| to symmetries around |i|@>=
+	Symmetry sym_left(getSym());
+	Symmetry sym_right(getSym());
+	for (int j = 0; j < getSym().num(); j++) {
+		if (j <= i)
+			sym_right[j] = 0;
+		if (j >= i)
+			sym_left[j] = 0;
+	}
+
+
+@ Here we go through the folded tensor, convert each index to an index
+of the unfolded tensor, and copy the data to the unfolded tensor. Then we
+unfold the data within the unfolded tensor.
+
+@<|UGSTensor| conversion from |FGSTensor|@>=
+UGSTensor::UGSTensor(const FGSTensor& ft)
+	: UTensor(along_col, ft.tdims.getNVX(), ft.nrows(),
+			  ft.tdims.calcUnfoldMaxOffset(), ft.dimen()),
+	  tdims(ft.tdims)
+{
+	for (index fi = ft.begin(); fi != ft.end(); ++fi) {
+		index ui(this, fi.getCoor());
+		copyColumn(ft, *fi, *ui);
+	}
+	unfoldData();
+}
+
+@ This makes a folded slice from the sparse tensor and unfolds it.
+@<|UGSTensor| slicing from |FSSparseTensor|@>=
+UGSTensor::UGSTensor(const FSSparseTensor& t, const IntSequence& ss,
+					 const IntSequence& coor, const TensorDimens& td)
+	: UTensor(along_col, td.getNVX(), t.nrows(),
+			  td.calcUnfoldMaxOffset(), td.dimen()),
+	  tdims(td)
+{
+	if (ncols() == 0)
+		return;
+
+	FGSTensor ft(t, ss, coor, td);
+	for (index fi = ft.begin(); fi != ft.end(); ++fi) {
+		index ui(this, fi.getCoor());
+		copyColumn(ft, *fi, *ui);
+	}
+	unfoldData();
+}
+
+@ This makes a folded slice from the dense tensor and unfolds it.
+@<|UGSTensor| slicing from |UFSTensor|@>=
+UGSTensor::UGSTensor(const UFSTensor& t, const IntSequence& ss,
+					 const IntSequence& coor, const TensorDimens& td)
+	: UTensor(along_col, td.getNVX(), t.nrows(),
+			  td.calcUnfoldMaxOffset(), td.dimen()),
+	  tdims(td)
+{
+	FFSTensor folded(t);
+	FGSTensor ft(folded, ss, coor, td);
+	for (index fi = ft.begin(); fi != ft.end(); ++fi) {
+		index ui(this, fi.getCoor());
+		copyColumn(ft, *fi, *ui);
+	}
+	unfoldData();
+}
+
+
+@ These are clear; we just call the |UTensor| static methods.
+@<|UGSTensor| increment and decrement codes@>=
+void UGSTensor::increment(IntSequence& v) const
+{
+	TL_RAISE_IF(v.size() != dimen(),
+				"Wrong input/output vector size in UGSTensor::increment");
+
+	UTensor::increment(v, tdims.getNVX());
+}
+
+void UGSTensor::decrement(IntSequence& v) const
+{
+	TL_RAISE_IF(v.size() != dimen(),
+				"Wrong input/output vector size in UGSTensor::decrement");
+
+	UTensor::decrement(v, tdims.getNVX());
+}
+
+
+@ Return a new instance of folded version.
+@<|UGSTensor::fold| code@>=
+FTensor& UGSTensor::fold() const
+{
+	return *(new FGSTensor(*this));
+}
+
+@ Return an offset of a given index.
+@<|UGSTensor::getOffset| code@>=
+int UGSTensor::getOffset(const IntSequence& v) const
+{
+	TL_RAISE_IF(v.size() != dimen(),
+				"Wrong input vector size in UGSTensor::getOffset");
+
+	return UTensor::getOffset(v, tdims.getNVX());
+}
+
+@ Unfold all data. We go through all the columns, and for each we
+obtain the index of the first equivalent column and copy the data.
+
+@<|UGSTensor::unfoldData| code@>=
+void UGSTensor::unfoldData()
+{
+	for (index in = begin(); in != end(); ++in)
+		copyColumn(*(getFirstIndexOf(in)), *in);
+}
+
+@ Here we return the first index which is equivalent in the symmetry
+to the given index. It is a matter of sorting all the symmetry
+partitions of the index.
+
+@<|UGSTensor::getFirstIndexOf| code@>=
+Tensor::index UGSTensor::getFirstIndexOf(const index& in) const
+{
+	IntSequence v(in.getCoor());
+	int last = 0;
+	for (int i = 0; i < tdims.getSym().num(); i++) {
+		IntSequence vtmp(v, last, last+tdims.getSym()[i]);
+		vtmp.sort();
+		last += tdims.getSym()[i];
+	}
+	return index(this, v);
+}
+
+@ This is exactly the same code, with the same semantics, as in
+|@<|FGSTensor::contractAndAdd| code@>|.
+
+@<|UGSTensor::contractAndAdd| code@>=
+void UGSTensor::contractAndAdd(int i, UGSTensor& out,
+							   const URSingleTensor& col) const
+{
+	TL_RAISE_IF(i < 0 || i >= getSym().num(),
+				"Wrong index for UGSTensor::contractAndAdd");
+	TL_RAISE_IF(getSym()[i] != col.dimen() || tdims.getNVS()[i] != col.nvar(),
+				"Wrong dimensions for UGSTensor::contractAndAdd");
+
+	@<set |sym_left| and |sym_right| to symmetries around |i|@>;
+	int dleft = TensorDimens(sym_left, tdims.getNVS()).calcUnfoldMaxOffset();
+	int dright = TensorDimens(sym_right, tdims.getNVS()).calcUnfoldMaxOffset();
+	KronProdAll kp(3);
+	kp.setUnit(0, dleft);
+	kp.setMat(1, col);
+	kp.setUnit(2, dright);
+	UGSTensor tmp(out.nrows(), out.getDims());
+	kp.mult(*this, tmp);
+	out.add(1.0, tmp);
+}
+
+@ End of {\tt gs\_tensor.cpp} file.
diff --git a/dynare++/tl/cc/gs_tensor.hweb b/dynare++/tl/cc/gs_tensor.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..cdc6734f9c5d16bab506303574008e62b7ca2ea1
--- /dev/null
+++ b/dynare++/tl/cc/gs_tensor.hweb
@@ -0,0 +1,222 @@
+@q $Id: gs_tensor.hweb 741 2006-05-09 11:12:46Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@*2 General symmetry tensor. Start of {\tt gs\_tensor.h} file.
+
+Here we define tensors for general symmetry. All tensors defined here
+identify the multidimensional index with columns, so all
+symmetries refer to columns. The general symmetry here is not the most
+general one: it captures all symmetries of indices which are given by a
+continuous partitioning of the indices. Two items are symmetric if they
+belong to the same group. The continuity implies that if two items
+belong to one group, then all items between them belong to that
+group. This continuous partitioning of indices is described by the
+|Symmetry| class.
+
+The dimensions of the tensors here are described (besides the symmetry)
+also by the number of variables for each group. This is handled by the class
+for tensor dimensions, also defined here.
+
+@c
+#ifndef GS_TENSOR_H
+#define GS_TENSOR_H
+
+#include "tensor.h"
+#include "fs_tensor.h"
+#include "symmetry.h"
+#include "rfs_tensor.h"
+
+class FGSTensor;
+class UGSTensor;
+class FSSparseTensor;
+
+@<|TensorDimens| class declaration@>;
+@<|FGSTensor| class declaration@>;
+@<|UGSTensor| class declaration@>;
+
+#endif
+
+@ This class encapsulates symmetry information for the general
+symmetry tensor. It maintains a vector of variable numbers |nvs| and a
+symmetry |sym|. For example, let the symmetry be $y^2u^3$, and the
+variable numbers be 10 for $y$ and 5 for $u$. Then |nvs| is
+$(10,5)$, and |sym| is $(2,3)$. It also maintains |nvmax|, the |nvs| unfolded
+with respect to the symmetry; here this is $(10,10,5,5,5)$.
+
+The constructors of |TensorDimens| are clear and pretty intuitive, except for
+the constructor which is used for slicing a fully symmetric tensor. It
+constructs the dimensions from a partitioning of the variables of a fully
+symmetric tensor. Let the partitioning be, for instance, $(a,b,c,d)$,
+where $(n_a,n_b,n_c,n_d)$ are the lengths of the partitions. Suppose one wants
+to get a slice only of the part of the fully symmetric tensor
+corresponding to indices of the form $b^2d^3$. This corresponds to the
+symmetry $a^0b^2c^0d^3$. So the dimension of the slice would also be
+$(n_a,n_b,n_c,n_d)$ for the numbers of variables and $(0,2,0,3)$ for the
+symmetry. So we provide the constructor which takes the sizes of the
+partitions $(n_a,n_b,n_c,n_d)$ as an |IntSequence|, and the indices of the
+picked partitions, in our case $(1,1,3,3,3)$, as an |IntSequence|.
+
+The class is able to calculate the number of offsets (columns or rows,
+depending on what matrix coordinate we describe) in unfolded and folded
+tensors with the given symmetry.
+
+@s TensorDimens int
+
+@<|TensorDimens| class declaration@>=
+class TensorDimens {
+protected:@;
+	IntSequence nvs;
+	Symmetry sym;
+	IntSequence nvmax;
+public:@;
+	TensorDimens(const Symmetry& s, const IntSequence& nvars)
+		: nvs(nvars), sym(s), nvmax(sym, nvs)@+ {}
+	TensorDimens(int nvar, int dimen)
+		: nvs(1), sym(dimen), nvmax(dimen, nvar)
+		{@+ nvs[0] = nvar;@+}
+	TensorDimens(const TensorDimens& td)
+		: nvs(td.nvs), sym(td.sym), nvmax(td.nvmax)@+ {}
+	virtual ~TensorDimens()@+ {}
+	TensorDimens(const IntSequence& ss, const IntSequence& coor);
+	const TensorDimens& operator=(const TensorDimens& td)
+		{@+ nvs = td.nvs;@+ sym = td.sym;@+ nvmax = td.nvmax;@+ return *this;@+}
+	bool operator==(const TensorDimens& td) const
+		{@+ return nvs == td.nvs && sym == td.sym;@+}
+	bool operator!=(const TensorDimens& td) const
+		{@+ return !operator==(td);@+}
+
+	int dimen() const
+		{@+ return sym.dimen();@+}
+	int getNVX(int i) const
+		{@+ return nvmax[i];@+}
+	const IntSequence& getNVS() const
+		{ @+ return nvs;@+}
+	const IntSequence& getNVX() const
+		{@+ return nvmax;@+}
+	const Symmetry& getSym() const
+		{@+ return sym;@+}
+
+	int calcUnfoldMaxOffset() const;
+	int calcFoldMaxOffset() const;
+	int calcFoldOffset(const IntSequence& v) const;
+	void decrement(IntSequence& v) const; 
+};
+
+@ Here is a class for the folded general symmetry tensor. It contains only
+the tensor dimensions, defines types for indices, and implements the virtual
+methods of the superclass |FTensor|.
+
+We add a method |contractAndAdd| which performs a contraction of one
+variable in the tensor. This is, for instance
+$$\left[r_{x^iz^k}\right]_{\alpha_1\ldots\alpha_i\gamma_1\ldots\gamma_k}=
+\left[t_{x^iy^jz^k}\right]_{\alpha_1\ldots\alpha_i\beta_1\ldots\beta_j\gamma_1\ldots\gamma_k}
+\left[c\right]^{\beta_1\ldots\beta_j}
+$$
+
+Also we add |getOffset| which should be used with care.
+
+@<|FGSTensor| class declaration@>=
+class GSSparseTensor;
+class FGSTensor : public FTensor {
+	friend class UGSTensor;
+
+	const TensorDimens tdims;
+public:@;
+	@<|FGSTensor| constructor declarations@>;
+	virtual ~FGSTensor()@+ {}
+
+	void increment(IntSequence& v) const;
+	void decrement(IntSequence& v) const
+		{@+ tdims.decrement(v);@+}
+	UTensor& unfold() const;
+	const TensorDimens& getDims() const
+		{@+ return tdims;@+}
+	const Symmetry& getSym() const
+		{@+ return getDims().getSym();@+}
+
+	void contractAndAdd(int i, FGSTensor& out,
+						const FRSingleTensor& col) const;
+	int getOffset(const IntSequence& v) const
+		{@+ return tdims.calcFoldOffset(v);@+}
+};
+
+@ These are standard constructors followed by two slicing constructors. The
+first constructs a slice from the sparse tensor, the second from the dense
+one (both fully symmetric). The next constructor is just a conversion from
+|GSSparseTensor|. The last constructor allows for in-place conversion
+from |FFSTensor| to |FGSTensor|.
+
+@<|FGSTensor| constructor declarations@>=
+	FGSTensor(int r, const TensorDimens& td)
+		: FTensor(along_col, td.getNVX(), r,
+				  td.calcFoldMaxOffset(), td.dimen()), tdims(td)@+ {}
+	FGSTensor(const FGSTensor& ft)
+		: FTensor(ft), tdims(ft.tdims)@+ {}
+	FGSTensor(const UGSTensor& ut);
+	FGSTensor(int first_row, int num, FGSTensor& t)
+		: FTensor(first_row, num, t), tdims(t.tdims)@+ {}
+	FGSTensor(const FSSparseTensor& t, const IntSequence& ss,
+			  const IntSequence& coor, const TensorDimens& td);
+	FGSTensor(const FFSTensor& t, const IntSequence& ss,
+			  const IntSequence& coor, const TensorDimens& td);
+	FGSTensor(const GSSparseTensor& sp);
+	FGSTensor(FFSTensor& t)
+		: FTensor(0, t.nrows(), t), tdims(t.nvar(), t.dimen())@+ {}
+
+
+@ Besides things similar to those of |FGSTensor|, we also have here the
+method |unfoldData| and the helper method |getFirstIndexOf|,
+which corresponds to sorting coordinates in the fully symmetric case (here
+the action is more complicated, so we put it in a method).
+
+@<|UGSTensor| class declaration@>=
+class UGSTensor : public UTensor {
+	friend class FGSTensor;
+
+	const TensorDimens tdims;
+public:@;
+	@<|UGSTensor| constructor declarations@>;
+	virtual ~UGSTensor()@+ {}
+
+	void increment(IntSequence& v) const;
+	void decrement(IntSequence& v) const;
+	FTensor& fold() const;
+	const TensorDimens& getDims() const
+		{@+ return tdims;@+}
+	const Symmetry& getSym() const
+		{@+ return getDims().getSym();@+}
+
+	void contractAndAdd(int i, UGSTensor& out,
+						const URSingleTensor& col) const;
+	int getOffset(const IntSequence& v) const;
+private:@;
+	void unfoldData();
+public:@;
+	index getFirstIndexOf(const index& in) const;
+};
+
+
+@ These are standard constructors, followed by two slicing
+constructors: the first makes a slice from a fully symmetric sparse tensor,
+the second from a fully symmetric dense unfolded tensor. The last
+constructor allows for in-place conversion from |UFSTensor| to
+|UGSTensor|.
+
+@<|UGSTensor| constructor declarations@>=
+	UGSTensor(int r, const TensorDimens& td)
+		: UTensor(along_col, td.getNVX(), r,
+				  td.calcUnfoldMaxOffset(), td.dimen()), tdims(td)@+ {}
+	UGSTensor(const UGSTensor& ut)
+		: UTensor(ut), tdims(ut.tdims)@+ {}
+	UGSTensor(const FGSTensor& ft);
+	UGSTensor(int first_row, int num, UGSTensor& t)
+		: UTensor(first_row,  num, t), tdims(t.tdims)@+ {}
+	UGSTensor(const FSSparseTensor& t, const IntSequence& ss,
+			  const IntSequence& coor, const TensorDimens& td);
+	UGSTensor(const UFSTensor& t, const IntSequence& ss,
+			  const IntSequence& coor, const TensorDimens& td);
+	UGSTensor(UFSTensor& t)
+		: UTensor(0, t.nrows(), t), tdims(t.nvar(), t.dimen())@+ {}
+
+
+@ End of {\tt gs\_tensor.h} file.
diff --git a/dynare++/tl/cc/int_sequence.cweb b/dynare++/tl/cc/int_sequence.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..d466d87ad944a5d65854de2a6c75cb23e71d2e9a
--- /dev/null
+++ b/dynare++/tl/cc/int_sequence.cweb
@@ -0,0 +1,351 @@
+@q $Id: int_sequence.cweb 148 2005-04-19 15:12:26Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@ Start of {\tt int\_sequence.cpp} file.
+
+@c
+#include "int_sequence.h"
+#include "symmetry.h"
+#include "tl_exception.h"
+
+#include <stdio.h>
+#include <limits.h>
+
+@<|IntSequence| constructor code 1@>;
+@<|IntSequence| constructor code 2@>;
+@<|IntSequence| constructor code 3@>;
+@<|IntSequence| constructor code 4@>;
+@<|IntSequence::operator=| code@>;
+@<|IntSequence::operator==| code@>;
+@<|IntSequence::operator<| code@>;
+@<|IntSequence::lessEq| code@>;
+@<|IntSequence::less| code@>;
+@<|IntSequence::sort| code@>;
+@<|IntSequence::monotone| code@>;
+@<|IntSequence::pmonotone| code@>;
+@<|IntSequence::sum| code@>;
+@<|IntSequence::mult| code@>;
+@<|IntSequence::getPrefixLength| code@>;
+@<|IntSequence::getNumDistinct| code@>;
+@<|IntSequence::getMax| code@>;
+@<|IntSequence::add| code 1@>;
+@<|IntSequence::add| code 2@>;
+@<|IntSequence::isPositive| code@>;
+@<|IntSequence::isConstant| code@>;
+@<|IntSequence::isSorted| code@>;
+@<|IntSequence::print| code@>;
+
+@ This unfolds a given integer sequence with respect to the given
+symmetry. If for example the symmetry is $(2,3)$, and the sequence is
+$(a,b)$, then the result is $(a,a,b,b,b)$.
+
+@<|IntSequence| constructor code 1@>=
+IntSequence::IntSequence(const Symmetry& sy, const IntSequence& se)
+	: data(new int[sy.dimen()]), length(sy.dimen()), destroy(true)
+{
+	int k = 0;
+	for (int i = 0; i < sy.num(); i++)
+		for (int j = 0;	 j < sy[i]; j++, k++)
+			operator[](k) = se[i];
+}
+
+
+@ This constructs an implied symmetry (implemented as an |IntSequence|)
+from a more general symmetry and an equivalence class (implemented as a
+|vector<int>|). For example, let the general symmetry be $y^3u^2$ and
+the equivalence class be $\{0,4\}$, picking up the first and fifth
+variables; we calculate the symmetry (at this point only an |IntSequence|)
+corresponding to the picked variables. These are $yu$. Thus the
+constructed sequence must be $(1,1)$, meaning that we picked one $y$
+and one $u$.
+
+
+@<|IntSequence| constructor code 2@>=
+IntSequence::IntSequence(const Symmetry& sy, const vector<int>& se)
+	: data(new int[sy.num()]), length(sy.num()), destroy(true)
+{
+	TL_RAISE_IF(sy.dimen() <= se[se.size()-1],
+				"Sequence is not reachable by symmetry in IntSequence()");
+	for (int i = 0; i < length; i++) @/
+		operator[](i) = 0;
+
+    for (unsigned int i = 0; i < se.size(); i++) @/
+		operator[](sy.findClass(se[i]))++;
+}
+
+@ This constructs an ordered integer sequence from the given ordered
+sequence by inserting the given number into the sequence.
+
+@<|IntSequence| constructor code 3@>=
+IntSequence::IntSequence(int i, const IntSequence& s)
+	: data(new int[s.size()+1]), length(s.size()+1), destroy(true)
+{
+	int j = 0;
+	while (j < s.size() && s[j] < i)
+		j++;
+	for (int jj = 0; jj < j; jj++)
+		operator[](jj) = s[jj];
+	operator[](j) = i;
+	for (int jj = j; jj < s.size(); jj++)
+		operator[](jj+1) = s[jj];
+}
+
+@ 
+@<|IntSequence| constructor code 4@>=
+IntSequence::IntSequence(int i, const IntSequence& s, int pos)
+	: data(new int[s.size()+1]), length(s.size()+1), destroy(true)
+{
+	TL_RAISE_IF(pos < 0 || pos > s.size(),
+				"Wrong position for insertion IntSequence constructor");
+	for (int jj = 0; jj < pos; jj++)
+		operator[](jj) = s[jj];
+	operator[](pos) = i;
+	for (int jj = pos; jj < s.size(); jj++)
+		operator[](jj+1) = s[jj];
+}
+
+@ 
+@<|IntSequence::operator=| code@>=
+const IntSequence& IntSequence::operator=(const IntSequence& s)
+ {
+	 TL_RAISE_IF(!destroy && length != s.length,
+				 "Wrong length for in-place IntSequence::operator=");
+	 if (destroy && length != s.length) {
+		 delete [] data;
+		 data = new int[s.length];
+		 destroy = true;
+		 length = s.length;
+	 }
+	 memcpy(data, s.data, sizeof(int)*length);
+	 return *this;
+ }
+
+
+@ 
+@<|IntSequence::operator==| code@>=
+bool IntSequence::operator==(const IntSequence& s) const
+{
+	if (size() != s.size())
+		return false;
+
+	int i = 0;
+	while (i < size() && operator[](i) == s[i])
+		i++;
+	return i == size();
+}
+
+@ We need some linear irreflexive ordering; we implement it as
+lexicographic ordering without identity.
+@<|IntSequence::operator<| code@>=
+bool IntSequence::operator<(const IntSequence& s) const
+{
+	int len = min(size(), s.size());
+
+	int i = 0;
+	while (i < len && operator[](i) == s[i])
+		i++;
+	return (i < s.size() && (i == size() || operator[](i) < s[i]));
+}
+
+@ 
+@<|IntSequence::lessEq| code@>=
+bool IntSequence::lessEq(const IntSequence& s) const
+{
+	TL_RAISE_IF(size() != s.size(),
+				"Sequence with different lengths in IntSequence::lessEq");
+
+	int i = 0;
+	while (i < size() && operator[](i) <= s[i])
+		i++;
+	return (i == size());
+}
+
+@ 
+@<|IntSequence::less| code@>=
+bool IntSequence::less(const IntSequence& s) const
+{
+	TL_RAISE_IF(size() != s.size(),
+				"Sequence with different lengths in IntSequence::less");
+
+	int i = 0;
+	while (i < size() && operator[](i) < s[i])
+		i++;
+	return (i == size());
+}
+
+@ This is a bubble sort; all sequences are usually very short, so this
+sin might be forgiven.
+
+@<|IntSequence::sort| code@>=
+void IntSequence::sort()
+{
+	for (int i = 0; i < length; i++) {
+		int swaps = 0;
+		for (int j = 0; j < length-1; j++) {
+			if (data[j] > data[j+1]) {
+				int s = data[j+1];
+				data[j+1] = data[j];
+				data[j] = s;
+				swaps++;
+			}
+		}
+		if (swaps == 0)
+			return;
+	}
+}
+
+@ Here we monotonize the sequence. If an item is less than its
+predecessor, it is set equal to the predecessor.
+
+@<|IntSequence::monotone| code@>=
+void IntSequence::monotone()
+{
+	for (int i = 1; i < length; i++)
+		if (data[i-1] > data[i])@/
+			data[i] = data[i-1];
+}
+
+@ This partially monotonizes the sequence. The partitioning is done by a
+symmetry, so the subsequences given by the symmetry classes are
+monotonized. For example, if the symmetry is $y^2u^3$, and the
+|IntSequence| is $(5,3,1,6,4)$, the result is $(5,5,1,6,6)$.
+
+@<|IntSequence::pmonotone| code@>=
+void IntSequence::pmonotone(const Symmetry& s)
+{
+	int cum = 0;
+	for (int i = 0; i < s.num(); i++) {
+		for (int j = cum + 1; j < cum + s[i]; j++)
+			if (data[j-1] > data[j])@/
+				data[j] = data[j-1];
+		cum += s[i];
+	}
+}
+
+@ This returns sum of all elements. Useful for symmetries.
+@<|IntSequence::sum| code@>=
+int IntSequence::sum() const
+{
+	int res = 0;
+	for (int i = 0; i < length; i++) @/
+		res += operator[](i);
+	return res;
+}
+
+@ This returns the product of the items with indices between |i1| (inclusive)
+and |i2| (exclusive). It is useful for Kronecker product dimensions.
+
+@<|IntSequence::mult| code@>=
+int IntSequence::mult(int i1, int i2) const
+{
+	int res = 1;
+	for (int i = i1; i < i2; i++)@/
+		res *= operator[](i);
+	return res;
+}
+
+@ Return the number of identical items at the beginning of the sequence.
+@<|IntSequence::getPrefixLength| code@>=
+int IntSequence::getPrefixLength() const
+{
+	int i = 0;
+	while (i+1 < size() && operator[](i+1) == operator[](0))
+		i++;
+	return i+1;
+}
+
+@ This returns the number of distinct items in the sequence. It assumes
+that the sequence is sorted. For the empty sequence it returns zero.
+
+@<|IntSequence::getNumDistinct| code@>=
+int IntSequence::getNumDistinct() const
+{
+	int res = 0;
+	if (size() > 0)
+		res++;
+	for (int i = 1; i < size(); i++)
+		if (operator[](i) != operator[](i-1))
+			res++;
+	return res;
+}
+
+@ This returns a maximum of the sequence. If the sequence is empty, it
+returns the least possible |int| value.
+
+@<|IntSequence::getMax| code@>=
+int IntSequence::getMax() const
+{
+	int res = INT_MIN;
+	for (int i = 0; i < size(); i++)
+		if (operator[](i) > res)
+			res = operator[](i);
+	return res;
+}
+
+@ 
+@<|IntSequence::add| code 1@>=
+void IntSequence::add(int i)
+{
+	for (int j = 0; j < size(); j++)
+		operator[](j) += i;
+}
+
+@ 
+@<|IntSequence::add| code 2@>=
+void IntSequence::add(int f, const IntSequence& s)
+{
+	TL_RAISE_IF(size() != s.size(),
+				"Wrong sequence length in IntSequence::add");
+	for (int j = 0; j < size(); j++)
+		operator[](j) += f*s[j];
+}
+
+@ 
+@<|IntSequence::isPositive| code@>=
+bool IntSequence::isPositive() const
+{
+	int i = 0;
+	while (i < size() && operator[](i) >= 0)
+		i++;
+	return (i == size());
+}
+
+@ 
+@<|IntSequence::isConstant| code@>=
+bool IntSequence::isConstant() const
+{
+	bool res = true;
+	int i = 1;
+	while (res && i < size()) {
+		res = res && operator[](0) == operator[](i);
+		i++;
+	}
+	return res;
+}
+
+@ 
+@<|IntSequence::isSorted| code@>=
+bool IntSequence::isSorted() const
+{
+	bool res = true;
+	int i = 1;
+	while (res && i < size()) {
+		res = res && operator[](i-1) <= operator[](i);
+		i++;
+	}
+	return res;
+}
+
+
+
+@ Debug print.
+@<|IntSequence::print| code@>=
+void IntSequence::print() const
+{
+	printf("[");
+	for (int i = 0; i < size(); i++)@/
+		printf("%2d ",operator[](i));
+	printf("]\n");
+}
+
+@ End of {\tt int\_sequence.cpp} file.
\ No newline at end of file
diff --git a/dynare++/tl/cc/int_sequence.hweb b/dynare++/tl/cc/int_sequence.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..5f76f34499ea440e0c9927f1e2257507a888283f
--- /dev/null
+++ b/dynare++/tl/cc/int_sequence.hweb
@@ -0,0 +1,132 @@
+@q $Id: int_sequence.hweb 758 2006-05-22 08:31:18Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@*2 Integer sequence. Start of {\tt int\_sequence.h} file.
+
+Here we define an auxiliary abstraction for a sequence of integers. The
+basic functionality is to hold an ordered sequence of integers of
+constant length. We prefer this simple class over the STL
+|vector<int>| since it is more efficient for our purposes.
+
+The class is used as an index of a tensor, in a symmetry definition, in
+Kronecker product dimensions, or as a class of an equivalence. The
+latter case is not ordered, but we always order equivalence classes in
+order to ensure a unique representation. In almost all cases we
+need the integer sequence to be sorted, or monotonized (indices
+of folded tensors), or partially monotonized (indices of folded tensors
+that are not fully symmetric), or we need to calculate a product of all
+members or only of a part (used in Kronecker product dimensions). When we
+calculate offsets in folded tensors, we need to obtain the number of
+identical items at the front (|getPrefixLength|), and also to add some
+integer number to all items.
+
+Also, we need to construct a subsequence of a sequence, so
+some instances do destroy the underlying data, and some do not.
+
+@s IntSequence int
+@s Symmetry int
+@c
+#ifndef INT_SEQUENCE_H
+#define INT_SEQUENCE_H
+
+
+#include <string.h>
+#include <vector>
+
+using namespace std;
+
+@<|IntSequence| class declaration@>;
+
+#endif
+
+@ The implementation of |IntSequence| is straightforward. It has a
+pointer |data|, a |length| of the data, and a flag |destroy| indicating
+whether the instance must destroy the underlying data.
+
+@<|IntSequence| class declaration@>=
+class Symmetry;
+class IntSequence {
+	int* data;
+	int length;
+	bool destroy;
+public:@/
+	@<|IntSequence| constructors@>;
+	@<|IntSequence| inlines and operators@>;
+	@<|IntSequence| orderings@>;
+	void sort();
+	void monotone();
+	void pmonotone(const Symmetry& s);
+	int sum() const;
+	int mult(int i1, int i2) const;
+	int mult() const
+		{@+return mult(0, length);@+}
+	void add(int i);
+	void add(int f, const IntSequence& s); 
+	int getPrefixLength() const;
+	int getNumDistinct() const;
+	int getMax() const;
+	bool isPositive() const;
+	bool isConstant() const;
+	bool isSorted() const;
+	void print() const;
+};
+
+@ We have a constructor allocating a given length of data, a constructor
+allocating and then initializing all members to a given number, a copy
+constructor, a conversion from |vector<int>|, a subsequence
+constructor, and a constructor used for calculating an implied symmetry from
+a more general symmetry and one equivalence class (see the |Symmetry|
+class). Finally we have a constructor which unfolds a sequence with
+respect to a given symmetry, and a constructor which inserts a given
+number into the ordered sequence or at a given position.
+
+@<|IntSequence| constructors@>=
+	IntSequence(int l)
+		: data(new int[l]), length(l), destroy(true)@+ {}	
+	IntSequence(int l, int n)
+		:  data(new int[l]), length(l), destroy(true)
+		{@+ for (int i = 0; i < length; i++) data[i] = n;@+}
+	IntSequence(const IntSequence& s)
+		: data(new int[s.length]), length(s.length), destroy(true)
+		{@+ memcpy(data, s.data, length*sizeof(int));@+}
+	IntSequence(IntSequence& s, int i1, int i2)
+		: data(s.data+i1), length(i2-i1), destroy(false)@+ {}
+	IntSequence(const IntSequence& s, int i1, int i2)
+		: data(new int[i2-i1]), length(i2-i1), destroy(true)
+		{@+ memcpy(data, s.data+i1, sizeof(int)*length);@+}
+	IntSequence(const Symmetry& sy, const vector<int>& se);
+	IntSequence(const Symmetry& sy, const IntSequence& se);
+	IntSequence(int i, const IntSequence& s);
+	IntSequence(int i, const IntSequence& s, int pos);
+	IntSequence(int l, const int* d)
+		: data(new int[l]), length(l), destroy(true)
+		{@+ memcpy(data, d, sizeof(int)*length);@+}
+
+
+@ These are clear inlines and operators.
+@<|IntSequence| inlines and operators@>=
+    const IntSequence& operator=(const IntSequence& s);
+    virtual ~IntSequence()
+		{@+ if (destroy) delete [] data;@+}
+	bool operator==(const IntSequence& s) const;
+	bool operator!=(const IntSequence& s) const
+		{@+ return ! operator==(s);@+}
+	int& operator[](int i)
+		{@+ return data[i];@+}
+	int operator[](int i) const
+		{@+ return data[i];@+}
+	int size() const
+		{@+ return length;@+}
+
+@ We provide two orderings. The first, |operator<|, is the linear
+lexicographic ordering; the second, |less|, is the non-linear Cartesian
+ordering.
+@<|IntSequence| orderings@>=
+	bool operator<(const IntSequence& s) const;
+	bool operator<=(const IntSequence& s) const
+		{@+ return (operator==(s) || operator<(s));@+}
+	bool lessEq(const IntSequence& s) const;
+	bool less(const IntSequence& s) const;
+
+
+@ End of {\tt int\_sequence.h} file.
diff --git a/dynare++/tl/cc/kron_prod.cweb b/dynare++/tl/cc/kron_prod.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..189df486158fe43ff4c0185517310ed0a10c0a0e
--- /dev/null
+++ b/dynare++/tl/cc/kron_prod.cweb
@@ -0,0 +1,457 @@
+@q $Id: kron_prod.cweb 1834 2008-05-18 20:23:54Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@ Start of {\tt kron\_prod.cpp} file.
+@c
+#include "kron_prod.h"
+#include "tl_exception.h"
+
+#include <stdio.h>
+
+@<|KronProdDimens| constructor code@>;
+@<|KronProd::checkDimForMult| code@>;
+@<|KronProd::kronMult| code@>;
+@<|KronProdAll::setMat| code@>;
+@<|KronProdAll::setUnit| code@>;
+@<|KronProdAll::isUnit| code@>;
+@<|KronProdAll::multRows| code@>;
+@<|KronProdIA::mult| code@>;
+@<|KronProdAI| constructor code@>;
+@<|KronProdAI::mult| code@>;
+@<|KronProdIAI::mult| code@>;
+@<|KronProdAll::mult| code@>;
+@<|KronProdAllOptim::optimizeOrder| code@>;
+
+@ Here we construct Kronecker product dimensions from given Kronecker
+product dimensions by picking one matrix and setting all others to
+identity. The constructor takes the dimensions of $A_1\otimes
+A_2\otimes\ldots\otimes A_n$, and makes the dimensions of $I\otimes
+A_i\otimes I$, or $I\otimes A_n$, or $A_1\otimes I$ for a given
+$i$. The identity matrices must fit into the described order. See the
+header file.
+
+We first decide the length of the resulting dimensions. The possible
+lengths are three for $I\otimes A\otimes I$, and two for $I\otimes A$
+or $A\otimes I$.
+
+Then we fork according to |i|.
+
+@<|KronProdDimens| constructor code@>=
+KronProdDimens::KronProdDimens(const KronProdDimens& kd, int i)
+	: rows((i==0 || i==kd.dimen()-1)? (2):(3)),
+	  cols((i==0 || i==kd.dimen()-1)? (2):(3))
+{
+	TL_RAISE_IF(i < 0 || i >= kd.dimen(),
+				"Wrong index for pickup in KronProdDimens constructor");
+
+	int kdim = kd.dimen();
+	if (i == 0) {
+		@<set AI dimensions@>;
+	} else if (i == kdim-1){
+		@<set IA dimensions@>;
+	} else {
+		@<set IAI dimensions@>;
+	}
+}
+
+@ The first rows and cols are taken from |kd|. The dimension of the
+identity matrix is the number of rows of $A_2\otimes\ldots\otimes A_n$,
+since the matrix $A_1\otimes I$ is the first.
+ 
+@<set AI dimensions@>=
+rows[0] = kd.rows[0];
+rows[1] = kd.rows.mult(1, kdim);
+cols[0] = kd.cols[0];
+cols[1] = rows[1];
+
+@ The second dimension is taken from |kd|. The dimension of the identity
+matrix is the number of columns of $A_1\otimes\ldots\otimes A_{n-1}$, since
+the matrix $I\otimes A_n$ is the last.
+
+@<set IA dimensions@>=
+rows[0] = kd.cols.mult(0, kdim-1);
+rows[1] = kd.rows[kdim-1];
+cols[0] = rows[0];
+cols[1] = kd.cols[kdim-1];
+
+@ The dimensions of the middle matrix are taken from |kd|. The
+dimension of the first identity matrix is the number of columns of
+$A_1\otimes\ldots\otimes A_{i-1}$, and the dimension of the last
+identity matrix is the number of rows of $A_{i+1}\otimes\ldots\otimes
+A_n$.
+ 
+@<set IAI dimensions@>=
+rows[0] = kd.cols.mult(0, i);
+cols[0] = rows[0];
+rows[1] = kd.rows[i];
+cols[1] = kd.cols[i];
+cols[2] = kd.rows.mult(i+1, kdim);
+rows[2] = cols[2];
+
+
+@ This raises an exception if dimensions are bad for multiplication
+|out = in*this|.
+
+@<|KronProd::checkDimForMult| code@>=
+void KronProd::checkDimForMult(const ConstTwoDMatrix& in, const TwoDMatrix& out) const
+{
+	int my_rows;
+	int my_cols;
+	kpd.getRC(my_rows, my_cols);
+	TL_RAISE_IF(in.nrows() != out.nrows() || in.ncols() != my_rows,
+				"Wrong dimensions for KronProd in KronProd::checkDimForMult");
+}
+
+@ Here we Kronecker multiply two given vectors |v1| and |v2| and
+store the result in preallocated |res|.
+
+@<|KronProd::kronMult| code@>=
+void KronProd::kronMult(const ConstVector& v1, const ConstVector& v2,
+						Vector& res)
+{
+	TL_RAISE_IF(res.length() != v1.length()*v2.length(),
+				"Wrong vector lengths in KronProd::kronMult");
+	res.zeros();
+	for (int i = 0; i < v1.length(); i++) {
+		Vector sub(res, i*v2.length(), v2.length());
+		sub.add(v1[i], v2);
+	}
+}
+
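+@ For instance (a standalone sketch, not library code), the Kronecker product
+of $v_1=(1,2)$ and $v_2=(3,4,5)$ is $(3,4,5,6,8,10)$: each element of $v_1$
+scales a copy of $v_2$, exactly as the loop above does with |sub.add|.
+
+	#include <cstdio>
+	#include <vector>
+
+	int main()
+	{
+		std::vector<double> v1 = {1, 2}, v2 = {3, 4, 5};
+		std::vector<double> res(v1.size()*v2.size(), 0.0);
+		for (size_t i = 0; i < v1.size(); i++)
+			for (size_t j = 0; j < v2.size(); j++)
+				res[i*v2.size()+j] += v1[i]*v2[j];
+		for (double x : res)
+			std::printf("%g ", x);  // 3 4 5 6 8 10
+		std::printf("\n");
+		return 0;
+	}
+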
+
+@ 
+@<|KronProdAll::setMat| code@>=
+void KronProdAll::setMat(int i, const TwoDMatrix& m)
+{
+	matlist[i] = &m;
+	kpd.setRC(i, m.nrows(), m.ncols());
+}
+
+@ 
+@<|KronProdAll::setUnit| code@>=
+void KronProdAll::setUnit(int i, int n)
+{
+	matlist[i] = NULL;
+	kpd.setRC(i, n, n);
+}
+
+@ 
+@<|KronProdAll::isUnit| code@>=
+bool KronProdAll::isUnit() const
+{
+	int i = 0;
+	while (i < dimen() && matlist[i] == NULL)
+		i++; 
+	return i == dimen();
+}
+
+@ Here we multiply $B\cdot(I\otimes A)$. If $m$ is the dimension of the
+identity matrix, then the product is equal to
+$B\cdot\hbox{diag}_m(A)$. If $B$ is partitioned accordingly, then the
+result is $[B_1A, B_2A,\ldots, B_mA]$.
+
+Here, |outi| are partitions of |out|, |ini| are const partitions of
+|in|, and |id_cols| is $m$. We employ level-2 BLAS.
+
+@<|KronProdIA::mult| code@>=
+void KronProdIA::mult(const ConstTwoDMatrix& in, TwoDMatrix& out) const
+{
+	checkDimForMult(in, out);
+
+	int id_cols = kpd.cols[0];
+	ConstTwoDMatrix a(mat);
+
+	for (int i = 0; i < id_cols; i++) {
+		TwoDMatrix outi(out, i*a.ncols(), a.ncols());
+		ConstTwoDMatrix ini(in, i*a.nrows(), a.nrows()); 
+		outi.mult(ini, a);
+	}
+}
+
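+@ A standalone numerical check (not library code; sizes and data are
+hypothetical) of the block structure used above, $B\cdot(I_m\otimes
+A)=[B_1A,\ldots,B_mA]$, with column-major storage and plain loops:
+
+	#include <cstdio>
+	#include <cmath>
+
+	int main()
+	{
+		const int q = 2, ra = 2, ca = 3, m = 2;  // A is ra x ca, I is m x m
+		double A[ra*ca], B[q*m*ra];              // column-major storage
+		for (int i = 0; i < ra*ca; i++) A[i] = i + 1;
+		for (int i = 0; i < q*m*ra; i++) B[i] = 0.25*i - 1;
+
+		// build K = I_m (x) A explicitly: K(i*ra+k, i*ca+c) = A(k,c)
+		double K[m*ra*m*ca] = {0.0};
+		for (int i = 0; i < m; i++)
+			for (int c = 0; c < ca; c++)
+				for (int k = 0; k < ra; k++)
+					K[(i*ra+k) + (i*ca+c)*(m*ra)] = A[k + c*ra];
+
+		// compare B*K against the blockwise form [B_1*A, ..., B_m*A]
+		double maxdiff = 0.0;
+		for (int j = 0; j < m; j++)
+			for (int c = 0; c < ca; c++)
+				for (int r = 0; r < q; r++) {
+					double full = 0.0, block = 0.0;
+					for (int col = 0; col < m*ra; col++)
+						full += B[r + col*q] * K[col + (j*ca+c)*(m*ra)];
+					for (int k = 0; k < ra; k++)
+						block += B[r + (j*ra+k)*q] * A[k + c*ra];
+					maxdiff = std::fmax(maxdiff, std::fabs(full - block));
+				}
+		std::printf("max difference: %g\n", maxdiff);  // prints 0
+		return 0;
+	}
+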
+@ Here we construct |KronProdAI| from |KronProdIAI|. It is clear.
+@<|KronProdAI| constructor code@>=
+KronProdAI::KronProdAI(const KronProdIAI& kpiai)
+	: KronProd(KronProdDimens(2)), mat(kpiai.mat)
+{
+	kpd.rows[0] = mat.nrows();
+	kpd.cols[0] = mat.ncols();
+	kpd.rows[1] = kpiai.kpd.rows[2];
+	kpd.cols[1] = kpiai.kpd.cols[2];
+}
+
+
+@ Here we multiply $B\cdot(A\otimes I)$. Let the dimension of the
+matrix $A$ be $m\times n$, the dimension of $I$ be $p$, and the number
+of rows of $B$ be $q$. We use the fact that $B\cdot(A\otimes
+I)=\hbox{reshape}(\hbox{reshape}(B, qp, m)\cdot A, q, np)$, where the
+reshapes reinterpret the column-major storage. This works
+only for a matrix $B$ whose storage has leading dimension equal to its
+number of rows.
+
+For cases where the leading dimension is not equal to the number of
+rows, we partition the matrix $A\otimes I$ into $m\times n$ square
+blocks $a_{ij}I$. Accordingly, we partition $B$ into $m$ partitions
+$[B_1, B_2,\ldots,B_m]$. Each partition of $B$ has the same number of
+columns as the identity matrix. If $R$ denotes the resulting matrix,
+then it can be partitioned into $n$ partitions
+$[R_1,R_2,\ldots,R_n]$. Each partition of $R$ has the same number of
+columns as the identity matrix. Then we have $R_i=\sum_j a_{ji}B_j$.
+
+In the code, |outi| is $R_i$, |ini| is $B_j$, and |id_cols| is the dimension
+of the identity matrix.
+ 
+@<|KronProdAI::mult| code@>=
+void KronProdAI::mult(const ConstTwoDMatrix& in, TwoDMatrix& out) const
+{
+	checkDimForMult(in, out);
+
+	int id_cols = kpd.cols[1];
+	ConstTwoDMatrix a(mat);
+
+	if (in.getLD() == in.nrows()) {
+		ConstTwoDMatrix in_resh(in.nrows()*id_cols, a.nrows(), in.getData().base());
+		TwoDMatrix out_resh(in.nrows()*id_cols, a.ncols(), out.getData().base());
+		out_resh.mult(in_resh, a);
+	} else {
+		out.zeros();
+		for (int i = 0; i < a.ncols(); i++) {
+			TwoDMatrix outi(out, i*id_cols, id_cols);
+			for (int j = 0; j < a.nrows(); j++) {
+				ConstTwoDMatrix ini(in, j*id_cols, id_cols);
+				outi.add(a.get(j,i), ini);
+			}
+		}
+	}
+}
+
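+@ A standalone numerical check (not library code; sizes and data are
+hypothetical) of the reshape identity used above,
+$B\cdot(A\otimes I_p)=\hbox{reshape}(\hbox{reshape}(B, qp, m)\cdot A, q, np)$,
+for column-major storage:
+
+	#include <cstdio>
+	#include <cmath>
+
+	int main()
+	{
+		const int q = 2, m = 2, n = 3, p = 2;  // A is m x n, I is p x p
+		double A[m*n], B[q*m*p];               // column-major storage
+		for (int i = 0; i < m*n; i++) A[i] = i + 1;
+		for (int i = 0; i < q*m*p; i++) B[i] = 0.5*i - 3;
+
+		// direct product: C(r, k*p+t) = sum_i B(r, i*p+t) * A(i,k)
+		double C[q*n*p];
+		for (int k = 0; k < n; k++)
+			for (int t = 0; t < p; t++)
+				for (int r = 0; r < q; r++) {
+					double s = 0.0;
+					for (int i = 0; i < m; i++)
+						s += B[r + (i*p+t)*q] * A[i + k*m];
+					C[r + (k*p+t)*q] = s;
+				}
+
+		// reshape trick: view B's storage as (q*p) x m, multiply by A, and
+		// view the (q*p) x n result as q x (n*p); same memory layout as C
+		double D[q*p*n];
+		for (int k = 0; k < n; k++)
+			for (int al = 0; al < q*p; al++) {
+				double s = 0.0;
+				for (int i = 0; i < m; i++)
+					s += B[al + i*q*p] * A[i + k*m];
+				D[al + k*q*p] = s;
+			}
+
+		double diff = 0.0;
+		for (int i = 0; i < q*n*p; i++)
+			diff += std::fabs(C[i] - D[i]);
+		std::printf("difference: %g\n", diff);  // prints 0
+		return 0;
+	}
+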
+
+@ Here we multiply $B\cdot(I\otimes A\otimes I)$. If $n$ is the
+dimension of the first identity matrix, then we multiply
+$B\cdot\hbox{diag}_n(A\otimes I)$. So we partition $B$ and the result $R$
+accordingly, and multiply $B_i\cdot(A\otimes I)$, which is in fact
+|KronProdAI::mult|. Note that the number of columns of the partitions of $B$
+is the number of rows of $A\otimes I$, and the number of columns of the
+partitions of $R$ is the number of columns of $A\otimes I$.
+
+In the code, |id_cols| is $n$, |akronid| is a Kronecker product object for
+$A\otimes I$, and |in_bl_width| and |out_bl_width| are the numbers of rows
+and columns of $A\otimes I$.
+
+ 
+@<|KronProdIAI::mult| code@>=
+void KronProdIAI::mult(const ConstTwoDMatrix& in, TwoDMatrix& out) const
+{
+	checkDimForMult(in, out);
+
+	int id_cols = kpd.cols[0];
+
+	KronProdAI akronid(*this);
+	int in_bl_width;
+	int out_bl_width;
+	akronid.kpd.getRC(in_bl_width, out_bl_width);
+
+	for (int i = 0; i < id_cols; i++) {
+		TwoDMatrix outi(out, i*out_bl_width, out_bl_width);
+		ConstTwoDMatrix ini(in, i*in_bl_width, in_bl_width);
+		akronid.mult(ini, outi);
+	}
+}
+
+@ Here we multiply $B\cdot(A_1\otimes\ldots\otimes A_n)$. First we
+multiply $B\cdot(A_1\otimes I)$, then this is multiplied by all the
+$I\otimes A_i\otimes I$, and finally by $I\otimes A_n$.
+
+If the dimension of the Kronecker product is only 1, then we multiply
+the two matrices in a straightforward way and return.
+
+The intermediate results are stored on the heap and pointed to by |last|. A
+new result is allocated, and then the former storage is deallocated.
+
+We have to be careful in cases when the last or first matrix is unit and
+no calculations are performed in the corresponding code sections. Those
+sections should handle |last| safely even if no calculations are done.
+ 
+@<|KronProdAll::mult| code@>=
+void KronProdAll::mult(const ConstTwoDMatrix& in, TwoDMatrix& out) const
+{
+	@<quick copy if product is unit@>;
+	@<quick zero if one of the matrices is zero@>;
+	@<quick multiplication if dimension is 1@>;
+	int c;
+	TwoDMatrix* last = NULL;
+	@<perform first multiplication AI@>;
+	@<perform intermediate multiplications IAI@>;
+	@<perform last multiplication IA@>;
+}
+
+@ 
+@<quick copy if product is unit@>=
+	if (isUnit()) {
+		out.zeros();
+		out.add(1.0, in);
+		return;
+	}
+
+@ If one of the matrices is exactly zero or the |in| matrix is zero, we
+set |out| to zero and return.
+
+@<quick zero if one of the matrices is zero@>=
+	bool is_zero = false;
+	for (int i = 0; i < dimen() && ! is_zero; i++)
+		is_zero = matlist[i] && matlist[i]->isZero();
+	if (is_zero || in.isZero()) {
+		out.zeros();
+		return;
+	}
+
+@ 
+@<quick multiplication if dimension is 1@>=
+	if (dimen() == 1) {
+		if (matlist[0]) // always true
+			out.mult(in, ConstTwoDMatrix(*(matlist[0])));
+		return;
+	}
+
+@ Here we have to construct $A_1\otimes I$, allocate intermediate
+result |last|, and perform the multiplication.
+
+@<perform first multiplication AI@>=
+	if (matlist[0]) {
+		KronProdAI akronid(*this);
+		c = akronid.kpd.ncols();
+		last = new TwoDMatrix(in.nrows(), c);
+		akronid.mult(in, *last);
+	} else {
+		last = new TwoDMatrix(in.nrows(), in.ncols(), in.getData().base());
+	}
+
+@ Here we go through all $I\otimes A_i\otimes I$, construct the
+product, allocate new storage for result |newlast|, perform the
+multiplication, deallocate old |last|, and set |last| to |newlast|.
+
+@<perform intermediate multiplications IAI@>=
+	for (int i = 1; i < dimen()-1; i++) {
+		if (matlist[i]) {
+			KronProdIAI interkron(*this, i);
+			c = interkron.kpd.ncols();
+			TwoDMatrix* newlast = new TwoDMatrix(in.nrows(), c);
+			interkron.mult(*last, *newlast);
+			delete last;
+			last = newlast;
+		}
+	}
+
+@ Here we just construct $I\otimes A_n$, perform the multiplication, and
+deallocate |last|.
+
+@<perform last multiplication IA@>=
+	if (matlist[dimen()-1]) {
+		KronProdIA idkrona(*this);
+		idkrona.mult(*last, out);
+	} else {
+		out = *last;
+	}
+	delete last;
+
+@ This calculates a Kronecker product of rows of the matrices; the row
+indices are given by the integer sequence. The result is allocated and
+returned. The caller is responsible for its deallocation.
+
+@<|KronProdAll::multRows| code@>=
+Vector* KronProdAll::multRows(const IntSequence& irows) const
+{
+	TL_RAISE_IF(irows.size() != dimen(),
+				"Wrong length of row indices in KronProdAll::multRows");
+
+	Vector* last = NULL;
+	ConstVector* row;
+	vector<Vector*> to_delete;
+	for (int i = 0; i < dimen(); i++) {
+		int j = dimen()-1-i;
+		@<set |row| to the row of |j|-th matrix@>;
+		@<set |last| to product of |row| and |last|@>;
+		delete row;
+	}
+
+	for (unsigned int i = 0; i < to_delete.size(); i++)
+		delete to_delete[i];
+
+	return last;
+}
+
+@ If the |j|-th matrix is a real matrix, then the row is constructed
+from the matrix. If the matrix is unit, we construct a new vector,
+fill it with zeros, then set the unit at the appropriate place, and make
+|row| a |ConstVector| of this vector, which is scheduled for
+deallocation.
+
+@<set |row| to the row of |j|-th matrix@>=
+	if (matlist[j])
+		row = new ConstVector(irows[j], *(matlist[j]));
+	else {
+		Vector* aux = new Vector(ncols(j));
+		aux->zeros();
+		(*aux)[irows[j]] = 1.0;
+		to_delete.push_back(aux);
+		row = new ConstVector(*aux);
+	}
+
+@ If |last| exists, we allocate new storage, Kronecker
+multiply, and deallocate the old storage. If |last| does not exist,
+then we only make |last| equal to |row|.
+ 
+@<set |last| to product of |row| and |last|@>=
+	if (last) {
+		Vector* newlast;
+		newlast = new Vector(last->length()*row->length());
+		kronMult(*row, ConstVector(*last), *newlast);
+		delete last;
+		last = newlast;
+	} else { 
+		last = new Vector(*row);
+	}
+
+
+@ This permutes the matrices so that the new ordering minimizes
+memory consumption. As shown in |@<|KronProdAllOptim| class declaration@>|,
+we want ${m_k\over n_k}\leq{m_{k-1}\over n_{k-1}}\leq\ldots\leq{m_1\over n_1}$,
+where $(m_i,n_i)$ is the dimension of $A_i$. So we implement a bubble
+sort.
+
+@<|KronProdAllOptim::optimizeOrder| code@>=
+void KronProdAllOptim::optimizeOrder()
+{
+	for (int i = 0; i < dimen(); i++) {
+		int swaps = 0;
+		for (int j = 0; j < dimen()-1; j++) {
+			if (((double)kpd.rows[j])/kpd.cols[j] < ((double)kpd.rows[j+1])/kpd.cols[j+1]) {
+				@<swap dimensions and matrices at |j| and |j+1|@>;
+				@<project the swap to the permutation |oper|@>;
+			}
+		}
+		if (swaps == 0) {
+			return;
+		}
+	}
+}
+
+@ 
+@<swap dimensions and matrices at |j| and |j+1|@>=
+	int s = kpd.rows[j+1];
+	kpd.rows[j+1] = kpd.rows[j];
+	kpd.rows[j] = s;
+	s = kpd.cols[j+1];
+	kpd.cols[j+1] = kpd.cols[j];
+	kpd.cols[j] = s;
+	const TwoDMatrix* m = matlist[j+1];
+	matlist[j+1] = matlist[j];
+	matlist[j] = m;
+
+@ 
+@<project the swap to the permutation |oper|@>=
+	s = oper.getMap()[j+1];
+	oper.getMap()[j+1] = oper.getMap()[j];
+	oper.getMap()[j] = s;
+	swaps++;
+
+
+@ End of {\tt kron\_prod.cpp} file.
\ No newline at end of file
diff --git a/dynare++/tl/cc/kron_prod.hweb b/dynare++/tl/cc/kron_prod.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..69eb72718ead0b8b083a345adfe76174f60cb19c
--- /dev/null
+++ b/dynare++/tl/cc/kron_prod.hweb
@@ -0,0 +1,296 @@
+@q $Id: kron_prod.hweb 2269 2008-11-23 14:33:22Z michel $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@*2 Kronecker product. Start of {\tt kron\_prod.h} file. 
+
+Here we define an abstraction for a Kronecker product of a sequence of
+matrices, that is $A_1\otimes\ldots\otimes A_n$. Obviously we do not
+store the product in memory. First we need to represent the dimension
+of the Kronecker product. Then we represent the Kronecker product itself;
+it is simply the Kronecker product dimension together with a vector of
+references to the matrices $A_1,\ldots, A_n$.
+
+The main task of this class is to calculate the matrix product
+$B\cdot(A_1\otimes A_2\otimes\ldots\otimes A_n)$, which in
+our application has much more moderate dimensions than $A_1\otimes
+A_2\otimes\ldots\otimes A_n$. We calculate it as
+$$B\cdot(A_1\otimes I)\cdot\ldots\cdot(I\otimes A_i\otimes
+I)\cdot\ldots\cdot (I\otimes A_n)$$
+where the dimensions of the identity matrices differ and are given by the
+chosen order. One can naturally ask whether there is some optimal
+order minimizing the maximum storage needed for intermediate
+results. The optimal ordering is implemented by the class |KronProdAllOptim|.
+
+For this multiplication, we also need to represent products of type
+$A\otimes I$, $I\otimes A\otimes I$, and $I\otimes A$.
+
+@s KronProdDimens int
+@s KronProd int
+
+@c
+
+#ifndef KRON_PROD_H
+#define KRON_PROD_H
+
+#include "twod_matrix.h"
+#include "permutation.h"
+#include "int_sequence.h"
+
+class KronProdAll;
+class KronProdAllOptim;
+class KronProdIA;
+class KronProdIAI;
+class KronProdAI;
+
+@<|KronProdDimens| class declaration@>;
+@<|KronProd| class declaration@>;
+@<|KronProdAll| class declaration@>;
+@<|KronProdAllOptim| class declaration@>;
+@<|KronProdIA| class declaration@>;
+@<|KronProdAI| class declaration@>;
+@<|KronProdIAI| class declaration@>;
+
+#endif
+
+@ |KronProdDimens| maintains a dimension of the Kronecker product. So,
+it maintains two sequences, one for rows, and one for columns.
+
+@<|KronProdDimens| class declaration@>=
+class KronProdDimens {
+	friend class KronProdAll;
+	friend class KronProdAllOptim;
+	friend class KronProdIA;
+	friend class KronProdIAI;
+	friend class KronProdAI;
+private:@;
+	IntSequence rows;
+	IntSequence cols;
+public:@;
+	@<|KronProdDimens| constructors@>;
+	@<|KronProdDimens| inline operators@>;
+	@<|KronProdDimens| inline methods@>;
+};
+
+@ We define three constructors. The first initializes to a given
+dimension, with all rows and cols set to zero. The second is a copy
+constructor. The third constructor takes the dimensions of $A_1\otimes
+A_2\otimes\ldots\otimes A_n$, and makes the dimensions of $I\otimes
+A_i\otimes I$, or $I\otimes A_n$, or $A_1\otimes I$ for a given
+$i$. The dimensions of the identity matrices are such that
+$$A_1\otimes A_2\otimes\ldots\otimes A_n=
+(A_1\otimes I)\cdot\ldots\cdot(I\otimes A_i\otimes I)
+\cdot\ldots\cdot(I\otimes A_n)$$
+Note that the matrices on the right do not commute, precisely because the
+sizes of the identity matrices are determined by this ordering.
+
+@<|KronProdDimens| constructors@>=
+	KronProdDimens(int dim)
+		: rows(dim,0), cols(dim, 0)@+ {}
+	KronProdDimens(const KronProdDimens& kd)
+		: rows(kd.rows), cols(kd.cols)@+ {}
+	KronProdDimens(const KronProdDimens& kd, int i);
+
+@ 
+@<|KronProdDimens| inline operators@>=
+	const KronProdDimens& operator=(const KronProdDimens& kd)
+		{@+ rows = kd.rows;@+ cols = kd.cols;@+ return *this;@+}
+	bool operator==(const KronProdDimens& kd) const
+		{@+ return rows == kd.rows && cols == kd.cols;@+}
+
+@ 
+@<|KronProdDimens| inline methods@>=
+	int dimen() const
+		{@+ return rows.size();@+}
+	void setRC(int i, int r, int c)
+		{@+ rows[i] = r;@+ cols[i] = c;@+}
+	void getRC(int i, int& r, int& c) const
+		{@+ r = rows[i];@+ c = cols[i];@+}
+	void getRC(int& r, int& c) const
+		{@+ r = rows.mult();@+ c = cols.mult();@+}
+	int nrows() const
+		{@+ return rows.mult();@+}
+	int ncols() const
+		{@+ return cols.mult();@+}
+	int nrows(int i) const
+		{@+ return rows[i];@+}
+	int ncols(int i) const
+		{@+ return cols[i];@+}
+
+@ Here we define an abstract class for all Kronecker product classes,
+which are |KronProdAll| (the most general), |KronProdIA| (for
+$I\otimes A$), |KronProdAI| (for $A\otimes I$), and |KronProdIAI| (for
+$I\otimes A\otimes I$). The purpose of the superclass is only to
+define some common methods and the common member |kpd| for dimensions, and to
+declare the pure virtual |mult|, which is implemented by the subclasses.
+
+The class also contains a static method |kronMult|, which calculates a
+Kronecker product of two vectors and stores it in the provided
+vector. It is useful at a few points of the library.
+
+@<|KronProd| class declaration@>=
+class KronProd {
+protected:@/
+	KronProdDimens kpd;
+public:@/
+	KronProd(int dim)
+		: kpd(dim)@+ {}
+	KronProd(const KronProdDimens& kd)
+		: kpd(kd)@+ {}
+	KronProd(const KronProd& kp)
+		: kpd(kp.kpd)@+ {}
+	virtual ~KronProd()@+ {}
+
+	int dimen() const
+		{@+ return kpd.dimen();@+}
+
+	virtual void mult(const ConstTwoDMatrix& in, TwoDMatrix& out) const =0;
+	void mult(const TwoDMatrix& in, TwoDMatrix& out) const
+		{@+ mult(ConstTwoDMatrix(in), out);@+}
+
+	void checkDimForMult(const ConstTwoDMatrix& in, const TwoDMatrix& out) const;
+	void checkDimForMult(const TwoDMatrix& in, const TwoDMatrix& out) const
+		{@+ checkDimForMult(ConstTwoDMatrix(in), out);@+}
+
+	static void kronMult(const ConstVector& v1, const ConstVector& v2,
+						 Vector& res);
+
+	int nrows() const
+		{@+ return kpd.nrows();@+}
+	int ncols() const
+		{@+ return kpd.ncols();@+}
+	int nrows(int i) const
+		{@+ return kpd.nrows(i);@+}
+	int ncols(int i) const
+		{@+ return kpd.ncols(i);@+}
+};
+
+@ |KronProdAll| is the main class of this file. It represents the
+Kronecker product $A_1\otimes A_2\otimes\ldots\otimes A_n$. Besides
+the dimensions, it stores pointers to the matrices in the |matlist| array. If
+a pointer is null, then the matrix is considered to be unit. The array
+is set by calls to the |setMat| method (for real matrices) or the |setUnit|
+method (for unit matrices).
+
+The object is constructed by a constructor, which allocates the
+|matlist| and initializes dimensions to zeros. Then a caller must feed
+the object with matrices by calling |setMat| and |setUnit| repeatedly
+for different indices.
+
+We implement the |mult| method of |KronProd|, and a new method
+|multRows|, which creates a vector of kronecker product of all rows of
+matrices in the object. The rows are given by the |IntSequence|.
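+
+As an illustrative sketch only (not taken from the library), a caller
+might build $B\cdot(A_1\otimes I_5\otimes A_3)$ roughly as follows;
+here |a1|, |a3| and |b| are pre-existing |TwoDMatrix| objects, and we
+assume |TwoDMatrix| can be constructed from a row and column count, as
+in the Sylvester module:
+
+	KronProdAll kp(3);
+	kp.setMat(0, a1);       // first factor is a real matrix
+	kp.setUnit(1, 5);       // second factor is a 5x5 identity
+	kp.setMat(2, a3);       // third factor is a real matrix
+	TwoDMatrix out(b.nrows(), kp.ncols());
+	kp.mult(b, out);        // out = b * (a1 (x) I_5 (x) a3)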
+
+@<|KronProdAll| class declaration@>=
+class KronProdAll : public KronProd {
+	friend class KronProdIA;
+	friend class KronProdIAI;
+	friend class KronProdAI;
+protected:@;
+	const TwoDMatrix** const matlist;
+public:@;
+	KronProdAll(int dim)
+		: KronProd(dim), matlist(new const TwoDMatrix*[dim])@+ {}
+	virtual ~KronProdAll()
+		{@+ delete [] matlist;@+}
+	void setMat(int i, const TwoDMatrix& m);
+	void setUnit(int i, int n);
+	const TwoDMatrix& getMat(int i) const
+		{@+ return *(matlist[i]);@+}
+
+	void mult(const ConstTwoDMatrix& in, TwoDMatrix& out) const;
+	Vector* multRows(const IntSequence& irows) const;
+private:@;
+	bool isUnit() const;
+};
+
+@ The class |KronProdAllOptim| minimizes memory consumption of the
+product $B\cdot(A_1\otimes A_2\otimes\ldots\otimes A_k)$. The
+optimization is done by reordering the matrices $A_1,\ldots,A_k$
+so as to minimize the sum of the storage needed for intermediate
+results. The optimal ordering is also nearly optimal with respect to
+the number of flops.
+
+Let $(m_i,n_i)$ be the dimensions of $A_i$. It is easy to observe that
+the $i$-th step needs storage of $r\cdot n_1\cdot\ldots\cdot
+n_i\cdot m_{i+1}\cdot\ldots\cdot m_k$, where $r$ is the number of rows
+of $B$. Since this equals $r\cdot n_1\cdot\ldots\cdot n_k\cdot
+{m_{i+1}\cdot\ldots\cdot m_k\over n_{i+1}\cdot\ldots\cdot n_k}$ and the
+factor $r\cdot n_1\cdot\ldots\cdot n_k$ does not depend on the ordering,
+minimizing the total storage over all permutations of the matrices is
+equivalent to minimizing the sum
+$\sum_{i=1}^k{m_{i+1}\cdot\ldots\cdot m_k\over n_{i+1}\cdot\ldots\cdot
+n_k}$. The optimal ordering then satisfies ${m_k\over
+n_k}\leq{m_{k-1}\over n_{k-1}}\leq\ldots\leq{m_1\over n_1}$.
+
+Now observe that the number of flops for the $i$-th step is $r\cdot
+n_1\cdot\ldots\cdot n_i\cdot m_i\cdot\ldots\cdot m_k$. By the same
+argument, minimizing the number of flops is equivalent to minimizing
+$\sum_{i=1}^km_i{m_{i+1}\cdot\ldots\cdot m_k\over
+n_{i+1}\cdot\ldots\cdot n_k}$. Note that, normally, the factors $m_i$
+do not vary as much as the products $n_{i+1}\cdot\ldots\cdot n_k$, so
+the ordering minimizing the memory will be nearly optimal with respect
+to the number of flops.
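+
+As an illustrative numerical check, take $r=1$ and two matrices with
+$(m_1,n_1)=(10,2)$ and $(m_2,n_2)=(2,10)$. Keeping this order (the
+ratios satisfy $m_1/n_1=5\geq m_2/n_2=0.2$), the storages of the two
+steps are $1\cdot 2\cdot 2=4$ and $1\cdot 2\cdot 10=20$; with the
+reversed order they are $1\cdot 10\cdot 10=100$ and $1\cdot 10\cdot
+2=20$. The ordering with decreasing ratios $m_i/n_i$ is indeed
+cheaper.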
+
+The class |KronProdAllOptim| inherits from |KronProdAll|. A public
+method |optimizeOrder| does the reordering. The permutation is stored
+in |oper|. So, as long as |optimizeOrder| is not called, the class is
+equivalent to |KronProdAll|.
+
+@<|KronProdAllOptim| class declaration@>=
+class KronProdAllOptim : public KronProdAll {
+protected:@;
+	Permutation oper;
+public:@;
+	KronProdAllOptim(int dim)
+		: KronProdAll(dim), oper(dim) @+ {}
+	void optimizeOrder();
+	const Permutation& getPer() const
+		{@+ return oper; @+}
+};
+
+@ This class represents $I\otimes A$. We have only one reference to
+the matrix, which is set by the constructor.
+
+@<|KronProdIA| class declaration@>=
+class KronProdIA : public KronProd {
+	friend class KronProdAll;
+	const TwoDMatrix& mat;
+public:@/
+	KronProdIA(const KronProdAll& kpa)
+		: KronProd(KronProdDimens(kpa.kpd, kpa.dimen()-1)),
+		  mat(kpa.getMat(kpa.dimen()-1))
+		{}
+	void mult(const ConstTwoDMatrix& in, TwoDMatrix& out) const;
+};
+
+@ This class represents $A\otimes I$. We have only one reference to
+the matrix, which is set by the constructor.
+
+@<|KronProdAI| class declaration@>=
+class KronProdAI : public KronProd {
+	friend class KronProdIAI;
+	friend class KronProdAll;
+	const TwoDMatrix& mat;
+public:@/
+	KronProdAI(const KronProdAll& kpa)
+		: KronProd(KronProdDimens(kpa.kpd, 0)),
+		  mat(kpa.getMat(0))
+		{}
+	KronProdAI(const KronProdIAI& kpiai);
+
+	void mult(const ConstTwoDMatrix& in, TwoDMatrix& out) const;
+};
+
+@ This class represents $I\otimes A\otimes I$. We have only one
+reference to the matrix, which is set by the constructor.
+@<|KronProdIAI| class declaration@>=
+class KronProdIAI : public KronProd {
+	friend class KronProdAI;
+	friend class KronProdAll;
+	const TwoDMatrix& mat;
+public:@/
+	KronProdIAI(const KronProdAll& kpa, int i)
+		: KronProd(KronProdDimens(kpa.kpd, i)),
+		  mat(kpa.getMat(i))
+		{}
+	void mult(const ConstTwoDMatrix& in, TwoDMatrix& out) const;
+};
+
+
+@ End of {\tt kron\_prod.h} file. 
diff --git a/dynare++/tl/cc/main.web b/dynare++/tl/cc/main.web
new file mode 100644
index 0000000000000000000000000000000000000000..243e8b6844c56c1d96a6f53efe529da37313dea9
--- /dev/null
+++ b/dynare++/tl/cc/main.web
@@ -0,0 +1,380 @@
+@q $Id: main.web 2338 2009-01-14 10:40:30Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+\let\ifpdf\relax
+\input eplain
+\def\title{{\mainfont Tensor Library}}
+
+
+@i c++lib.w
+@s const_reverse_iterator int
+@s value_type int
+
+\titletrue
+\null\vfill
+\centerline{\titlefont Multidimensional Tensor Library}
+\vskip\baselineskip
+\centerline{\vtop{\hsize=10cm\leftskip=0pt plus 1fil
+  \rightskip=0pt plus 1fil\noindent
+	primary use in perturbation methods for Stochastic
+	Dynamic General Equilibrium (SDGE) models}}
+\vfill\vfill
+Copyright \copyright\ 2004 by Ondra Kamenik
+
+@*1 Library overview.
+
+The design of the library was driven by the needs of perturbation
+methods for solving Stochastic Dynamic General Equilibrium models. The
+aim of the library is not to provide an exhaustive interface to
+multidimensional linear algebra. The tensor library's main purposes
+include:
+\unorderedlist
+
+\li Define types for tensors, for a multidimensional index of a
+tensor, and types for folded and unfolded tensors. The tensors defined
+here have only one multidimensional index and one reserved
+one-dimensional index. The tensors should allow modelling of higher
+order derivatives with respect to a few vectors with different sizes
+(for example $\left[g_{y^2u^3}\right]$). The tensors should allow
+folded and unfolded storage modes and conversion between them. A
+folded tensor stores symmetric elements only once, while an unfolded
+tensor stores the data as a whole multidimensional cube.
+
+\li Define both sparse and dense tensors. We need only one particular
+type of sparse tensor. This is in contrast to dense tensors, where we
+need a much wider family of types.
+
+\li Implement the Faa Di Bruno multidimensional formula. So, the main
+purpose of the library is to implement the following step of Faa Di Bruno:
+$$\left[B_{s^k}\right]_{\alpha_1\ldots\alpha_k}
+=\left[h_{y^l}\right]_{\gamma_1\ldots\gamma_l}
+\left(\sum_{c\in M_{l,k}}
+\prod_{m=1}^l\left[g_{c_m}\right]^{\gamma_m}_{c_m(\alpha)}\right)$$
+where $s$ can be a compound vector of variables, $M_{l,k}$ is a set of
+all equivalences of $k$ element set having $l$ classes, $c_m$ is
+$m$-th class of equivalence $c$, and $c_m(\alpha)$ is a tuple of
+picked indices from $\alpha$ by class $c_m$.
+
+Note that the sparse tensors play the role of $h$ in the Faa Di Bruno,
+not of $B$ nor $g$. (A small worked example of the formula for $k=2$
+is given right after this list.)
+
+\endunorderedlist
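+
+As a small worked example of the step above, take $k=2$. For $l=1$ the
+only equivalence in $M_{1,2}$ puts both indices into one class, so the
+step reads $\left[B_{s^2}\right]=\left[h_y\right]\left[g_{s^2}\right]$;
+for $l=2$ the only equivalence has two singleton classes, so the step
+reads $\left[B_{s^2}\right]=\left[h_{y^2}\right]\left(\left[g_s\right]
+\otimes\left[g_s\right]\right)$. The full second derivative of
+$h(g(s))$ is the sum of these two steps.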
+
+The following table is a road-map to various abstractions in the library.
+
+\def\defloc#1#2{#1\hfill\break{\tt #2}}
+
+\noindent
+\halign to\hsize{%
+\vtop{\hsize=6.6cm\rightskip=0pt plus 1fil\noindent #}&
+\vtop{\advance\hsize by-6.6cm%
+      \raggedright\noindent\vrule width 0pt height 14pt #}\cr
+Class defined in & Purpose\cr
+\noalign{\hrule}\cr
+\defloc{|@<|Tensor| class declaration@>|}{tensor.hweb}&
+Virtual base class for all dense tensors, defines |index| as the
+multidimensional iterator
+\cr
+\defloc{|@<|FTensor| class declaration@>|}{tensor.hweb}&
+Virtual base class for all folded tensors
+\cr
+\defloc{|@<|UTensor| class declaration@>|}{tensor.hweb}&
+Virtual base class for all unfolded tensors
+\cr
+\defloc{|@<|FFSTensor| class declaration@>|}{fs\_tensor.hweb}&
+Class representing folded full symmetry dense tensor,
+for instance $\left[g_{y^3}\right]$
+\cr
+\defloc{|@<|FGSTensor| class declaration@>|}{gs\_tensor.hweb}&
+Class representing folded general symmetry dense tensor,
+for instance $\left[g_{y^2u^3}\right]$
+\cr
+\defloc{|@<|UFSTensor| class declaration@>|}{fs\_tensor.hweb}&
+Class representing unfolded full symmetry dense tensor,
+for instance $\left[g_{y^3}\right]$
+\cr
+\defloc{|@<|UGSTensor| class declaration@>|}{gs\_tensor.hweb}&
+Class representing unfolded general symmetry dense tensor,
+for instance $\left[g_{y^2u^3}\right]$
+\cr
+|@<|URTensor| class declaration@>|\hfill\break
+\defloc{|@<|FRTensor| class declaration@>|}{rfs\_tensor.hweb}&
+Class representing unfolded/folded full symmetry, row-orient\-ed,
+dense tensor. Row-oriented tensors are used in the Faa Di Bruno
+above as some part (a few or one column) of a product of $g$'s. Their
+fold/unfold conversions are special in that they must yield equivalent
+results when multiplied with their folded/unfolded column-oriented
+counterparts.
+\cr
+|@<|URSingleTensor| class declaration@>|\hfill\break
+\defloc{|@<|FRSingleTensor| class declaration@>|}{rfs\_tensor.hweb}&
+Class representing unfolded/folded full symmetry, row-orient\-ed,
+single column, dense tensor. Besides its use in the Faa Di Bruno, the
+single-column row-oriented tensor also models higher moments of the
+normal distribution.
+\cr
+\defloc{|@<|UPSTensor| class declaration@>|}{ps\_tensor.hweb}&
+Class representing unfolded, column-orient\-ed tensor whose symmetry
+is not that of the $\left[B_{y^2u^3}\right]$ but rather of something
+as $\left[B_{yuuyu}\right]$. This tensor evolves during the product
+operation for unfolded tensors and its basic operation is to add
+itself to a tensor with nicer symmetry, here $\left[B_{y^2u^3}\right]$.
+\cr
+\defloc{|@<|FPSTensor| class declaration@>|}{ps\_tensor.hweb}&
+Class representing partially folded, column-orient\-ed tensor who\-se
+symmetry is not that of the $\left[B_{y^3u^4}\right]$ but rather
+something as $\left[B_{yu\vert y^3u\vert u^4}\right]$, where the
+portions of symmetries represent folded dimensions which are combined
+in unfolded manner. This tensor evolves during the Faa Di Bruno
+for folded tensors and its basic operation is to add itself to a
+tensor with nicer symmetry, here folded $\left[B_{y^3u^4}\right]$.
+\cr
+\defloc{|@<|USubTensor| class declaration@>|}{pyramid\_prod.hweb}&
+Class representing unfolded full symmetry, row-orient\-ed tensor which
+contains a few columns of huge product
+$\prod_{m=1}^l\left[g_{c_m}\right]^{\gamma_m}_{c_m(\alpha)}$. This is
+needed during the Faa Di Bruno for folded matrices.
+\cr
+\defloc{|@<|IrregTensor| class declaration@>|}{pyramid2\_prod.hweb}&
+Class representing a product of columns of derivatives
+$\left[z_{y^ku^l}\right]$, where $z=[y^T,v^T,w^T]^T$. Since the first
+part of $z$ is $y$, the derivatives contain many zeros, which are not
+stored, hence the tensor's irregularity. The tensor is used when
+calculating one step of Faa Di Bruno formula, i.e.
+$\left[f_{z^l}\right]\sum\prod_{m=1}^l\left[z_{c_m}\right]^{\gamma_m}_{c_m(\alpha)}$.
+\cr
+\defloc{|@<|FSSparseTensor| class declaration@>|}{sparse\_tensor.hweb}&
+Class representing full symmetry, column-oriented, sparse tensor. It
+is able to store elements keyed by the multidimensional index, and
+multiply itself with one column of row-oriented tensor.
+\cr
+\defloc{|@<|FGSContainer| class declaration@>|}{t\_container.hweb}&
+Container of |FGSTensor|s. It implements the Faa Di Bruno with
+unfolded or folded tensor $h$ yielding folded $B$. The methods are
+|FGSContainer::multAndAdd|.
+\cr
+\defloc{|@<|UGSContainer| class declaration@>|}{t\_container.hweb}&
+Container of |UGSTensor|s. It implements the Faa Di Bruno with
+unfolded tensor $h$ yielding unfolded $B$. The method is
+|UGSContainer::multAndAdd|.
+\cr
+\defloc{|@<|StackContainerInterface| class declaration@>|}
+{stack\_container.hweb}&Virtual pure interface describing all logic
+of stacked containers for which we will do the Faa Di Bruno operation. 
+\cr
+\defloc{|@<|UnfoldedStackContainer| class declaration@>|}
+{stack\_container.hweb}&Implements the Faa Di Bruno operation for stack of
+containers of unfolded tensors.
+\cr
+\defloc{|@<|FoldedStackContainer| class declaration@>|}{stack\_container.hweb}
+&Implements the Faa Di Bruno for stack of
+containers of fold\-ed tensors.
+\cr
+\defloc{|@<|ZContainer| class declaration@>|}{stack\_container.hweb}&
+The class implements the interface |StackContainerInterface| according
+to $z$ appearing in context of SDGE models. By a simple inheritance,
+we obtain |@<|UnfoldedZContainer| class declaration@>| and also
+|@<|FoldedZContainer| class declaration@>|.
+\cr
+\defloc{|@<|GContainer| class declaration@>|}{stack\_container.hweb}&
+The class implements the interface |StackContainerInterface| according
+to $G$ appearing in context of SDGE models. By a simple inheritance,
+we obtain |@<|UnfoldedGContainer| class declaration@>| and also
+|@<|FoldedGContainer| class declaration@>|.
+\cr
+\defloc{|@<|Equivalence| class declaration@>|}{equivalence.hweb}&
+The class represents an equivalence on $n$-element set. Useful in the
+Faa Di Bruno.
+\cr
+\defloc{|@<|EquivalenceSet| class declaration@>|}{equivalence.hweb}&
+The class representing all equivalences on $n$-element set. Useful in the
+Faa Di Bruno.
+\cr
+\defloc{|@<|Symmetry| class declaration@>|}{symmetry.hweb}&
+The class defines a symmetry of a general symmetry tensor; that is, it
+defines the basic shape of the tensor. For $\left[B_{y^2u^3}\right]$,
+the symmetry is $y^2u^3$.
+\cr
+\defloc{|@<|Permutation| class declaration@>|}{permutation.hweb}&
+The class represents a permutation of $n$ indices. Useful in the
+Faa Di Bruno.
+\cr
+\defloc{|@<|IntSequence| class declaration@>|}{int\_sequence.hweb}&
+The class represents a sequence of integers. Useful everywhere.
+\cr
+|@<|TwoDMatrix| class declaration@>|\hfill\break
+\defloc{|@<|ConstTwoDMatrix| class declaration@>|}{twod\_matrix.hweb}&
+The class provides an interface to a code handling two-di\-men\-si\-onal
+matrices. The code resides in Sylvester module, in directory {\tt
+sylv/cc}. The object files from that directory need to be linked: {\tt
+GeneralMatrix.o}, {\tt Vector.o} and {\tt SylvException.o}. There is
+no similar interface to |Vector| and |ConstVector| classes from the
+Sylvester module and they are used directly.
+\cr
+\defloc{|@<|KronProdAll| class declaration@>|}{kron\_prod.hweb}&
+The class represents a Kronecker product of a sequence of arbitrary
+matrices and is able to multiply a matrix from the right without
+storing the Kronecker product in memory.
+\cr
+\defloc{|@<|KronProdAllOptim| class declaration@>|}{kron\_prod.hweb}&
+The same as |KronProdAll|, but it optimizes the order of matrices in
+the product to minimize the memory used during the Faa Di Bruno
+operation. This ordering is also close to optimal in the number of flops.
+\cr
+|@<|FTensorPolynomial| class declaration@>|\hfill\break
+\defloc{|@<|UTensorPolynomial| class declaration@>|}{t\_polynomial.hweb}&
+Abstractions representing a polynomial whose coefficients are
+folded/unfolded tensors and whose variable is a column vector. The
+classes provide methods for traditional and Horner-like polynomial
+evaluation. This is useful in simulation code.
+\cr
+|@<|FNormalMoments| class declaration@>|\hfill\break
+\defloc{|@<|UNormalMoments| class declaration@>|}{normal\_moments.hweb}&
+These are containers for folded/unfolded single column tensors for
+higher moments of normal distribution. The code contains an algorithm
+for generating the moments for arbitrary covariance matrix.
+\cr
+\defloc{|@<|TLStatic| class declaration@>|}{tl\_static.hweb}&
+The class encapsulates all static information needed for the
+library. It includes a Pascal triangle (for quick computation of
+binomial coefficients), and precalculated equivalence sets.
+\cr
+\defloc{|@<|TLException| class definition@>|}{tl\_exception.hweb}&
+Simple class thrown as an exception.
+\cr
+}
+
+@s Tensor int
+@s FTensor int
+@s UTensor int
+@s FFSTensor int
+@s UFSTensor int
+@s FGSTensor int
+@s UGSTensor int
+@s FRTensor int
+@s URTensor int
+@s FRSingleTensor int
+@s URSingleTensor int
+@s UPSTensor int
+@s UGSContainer int
+@s ZContainer int
+@s GContainer int
+@s StackContainerInterface int
+@s FoldedStackContainer int
+@s UnfoldedStackContainer int
+@s FoldedZContainer int
+@s UnfoldedZContainer int
+@s FoldedGContainer int
+@s UnfoldedGContainer int
+@s Permutation int
+@s KronProdAll int
+@s KronProdAllOptim int
+@s FTensorPolynomial int
+@s UTensorPolynomial int
+@s FNormalMoments int
+@s UNormalMoments int
+@s TLStatic int
+@s FSSparseTensor int
+@ The tensor library is multi-threaded. This means that, if the
+appropriate compilation options are set, some code is executed
+concurrently. This boosts the performance on SMP machines or single
+processors with hyper-threading support. The basic property of the
+thread implementation in the library is that we do not allow running
+more concurrent threads than a preset limit. This prevents threads
+from competing for memory in such a way that the OS constantly switches
+among threads with frequent I/O for swaps. This may occur, since a
+single thread might need a lot of memory of its own. The threading
+support allows for detached threads, and the synchronization points
+during the Faa Di Bruno operation are relatively short, so the
+resulting load is close to the preset maximum number of parallel
+threads.
+
+@ A few words about the library's test suite. The suite resides in the
+directory {\tt tl/testing}. There is a file {\tt tests.cpp} which
+contains all tests and the {\tt main()} function. Also there are files
+{\tt factory.h} and {\tt factory.cpp} implementing random generation
+of various objects. The important property of these random objects is
+that they are the same across all invocations. This is very
+important in testing and debugging. Further, one can find files {\tt
+monoms.h} and {\tt monoms.cpp}. See below for their explanation.
+
+There are a few types of tests:
+\orderedlist
+\li We test tensor indices. We go through various tensors with
+various symmetries, convert indices from folded to unfolded and
+vice-versa. We test whether their coordinates are as expected.
+\li We test the Faa Di Bruno by comparison of the results of
+|FGSContainer::multAndAdd| against the results of |UGSContainer::multAndAdd|. The two
+ implementations are pretty different, so this is a good test.
+\li We use a code in {\tt monoms.h} and {\tt monoms.cpp} to generate a
+ random vector function $f(x(y,u))$ along with derivatives of
+ $\left[f_x\right]$, $\left[x_{y^ku^l}\right]$, and
+ $\left[f_{y^ku^l}\right]$. Then we calculate the resulting derivatives
+ $\left[f_{y^ku^l}\right]$ using |multAndAdd| method of |UGSContainer|
+ or |FGSContainer| and compare the derivatives provided by {\tt
+ monoms}. The functions generated in {\tt monoms} are monomials with
+ integer exponents, so the implementation of {\tt monoms} is quite
+ easy.
+\li We do a similar thing for sparse tensors. In this case the {\tt monoms}
+ generate a function $f(y,v(y,u),w(y,u))$, provide all the derivatives
+ and the result $\left[f_{y^ku^l}\right]$. Then we calculate the
+ derivatives with |multAndAdd| of |ZContainer| and compare.
+\li We test the polynomial evaluation by evaluating a folded and
+ unfolded polynomial in traditional and horner-like fashion. This gives
+ four methods in total. The four results are compared.
+\endorderedlist
+
+
+@*1 Utilities.
+@i sthread.hweb
+@i sthread.cweb
+@i tl_exception.hweb
+@i int_sequence.hweb
+@i int_sequence.cweb
+@i twod_matrix.hweb
+@i twod_matrix.cweb
+@i kron_prod.hweb
+@i kron_prod.cweb
+
+@*1 Combinatorics.
+@i symmetry.hweb
+@i symmetry.cweb
+@i equivalence.hweb
+@i equivalence.cweb
+@i permutation.hweb
+@i permutation.cweb
+
+@*1 Tensors.
+@i tensor.hweb
+@i tensor.cweb
+@i fs_tensor.hweb
+@i fs_tensor.cweb
+@i gs_tensor.hweb
+@i gs_tensor.cweb
+@i rfs_tensor.hweb
+@i rfs_tensor.cweb
+@i ps_tensor.hweb
+@i ps_tensor.cweb
+@i sparse_tensor.hweb
+@i sparse_tensor.cweb
+
+@*1 The Faa Di Bruno formula.
+@i t_container.hweb
+@i t_container.cweb
+@i stack_container.hweb
+@i stack_container.cweb
+@i fine_container.hweb
+@i fine_container.cweb
+@i pyramid_prod.hweb
+@i pyramid_prod.cweb
+@i pyramid_prod2.hweb
+@i pyramid_prod2.cweb
+
+@*1 Miscellany.
+@i t_polynomial.hweb
+@i t_polynomial.cweb
+@i normal_moments.hweb
+@i normal_moments.cweb
+@i tl_static.hweb
+@i tl_static.cweb
+
+@*1 Index.
\ No newline at end of file
diff --git a/dynare++/tl/cc/normal_moments.cweb b/dynare++/tl/cc/normal_moments.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..6a7c81cf4b2e26ae5bd76e654911f114c6ee17c1
--- /dev/null
+++ b/dynare++/tl/cc/normal_moments.cweb
@@ -0,0 +1,115 @@
+@q $Id: normal_moments.cweb 281 2005-06-13 09:41:16Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@ Start of {\tt normal\_moments.cpp} file.
+
+@c
+#include "normal_moments.h"
+#include "permutation.h"
+#include "kron_prod.h"
+#include "tl_static.h"
+
+@<|UNormalMoments| constructor code@>;
+@<|UNormalMoments::generateMoments| code@>;
+@<|UNormalMoments::selectEquiv| code@>;
+@<|FNormalMoments| constructor code@>;
+
+@ 
+@<|UNormalMoments| constructor code@>=
+UNormalMoments::UNormalMoments(int maxdim, const TwoDMatrix& v)
+	: TensorContainer<URSingleTensor>(1)
+{
+	if (maxdim >= 2)
+	   	generateMoments(maxdim, v);
+}
+
+
+@ Here we fill up the container with the tensors for $d=2,4,6,\ldots$
+up to the given dimension. Each tensor of moments equals
+$F_n\left(\otimes^nv\right)$ and has dimension $2n$. See the header
+file for proof and details.
+
+Here we sequentially construct the Kronecker power
+$\otimes^nv$, and apply $F_n$.
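+
+For instance, for $d=4$ (that is $n=2$) this yields the familiar
+fourth moments of a zero-mean normal vector,
+$$E[u_iu_ju_ku_l]=v_{ij}v_{kl}+v_{ik}v_{jl}+v_{il}v_{jk},$$
+which are the ${4!\over 2!\cdot 2^2}=3$ pairings of the four indices.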
+
+@<|UNormalMoments::generateMoments| code@>=
+void UNormalMoments::generateMoments(int maxdim, const TwoDMatrix& v)
+{
+	TL_RAISE_IF(v.nrows() != v.ncols(),
+				"Variance-covariance matrix is not square in UNormalMoments constructor");
+
+	int nv = v.nrows();
+	URSingleTensor* mom2 = new URSingleTensor(nv, 2);
+	mom2->getData() = v.getData();
+	insert(mom2);
+	URSingleTensor* kronv = new URSingleTensor(nv, 2);
+	kronv->getData() = v.getData();
+	for (int d = 4; d <= maxdim; d+=2) {
+		URSingleTensor* newkronv = new URSingleTensor(nv, d);
+		KronProd::kronMult(ConstVector(v.getData()),
+						   ConstVector(kronv->getData()),
+						   newkronv->getData());
+		delete kronv;
+		kronv = newkronv;
+		URSingleTensor* mom = new URSingleTensor(nv, d);
+		@<apply $F_n$ to |kronv|@>;
+		insert(mom);
+	}
+	delete kronv;
+}
+
+@ Here we go through all equivalences, select only those having 2
+elements in each class, then go through all elements in |kronv| and
+add them to the permuted locations of |mom|.
+
+The permutation must be taken as the inverse of the permutation implied
+by the equivalence, since we need a permutation which, applied to the
+identity ordering of indices, yields the indices ordered by the
+equivalence classes. Note how the |Equivalence::apply| method works.
+
+@<apply $F_n$ to |kronv|@>=
+	mom->zeros();
+	const EquivalenceSet eset = ebundle.get(d);
+	for (EquivalenceSet::const_iterator cit = eset.begin();
+		 cit != eset.end(); cit++) { 
+		if (selectEquiv(*cit)) {
+			Permutation per(*cit);
+			per.inverse();
+			for (Tensor::index it = kronv->begin(); it != kronv->end(); ++it) {
+				IntSequence ind(kronv->dimen());
+				per.apply(it.getCoor(), ind);
+				Tensor::index it2(mom, ind);
+				mom->get(*it2, 0) += kronv->get(*it, 0);
+			}
+		}
+	}
+
+@ We return |true| for an equivalence each of whose classes has exactly 2 elements.
+@<|UNormalMoments::selectEquiv| code@>=
+bool UNormalMoments::selectEquiv(const Equivalence& e)
+{
+	if (2*e.numClasses() != e.getN())
+		return false;
+	for (Equivalence::const_seqit si = e.begin();
+		 si != e.end(); ++si) {
+		if ((*si).length() != 2)
+			return false;
+	}
+	return true;
+}
+
+@ Here we go through the whole unfolded container, fold each tensor,
+and insert it.
+@<|FNormalMoments| constructor code@>=
+FNormalMoments::FNormalMoments(const UNormalMoments& moms)
+	: TensorContainer<FRSingleTensor>(1)
+{
+	for (UNormalMoments::const_iterator it = moms.begin();
+		 it != moms.end(); ++it) {
+		FRSingleTensor* fm = new FRSingleTensor(*((*it).second));
+		insert(fm);
+	}
+}
+
+
+@ End of {\tt normal\_moments.cpp} file.
\ No newline at end of file
diff --git a/dynare++/tl/cc/normal_moments.hweb b/dynare++/tl/cc/normal_moments.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..8f23a6bf75371948a268b31d1d53240396c35eed
--- /dev/null
+++ b/dynare++/tl/cc/normal_moments.hweb
@@ -0,0 +1,139 @@
+@q $Id: normal_moments.hweb 148 2005-04-19 15:12:26Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@*2 Moments of normal distribution. Start of {\tt normal\_moments.h} file.
+
+Here we calculate the higher order moments of a normally distributed
+random vector $u$ with means equal to zero and a given
+variance--covariance matrix $V$, that is $u\sim N(0,V)$. The moment
+generating function for such a distribution is $f(t)=e^{{1\over 2}t^TVt}$. If
+we differentiate it with respect to $t$ and unfold the higher dimensional
+tensors row-wise, we obtain terms like
+$$\eqalign{
+{\partial\over\partial t}f(t)=&f(t)\cdot Vt\cr
+{\partial^2\over\partial t^2}f(t)=&f(t)\cdot(Vt\otimes Vt+v)\cr
+{\partial^3\over\partial t^3}f(t)=&f(t)\cdot
+  (Vt\otimes Vt\otimes Vt+P_?(v\otimes Vt)+P_?(Vt\otimes v)+v\otimes Vt)\cr
+{\partial^4\over\partial t^4}f(t)=&f(t)\cdot
+  (Vt\otimes Vt\otimes Vt\otimes Vt+S_?(v\otimes Vt\otimes Vt)+
+   S_?(Vt\otimes v\otimes Vt)+S_?(Vt\otimes Vt\otimes v)+S_?(v\otimes v))}
+$$
+where $v$ is the vectorized $V$ ($v=\hbox{vec}(V)$), and $P_?$ is a
+suitable row permutation (corresponding to a permutation of
+multidimensional indices) which permutes the tensor data, so that the
+index of the variable currently being differentiated comes last. This
+ensures that all (permuted) tensors can be summed, yielding a tensor
+whose indices have some fixed order (here we chose the order in which
+more recently differentiated variables are to the right). Finally,
+$S_?$ is a suitable sum of various $P_?$.
+
+We are interested in the $S_?$ multiplying the Kronecker powers
+$\otimes^nv$. The $S_?$ is a (possibly) multi-set of permutations of
+even order. Note that we can count the number of permutations in
+$S_?$. The above formulas for the derivatives of $f(t)$ are valid also
+for a one-dimensional $u$, and from the literature we know that its
+$2n$-th moment is ${(2n)!\over n!2^n}\sigma^{2n}$. So there are
+${(2n)!\over n!2^n}$ permutations in $S_?$.
+
+In order to find the $S_?$ we need to define a couple of
+things. First we define a sort of equivalence between the permutations
+applicable to even number of indices. We write $P_1\equiv P_2$
+whenever $P_1^{-1}\circ P_2$ permutes only whole pairs, or items
+within pairs, but not indices across the pairs. For instance the
+permutations $(0,1,2,3)$ and $(3,2,0,1)$ are equivalent, but
+$(0,2,1,3)$ is not equivalent with the two. Clearly, the $\equiv$ is
+an equivalence.
+
+This allows to define a relation $\sqsubseteq$ between the permutation
+multi-sets $S$, which is basically the subset relation $\subseteq$ but
+with respect to the equivalence $\equiv$, more formally:
+$$S_1\sqsubseteq S_2\quad\hbox{iff}\quad P\in S_1
+\Rightarrow\exists Q\in S_2:P\equiv Q$$
+This induces an equivalence $S_1\equiv S_2$.
+
+Now let $F_n$ denote a set of permutations on $2n$ indices which is
+maximal with respect to $\sqsubseteq$, and minimal with respect to
+$\equiv$. (In other words, it contains everything up to the
+equivalence $\equiv$.) It is straightforward to calculate the number of
+permutations in $F_n$: it is the total number of permutations of
+$2n$ elements divided by the number of permutations of the pairs and by
+the number of permutations within the pairs, that is ${(2n)!\over n!2^n}$.
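+
+For example, for $n=2$ the set $F_2$ has ${4!\over 2!\,2^2}=3$
+elements, corresponding to the three ways of partitioning four indices
+into pairs: $\{\{1,2\},\{3,4\}\}$, $\{\{1,3\},\{2,4\}\}$ and
+$\{\{1,4\},\{2,3\}\}$. These are exactly the three terms appearing in
+the fourth moment of a zero-mean normal vector.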
+
+We prove that $S_?\equiv F_n$. Clearly $S_?\sqsubseteq F_n$, since
+$F_n$ is maximal. In order to prove that $F_n\sqsubseteq S_?$, let us
+first note that for any permutation $P$ and for any (semi)positive
+definite matrix $V$ we have $PS_?\otimes^nv=S_?\otimes^nv$. Below we
+show that there is a positive definite matrix $V$ of some dimension
+such that for any two permutation multi-sets $S_1$, $S_2$, we have
+$$S_1\not\equiv S_2\Rightarrow S_1(\otimes^nv)\neq S_2(\otimes^nv)$$
+So it follows that for any permutation $P$, we have $PS_?\equiv
+S_?$. For the purpose of contradiction let $P\in F_n$ be a permutation
+which is not equivalent to any permutation from $S_?$. Since $S_?$ is
+non-empty, let us pick $P_0\in S_?$. Now note that
+$P_0^{-1}S_?\not\equiv P^{-1}S_?$, since the first contains the identity
+and the second does not contain a permutation equivalent to the
+identity. Thus we have $(P\circ P_0^{-1})S_?\not\equiv S_?$, which
+gives the contradiction, and we have proved that $F_n\sqsubseteq
+S_?$. Thus $F_n\equiv S_?$. Moreover, we know that $S_?$ and $F_n$
+have the same number of permutations, hence the minimality of $S_?$
+with respect to $\equiv$.
+
+Now it suffices to prove that there exists a positive definite $V$
+such that for any two permutation multi-sets $S_1$ and $S_2$ it holds
+that $S_1\not\equiv S_2\Rightarrow S_1(\otimes^nv)\neq S_2(\otimes^nv)$. If
+$V$ is an $n\times n$ matrix, then $S_1\not\equiv S_2$ implies that there
+is an identically nonzero polynomial in the elements of $V$, of order $n$
+over the integers. If $V=A^TA$ then there is an identically non-zero
+polynomial in the elements of $A$, of order $2n$. This means that we
+have to find an $n(n+1)/2$-tuple $x$ of real numbers such that all
+identically non-zero polynomials $p$ of order $2n$ over the integers yield
+$p(x)\neq 0$.
+
+The $x$ is constructed as follows: $x_i = \pi^{\log{r_i}}$, where $r_i$
+is the $i$-th prime. Let us consider the monomial $x_1^{j_1}\cdot\ldots\cdot
+x_k^{j_k}$. When the monomial is evaluated, we get
+$$\pi^{\log{r_1^{j_1}}+\ldots+\log{r_k^{j_k}}}=
+\pi^{\log{\left(r_1^{j_1}\cdot\ldots\cdot r_k^{j_k}\right)}}$$
+Now it is easy to see that if an integer combination of such terms is
+zero, then the combination must either be trivial or sum to $0$ with
+all monomials equal. Both cases imply a polynomial identically
+equal to zero. So, any non-trivial integer polynomial evaluated at $x$
+must be non-zero.
+
+So, having this result in hand, it is now straightforward to calculate
+the higher moments of the normal distribution. Here we define a
+container which does the job. In its constructor, we simply calculate
+the Kronecker powers of $v$ and apply $F_n$ to $\otimes^nv$. $F_n$ is,
+in fact, the set of all equivalences (in the sense of class
+|Equivalence|) over $2n$ elements having $n$ classes, each of them with
+exactly 2 elements.
+
+@c
+#ifndef NORMAL_MOMENTS_H
+#define NORMAL_MOMENTS_H
+
+#include "t_container.h"
+
+@<|UNormalMoments| class declaration@>;
+@<|FNormalMoments| class declaration@>;
+
+#endif
+
+@ 
+@<|UNormalMoments| class declaration@>=
+class UNormalMoments : public TensorContainer<URSingleTensor> {
+public:@;
+	UNormalMoments(int maxdim, const TwoDMatrix& v);
+private:@;
+	void generateMoments(int maxdim, const TwoDMatrix& v);
+	static bool selectEquiv( const Equivalence& e);
+};
+
+@ 
+@<|FNormalMoments| class declaration@>=
+class FNormalMoments : public TensorContainer<FRSingleTensor> {
+public:@;
+	FNormalMoments(const UNormalMoments& moms);
+};
+
+
+@ End of {\tt normal\_moments.h} file.
\ No newline at end of file
diff --git a/dynare++/tl/cc/permutation.cweb b/dynare++/tl/cc/permutation.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..011c3a5437c7890d48b3e7ea1cfb7db815b5c5b3
--- /dev/null
+++ b/dynare++/tl/cc/permutation.cweb
@@ -0,0 +1,188 @@
+@q $Id: permutation.cweb 332 2005-07-15 13:41:48Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@ Start of {\tt permutation.cweb} file.
+@c
+
+#include "permutation.h"
+#include "tl_exception.h"
+
+@<|Permutation::apply| code@>;
+@<|Permutation::inverse| code@>;
+@<|Permutation::tailIdentity| code@>;
+@<|Permutation::computeSortingMap| code@>;
+@<|PermutationSet| constructor code 1@>;
+@<|PermutationSet| constructor code 2@>;
+@<|PermutationSet| destructor code@>;
+@<|PermutationSet::getPreserving| code@>;
+@<|PermutationBundle| constructor code@>;
+@<|PermutationBundle| destructor code@>;
+@<|PermutationBundle::get| code@>;
+@<|PermutationBundle::generateUpTo| code@>;
+
+
+@ This is easy; we simply apply the map in the fashion $s\circ m$.
+@<|Permutation::apply| code@>=
+void Permutation::apply(const IntSequence& src, IntSequence& tar) const
+{
+	TL_RAISE_IF(src.size() != permap.size() || tar.size() != permap.size(),
+				"Wrong sizes of input or output in Permutation::apply");
+	for (int i = 0; i < permap.size(); i++)
+		tar[i] = src[permap[i]];
+}
+
+
+void Permutation::apply(IntSequence& tar) const
+{
+	IntSequence tmp(tar);
+	apply(tmp, tar);
+}
+
+@ 
+@<|Permutation::inverse| code@>=
+void Permutation::inverse()
+{
+	IntSequence former(permap);
+	for (int i = 0; i < size(); i++)
+		permap[former[i]] = i;
+}
+
+
+@ Here we find the number of trailing indices on which the permutation
+acts as the identity.
+
+@<|Permutation::tailIdentity| code@>=
+int Permutation::tailIdentity() const
+{
+	int i = permap.size();
+	while (i > 0 && permap[i-1] == i-1)
+		i--;
+	return permap.size() - i;
+}
+
+@ This calculates a map which corresponds to sorting in the following
+sense: $(\hbox{sorted }s)\circ m = s$, where $s$ is a given sequence.
+
+We go through |s| and find the same item in the sorted |s|. We
+construct the |permap| from the found pair of indices. We have to be
+careful not to assign the same position in the sorted |s| to two
+positions in |s|, so we maintain a bitmap |flags|, in which we remember
+the indices of the sorted |s| already assigned.
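+
+For example, for $s=(3,1,2)$ the sorted sequence is $(1,2,3)$ and the
+computed map is $m=(2,0,1)$; indeed, $(\hbox{sorted }s)\circ m$ picks
+the elements of $(1,2,3)$ at positions $2,0,1$, which gives back
+$(3,1,2)=s$.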
+
+@<|Permutation::computeSortingMap| code@>=
+void Permutation::computeSortingMap(const IntSequence& s)
+{
+	IntSequence srt(s);
+	srt.sort();
+	IntSequence flags(s.size(),0);
+
+	for (int i = 0; i < s.size(); i++) {
+		int j = 0;
+		while (j < s.size() && (flags[j] || srt[j] != s[i]))
+			j++;
+		TL_RAISE_IF(j == s.size(),
+					"Internal algorithm error in Permutation::computeSortingMap");
+		flags[j] = 1;
+		permap[i] = j;
+	}
+}
+
+@ 
+@<|PermutationSet| constructor code 1@>=
+PermutationSet::PermutationSet()
+	: order(1), size(1), pers(new const Permutation*[size])
+{
+	pers[0] = new Permutation(1);
+}
+
+@ 
+@<|PermutationSet| constructor code 2@>=
+PermutationSet::PermutationSet(const PermutationSet& sp, int n)
+	: order(n), size(n*sp.size),
+	  pers(new const Permutation*[size])
+{
+	for (int i = 0; i < size; i++)
+		pers[i] = NULL;
+
+	TL_RAISE_IF(n != sp.order+1,
+				"Wrong new order in PermutationSet constructor");
+
+	int k = 0;
+	for (int i = 0; i < sp.size; i++) {
+		for (int j = 0;	 j < order; j++,k++) {
+			pers[k] = new Permutation(*(sp.pers[i]), j);
+		}
+	}
+}
+
+@ 
+@<|PermutationSet| destructor code@>=
+PermutationSet::~PermutationSet()
+{
+	for (int i = 0; i < size; i++)
+		if (pers[i])
+			delete pers[i];
+	delete [] pers;
+}
+
+@ 
+@<|PermutationSet::getPreserving| code@>=
+vector<const Permutation*> PermutationSet::getPreserving(const IntSequence& s) const
+{
+	TL_RAISE_IF(s.size() != order,
+				"Wrong sequence length in PermutationSet::getPreserving");
+
+	vector<const Permutation*> res;
+	IntSequence tmp(s.size());
+	for (int i = 0; i < size; i++) {
+		pers[i]->apply(s, tmp);
+		if (s == tmp) {
+			res.push_back(pers[i]);
+		}
+	}
+
+	return res;
+}
+
+@ 
+@<|PermutationBundle| constructor code@>=
+PermutationBundle::PermutationBundle(int nmax)
+{
+	nmax = max(nmax, 1);
+	generateUpTo(nmax);
+}
+
+@ 
+@<|PermutationBundle| destructor code@>=
+PermutationBundle::~PermutationBundle()
+{
+	for (unsigned int i = 0; i < bundle.size(); i++)
+		delete bundle[i];
+}
+
+@ 
+@<|PermutationBundle::get| code@>=
+const PermutationSet& PermutationBundle::get(int n) const
+{
+	if (n > (int)(bundle.size()) || n < 1) {
+		TL_RAISE("Permutation set not found in PermutationSet::get");
+		return *(bundle[0]);
+	} else {
+		return *(bundle[n-1]);
+	}
+}
+
+@ 
+@<|PermutationBundle::generateUpTo| code@>=
+void PermutationBundle::generateUpTo(int nmax)
+{
+	if (bundle.size() == 0)
+		bundle.push_back(new PermutationSet());
+
+	int curmax = bundle.size();
+	for (int n = curmax+1; n <= nmax; n++) {
+		bundle.push_back(new PermutationSet(*(bundle.back()), n));
+	}
+}
+
+@ End of {\tt permutation.cweb} file.
diff --git a/dynare++/tl/cc/permutation.hweb b/dynare++/tl/cc/permutation.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..b084ffec26afa050546c61bfa45d3ebb2d952270
--- /dev/null
+++ b/dynare++/tl/cc/permutation.hweb
@@ -0,0 +1,147 @@
+@q $Id: permutation.hweb 148 2005-04-19 15:12:26Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@*2 Permutations. Start of {\tt permutation.h} file.
+
+The permutation class is useful when describing a permutation of
+indices in a permuted symmetry tensor. Such a tensor comes into
+existence, for instance, as a result of the following tensor
+multiplication:
+$$\left[g_{y^3}\right]_{\gamma_1\gamma_2\gamma_3}
+\left[g_{yu}\right]^{\gamma_1}_{\alpha_1\beta_3}
+\left[g_{yu}\right]^{\gamma_2}_{\alpha_2\beta_1}
+\left[g_u\right]^{\gamma_3}_{\beta_2}
+$$
+If this operation is done by a Kronecker product of unfolded tensors,
+the resulting tensor has permuted indices. So, in this case the
+permutation is implied by the equivalence:
+$\{\{0,4\},\{1,3\},\{2\}\}$. This results in a permutation which maps
+indices $(0,1,2,3,4)\mapsto(0,2,4,3,1)$.
+
+The other application of the |Permutation| class is to permute indices
+with the same permutation as the one applied during sorting.
+
+Here we only define an abstraction for the permutation defined by an
+equivalence. Its basic operation is to apply the permutation to an
+integer sequence. The application is right (or inner), in the sense
+that it works on the indices of the sequence, not on its items. More
+formally, $s\circ m \not=m\circ s$. Here, the application of the
+permutation defined by the map $m$ is $s\circ m$.
+
+Also, we need |PermutationSet| class which contains all permutations
+of $n$ element set, and a bundle of permutations |PermutationBundle|
+which contains all permutation sets up to a given number.
+
+@s Permutation int
+@s PermutationSet int
+@s PermutationBundle int
+
+@c
+#ifndef PERMUTATION_H
+#define PERMUTATION_H
+
+#include "int_sequence.h"
+#include "equivalence.h"
+
+#include <vector>
+
+@<|Permutation| class declaration@>;
+@<|PermutationSet| class declaration@>;
+@<|PermutationBundle| class declaration@>;
+
+#endif
+
+@ The permutation object will have a map, which defines the mapping of
+indices $(0,1,\ldots,n-1)\mapsto(m_0,m_1,\ldots, m_{n-1})$. The map is
+the sequence $(m_0,m_1,\ldots, m_{n-1})$. When the permutation with the
+map $m$ is applied to a sequence $s$, it permutes its indices:
+$s\circ\hbox{id}\mapsto s\circ m$.
+
+So we have one constructor from an equivalence, then a method |apply|,
+and finally a method |tailIdentity| which returns the number of trailing
+indices on which the permutation acts as the identity. Also we have a
+constructor calculating the map which corresponds to the permutation
+implied by sorting. That is, we want $(\hbox{sorted }s)\circ m = s$.
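+
+For illustration only (the sequences are filled element by element,
+since an initializer-list constructor for |IntSequence| is not assumed
+here), the sorting-map constructor could be used as follows:
+
+	IntSequence s(3);
+	s[0] = 3; s[1] = 1; s[2] = 2;
+	Permutation p(s);       // sorting map, p.getMap() is (2,0,1)
+	IntSequence srt(s);
+	srt.sort();             // srt is (1,2,3)
+	IntSequence back(3);
+	p.apply(srt, back);     // back is (3,1,2), i.e. equal to s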
+
+@<|Permutation| class declaration@>=
+class Permutation {
+protected:@;
+	IntSequence permap;
+public:@;
+	Permutation(int len)
+		: permap(len) {@+ for (int i = 0; i < len; i++) permap[i] = i;@+}
+	Permutation(const Equivalence& e)
+		: permap(e.getN()) {@+ e.trace(permap);@+}
+	Permutation(const Equivalence& e, const Permutation& per)
+		: permap(e.getN()) {@+ e.trace(permap, per);@+}
+	Permutation(const IntSequence& s)
+		: permap(s.size()) {@+ computeSortingMap(s);@+};
+	Permutation(const Permutation& p)
+		: permap(p.permap)@+ {}
+	Permutation(const Permutation& p1, const Permutation& p2)
+		: permap(p2.permap) {@+ p1.apply(permap);@+}
+	Permutation(const Permutation& p, int i)
+		: permap(p.size(), p.permap, i)@+ {}
+	const Permutation& operator=(const Permutation& p)
+		{@+ permap = p.permap;@+ return *this;@+}
+	bool operator==(const Permutation& p)
+		{@+ return permap == p.permap;@+}
+	int size() const
+		{@+ return permap.size();@+}
+	void print() const
+		{@+ permap.print();@+}
+	void apply(const IntSequence& src, IntSequence& tar) const;
+	void apply(IntSequence& tar) const;
+	void inverse();
+	int tailIdentity() const;
+	const IntSequence& getMap() const
+		{@+ return permap;@+}
+	IntSequence& getMap()
+		{@+ return permap;@+}
+protected:@;
+	void computeSortingMap(const IntSequence& s);
+};
+
+
+@ The |PermutationSet| maintains an array of all permutations. The
+default constructor constructs the permutation set over a one-element
+set, containing the single (identity) permutation. The second
+constructor constructs a new permutation set over $n$ elements from all
+permutations over $n-1$ elements. The parameter $n$ is redundant, but
+it serves to distinguish the constructor from a copy constructor, which
+is not provided.
+
+The method |getPreserving| returns a subset of the permutations
+which leave the given sequence invariant. These are all
+permutations $p$ yielding $s\circ p = s$, where $s$ is the given
+sequence.
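+
+For example, for the sequence $s=(5,5,7)$ only $2$ of the $3!=6$
+permutations preserve $s$: the identity and the permutation swapping
+the first two indices.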
+
+@<|PermutationSet| class declaration@>=
+class PermutationSet {
+	int order;
+	int size;
+	const Permutation** const pers;
+public:@;
+	PermutationSet();
+	PermutationSet(const PermutationSet& ps, int n);
+	~PermutationSet();
+	int getNum() const
+		{@+ return size;@+}
+	const Permutation& get(int i) const
+		{@+ return *(pers[i]);@+}
+	vector<const Permutation*> getPreserving(const IntSequence& s) const;
+};
+
+
+@ The permutation bundle encapsulates all permutation sets up to some
+given dimension.
+
+@<|PermutationBundle| class declaration@>=
+class PermutationBundle {
+	vector<PermutationSet*> bundle;
+public:@;
+	PermutationBundle(int nmax);
+	~PermutationBundle(); 
+	const PermutationSet& get(int n) const;
+	void generateUpTo(int nmax);
+};
+
+@ End of {\tt permutation.h} file.
\ No newline at end of file
diff --git a/dynare++/tl/cc/ps_tensor.cweb b/dynare++/tl/cc/ps_tensor.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..3857adf81bb76da7bb3688d34d632f612818c072
--- /dev/null
+++ b/dynare++/tl/cc/ps_tensor.cweb
@@ -0,0 +1,422 @@
+@q $Id: ps_tensor.cweb 148 2005-04-19 15:12:26Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@ Start of {\tt ps\_tensor.cpp} file.
+@c
+#include "ps_tensor.h"
+#include "fs_tensor.h"
+#include "tl_exception.h"
+#include "tl_static.h"
+#include "stack_container.h"
+
+@<|UPSTensor::decideFillMethod| code@>;
+@<|UPSTensor| slicing constructor code@>;
+@<|UPSTensor| increment and decrement@>;
+@<|UPSTensor::fold| code@>;
+@<|UPSTensor::getOffset| code@>;
+@<|UPSTensor::addTo| folded code@>;
+@<|UPSTensor::addTo| unfolded code@>;
+@<|UPSTensor::tailIdentitySize| code@>;
+@<|UPSTensor::fillFromSparseOne| code@>;
+@<|UPSTensor::fillFromSparseTwo| code@>;
+@<|PerTensorDimens2::setDimensionSizes| code@>;
+@<|PerTensorDimens2::calcOffset| code@>;
+@<|PerTensorDimens2::print| code@>;
+@<|FPSTensor::increment| code@>;
+@<|FPSTensor::decrement| code@>;
+@<|FPSTensor::unfold| code@>;
+@<|FPSTensor::getOffset| code@>;
+@<|FPSTensor::addTo| code@>;
+@<|FPSTensor| sparse constructor@>;
+
+@ Here we decide which method to use for filling a slice in the slicing
+constructor. A few experiments suggest that if the tensor is
+more than 8\% filled, the first method (|fillFromSparseOne|) is
+better. For fill factors less than 1\%, the second can be 3 times
+faster.
+
+@<|UPSTensor::decideFillMethod| code@>=
+UPSTensor::fill_method UPSTensor::decideFillMethod(const FSSparseTensor& t)
+{
+	if (t.getFillFactor() > 0.08)
+		return first;
+	else
+		return second;
+}
+
+@ Here we make a slice. We decide what fill method to use and set it.
+ 
+@<|UPSTensor| slicing constructor code@>=
+UPSTensor::UPSTensor(const FSSparseTensor& t, const IntSequence& ss,
+					 const IntSequence& coor, const PerTensorDimens& ptd)
+	: UTensor(along_col, ptd.getNVX(),
+			  t.nrows(), ptd.calcUnfoldMaxOffset(), ptd.dimen()),
+	  tdims(ptd)
+{
+	TL_RAISE_IF(coor.size() != t.dimen(),
+				"Wrong coordinates length of stacks for UPSTensor slicing constructor");
+	TL_RAISE_IF(ss.sum() != t.nvar(),
+				"Wrong length of stacks for UPSTensor slicing constructor");
+
+	if (first == decideFillMethod(t))
+		fillFromSparseOne(t, ss, coor);
+	else
+		fillFromSparseTwo(t, ss, coor);
+}
+
+ 
+@ 
+@<|UPSTensor| increment and decrement@>=
+void UPSTensor::increment(IntSequence& v) const
+{
+	TL_RAISE_IF(v.size() != dimen(),
+				"Wrong input/output vector size in UPSTensor::increment");
+
+	UTensor::increment(v, tdims.getNVX());
+}
+
+void UPSTensor::decrement(IntSequence& v) const
+{
+	TL_RAISE_IF(v.size() != dimen(),
+				"Wrong input/output vector size in UPSTensor::decrement");
+
+	UTensor::decrement(v, tdims.getNVX());
+}
+
+@ 
+@<|UPSTensor::fold| code@>=
+FTensor& UPSTensor::fold() const
+{
+	TL_RAISE("Never should come to this place in UPSTensor::fold");
+	FFSTensor* nothing = new FFSTensor(0,0,0);
+	return *nothing;
+}
+
+
+@ 
+@<|UPSTensor::getOffset| code@>=
+int UPSTensor::getOffset(const IntSequence& v) const
+{
+	TL_RAISE_IF(v.size() != dimen(),
+				"Wrong input vector size in UPSTensor::getOffset");
+
+	return UTensor::getOffset(v, tdims.getNVX());
+}
+
+@ 
+@<|UPSTensor::addTo| folded code@>=
+void UPSTensor::addTo(FGSTensor& out) const
+{
+	TL_RAISE_IF(out.getDims() != tdims,
+				"Tensors have incompatible dimens in UPSTensor::addTo");
+	for (index in = out.begin(); in != out.end(); ++in) {
+		IntSequence vtmp(dimen());
+		tdims.getPer().apply(in.getCoor(), vtmp);
+		index tin(this, vtmp);
+		out.addColumn(*this, *tin, *in);
+	}
+}
+
+@ Here we have to add this permuted symmetry unfolded tensor to an
+unfolded, non-permuted tensor. One easy way would be to go through the
+target tensor, permute each index, and add the column.
+
+However, it may happen that the permutation has a non-empty
+identity tail. In this case, we can add not only individual columns
+but much bigger data chunks, which is usually more
+efficient. Therefore, the code is quite dirty, because we do not have
+an iterator which iterates over the tensor at some higher level, so we
+simulate it by the following code.
+
+First we set |cols| to the length of the data chunk and |off| to its
+dimension. Then we need a front part of |nvmax| of |out|, which is
+|nvmax_part|. Our iterator here is an integer sequence |outrun| with
+full length, and |outrun_part| its front part. The |outrun| is
+initialized to zeros. In each step we need to increment |outrun|
+|cols|-times, this is done by incrementing its prefix |outrun_part|.
+
+So we loop over all |cols|-wide partitions of |out|, and permute
+|outrun| to obtain |perrun|, which gives the corresponding column of
+this matrix (note that the trailing part of |perrun| is the same as
+that of |outrun|). Then we construct the submatrices, add them, and
+increment |outrun|.
+
+@<|UPSTensor::addTo| unfolded code@>=
+void UPSTensor::addTo(UGSTensor& out) const
+{
+	TL_RAISE_IF(out.getDims() != tdims,
+				"Tensors have incompatible dimens in UPSTensor::addTo");
+	int cols = tailIdentitySize();
+	int off = tdims.tailIdentity();
+	IntSequence outrun(out.dimen(), 0);
+	IntSequence outrun_part(outrun, 0, out.dimen()-off);
+	IntSequence nvmax_part(out.getDims().getNVX(), 0, out.dimen()-off);
+	for (int out_col = 0; out_col < out.ncols(); out_col+=cols) {
+		// permute |outrun|
+		IntSequence perrun(out.dimen());
+		tdims.getPer().apply(outrun, perrun);
+		index from(this, perrun);
+		// construct submatrices
+		ConstTwoDMatrix subfrom(*this, *from, cols);
+		TwoDMatrix subout(out, out_col, cols);
+		// add
+		subout.add(1, subfrom);
+		// increment |outrun| by cols
+		UTensor::increment(outrun_part, nvmax_part);
+	}
+}
+
+
+@ This returns a product of all items in |nvmax| which make up the
+trailing identity part.
+
+@<|UPSTensor::tailIdentitySize| code@>=
+int UPSTensor::tailIdentitySize() const
+{
+	return tdims.getNVX().mult(dimen()-tdims.tailIdentity(), dimen());
+}
+
+@ This fill method is pretty dumb. We go through all columns of |this|
+tensor, translate the coordinates to those of the sparse tensor, sort
+them, and look the item up in the sparse tensor. There are many
+unsuccessful lookups for a really sparse tensor, which is why the
+second method works better for really sparse tensors.
+ 
+@<|UPSTensor::fillFromSparseOne| code@>=
+void UPSTensor::fillFromSparseOne(const FSSparseTensor& t, const IntSequence& ss,
+								  const IntSequence& coor)
+{
+	IntSequence cumtmp(ss.size());
+	cumtmp[0] = 0;
+	for (int i = 1; i < ss.size(); i++)
+		cumtmp[i] = cumtmp[i-1] + ss[i-1];
+	IntSequence cum(coor.size());
+	for (int i = 0; i < coor.size(); i++)
+		cum[i] = cumtmp[coor[i]];
+
+ 	zeros();
+	for (Tensor::index run = begin(); run != end(); ++run) {
+		IntSequence c(run.getCoor());
+		c.add(1, cum);
+		c.sort();
+		FSSparseTensor::const_iterator sl = t.getMap().lower_bound(c);
+		if (sl != t.getMap().end()) {
+			FSSparseTensor::const_iterator su = t.getMap().upper_bound(c);
+			for (FSSparseTensor::const_iterator srun = sl; srun != su; ++srun)
+				get((*srun).second.first, *run) = (*srun).second.second;
+		}
+	}
+}
+
+@ This is the second way of filling the slice. For instance, let the
+slice correspond to partitions $abac$. Here we first calculate the
+lower and upper bounds for the index of the sparse tensor for the
+slice. These are |lb_srt| and |ub_srt| respectively; they correspond
+to the ordering $aabc$. Then we go through that interval and select the
+items which really lie between the bounds. Then we take the index and
+subtract the lower bound to convert it to the coordinates of the
+slice. We get something like $(i_a,j_a,k_b,l_c)$. Then we apply the
+inverse of the sorting permutation $abac\mapsto aabc$ to get the index
+$(i_a,k_b,j_a,l_c)$. Recall that the slice is unfolded, so we have to
+apply all permutations preserving the stack coordinates $abac$. In our
+case we get the list of indices $(i_a,k_b,j_a,l_c)$ and
+$(j_a,k_b,i_a,l_c)$. For all of them we copy the item of the sparse
+tensor to the appropriate column.
+ 
+@<|UPSTensor::fillFromSparseTwo| code@>=
+void UPSTensor::fillFromSparseTwo(const FSSparseTensor& t, const IntSequence& ss,
+								  const IntSequence& coor)
+{
+	IntSequence coor_srt(coor);
+	coor_srt.sort();
+	IntSequence cum(ss.size());
+	cum[0] = 0;
+	for (int i = 1; i < ss.size(); i++)
+		cum[i] = cum[i-1] + ss[i-1];
+	IntSequence lb_srt(coor.size());
+	IntSequence ub_srt(coor.size());
+	for (int i = 0; i < coor.size(); i++) {
+		lb_srt[i] = cum[coor_srt[i]];
+		ub_srt[i] = cum[coor_srt[i]] + ss[coor_srt[i]] - 1;
+	}
+
+	const PermutationSet& pset = tls.pbundle->get(coor.size());
+	vector<const Permutation*> pp = pset.getPreserving(coor);
+
+	Permutation unsort(coor);
+	zeros();
+	FSSparseTensor::const_iterator lbi = t.getMap().lower_bound(lb_srt);
+	FSSparseTensor::const_iterator ubi = t.getMap().upper_bound(ub_srt);
+	for (FSSparseTensor::const_iterator run = lbi; run != ubi; ++run) {
+		if (lb_srt.lessEq((*run).first) && (*run).first.lessEq(ub_srt)) {
+			IntSequence c((*run).first);
+			c.add(-1, lb_srt);
+			unsort.apply(c);
+			for (unsigned int i = 0; i < pp.size(); i++) {
+				IntSequence cp(coor.size());
+				pp[i]->apply(c, cp);
+				Tensor::index ind(this, cp);
+				TL_RAISE_IF(*ind < 0 || *ind >= ncols(),
+							"Internal error in slicing constructor of UPSTensor");
+				get((*run).second.first, *ind) = (*run).second.second;
+			}
+		}
+	}
+}
+
+
+@ Here we calculate the maximum offsets in each folded dimension
+(dimension sizes, hence |ds|).
+
+@<|PerTensorDimens2::setDimensionSizes| code@>=
+void PerTensorDimens2::setDimensionSizes()
+{
+	const IntSequence& nvs = getNVS();
+	for (int i = 0; i < numSyms(); i++) {
+		TensorDimens td(syms[i], nvs);
+		ds[i] = td.calcFoldMaxOffset();
+	}
+}
+
+@ If there are two folded dimensions, the overall offset is the offset
+within the second plus the offset within the first times the maximum
+offset of the second. If there are $n+1$ dimensions, the overall offset
+is the offset within the last dimension plus the offset in the first $n$
+dimensions multiplied by the maximum offset of the last
+dimension. This is exactly what the following code does.
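+
+In other words, writing $o_i$ for the offset within the $i$-th folded
+dimension and $d_i$ for its maximum offset, the overall offset is the
+Horner-like expression
+$$\bigl(\cdots(o_1\cdot d_2+o_2)\cdot d_3+\cdots\bigr)\cdot d_s+o_s,$$
+where $s$ is the number of folded dimensions.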
+
+@<|PerTensorDimens2::calcOffset| code@>=
+int PerTensorDimens2::calcOffset(const IntSequence& coor) const
+{
+	TL_RAISE_IF(coor.size() != dimen(),
+				"Wrong length of coordinates in PerTensorDimens2::calcOffset");
+	IntSequence cc(coor);
+	int ret = 0;
+	int off = 0;
+	for (int i = 0; i < numSyms(); i++) {
+		TensorDimens td(syms[i], getNVS());
+		IntSequence c(cc, off, off+syms[i].dimen());
+		int a = td.calcFoldOffset(c);
+		ret = ret*ds[i] + a;
+		off += syms[i].dimen();
+	}
+	return ret;
+}
+
+@ 
+@<|PerTensorDimens2::print| code@>=
+void PerTensorDimens2::print() const
+{
+	printf("nvmax: "); nvmax.print();
+	printf("per:   "); per.print();
+	printf("syms:  "); syms.print();
+	printf("dims:  "); ds.print();
+}
+
+@ Here we increment the given integer sequence. It corresponds to
+|UTensor::increment| of the whole sequence, followed by a partial
+monotonization of the subsequences with respect to the
+symmetry of each dimension.
+
+@<|FPSTensor::increment| code@>=
+void FPSTensor::increment(IntSequence& v) const
+{
+	TL_RAISE_IF(v.size() != dimen(),
+				"Wrong length of coordinates in FPSTensor::increment");
+	UTensor::increment(v, tdims.getNVX());
+	int off = 0;
+	for (int i = 0; i < tdims.numSyms(); i++) {
+		IntSequence c(v, off, off+tdims.getSym(i).dimen());
+		c.pmonotone(tdims.getSym(i));
+		off += tdims.getSym(i).dimen();
+	}
+}
+
+
+@ 
+@<|FPSTensor::decrement| code@>=
+void FPSTensor::decrement(IntSequence& v) const
+{
+	TL_RAISE("FPSTensor::decrement not implemented");
+}
+
+@ 
+@<|FPSTensor::unfold| code@>=
+UTensor& FPSTensor::unfold() const
+{
+	TL_RAISE("Unfolding of FPSTensor not implemented");
+	UFSTensor* nothing = new UFSTensor(0,0,0);
+	return *nothing;
+}
+
+@ We only call |calcOffset| of the |PerTensorDimens2|.
+@<|FPSTensor::getOffset| code@>=
+int FPSTensor::getOffset(const IntSequence& v) const
+{
+	return tdims.calcOffset(v);
+}
+
+@ Here we add the tensor to |out|. We go through all columns of
+|out|, apply the permutation to get the index in this tensor, and add
+the column. Note that even if the permutation is the identity, the
+dimensions of the two tensors might not be the same (since this tensor
+is partially folded).
+
+@<|FPSTensor::addTo| code@>=
+void FPSTensor::addTo(FGSTensor& out) const
+{
+	for (index tar = out.begin(); tar != out.end(); ++tar) {
+		IntSequence coor(dimen());
+		tdims.getPer().apply(tar.getCoor(), coor);
+		index src(this, coor);
+		out.addColumn(*this, *src, *tar);
+	}
+}
+
+@ Here is the constructor which multiplies the Kronecker product with
+the general symmetry sparse tensor |GSSparseTensor|. The main idea is
+to go through the items in the sparse tensor (each item selects rows in
+the matrices of the Kronecker product), then to Kronecker-multiply
+the rows, multiply by the item, and add the resulting row to
+the appropriate row of the resulting |FPSTensor|.
+
+The realization of this idea is a bit more complicated, since we have
+to go through all items, and each item must be added as many times as
+it has symmetric elements. Moreover, the permutations shuffle the
+order of rows in their Kronecker product.
+
+So, we go through all unfolded indices in a tensor with the same
+dimensions as the |GSSparseTensor| (sparse slice). For each such index
+we calculate its folded version (which corresponds to the ordering of
+subsequences within symmetries), we test whether there is an item in the
+sparse slice with these coordinates, and if there is, we construct the
+Kronecker product of the rows, go through all items with these
+coordinates, and add to the appropriate rows of |this| tensor.
+
+@<|FPSTensor| sparse constructor@>=
+FPSTensor::FPSTensor(const TensorDimens& td, const Equivalence& e, const Permutation& p,
+					 const GSSparseTensor& a, const KronProdAll& kp)
+	: FTensor(along_col, PerTensorDimens(td, Permutation(e, p)).getNVX(),
+			  a.nrows(), kp.ncols(), td.dimen()),
+	  tdims(td, e, p)
+{
+	zeros();
+
+	UGSTensor dummy(0, a.getDims());
+	for (Tensor::index run = dummy.begin(); run != dummy.end(); ++run) {
+		Tensor::index fold_ind = dummy.getFirstIndexOf(run);
+		const IntSequence& c = fold_ind.getCoor();
+		GSSparseTensor::const_iterator sl = a.getMap().lower_bound(c);
+		if (sl != a.getMap().end()) {
+			Vector* row_prod = kp.multRows(run.getCoor());
+			GSSparseTensor::const_iterator su = a.getMap().upper_bound(c);
+			for (GSSparseTensor::const_iterator srun = sl; srun != su; ++srun) {
+				Vector out_row((*srun).second.first, *this);
+				out_row.add((*srun).second.second, *row_prod);
+			}
+			delete row_prod;
+		}
+	}
+}
+
+
+@ End of {\tt ps\_tensor.cpp} file.
diff --git a/dynare++/tl/cc/ps_tensor.hweb b/dynare++/tl/cc/ps_tensor.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..28b550f1ce9f91b230b3efca3db66bda2f3261ea
--- /dev/null
+++ b/dynare++/tl/cc/ps_tensor.hweb
@@ -0,0 +1,351 @@
+@q $Id: ps_tensor.hweb 741 2006-05-09 11:12:46Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@*2 Even more general symmetry tensor. Start of {\tt ps\_tensor.h} file.
+
+Here we define an abstraction for a tensor which has a general
+symmetry, but the symmetry is not of the kind modelled by
+|Symmetry|. This kind of tensor comes into existence when we evaluate
+something like:
+$$\left[B_{y^2u^3}\right]_{\alpha_1\alpha_2\beta_1\beta_2\beta_3}=
+\cdots+\left[g_{y^3}\right]_{\gamma_1\gamma_2\gamma_3}
+\left[g_{yu}\right]^{\gamma_1}_{\alpha_1\beta_3}
+\left[g_{yu}\right]^{\gamma_2}_{\alpha_2\beta_1}
+\left[g_u\right]^{\gamma_3}_{\beta_2}+\cdots
+$$ 
+If the tensors are unfolded, we obtain a tensor
+$$g_{y^3}\cdot\left(g_{yu}\otimes g_{yu}\otimes g_{u}\right)$$
+
+Obviously, this tensor can have a symmetry not compatible with the
+ordering $\alpha_1\alpha_2\beta_1\beta_2\beta_3$ (in other words, not
+compatible with the symmetry $y^2u^3$). In fact, the indices are permuted.
+
+This kind of tensor must be added to $\left[B_{y^2u^3}\right]$. Its
+dimensions are the same as of $\left[B_{y^2u^3}\right]$, but some
+coordinates are permuted. The addition is the only action we need to
+do with the tensor.
+
+Another application where this permuted symmetry tensor appears is a
+slice of a fully symmetric tensor. If the symmetric dimension of the
+tensor is partitioned into contiguous parts, and we are interested only
+in data with a given (permuted) symmetry of the partitions, then we
+have the permuted symmetry tensor. For instance, if $x$ is partitioned
+as $x=[a,b,c,d]$, then for a tensor $\left[f_{x^3}\right]$ one can take a
+slice (subtensor) $\left[f_{aca}\right]$. The data of this tensor are a
+permutation of the data of $\left[f_{a^2c}\right]$.
+
+Here we also define the folded version of permuted symmetry tensor. It
+has permuted symmetry and is partially folded. One can imagine it as a
+product of a few dimensions, each of them is folded and having a few
+variables. The underlying variables are permuted. The product of such
+dimensions is described by |PerTensorDimens2|. The tensor holding the
+underlying data is |FPSTensor|.
+
+@s SortIntSequence int
+@s PerTensorDimens int
+@s UPSTensor int
+@s PerTensorDimens2 int
+@s FPSTensor int
+@s KronProdFoldStacks int
+
+@c
+
+#ifndef PS_TENSOR_H
+#define PS_TENSOR_H
+
+#include "tensor.h"
+#include "gs_tensor.h"
+#include "equivalence.h"
+#include "permutation.h"
+#include "kron_prod.h"
+#include "sparse_tensor.h"
+
+@<|SortIntSequence| class declaration@>;
+@<|PerTensorDimens| class declaration@>;
+@<|UPSTensor| class declaration@>;
+@<|PerTensorDimens2| class declaration@>;
+@<|FPSTensor| class declaration@>;
+
+#endif
+
+@ This is just a helper class for ordering a sequence on the call stack.
+
+@<|SortIntSequence| class declaration@>=
+class SortIntSequence : public IntSequence {
+public:@;
+	SortIntSequence(const IntSequence& s)
+		: IntSequence(s) {@+ sort();@+}
+};
+
+
+@ Here we declare a class describing dimensions of permuted symmetry
+tensor. It inherits from |TensorDimens| and adds a permutation which
+permutes |nvmax|. It has two constructors, each corresponds to a
+context where the tensor appears.
+
+The first constructor calculates the permutation from a given equivalence.
+
+The second constructor corresponds to dimensions of a slice. Let us
+take $\left[f_{aca}\right]$ as an example. First it calculates the
+|TensorDimens| of $\left[f_{a^2c}\right]$, then it calculates a
+permutation corresponding to the ordering of $aca$ to $a^2c$, and applies
+this permutation to the dimensions in the same way as the first
+constructor. The constructor takes only the stack sizes (lengths of $a$,
+$b$, $c$, and $d$), and the coordinates of the picked partitions.
+
+Note that the inherited methods |calcUnfoldColumns| and |calcFoldColumns|
+work, since the number of columns is independent of the permutation, and
+|calcFoldColumns| does not use the changed |nvmax|, it uses |nvs|, so it
+is OK.
+
+@<|PerTensorDimens| class declaration@>=
+class PerTensorDimens : public TensorDimens {
+protected:@;
+	Permutation per;
+public:@;
+	PerTensorDimens(const Symmetry& s, const IntSequence& nvars,
+					const Equivalence& e)
+		: TensorDimens(s, nvars), per(e)
+		{@+ per.apply(nvmax);@+}
+	PerTensorDimens(const TensorDimens& td, const Equivalence& e)
+		: TensorDimens(td), per(e)
+		{@+ per.apply(nvmax);@+}
+	PerTensorDimens(const TensorDimens& td, const Permutation& p)
+		: TensorDimens(td), per(p)
+		{@+ per.apply(nvmax);@+}
+	PerTensorDimens(const IntSequence& ss, const IntSequence& coor)
+		: TensorDimens(ss, SortIntSequence(coor)), per(coor)
+		{@+ per.apply(nvmax);@+}
+	PerTensorDimens(const PerTensorDimens& td)
+		: TensorDimens(td), per(td.per)@+ {}
+	const PerTensorDimens& operator=(const PerTensorDimens& td)
+		{@+ TensorDimens::operator=(td);@+ per = td.per;@+ return *this;@+}
+	bool operator==(const PerTensorDimens& td)
+		{@+ return TensorDimens::operator==(td) && per == td.per;@+}
+	int tailIdentity() const
+		{@+ return per.tailIdentity();@+}
+	const Permutation& getPer() const
+		{@+ return per;@+}
+};
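+
+@ The permutation used by the slicing constructor (the one reordering
+$aca$ to $a^2c$) is just the permutation that stably sorts the picked
+coordinates. The following is a minimal standalone sketch of deriving
+such a permutation in plain standard C++; it is an illustration only,
+not the library's |Permutation| class, and it uses the same assumed
+convention as the earlier sketch (position |i| takes the element at
+position |idx[i]|).
+
+	#include <algorithm>
+	#include <cstdio>
+	#include <vector>
+
+	// Return indices idx such that coor[idx[0]] <= coor[idx[1]] <= ...,
+	// i.e. the permutation that sorts the coordinate sequence.
+	std::vector<int> sorting_permutation(const std::vector<int>& coor)
+	{
+		std::vector<int> idx(coor.size());
+		for (int i = 0; i < (int)idx.size(); i++)
+			idx[i] = i;
+		std::stable_sort(idx.begin(), idx.end(),
+		                 [&coor](int a, int b) { return coor[a] < coor[b]; });
+		return idx;
+	}
+
+	int main()
+	{
+		// coordinates of the slice f_{aca}: stacks 0 (a), 2 (c), 0 (a)
+		std::vector<int> coor = {0, 2, 0};
+		for (int i : sorting_permutation(coor))
+			printf("%d ", i);   // prints: 0 2 1 (aca reordered to a^2 c)
+		printf("\n");
+		return 0;
+	}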
+
+@ Here we declare the permuted symmetry unfolded tensor. It has
+|PerTensorDimens| as a member. It inherits from |UTensor|, which
+requires us to implement the |fold| method. There is no folded counterpart,
+so in our implementation we raise an unconditional exception and return
+a dummy object (just to make it compilable without warnings).
+
+The class has two sorts of constructors corresponding to the contexts where it
+appears. The first constructs the object from a given matrix and a
+Kronecker product. Within the constructor, all the calculations are
+performed. Also we need to define the dimensions; these are the same as those of
+the resulting matrix (in our example $\left[B_{y^2u^3}\right]$) but
+permuted. The permutation is done in the |PerTensorDimens| constructor.
+
+The second type of constructor is slicing. It makes a slice from
+|FSSparseTensor|. The slice is given by stack sizes, and coordinates of
+picked stacks.
+
+There are two algorithms for filling a slice of a sparse tensor. The
+first, |fillFromSparseOne|, works well for denser tensors, the
+second, |fillFromSparseTwo|, is better for very sparse tensors. We
+provide a static method which decides which of the two algorithms is
+better.
+
+@<|UPSTensor| class declaration@>=
+class UPSTensor : public UTensor {
+	const PerTensorDimens tdims;
+public:@;
+	@<|UPSTensor| constructors from Kronecker product@>;
+	UPSTensor(const FSSparseTensor& t, const IntSequence& ss,
+			  const IntSequence& coor, const PerTensorDimens& ptd);
+	UPSTensor(const UPSTensor& ut)
+		: UTensor(ut), tdims(ut.tdims)@+ {}
+
+	void increment(IntSequence& v) const;
+	void decrement(IntSequence& v) const;
+	FTensor& fold() const;
+
+	int getOffset(const IntSequence& v) const;
+	void addTo(FGSTensor& out) const;
+	void addTo(UGSTensor& out) const;
+
+	enum fill_method {first, second};
+	static fill_method decideFillMethod(const FSSparseTensor& t);
+private:@;
+	int tailIdentitySize() const;
+	void fillFromSparseOne(const FSSparseTensor& t, const IntSequence& ss,
+						   const IntSequence& coor);
+	void fillFromSparseTwo(const FSSparseTensor& t, const IntSequence& ss,
+						   const IntSequence& coor);
+};
+
+@ Here we have four constructors making an |UPSTensor| from a product
+of a matrix and a Kronecker product. The first constructs the tensor from
+equivalence classes of the given equivalence in an order given by the
+equivalence. The second does the same but with the optimized
+|KronProdAllOptim|, which has a different order of matrices than that given
+by the classes in the equivalence. This permutation is projected to
+the permutation of the |UPSTensor|. The third is the same as the
+first, but the classes of the equivalence are permuted by the given
+permutation. Finally, the fourth is the most general combination. It
+allows for a permutation of equivalence classes, and for an optimized
+|KronProdAllOptim| which permutes the permuted equivalence classes.
+
+@<|UPSTensor| constructors from Kronecker product@>=
+	UPSTensor(const TensorDimens& td, const Equivalence& e,
+			  const ConstTwoDMatrix& a, const KronProdAll& kp)
+		: UTensor(along_col, PerTensorDimens(td, e).getNVX(),
+				  a.nrows(), kp.ncols(), td.dimen()), tdims(td, e)
+		{@+ kp.mult(a, *this);@+}
+	UPSTensor(const TensorDimens& td, const Equivalence& e,
+			  const ConstTwoDMatrix& a, const KronProdAllOptim& kp)
+		: UTensor(along_col, PerTensorDimens(td, Permutation(e, kp.getPer())).getNVX(),
+				  a.nrows(), kp.ncols(), td.dimen()), tdims(td, Permutation(e, kp.getPer()))
+		{@+ kp.mult(a, *this);@+}
+	UPSTensor(const TensorDimens& td, const Equivalence& e, const Permutation& p,
+			  const ConstTwoDMatrix& a, const KronProdAll& kp)
+		: UTensor(along_col, PerTensorDimens(td, Permutation(e, p)).getNVX(),
+				  a.nrows(), kp.ncols(), td.dimen()), tdims(td, Permutation(e, p))
+		{@+ kp.mult(a, *this);@+}
+	UPSTensor(const TensorDimens& td, const Equivalence& e, const Permutation& p,
+			  const ConstTwoDMatrix& a, const KronProdAllOptim& kp)
+		: UTensor(along_col, PerTensorDimens(td, Permutation(e, Permutation(p, kp.getPer()))).getNVX(),
+				  a.nrows(), kp.ncols(), td.dimen()), tdims(td, Permutation(e, Permutation(p, kp.getPer())))
+		{@+ kp.mult(a, *this);@+}
+
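+@ The fourth constructor above composes two permutations (the one
+permuting the classes with the one coming from the optimized Kronecker
+product). A standalone sketch in plain standard C++, under the same
+assumed convention as before (position |i| takes the element at
+position |per[i]|), shows that composing the index maps and applying
+once is the same as applying the two permutations one after the other.
+This is an illustration only, not the library's |Permutation| code.
+
+	#include <cstdio>
+	#include <vector>
+
+	// Position i of the result takes the element from position per[i].
+	static std::vector<int> apply(const std::vector<int>& per, const std::vector<int>& x)
+	{
+		std::vector<int> out(x.size());
+		for (int i = 0; i < (int)per.size(); i++)
+			out[i] = x[per[i]];
+		return out;
+	}
+
+	// Composition: applying the result equals applying q first, then p.
+	static std::vector<int> compose(const std::vector<int>& p, const std::vector<int>& q)
+	{
+		std::vector<int> r(p.size());
+		for (int i = 0; i < (int)p.size(); i++)
+			r[i] = q[p[i]];
+		return r;
+	}
+
+	int main()
+	{
+		std::vector<int> p = {1, 2, 0};       // made-up permutations
+		std::vector<int> q = {2, 1, 0};
+		std::vector<int> x = {10, 20, 30};    // made-up data
+		std::vector<int> lhs = apply(compose(p, q), x);
+		std::vector<int> rhs = apply(p, apply(q, x));
+		for (int i = 0; i < (int)x.size(); i++)
+			printf("%d==%d ", lhs[i], rhs[i]);   // prints: 20==20 10==10 30==30
+		printf("\n");
+		return 0;
+	}
+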
+@ Here we define an abstraction for the tensor dimension with a
+symmetry like $xuv\vert uv\vert xu\vert y\vert y\vert x\vert x\vert
+y$. These symmetries arise as induced symmetries of an equivalence and
+some outer symmetry. Thus the underlying variables are permuted. One
+can imagine the dimensions as an unfolded product of dimensions which
+consist of folded products of variables.
+
+We inherit from |PerTensorDimens| since we need the permutation
+implied by the equivalence. The new members are the induced symmetries
+(symmetries of each folded dimension) and |ds|, which holds the sizes of the
+dimensions. The number of folded dimensions is returned by |numSyms|.
+
+The object is constructed from outer tensor dimensions and from
+equivalence with optionally permuted classes.
+
+@<|PerTensorDimens2| class declaration@>=
+class PerTensorDimens2 : public PerTensorDimens {
+	InducedSymmetries syms;
+	IntSequence ds;
+public:@;
+	PerTensorDimens2(const TensorDimens& td, const Equivalence& e,
+					 const Permutation& p)
+		: PerTensorDimens(td, Permutation(e, p)),
+		  syms(e, p, td.getSym()),
+		  ds(syms.size())
+		{@+ setDimensionSizes();@+}
+	PerTensorDimens2(const TensorDimens& td, const Equivalence& e)
+		: PerTensorDimens(td, e),
+		  syms(e, td.getSym()),
+		  ds(syms.size())
+		{@+ setDimensionSizes();@+}
+	int numSyms() const
+		{@+ return (int)syms.size();@+}
+	const Symmetry& getSym(int i) const
+		{@+ return syms[i];@+}
+	int calcMaxOffset() const
+		{@+ return ds.mult(); @+}
+	int calcOffset(const IntSequence& coor) const;
+	void print() const;
+protected:@;
+	void setDimensionSizes();
+};
+
+@ Here we define an abstraction of the permuted symmetry folded
+tensor. It is needed in the context of the Faa Di Bruno formula for a folded
+stack container multiplied with a container of dense folded tensors, or
+multiplied by one full symmetry sparse tensor.
+
+For example, if we perform the Faa Di Bruno for $F=f(z)$, where
+$z=[g(x,y,u,v), h(x,y,u), x, y]^T$, we get for one concrete
+equivalence:
+$$
+\left[F_{x^4y^3u^3v^2}\right]=\ldots+
+\left[f_{g^2h^2x^2y}\right]\left(
+[g]_{xv}\otimes[g]_{u^2v}\otimes
+[h]_{xu}\otimes[h]_{y^2}\otimes
+\left[\vphantom{\sum}[I]_x\otimes[I]_x\right]\otimes
+\left[\vphantom{\sum}[I]_y\right]
+\right)
++\ldots
+$$
+
+The class |FPSTensor| represents the tensor on the right. Its
+dimension corresponds to a product of 7 dimensions with the following
+symmetries: $xv\vert u^2v\vert xu\vert y^2\vert x\vert x\vert y$. Such
+a dimension is described by |PerTensorDimens2|.
+
+The tensor is constructed in the context of stack container
+multiplication, so it is constructed from dimensions |td| (dimensions
+of the output tensor), stack product |sp| (implied symmetries picking
+tensors from a stack container, here it is $z$), then a sorted integer
+sequence of the picked stacks of the stack product (it is always
+sorted, here it is $(0,0,1,1,2,2,3)$), then the tensor
+$\left[f_{g^2h^2x^2y}\right]$ (its symmetry must be the same as
+symmetry given by the |istacks|), and finally from the equivalence
+with permuted classes.
+
+We implement |increment| and |getOffset| methods, |decrement| and
+|unfold| raise an exception. Also, we implement |addTo| method, which
+adds the tensor data (partially unfolded) to folded general symmetry
+tensor.
+
+@<|FPSTensor| class declaration@>=
+template<typename _Ttype> class StackProduct;
+
+class FPSTensor : public FTensor {
+	const PerTensorDimens2 tdims;
+public:@;
+	@<|FPSTensor| constructors@>;
+
+	void increment(IntSequence& v) const;
+	void decrement(IntSequence& v) const;
+	UTensor& unfold() const;
+
+	int getOffset(const IntSequence& v) const;
+	void addTo(FGSTensor& out) const;
+};
+
+@ As for |UPSTensor|, we provide four constructors allowing for
+combinations of permuting equivalence classes, and optimization of
+|KronProdAllOptim|. These constructors multiply with a dense general
+symmetry tensor (coming from the dense container, or as a dense slice
+of the full symmetry sparse tensor). In addition to these 4
+constructors, we have one constructor multiplying with a general
+symmetry sparse tensor (coming as a sparse slice of the full symmetry
+sparse tensor).
+
+@<|FPSTensor| constructors@>=
+	FPSTensor(const TensorDimens& td, const Equivalence& e,
+			  const ConstTwoDMatrix& a, const KronProdAll& kp)
+		: FTensor(along_col, PerTensorDimens(td, e).getNVX(),
+				  a.nrows(), kp.ncols(), td.dimen()), tdims(td, e)
+		{@+ kp.mult(a, *this);@+}
+	FPSTensor(const TensorDimens& td, const Equivalence& e,
+			  const ConstTwoDMatrix& a, const KronProdAllOptim& kp)
+		: FTensor(along_col, PerTensorDimens(td, Permutation(e, kp.getPer())).getNVX(),
+				  a.nrows(), kp.ncols(), td.dimen()), tdims(td, e, kp.getPer())
+		{@+ kp.mult(a, *this);@+}
+	FPSTensor(const TensorDimens& td, const Equivalence& e, const Permutation& p,
+			  const ConstTwoDMatrix& a, const KronProdAll& kp)
+		: FTensor(along_col, PerTensorDimens(td, Permutation(e, p)).getNVX(),
+				  a.nrows(), kp.ncols(), td.dimen()), tdims(td, e, p)
+		{@+ kp.mult(a, *this);@+}
+	FPSTensor(const TensorDimens& td, const Equivalence& e, const Permutation& p,
+			  const ConstTwoDMatrix& a, const KronProdAllOptim& kp)
+		: FTensor(along_col, PerTensorDimens(td, Permutation(e, Permutation(p, kp.getPer()))).getNVX(),
+				  a.nrows(), kp.ncols(), td.dimen()), tdims(td, e, Permutation(p, kp.getPer()))
+		{@+ kp.mult(a, *this);@+}
+
+	FPSTensor(const TensorDimens& td, const Equivalence& e, const Permutation& p,
+			  const GSSparseTensor& t, const KronProdAll& kp);
+
+	FPSTensor(const FPSTensor& ft)
+		: FTensor(ft), tdims(ft.tdims)@+ {}
+
+@ End of {\tt ps\_tensor.h} file.
diff --git a/dynare++/tl/cc/pyramid_prod.cweb b/dynare++/tl/cc/pyramid_prod.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..2b4693cd3fd14a873cbd275386bd85bcf0ae1bb5
--- /dev/null
+++ b/dynare++/tl/cc/pyramid_prod.cweb
@@ -0,0 +1,86 @@
+@q $Id: pyramid_prod.cweb 148 2005-04-19 15:12:26Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@ Start of {\tt pyramid\_prod.cpp} file.
+@c
+
+#include "pyramid_prod.h"
+#include "permutation.h"
+#include "tl_exception.h"
+
+@<|USubTensor| constructor code@>;
+@<|USubTensor::addKronColumn| code@>;
+
+
+@ Here we construct the |USubTensor| object. We allocate space via the
+parent |URTensor|. The number of columns is the length of the list of
+indices |lst|; the number of variables and the dimension are those of the tensor
+$h$, which is given by |hdims|.
+
+We go through all equivalences (over the dimension of $B$) whose number of
+classes equals the dimension of $h$. For each equivalence we make a permutation
+|per|. Then we fetch all the necessary tensors $g$ with symmetries
+implied by the symmetry of $B$ and the equivalence. Then we go through the
+list of indices, permute them by the permutation and add the Kronecker
+product of the selected columns. This is done by |addKronColumn|.
+
+@<|USubTensor| constructor code@>=
+USubTensor::USubTensor(const TensorDimens& bdims,
+					   const TensorDimens& hdims,
+					   const FGSContainer& cont,
+					   const vector<IntSequence>& lst)
+	: URTensor(lst.size(), hdims.getNVX()[0], hdims.dimen())
+{
+	TL_RAISE_IF(! hdims.getNVX().isConstant(),
+				"Tensor has not full symmetry in USubTensor()");
+	const EquivalenceSet& eset = cont.getEqBundle().get(bdims.dimen());
+	zeros();
+	for (EquivalenceSet::const_iterator it = eset.begin();
+		 it != eset.end(); ++it) {
+		if ((*it).numClasses() == hdims.dimen()) {
+			Permutation per(*it);
+			vector<const FGSTensor*> ts =
+				cont.fetchTensors(bdims.getSym(), *it);
+			for (int i = 0; i < (int)lst.size(); i++) {
+				IntSequence perindex(lst[i].size());
+				per.apply(lst[i], perindex);
+				addKronColumn(i, ts, perindex); 
+			}
+		}
+	}
+}
+
+@ This makes a Kronecker product of appropriate columns from the tensors
+in |ts| and adds such data to the |i|-th column of this matrix. The
+appropriate columns are defined by the |pindex| sequence. A column of a
+tensor has an index created from the corresponding part of |pindex|. The
+sizes of these parts are given by the dimensions of the tensors in |ts|.
+
+Here we break the given index |pindex| according to the dimensions of
+the tensors in |ts|, and for each subsequence of |pindex| we find
+an index of the folded tensor, which involves calling |getOffset| for
+the folded tensor, which might be costly. We gather all the columns in a
+vector |tmpcols|, and they are Kronecker multiplied in the constructor of
+|URSingleTensor|. Finally we add the data of the |URSingleTensor| to the
+|i|-th column.
+
+@<|USubTensor::addKronColumn| code@>=
+void USubTensor::addKronColumn(int i, const vector<const FGSTensor*>& ts,
+							   const IntSequence& pindex)
+{
+	vector<ConstVector> tmpcols;
+	int lastdim = 0;
+	for (unsigned int j = 0; j < ts.size(); j++) {
+		IntSequence ind(pindex, lastdim, lastdim+ts[j]->dimen());
+		lastdim += ts[j]->dimen();
+		index in(ts[j], ind);
+		tmpcols.push_back(ConstVector(*(ts[j]), *in));
+	}
+
+	URSingleTensor kronmult(tmpcols);
+	Vector coli(*this, i);
+	coli.add(1.0, kronmult.getData());
+}
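+
+@ The Kronecker multiplication of the gathered columns, which happens
+above inside the |URSingleTensor| constructor, can be sketched with
+standard containers only. The following plain C++ illustration (made-up
+data, not the library's |KronProd| code) builds the product so that the
+first column varies the slowest.
+
+	#include <cstdio>
+	#include <vector>
+
+	// Kronecker product of a list of vectors: the result has length equal
+	// to the product of the input lengths, with the first vector varying
+	// the slowest.
+	std::vector<double> kron(const std::vector<std::vector<double> >& cols)
+	{
+		std::vector<double> res(1, 1.0);
+		for (const std::vector<double>& c : cols) {
+			std::vector<double> next;
+			next.reserve(res.size()*c.size());
+			for (double r : res)
+				for (double x : c)
+					next.push_back(r*x);
+			res = next;
+		}
+		return res;
+	}
+
+	int main()
+	{
+		std::vector<std::vector<double> > cols = {{1, 2}, {3, 4, 5}};
+		for (double x : kron(cols))
+			printf("%g ", x);   // prints: 3 4 5 6 8 10
+		printf("\n");
+		return 0;
+	}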
+
+
+@ End of {\tt pyramid\_prod.cpp} file.
diff --git a/dynare++/tl/cc/pyramid_prod.hweb b/dynare++/tl/cc/pyramid_prod.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..3c6c9f8ab0a3c3ddd7b31ea997a90ff1aaef00f5
--- /dev/null
+++ b/dynare++/tl/cc/pyramid_prod.hweb
@@ -0,0 +1,80 @@
+@q $Id: pyramid_prod.hweb 148 2005-04-19 15:12:26Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@*2 Multiplying tensor columns. Start of {\tt pyramid\_prod.h} file.
+
+In here, we implement the Faa Di Bruno for folded
+tensors. Recall, that one step of the Faa Di Bruno is a formula:
+$$\left[B_{s^k}\right]_{\alpha_1\ldots\alpha_k}=
+[h_{y^l}]_{\gamma_1\ldots\gamma_l}
+\prod_{m=1}^l\left[g_{s^{\vert c_m\vert}}\right]^{\gamma_m}_{c_m(\alpha)}
+$$
+
+In contrast to unfolded implementation of |UGSContainer::multAndAdd|
+with help of |KronProdAll| and |UPSTensor|, we take a completely
+different strategy. We cannot afford full instantiation of
+$$\sum_{c\in M_{l,k}}
+\prod_{m=1}^l\left[g_{s^{\vert c_m\vert}}\right]^{\gamma_m}_{c_m(\alpha)}$$
+and therefore we do it per partes. We select some number of columns,
+for instance 10, and calculate 10 consecutive iterators of tensor $B$. Then we
+form unfolded tensor
+$$[G]_S^{\gamma_1\ldots\gamma_l}=\left[\sum_{c\in M_{l,k}}
+\prod_{m=1}^l\left[g_{s^{\vert c_m\vert}}\right]^{\gamma_m}_{c_m(\alpha)}
+\right]_S$$
+where $S$ is the selected set of 10 indices. This is done as Kronecker
+product of vectors corresponding to selected columns. Note that, in
+general, there is no symmetry in $G$, its type is special class for
+this purpose.
+
+If $g$ is folded, then we have to form the folded version of $G$. There is
+no symmetry in the $G$ data, so we sum together all unfolded indices
+corresponding to a folded index. This is perfectly OK, since we multiply
+these groups of (equivalent) items by the same number in the fully
+symmetric $g$.
+
+After this, we perform ordinary matrix multiplication to obtain a
+selected set of columns of $B$.
+
+In here, we define a class for forming and representing
+$[G]_S^{\gamma_1\ldots\gamma_l}$. Basically, this tensor is
+row-oriented (multidimensional index is along rows), and it is fully
+symmetric. So we inherit from |URTensor|. If we need its folded
+version, we simply use a suitable conversion. The new abstraction will
+have only a new constructor allowing a construction from the given set
+of indices $S$, and given set of tensors $g$. The rest of the process
+is implemented in |@<|FGSContainer::multAndAdd| unfolded code@>| or
+|@<|FGSContainer::multAndAdd| folded code@>|.
+ 
+@c
+#ifndef PYRAMID_PROD_H
+#define PYRAMID_PROD_H
+
+#include "int_sequence.h"
+#include "rfs_tensor.h"
+#include "gs_tensor.h"
+#include "t_container.h"
+
+#include <vector>
+
+using namespace std;
+
+@<|USubTensor| class declaration@>;
+
+#endif
+
+@ Here we define the new tensor for representing
+$[G]_S^{\gamma_1\ldots\gamma_l}$. It allows construction from a
+container of folded general symmetry tensors |cont| and a list of
+indices |lst|. Also we have to supply the dimensions of the resulting tensor
+$B$, and the dimensions of the tensor $h$.
+
+@<|USubTensor| class declaration@>=
+class USubTensor : public URTensor {
+public:@;
+	USubTensor(const TensorDimens& bdims, const TensorDimens& hdims,
+			   const FGSContainer& cont, const vector<IntSequence>& lst);
+	void addKronColumn(int i, const vector<const FGSTensor*>& ts,
+					   const IntSequence& pindex);
+};
+
+@ End of {\tt pyramid\_prod.h} file.
diff --git a/dynare++/tl/cc/pyramid_prod2.cweb b/dynare++/tl/cc/pyramid_prod2.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..1f894d01a9bd59f730702cfcbce15491bc084cc8
--- /dev/null
+++ b/dynare++/tl/cc/pyramid_prod2.cweb
@@ -0,0 +1,129 @@
+@q $Id: pyramid_prod2.cweb 332 2005-07-15 13:41:48Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@ Start of {\tt pyramid\_prod2.cpp} file.
+
+@c
+#include "pyramid_prod2.h"
+#include "rfs_tensor.h"
+
+@<|IrregTensorHeader| constructor code@>;
+@<|IrregTensorHeader::increment| code@>;
+@<|IrregTensorHeader| destructor code@>;
+@<|IrregTensorHeader::calcMaxOffset| code@>;
+@<|IrregTensor| constructor code@>;
+@<|IrregTensor::addTo| code@>;
+
+@ Here we only call |sp.createPackedColumns(c, cols, unit_flag)| which
+fills |cols| and |unit_flag| for the given column |c|. Then we set
+|end_seq| according to |unit_flag| and the column lengths.
+
+@<|IrregTensorHeader| constructor code@>=
+IrregTensorHeader::IrregTensorHeader(const StackProduct<FGSTensor>& sp,
+									 const IntSequence& c)
+	: nv(sp.getAllSize()),
+	  unit_flag(sp.dimen()),
+	  cols(new Vector*[sp.dimen()]),
+	  end_seq(sp.dimen())
+{
+	sp.createPackedColumns(c, cols, unit_flag);
+	for (int i = 0; i < sp.dimen(); i++) {
+		end_seq[i] = cols[i]->length();
+		if (unit_flag[i] != -1)
+			end_seq[i] = unit_flag[i]+1;
+	}
+}
+
+
+@ Here we have to increment the given integer sequence. We do it by
+the following code, whose pattern is valid for all tensors. The only
+difference is how we increment an item of the coordinates.
+
+@<|IrregTensorHeader::increment| code@>=
+void IrregTensorHeader::increment(IntSequence& v) const
+{
+	TL_RAISE_IF(v.size() != dimen(),
+				"Wrong size of coordinates in IrregTensorHeader::increment");
+
+	if (v.size() == 0)
+		return;
+	int i = v.size()-1;
+	@<increment |i|-th item in coordinate |v|@>;
+	while (i > 0 && v[i] == end_seq[i]) {
+		v[i] = 0;
+		i--;
+		@<increment |i|-th item in coordinate |v|@>;
+	}
+}
+
+@ Here we increment an item of the coordinates. Whenever we reach the end of
+a column coming from the matrices, and |unit_flag| is not $-1$, we have to
+jump to that |unit_flag|.
+
+@<increment |i|-th item in coordinate |v|@>=
+	v[i]++;
+	if (unit_flag[i] != -1 && v[i] == cols[i]->length()-1)
+		v[i] = unit_flag[i];
+
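+@ The increment above is the usual odometer pattern: bump the rightmost
+coordinate and carry to the left whenever a coordinate reaches its end
+value. A minimal standalone sketch in plain C++ (illustration only; the
+|unit_flag| jump is deliberately left out):
+
+	#include <cstdio>
+	#include <vector>
+
+	// Rightmost-coordinate-first increment of a multi-index v, where
+	// coordinate i runs through 0, 1, ..., end[i]-1.
+	void increment(std::vector<int>& v, const std::vector<int>& end)
+	{
+		if (v.empty())
+			return;
+		int i = (int)v.size() - 1;
+		v[i]++;
+		while (i > 0 && v[i] == end[i]) {
+			v[i] = 0;
+			i--;
+			v[i]++;
+		}
+	}
+
+	int main()
+	{
+		std::vector<int> end = {2, 3};
+		std::vector<int> v = {0, 0};
+		// enumerate all 2*3 = 6 indices
+		for (int n = 0; n < 6; n++) {
+			printf("(%d,%d) ", v[0], v[1]);
+			increment(v, end);
+		}
+		printf("\n");   // prints: (0,0) (0,1) (0,2) (1,0) (1,1) (1,2)
+		return 0;
+	}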
+
+@ 
+@<|IrregTensorHeader| destructor code@>=
+IrregTensorHeader::~IrregTensorHeader()
+{
+  for (int i = 0; i < dimen(); i++)
+	  delete cols[i];
+  delete [] cols;
+}
+
+@ It is a product of all column lengths.
+@<|IrregTensorHeader::calcMaxOffset| code@>=
+int IrregTensorHeader::calcMaxOffset() const
+{
+	int res = 1;
+	for (int i = 0; i < dimen(); i++)
+		res *= cols[i]->length();
+	return res;
+}
+
+
+@ Everything is done in |IrregTensorHeader|; we only have to Kronecker
+multiply all columns of the header.
+
+@<|IrregTensor| constructor code@>=
+IrregTensor::IrregTensor(const IrregTensorHeader& h)
+	: Tensor(along_row, IntSequence(h.dimen(), 0), h.end_seq,
+			 h.calcMaxOffset(), 1, h.dimen()),
+	  header(h)
+{
+	if (header.dimen() == 1) {
+		getData() = *(header.cols[0]);
+		return;
+	}
+
+	Vector* last = new Vector(*(header.cols[header.dimen()-1]));
+	for (int i = header.dimen()-2; i > 0; i--) {
+		Vector* newlast = new Vector(last->length()*header.cols[i]->length());
+		KronProd::kronMult(ConstVector(*(header.cols[i])),
+						   ConstVector(*last), *newlast);
+		delete last;
+		last = newlast;
+	}
+	KronProd::kronMult(ConstVector(*(header.cols[0])),
+					   ConstVector(*last), getData());
+	delete last;
+}
+
+@ Clear.
+@<|IrregTensor::addTo| code@>=
+void IrregTensor::addTo(FRSingleTensor& out) const
+{
+	for (index it = begin(); it != end(); ++it) {
+		IntSequence tmp(it.getCoor());
+		tmp.sort();
+		Tensor::index ind(&out, tmp);
+		out.get(*ind, 0) += get(*it, 0);
+	}
+}
+
+
+@ End of {\tt pyramid\_prod2.cpp} file.
diff --git a/dynare++/tl/cc/pyramid_prod2.hweb b/dynare++/tl/cc/pyramid_prod2.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..42cf370edeb23c29ddac5c4417e2af2a6199c337
--- /dev/null
+++ b/dynare++/tl/cc/pyramid_prod2.hweb
@@ -0,0 +1,151 @@
+@q $Id: pyramid_prod2.hweb 148 2005-04-19 15:12:26Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@*2 Multiplying stacked tensor columns. Start of {\tt pyramid\_prod2.h} file.
+
+We need to calculate the following tensor product:
+$$\left[f_{s^j}\right]_{\alpha_1\ldots\alpha_j}=
+\sum_{l=1}^j\left[f_{z^l}\right]_{\beta_1\ldots\beta_l}
+\sum_{c\in M_{l,j}}\prod_{m=1}^l\left[z_{c_m}\right]^{\beta_m}_{c_m(\alpha)}
+$$
+where $s=[y,u,u',\sigma]$, and $z$ is a composition of four variables,
+say $[v,w,y,u]$. Note that $z$ ends with $y$ and $u$, and the only
+non-zero derivative of the trailing part of $z$ involving $y$ or $u$
+is the first derivative and is the unit matrix $y_y=[1]$ or
+$u_u=[1]$. Also, we suppose that the dependence of $v$ and $w$ on $s$
+is such that whenever a derivative of $w$ is nonzero, then so is the
+derivative of $v$. This means that for any derivative and any index there is a
+contiguous part of derivatives of $v$ and optionally of $w$, followed by a
+column of zeros containing at most one $1$.
+
+This structure can be modelled and exploited at some cost in
+programming effort. For example, let us consider the following product:
+$$\left[B_{y^2u^3}\right]_{\alpha_1\alpha_2\beta_1\beta_2\beta_3}=
+\ldots
+\left[f_{z^3}\right]_{\gamma_1\gamma_2\gamma_3}
+\left[z_{yu}\right]^{\gamma_1}_{\alpha_1\beta_1}
+\left[z_{y}\right]^{\gamma_2}_{\alpha_2}
+\left[z_{uu}\right]^{\gamma_3}_{\beta_2\beta_3}
+\ldots$$
+The term corresponds to equivalence $\{\{0,2\},\{1\},\{3,4\}\}$. For
+the fixed index $\alpha_1\alpha_2\beta_1\beta_2\beta_3$ we have to
+make a Kronecker product of the columns
+$$
+\left[z_{yu}\right]_{\alpha_1\beta_1}\otimes
+\left[z_{y}\right]_{\alpha_2}\otimes
+\left[z_{uu}\right]_{\beta_2\beta_3}
+$$
+which can be written as
+$$
+\left[\matrix{\left[v_{yu}\right]_{\alpha_1\beta_1}\cr
+              \left[w_{yu}\right]_{\alpha_1\beta_1}\cr 0\cr 0}\right]\otimes
+\left[\matrix{\left[v_y\right]_{\alpha_2\vphantom{(}}\cr
+              \left[w_y\right]_{\alpha_2}\cr 1_{\alpha_2}\cr 0}\right]\otimes
+\left[\matrix{\left[v_{uu}\right]_{\beta_2\beta_3\vphantom{(}}\cr
+              \left[w_{uu}\right]_{\beta_2\beta_3}\cr 0\cr 0}\right]
+$$
+where $1_{\alpha_2}$ is a column of zeros having the only $1$ at
+$\alpha_2$ index.
+
+This file develops the abstraction for this Kronecker product column
+without multiplication of the zeros at the top. Basically, it will be
+a column which is a Kronecker product of the columns without the
+zeros:
+$$
+\left[\matrix{\left[v_{yu}\right]_{\alpha_1\beta_1}\cr
+              \left[w_{yu}\right]_{\alpha_1\beta_1}}\right]\otimes
+\left[\matrix{\left[v_y\right]_{\alpha_2}\cr
+              \left[w_y\right]_{\alpha_2}\cr 1}\right]\otimes
+\left[\matrix{\left[v_{uu}\right]_{\beta_2\beta_3}\cr
+              \left[w_{uu}\right]_{\beta_2\beta_3}}\right]
+$$
+The class will have a tensor infrastructure introducing |index| which
+iterates over all items in the column with $\gamma_1\gamma_2\gamma_3$
+as coordinates in $\left[f_{z^3}\right]$. The data of such a tensor is
+not suitable for any matrix operation and will have to be accessed
+only through the |index|. Note that this does not matter, since
+$\left[f_{z^l}\right]$ are sparse.
+
+@c
+#ifndef PYRAMID_PROD2_H
+#define PYRAMID_PROD2_H
+
+#include "permutation.h"
+#include "tensor.h"
+#include "tl_exception.h"
+#include "rfs_tensor.h"
+#include "stack_container.h"
+
+#include "Vector.h"
+
+@<|IrregTensorHeader| class declaration@>;
+@<|IrregTensor| class declaration@>;
+
+#endif
+
+@ First we declare a helper class for the tensor. Its purpose is to
+gather the columns which are going to be Kronecker multiplied. The
+input of this helper class is |StackProduct<FGSTensor>| and coordinate
+|c| of the column.
+
+It maintains the |unit_flag| array which says for which columns we must
+stack a $1$ below $v$ and $w$. In this case, the value of |unit_flag| is
+the index of the $1$; otherwise the value of |unit_flag| is $-1$.
+
+Also we have storage for the stacked columns |cols|. The object is
+responsible for the memory management associated with this storage. That is
+why we do not allow any copy constructor, since we need to be sure
+that no accidental copies take place. We declare the copy constructor
+as private and do not implement it.
+ 
+@<|IrregTensorHeader| class declaration@>=
+class IrregTensor;
+class IrregTensorHeader {
+	friend class IrregTensor;
+	int nv;
+	IntSequence unit_flag;
+    Vector** const cols;
+	IntSequence end_seq;
+public:@;
+	IrregTensorHeader(const StackProduct<FGSTensor>& sp, const IntSequence& c);
+	~IrregTensorHeader();
+	int dimen() const
+		{@+ return unit_flag.size();@+}
+	void increment(IntSequence& v) const;
+	int calcMaxOffset() const;
+private:@;
+	IrregTensorHeader(const IrregTensorHeader&);
+};
+
+
+@ Here we declare the irregular tensor. There is no special logic
+here. We inherit from |Tensor| and we must implement three methods,
+|increment|, |decrement| and |getOffset|. The last two are not
+implemented now, since they are not needed, and they raise an
+exception. The first just calls |increment| of the header. Also we
+declare a method |addTo| which adds this unfolded irregular single
+column tensor to folded (regular) single column tensor.
+
+The header |IrregTensorHeader| is kept in the object by
+reference. This is dangerous. However, we will use this class only in
+a simple loop and both |IrregTensor| and |IrregTensorHeader| will be
+destructed at the end of a block. Since the super class |Tensor| must
+be initialized before any member, we could either make a safe copy of
+|IrregTensorHeader|, or use the relatively dangerous reference member. For
+the reason above we chose the latter.
+
+@<|IrregTensor| class declaration@>=
+class IrregTensor : public Tensor {
+	const IrregTensorHeader& header;
+public:@;
+	IrregTensor(const IrregTensorHeader& h);
+	void addTo(FRSingleTensor& out) const;
+	void increment(IntSequence& v) const
+		{@+ header.increment(v);@+}
+	void decrement(IntSequence& v) const
+		{@+ TL_RAISE("Not implemented error in IrregTensor::decrement");@+}
+	int getOffset(const IntSequence& v) const
+		{@+ TL_RAISE("Not implemented error in IrregTensor::getOffset");@+return 0;@+}
+};
+
+@ End of {\tt pyramid\_prod2.h} file.
diff --git a/dynare++/tl/cc/rfs_tensor.cweb b/dynare++/tl/cc/rfs_tensor.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..33c6c93bfcd53d21b5acf3cd41dfc50e2b1e31fd
--- /dev/null
+++ b/dynare++/tl/cc/rfs_tensor.cweb
@@ -0,0 +1,205 @@
+@q $Id: rfs_tensor.cweb 148 2005-04-19 15:12:26Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@ Start of {\tt rfs\_tensor.cpp} file.
+
+@c
+#include "rfs_tensor.h"
+#include "kron_prod.h"
+#include "tl_exception.h"
+
+@<|FRTensor| conversion from unfolded@>;
+@<|FRTensor::unfold| code@>;
+@<|FRTensor::increment| code@>;
+@<|FRTensor::decrement| code@>;
+@<|URTensor| conversion from folded@>;
+@<|URTensor::fold| code@>;
+@<|URTensor| increment and decrement@>;
+@<|URTensor::getOffset| code@>;
+@<|URSingleTensor| constructor 1 code@>;
+@<|URSingleTensor| constructor 2 code@>;
+@<|URSingleTensor::fold| code@>;
+@<|FRSingleTensor| conversion from unfolded@>;
+
+@ The conversion from unfolded to folded sums up all data from
+unfolded corresponding to one folded index. So we go through all the
+rows in the unfolded tensor |ut|, make an index of the folded tensor
+by sorting the coordinates, and add the row.
+ 
+@<|FRTensor| conversion from unfolded@>=
+FRTensor::FRTensor(const URTensor& ut)
+	: FTensor(along_row, IntSequence(ut.dimen(), ut.nvar()),
+			  FFSTensor::calcMaxOffset(ut.nvar(), ut.dimen()), ut.ncols(),
+			  ut.dimen()),
+	  nv(ut.nvar())
+{
+	zeros();
+	for (index in = ut.begin(); in != ut.end(); ++in) {
+		IntSequence vtmp(in.getCoor());
+		vtmp.sort();
+		index tar(this, vtmp);
+		addRow(ut, *in, *tar);
+	}
+}
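+
+@ The folding above sums every unfolded entry into the representative
+obtained by sorting its coordinates. The same idea can be sketched
+outside the library by keying a |std::map| with the sorted coordinate
+sequence instead of using the offset machinery; the data below are made
+up and the sketch is an illustration only.
+
+	#include <algorithm>
+	#include <cstdio>
+	#include <map>
+	#include <vector>
+
+	int main()
+	{
+		// Made-up unfolded full-symmetry data indexed by (i,j), i,j in {0,1};
+		// folding sums the entries whose sorted coordinates coincide.
+		double unfolded[2][2] = {{1.0, 2.0}, {3.0, 4.0}};
+		std::map<std::vector<int>, double> folded;
+		for (int i = 0; i < 2; i++)
+			for (int j = 0; j < 2; j++) {
+				std::vector<int> key = {i, j};
+				std::sort(key.begin(), key.end());
+				folded[key] += unfolded[i][j];
+			}
+		for (const auto& kv : folded)
+			printf("(%d,%d) -> %g\n", kv.first[0], kv.first[1], kv.second);
+		// prints: (0,0) -> 1, (0,1) -> 5, (1,1) -> 4
+		return 0;
+	}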
+
+@ Here we just make a new instance and return a reference to it.
+@<|FRTensor::unfold| code@>=
+UTensor& FRTensor::unfold() const
+{
+	return *(new URTensor(*this));
+}
+
+@ Incrementing is easy. The same as for |FFSTensor|.
+
+@<|FRTensor::increment| code@>=
+void FRTensor::increment(IntSequence& v) const
+{
+	TL_RAISE_IF(v.size() != dimen(),
+				"Wrong input/output vector size in FRTensor::increment");
+
+	UTensor::increment(v, nv);
+	v.monotone();
+}
+
+@ Decrement calls static |FTensor::decrement|.
+
+@<|FRTensor::decrement| code@>=
+void FRTensor::decrement(IntSequence& v) const
+{
+	TL_RAISE_IF(v.size() != dimen(),
+				"Wrong input/output vector size in FRTensor::decrement");
+
+	FTensor::decrement(v, nv);
+}
+
+
+@ Here we convert folded full symmetry tensor to unfolded. We copy all
+columns of folded tensor to unfolded and leave other columns
+(duplicates) zero. In this way, if the unfolded tensor is folded back,
+we should get the same data.
+
+@<|URTensor| conversion from folded@>=
+URTensor::URTensor(const FRTensor& ft)
+	: UTensor(along_row, IntSequence(ft.dimen(), ft.nvar()),
+			  UFSTensor::calcMaxOffset(ft.nvar(), ft.dimen()), ft.ncols(),
+			  ft.dimen()),
+	  nv(ft.nvar())
+{
+	zeros();
+	for (index src = ft.begin(); src != ft.end(); ++src) {
+		index in(this, src.getCoor());
+		copyRow(ft, *src, *in);
+	}
+}
+
+@ Here we just return a reference to new instance of folded tensor.
+@<|URTensor::fold| code@>=
+FTensor& URTensor::fold() const
+{
+	return *(new FRTensor(*this));
+}
+
+@ Here we just call |UTensor| respective static methods.
+@<|URTensor| increment and decrement@>=
+void URTensor::increment(IntSequence& v) const
+{
+	TL_RAISE_IF(v.size() != dimen(),
+				"Wrong input/output vector size in URTensor::increment");
+
+	UTensor::increment(v, nv);
+}
+
+void URTensor::decrement(IntSequence& v) const
+{
+	TL_RAISE_IF(v.size() != dimen(),
+				"Wrong input/output vector size in URTensor::decrement");
+
+	UTensor::decrement(v, nv);
+}
+
+@ 
+@<|URTensor::getOffset| code@>=
+int URTensor::getOffset(const IntSequence& v) const
+{
+	TL_RAISE_IF(v.size() != dimen(),
+				"Wrong input vector size in URTensor::getOffset");
+
+	return UTensor::getOffset(v, nv);
+}
+
+@ Here we construct $v_1\otimes v_2\otimes\ldots\otimes v_n$, where
+$v_1,v_2,\ldots,v_n$ are stored in |vector<ConstVector>|.
+
+@<|URSingleTensor| constructor 1 code@>=
+URSingleTensor::URSingleTensor(const vector<ConstVector>& cols)
+	: URTensor(1, cols[0].length(), cols.size())
+{
+	if (dimen() == 1) {
+		getData() = cols[0];
+		return;
+	}
+
+	Vector* last = new Vector(cols[cols.size()-1]);
+	for (int i = cols.size()-2; i > 0; i--) {
+		Vector* newlast = new Vector(Tensor::power(nvar(), cols.size()-i));
+		KronProd::kronMult(cols[i], ConstVector(*last), *newlast);
+		delete last;
+		last = newlast;
+	}
+	KronProd::kronMult(cols[0], ConstVector(*last), getData());
+	delete last;
+}
+
+@ Here we construct $v\otimes\ldots\otimes v$, where the number of $v$
+copies is |d|.
+
+@<|URSingleTensor| constructor 2 code@>=
+URSingleTensor::URSingleTensor(const ConstVector& v, int d)
+	: URTensor(1, v.length(), d)
+{
+	if (d == 1) {
+		getData() = v;
+		return;
+	}
+
+	Vector* last = new Vector(v);
+	for (int i = d-2; i > 0; i--) {
+		Vector* newlast = new Vector(last->length()*v.length());
+		KronProd::kronMult(v, ConstVector(*last), *newlast);
+		delete last;
+		last = newlast;
+	}
+	KronProd::kronMult(v, ConstVector(*last), getData());
+	delete last;
+}
+
+@ Here we construct |FRSingleTensor| from |URSingleTensor| and return
+its reference.
+
+@<|URSingleTensor::fold| code@>=
+FTensor& URSingleTensor::fold() const
+{
+	return *(new FRSingleTensor(*this));
+}
+
+
+
+@ The conversion from unfolded |URSingleTensor| to folded
+|FRSingleTensor| is completely the same as conversion from |URTensor|
+to |FRTensor|, only we do not copy rows but elements.
+ 
+@<|FRSingleTensor| conversion from unfolded@>=
+FRSingleTensor::FRSingleTensor(const URSingleTensor& ut)
+	: FRTensor(1, ut.nvar(), ut.dimen())
+{
+	zeros();
+	for (index in = ut.begin(); in != ut.end(); ++in) {
+		IntSequence vtmp(in.getCoor());
+		vtmp.sort();
+		index tar(this, vtmp);
+		get(*tar, 0) += ut.get(*in, 0);
+	}
+}
+
+
+@ End of {\tt rfs\_tensor.cpp} file.
diff --git a/dynare++/tl/cc/rfs_tensor.hweb b/dynare++/tl/cc/rfs_tensor.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..b1efe69aec3205b877199c03de02462198561543
--- /dev/null
+++ b/dynare++/tl/cc/rfs_tensor.hweb
@@ -0,0 +1,148 @@
+@q $Id: rfs_tensor.hweb 741 2006-05-09 11:12:46Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@*2 Row-wise full symmetry tensor. Start of {\tt rfs\_tensor.h} file.
+
+Here we define classes for full symmetry tensors with the
+multidimensional index identified with rows. The primary usage is for
+storage of data coming from (or from a sum of)
+$$\prod_{m=1}^l\left[g_{s^{\vert c_m\vert}}\right]^{\gamma_m}_{c_m(\alpha)}$$
+where $\alpha$, coming from a multidimensional index, goes through some
+set $S$ and $c$ is some equivalence. So we model a tensor of the form:
+$$\left[\prod_{m=1}^l
+\left[g_{s^{\vert c_m\vert}}\right]^{\gamma_m}_{c_m(\alpha)}
+\right]_S^{\gamma_1\ldots\gamma_l}$$
+Since all $\gamma_1,\ldots,\gamma_l$ correspond to the same variable,
+the tensor is fully symmetric.  The set of indices $S$ cannot be very
+large and sometimes it is only one element. This case is handled in a
+special subclass.
+
+We provide both folded and unfolded versions. Their logic is perfectly
+the same as in |UFSTensor| and |FFSTensor| with two exceptions. One
+has already been mentioned: the multidimensional index is along the
+rows. The second is the conversion between the two types. Since this
+kind of tensor is used to multiply (from the right) a tensor whose
+multidimensional index is identified with columns, we will need a
+different way of conversion. If the multiplication of two folded
+tensors is to be equivalent with the multiplication of two unfolded ones, the
+folding of the right tensor must sum all equivalent elements, since
+they are multiplied with the same number from the folded
+tensor. (Equivalent here means all elements of the unfolded tensor
+corresponding to one element of the folded tensor.) For this reason, it is
+necessary to calculate a column number from the given sequence, so we
+implement |getOffset|. The process of unfolding is not used, so we
+implement it so that unfolding and then folding a tensor yields
+the same data.
+
+@c
+#ifndef RFS_TENSOR_H
+#define RFS_TENSOR_H
+
+#include "tensor.h"
+#include "fs_tensor.h"
+#include "symmetry.h"
+
+@<|URTensor| class declaration@>;
+@<|FRTensor| class declaration@>;
+@<|URSingleTensor| class declaration@>;
+@<|FRSingleTensor| class declaration@>;
+
+#endif
+
+@ This is straightforward and very similar to |UFSTensor|.
+@<|URTensor| class declaration@>=
+class FRTensor;
+class URTensor : public UTensor {
+	int nv;
+public:@;
+	@<|URTensor| constructor declaration@>;
+	virtual ~URTensor()@+ {}
+
+	void increment(IntSequence& v) const;
+	void decrement(IntSequence& v) const;
+	FTensor& fold() const;
+
+	int getOffset(const IntSequence& v) const;
+	int nvar() const
+		{@+ return nv;@+}
+	Symmetry getSym() const
+		{@+ return Symmetry(dimen());@+}
+};
+
+@ 
+@<|URTensor| constructor declaration@>=
+	URTensor(int c, int nvar, int d)
+		: UTensor(along_row, IntSequence(d, nvar),
+				  UFSTensor::calcMaxOffset(nvar, d), c, d), nv(nvar)@+ {}
+	URTensor(const URTensor& ut)
+		: UTensor(ut), nv(ut.nv)@+ {}
+	URTensor(const FRTensor& ft);
+
+@ This is straightforward and very similar to |FFSTensor|.
+@<|FRTensor| class declaration@>=
+class FRTensor : public FTensor {
+	int nv;
+public:@;
+    @<|FRTensor| constructor declaration@>;
+	virtual ~FRTensor()@+ {}
+
+	void increment(IntSequence& v) const;
+	void decrement(IntSequence& v) const;
+	UTensor& unfold() const;
+
+	int nvar() const
+		{@+ return nv;@+}
+	int getOffset(const IntSequence& v) const
+		{@+ return FTensor::getOffset(v, nv);@+}
+	Symmetry getSym() const
+		{@+ return Symmetry(dimen());@+}
+};
+
+@ 
+@<|FRTensor| constructor declaration@>=
+	FRTensor(int c, int nvar, int d)
+		: FTensor(along_row, IntSequence(d, nvar),
+				  FFSTensor::calcMaxOffset(nvar, d), c, d), nv(nvar)@+ {}
+	FRTensor(const FRTensor& ft)
+		: FTensor(ft), nv(ft.nv)@+ {}
+	FRTensor(const URTensor& ut);
+
+@ The following class represents specialization of |URTensor| coming
+from Kronecker multiplication of a few vectors. So the resulting
+row-oriented tensor has one column. We provide two constructors,
+one constructs the tensor from a few vectors stored as
+|vector<ConstVector>|. The second makes the Kronecker power of one
+given vector.
+
+@<|URSingleTensor| class declaration@>=
+class URSingleTensor : public URTensor {
+public:@;
+	URSingleTensor(int nvar, int d)
+		: URTensor(1, nvar, d)@+ {}
+	URSingleTensor(const vector<ConstVector>& cols);
+	URSingleTensor(const ConstVector& v, int d);
+	URSingleTensor(const URSingleTensor& ut)
+		: URTensor(ut)@+ {}
+	virtual ~URSingleTensor()@+ {}
+	FTensor& fold() const;
+};
+
+@ This class represents a one-column row-oriented tensor. The only way
+to construct it is from a |URSingleTensor| or from
+scratch. The folding algorithm is the same as the folding of a general
+|URTensor|; only its implementation is different, since we do not copy
+rows, but only elements.
+
+@<|FRSingleTensor| class declaration@>=
+class FRSingleTensor : public FRTensor {
+public:@;
+	FRSingleTensor(int nvar, int d)
+		: FRTensor(1, nvar, d)@+ {}
+	FRSingleTensor(const URSingleTensor& ut);
+	FRSingleTensor(const FRSingleTensor& ft)
+		: FRTensor(ft)@+ {}
+	virtual ~FRSingleTensor()@+ {}
+};
+
+
+@ End of {\tt rfs\_tensor.h} file.
diff --git a/dynare++/tl/cc/sparse_tensor.cweb b/dynare++/tl/cc/sparse_tensor.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..214e3495d361b0e861e7e6cda8a222cbc4bafa56
--- /dev/null
+++ b/dynare++/tl/cc/sparse_tensor.cweb
@@ -0,0 +1,274 @@
+@q $Id: sparse_tensor.cweb 1258 2007-05-11 13:59:10Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@ Start of {\tt sparse\_tensor.cpp} file.
+
+@c
+#include "sparse_tensor.h"
+#include "fs_tensor.h"
+#include "tl_exception.h"
+
+#include <cmath>
+
+@<|SparseTensor::insert| code@>;
+@<|SparseTensor::isFinite| code@>;
+@<|SparseTensor::getFoldIndexFillFactor| code@>;
+@<|SparseTensor::getUnfoldIndexFillFactor| code@>;
+@<|SparseTensor::print| code@>;
+@<|FSSparseTensor| constructor code@>;
+@<|FSSparseTensor| copy constructor code@>;
+@<|FSSparseTensor::insert| code@>;
+@<|FSSparseTensor::multColumnAndAdd| code@>;
+@<|FSSparseTensor::print| code@>;
+@<|GSSparseTensor| slicing constructor@>;
+@<|GSSparseTensor::insert| code@>;
+@<|GSSparseTensor::print| code@>;
+
+@ This is straightforward. Before we insert anything, we do a few
+checks. Then we reset |first_nz_row| and |last_nz_row| if necessary.
+
+@<|SparseTensor::insert| code@>=
+void SparseTensor::insert(const IntSequence& key, int r, double c)
+{
+	TL_RAISE_IF(r < 0 || r >= nr,
+				"Row number out of dimension of tensor in SparseTensor::insert");
+	TL_RAISE_IF(key.size() != dimen(),
+				"Wrong length of key in SparseTensor::insert");
+	TL_RAISE_IF(! std::isfinite(c),
+				"Insertion of non-finite value in SparseTensor::insert");
+
+	iterator first_pos = m.lower_bound(key);
+	@<check that pair |key| and |r| is unique@>;
+	m.insert(first_pos, Map::value_type(key, Item(r,c)));
+	if (first_nz_row > r)
+		first_nz_row = r;
+	if (last_nz_row < r)
+		last_nz_row = r;
+}
+
+@ 
+@<check that pair |key| and |r| is unique@>=
+	iterator last_pos = m.upper_bound(key);
+	for (iterator it = first_pos; it != last_pos; ++it)
+		if ((*it).second.first == r) {
+			TL_RAISE("Duplicate <key, r> insertion in SparseTensor::insert");
+			return;
+		}
+
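+@ The insertion pattern above (|lower_bound| serving both as an
+insertion hint and as the start of the equal range scanned for a
+duplicate (key, row) pair) can be reproduced with a plain
+|std::multimap|. The following standalone sketch is an illustration
+only, not the |SparseTensor| interface, and all keys and values are
+made up.
+
+	#include <cstdio>
+	#include <map>
+	#include <utility>
+	#include <vector>
+
+	typedef std::multimap<std::vector<int>, std::pair<int, double> > Map;
+
+	// Insert key -> (row, value) unless the same (key, row) pair is already
+	// present; lower_bound doubles as the insertion hint.
+	bool insert_unique(Map& m, const std::vector<int>& key, int row, double val)
+	{
+		Map::iterator lo = m.lower_bound(key);
+		Map::iterator hi = m.upper_bound(key);
+		for (Map::iterator it = lo; it != hi; ++it)
+			if (it->second.first == row)
+				return false;
+		m.insert(lo, Map::value_type(key, std::make_pair(row, val)));
+		return true;
+	}
+
+	int main()
+	{
+		Map m;
+		std::vector<int> key = {0, 1};                      // made-up key
+		printf("%d\n", (int)insert_unique(m, key, 3, 2.5)); // 1 (inserted)
+		printf("%d\n", (int)insert_unique(m, key, 3, 7.0)); // 0 (duplicate row)
+		printf("%d\n", (int)insert_unique(m, key, 4, 7.0)); // 1 (inserted)
+		return 0;
+	}
+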
+@ This returns true if all items are finite (not Nan nor Inf).
+@<|SparseTensor::isFinite| code@>=
+bool SparseTensor::isFinite() const
+{
+	bool res = true;
+	const_iterator run = m.begin();
+	while (res && run != m.end()) {
+		if (! std::isfinite((*run).second.second))
+			res = false;
+		++run;
+	}
+	return res;
+}
+
+@ This returns the ratio of the number of non-zero columns in the folded
+tensor to the total number of columns.
+
+@<|SparseTensor::getFoldIndexFillFactor| code@>=
+double SparseTensor::getFoldIndexFillFactor() const
+{
+	int cnt = 0;
+	const_iterator start_col = m.begin();
+	while (start_col != m.end()) {
+		cnt++;
+		const IntSequence& key = (*start_col).first;
+		start_col = m.upper_bound(key);
+	}
+
+	return ((double)cnt)/ncols();
+}
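+
+@ Stepping from one distinct key to the next via |upper_bound|, as the
+loop above does, is a small |multimap| idiom worth showing on its own.
+A standalone sketch with made-up data, as an illustration only:
+
+	#include <cstdio>
+	#include <map>
+
+	int main()
+	{
+		// Made-up keys; two items share key 1 and two share key 7.
+		std::multimap<int, double> m = {{1, 0.5}, {1, 0.7}, {3, 1.0}, {7, 2.0}, {7, 3.0}};
+		int distinct = 0;
+		std::multimap<int, double>::const_iterator it = m.begin();
+		while (it != m.end()) {
+			distinct++;
+			it = m.upper_bound(it->first);   // jump past all items with this key
+		}
+		printf("distinct keys: %d of %d items\n", distinct, (int)m.size());   // 3 of 5
+		return 0;
+	}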
+
+@ This returns the ratio of the number of non-zero columns in the unfolded
+tensor to the total number of columns.
+
+@<|SparseTensor::getUnfoldIndexFillFactor| code@>=
+double SparseTensor::getUnfoldIndexFillFactor() const
+{
+	int cnt = 0;
+	const_iterator start_col = m.begin();
+	while (start_col != m.end()) {
+		const IntSequence& key = (*start_col).first;
+		Symmetry s(key);
+		cnt += Tensor::noverseq(s);
+		start_col = m.upper_bound(key);
+	}
+
+	return ((double)cnt)/ncols();
+}
+
+
+
+@ This prints the fill factor and all items.
+@<|SparseTensor::print| code@>=
+void SparseTensor::print() const
+{
+	printf("Fill: %3.2f %%\n", 100*getFillFactor());
+	const_iterator start_col = m.begin();
+	while (start_col != m.end()) {
+		const IntSequence& key = (*start_col).first;
+		printf("Column: ");key.print();
+		const_iterator end_col = m.upper_bound(key);
+		int cnt = 1;
+		for (const_iterator run = start_col; run != end_col; ++run, cnt++) {
+			if ((cnt/7)*7 == cnt)
+				printf("\n");
+			printf("%d(%6.2g)  ", (*run).second.first, (*run).second.second);
+		}
+		printf("\n");
+		start_col = end_col;
+	}
+}
+
+
+
+@ 
+@<|FSSparseTensor| constructor code@>=
+FSSparseTensor::FSSparseTensor(int d, int nvar, int r)
+	: SparseTensor(d, r, FFSTensor::calcMaxOffset(nvar, d)),
+	  nv(nvar), sym(d)
+{}
+
+@ 
+@<|FSSparseTensor| copy constructor code@>=
+FSSparseTensor::FSSparseTensor(const FSSparseTensor& t)
+	: SparseTensor(t),
+	  nv(t.nvar()), sym(t.sym)
+{}
+
+@ 
+@<|FSSparseTensor::insert| code@>=
+void FSSparseTensor::insert(const IntSequence& key, int r, double c)
+{
+	TL_RAISE_IF(!key.isSorted(),
+				"Key is not sorted in FSSparseTensor::insert");
+	TL_RAISE_IF(key[key.size()-1] >= nv || key[0] < 0,
+				"Wrong value of the key in FSSparseTensor::insert"); 
+	SparseTensor::insert(key, r, c);
+}
+
+@ We go through the tensor |t| which is supposed to have a single
+column. If an item of |t| is nonzero, we make a key by sorting its
+index, and then we go through all items having the same key (that is, its
+column), obtain the row number and the element, and do the
+multiplication.
+
+The test for non-zero is |a != 0.0|, since there will be items which
+are exact zeros.
+
+I have also tried to make the loop through the sparse tensor the outer one, and
+find the index of tensor |t| within the loop. Surprisingly, it is a little
+slower (for monomial tests with probability of zeros equal to 0.3). But
+everything depends on how filled the sparse tensor is.
+
+@<|FSSparseTensor::multColumnAndAdd| code@>=
+void FSSparseTensor::multColumnAndAdd(const Tensor& t, Vector& v) const
+{
+	@<check compatibility of input parameters@>;
+	for (Tensor::index it = t.begin(); it != t.end(); ++it) {
+		int ind = *it;
+		double a = t.get(ind, 0); 
+		if (a != 0.0) {
+			IntSequence key(it.getCoor());
+			key.sort();
+			@<check that |key| is within the range@>;
+			const_iterator first_pos = m.lower_bound(key);
+			const_iterator last_pos = m.upper_bound(key);
+			for (const_iterator cit = first_pos; cit != last_pos; ++cit) {
+				int r = (*cit).second.first;
+				double c = (*cit).second.second;
+				v[r] += c * a;
+			}
+		}
+	}
+}
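+
+@ The accumulation |v[r] += c*a| over all sparse items sharing a sorted
+key can be mimicked with standard containers only. The sketch below is
+an illustration of the idea with made-up keys and values, not of the
+library's interfaces.
+
+	#include <algorithm>
+	#include <cstdio>
+	#include <map>
+	#include <utility>
+	#include <vector>
+
+	typedef std::multimap<std::vector<int>, std::pair<int, double> > Map;
+
+	int main()
+	{
+		// Sparse "tensor": sorted coordinate key -> (row, coefficient).
+		Map m;
+		std::vector<int> k01 = {0, 1};
+		std::vector<int> k11 = {1, 1};
+		m.insert(Map::value_type(k01, std::make_pair(0, 2.0)));
+		m.insert(Map::value_type(k01, std::make_pair(2, -1.0)));
+		m.insert(Map::value_type(k11, std::make_pair(1, 4.0)));
+
+		// One nonzero entry a of the single-column tensor, at unfolded
+		// coordinates (1,0); its key is the sorted sequence (0,1).
+		double a = 3.0;
+		std::vector<int> key = {1, 0};
+		std::sort(key.begin(), key.end());
+
+		std::vector<double> v(3, 0.0);
+		for (Map::const_iterator it = m.lower_bound(key); it != m.upper_bound(key); ++it)
+			v[it->second.first] += it->second.second * a;
+		printf("%g %g %g\n", v[0], v[1], v[2]);   // prints: 6 0 -3
+		return 0;
+	}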
+
+
+@ 
+@<check compatibility of input parameters@>=
+	TL_RAISE_IF(v.length() != nrows(),
+				"Wrong size of output vector in FSSparseTensor::multColumnAndAdd");
+	TL_RAISE_IF(t.dimen() != dimen(),
+				"Wrong dimension of tensor in FSSparseTensor::multColumnAndAdd");
+	TL_RAISE_IF(t.ncols() != 1,
+				"The input tensor is not single-column in FSSparseTensor::multColumnAndAdd");
+
+
+@ 
+@<check that |key| is within the range@>=
+	TL_RAISE_IF(key[0] < 0 || key[key.size()-1] >= nv,
+				"Wrong coordinates of index in FSSparseTensor::multColumnAndAdd");
+
+@ 
+@<|FSSparseTensor::print| code@>=
+void FSSparseTensor::print() const
+{
+	printf("FS Sparse tensor: dim=%d, nv=%d, (%dx%d)\n", dim, nv, nr, nc);
+	SparseTensor::print();
+}
+
+@ This is the same as |@<|FGSTensor| slicing from |FSSparseTensor|@>|. 
+@<|GSSparseTensor| slicing constructor@>=
+GSSparseTensor::GSSparseTensor(const FSSparseTensor& t, const IntSequence& ss,
+							   const IntSequence& coor, const TensorDimens& td)
+	: SparseTensor(td.dimen(), t.nrows(), td.calcFoldMaxOffset()),
+	  tdims(td)
+{
+	@<set |lb| and |ub| to lower and upper bounds of slice indices@>;
+
+	FSSparseTensor::const_iterator lbi = t.getMap().lower_bound(lb);
+	FSSparseTensor::const_iterator ubi = t.getMap().upper_bound(ub);
+	for (FSSparseTensor::const_iterator run = lbi; run != ubi; ++run) {
+		if (lb.lessEq((*run).first) && (*run).first.lessEq(ub)) {
+			IntSequence c((*run).first);
+			c.add(-1, lb);
+			insert(c, (*run).second.first, (*run).second.second);
+		}
+	}
+
+}
+
+@ This is the same as |@<set |lb| and |ub| to lower and upper bounds
+of indices@>| in {\tt gs\_tensor.cpp}, see that file for details.
+
+@<set |lb| and |ub| to lower and upper bounds of slice indices@>=
+	IntSequence s_offsets(ss.size(), 0);
+	for (int i = 1; i < ss.size(); i++)
+		s_offsets[i] = s_offsets[i-1] + ss[i-1];
+
+	IntSequence lb(coor.size());
+	IntSequence ub(coor.size());
+	for (int i = 0; i < coor.size(); i++) {
+		lb[i] = s_offsets[coor[i]];
+		ub[i] = s_offsets[coor[i]] + ss[coor[i]] - 1;
+	}
+
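+@ The bounds above are simple prefix sums of the stack sizes. A
+standalone plain C++ sketch with made-up sizes, for the
+$\left[f_{aca}\right]$ slice example used earlier (illustration only):
+
+	#include <cstdio>
+	#include <vector>
+
+	int main()
+	{
+		// Made-up stack sizes of x = [a, b, c, d] and the picked (sorted)
+		// stack coordinates of the slice f_{aca}, i.e. (0, 0, 2).
+		std::vector<int> ss = {2, 3, 4, 1};
+		std::vector<int> coor = {0, 0, 2};
+
+		// Offset of each stack within x: prefix sums of the sizes.
+		std::vector<int> off(ss.size(), 0);
+		for (int i = 1; i < (int)ss.size(); i++)
+			off[i] = off[i-1] + ss[i-1];
+
+		// Lower and upper bound of the slice indices in each dimension.
+		for (int i = 0; i < (int)coor.size(); i++)
+			printf("dim %d: [%d, %d]\n", i,
+			       off[coor[i]], off[coor[i]] + ss[coor[i]] - 1);
+		// prints: dim 0: [0, 1]   dim 1: [0, 1]   dim 2: [5, 8]
+		return 0;
+	}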
+
+@ 
+@<|GSSparseTensor::insert| code@>=
+void GSSparseTensor::insert(const IntSequence& s, int r, double c)
+{
+	TL_RAISE_IF(! s.less(tdims.getNVX()),
+				"Wrong coordinates of index in GSSparseTensor::insert");
+	SparseTensor::insert(s, r, c);
+}
+
+@ 
+@<|GSSparseTensor::print| code@>=
+void GSSparseTensor::print() const
+{
+	printf("GS Sparse tensor: (%dx%d)\nSymmetry: ", nr, nc);
+	tdims.getSym().print();
+	printf("NVS: ");
+	tdims.getNVS().print();
+	SparseTensor::print();
+}
+
+@ End of {\tt sparse\_tensor.cpp} file.
diff --git a/dynare++/tl/cc/sparse_tensor.hweb b/dynare++/tl/cc/sparse_tensor.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..7df3de2c1c233c229035a30d19984c2bd542c574
--- /dev/null
+++ b/dynare++/tl/cc/sparse_tensor.hweb
@@ -0,0 +1,154 @@
+@q $Id: sparse_tensor.hweb 522 2005-11-25 15:45:54Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@*2 Sparse tensor. Start of {\tt sparse\_tensor.h} file.
+
+Here we declare sparse full symmetry and general symmetry tensors with the
+multidimensional index along columns. We implement them as a |multimap|
+associating to each sequence of coordinates |IntSequence| a set of
+pairs (row, number). This is very convenient but not optimal in terms
+of memory consumption. So the implementation can be changed.
+
+The current |multimap| implementation allows insertions.  Another
+advantage of this approach is that we do not need to calculate column
+numbers from the |IntSequence|, since the column is accessed directly
+via the key which is |IntSequence|.
+
+The only operation we need to do with the full symmetry sparse tensor
+is a left multiplication of a row oriented single column tensor. The
+result of such operation is a column of the same size as the sparse
+tensor. Other important operations are slicing operations. We need to
+do sparse and dense slices of full symmetry sparse tensors. In fact,
+the only constructor of general symmetry sparse tensor is slicing from
+the full symmetry sparse.
+
+@s SparseTensor int
+@s FSSparseTensor int
+@s GSSparseTensor int
+
+@c 
+#ifndef SPARSE_TENSOR_H
+#define SPARSE_TENSOR_H
+
+#include "symmetry.h"
+#include "tensor.h"
+#include "gs_tensor.h"
+#include "Vector.h"
+
+#include <map>
+
+using namespace std;
+
+@<|ltseq| predicate@>;
+@<|SparseTensor| class declaration@>;
+@<|FSSparseTensor| class declaration@>;
+@<|GSSparseTensor| class declaration@>;
+
+#endif
+
+@ 
+@<|ltseq| predicate@>=
+struct ltseq {
+	bool operator()(const IntSequence& s1, const IntSequence& s2) const
+		{@+ return s1 < s2;@+}
+};
+
+@ This is a super class of both full symmetry and general symmetry
+sparse tensors. It contains a |multimap| and implements insertions. It
+tracks the maximum and minimum row for which there is an item.
+
+@<|SparseTensor| class declaration@>=
+class SparseTensor {
+public:@;
+	typedef pair<int, double> Item;
+	typedef multimap<IntSequence, Item, ltseq> Map;
+	typedef Map::const_iterator const_iterator;
+protected:@;
+	typedef Map::iterator iterator;
+
+	Map m;
+	const int dim;
+	const int nr;
+	const int nc;
+	int first_nz_row;
+	int last_nz_row;
+public:@;
+	SparseTensor(int d, int nnr, int nnc)
+		: dim(d), nr(nnr), nc(nnc), first_nz_row(nr), last_nz_row(-1) @+{}
+	SparseTensor(const SparseTensor& t)
+		: m(t.m), dim(t.dim), nr(t.nr), nc(t.nc) @+{}
+	virtual ~SparseTensor() @+{}
+	void insert(const IntSequence& s, int r, double c);
+	const Map& getMap() const
+		{@+ return m;@+}
+	int dimen() const
+		{@+ return dim;@+}
+	int nrows() const
+		{@+ return nr;@+}
+	int ncols() const
+		{@+ return nc;@+}
+	double getFillFactor() const
+		{@+ return ((double)m.size())/(nrows()*ncols());@+}
+	double getFoldIndexFillFactor() const;
+	double getUnfoldIndexFillFactor() const;
+	int getNumNonZero() const
+		{@+ return m.size();@+}
+	int getFirstNonZeroRow() const
+		{@+ return first_nz_row;@+}
+	int getLastNonZeroRow() const
+		{@+ return last_nz_row;@+}
+	virtual const Symmetry& getSym() const =0;
+	void print() const;
+	bool isFinite() const;
+}
+
+@ This is a full symmetry sparse tensor. It implements
+|multColumnAndAdd| and, in addition to |SparseTensor|, it has |nv|
+(the number of variables) and a symmetry (which, being full, is determined by the dimension).
+
+@<|FSSparseTensor| class declaration@>=
+class FSSparseTensor : public SparseTensor {
+public:@;
+	typedef SparseTensor::const_iterator const_iterator;
+private:@;
+	const int nv;
+	const Symmetry sym; 
+public:@;
+	FSSparseTensor(int d, int nvar, int r);
+	FSSparseTensor(const FSSparseTensor& t);
+	void insert(const IntSequence& s, int r, double c);
+	void multColumnAndAdd(const Tensor& t, Vector& v) const;
+	const Symmetry& getSym() const
+		{@+ return sym;@+}
+	int nvar() const
+		{@+ return nv;@+}
+	void print() const;
+};
+
+
+@ This is a general symmetry sparse tensor. It has |TensorDimens| and
+can be constructed as a slice of the full symmetry sparse tensor. The
+slicing constructor takes the same form as the slicing |FGSTensor|
+constructor from full symmetry sparse tensor.
+  
+@<|GSSparseTensor| class declaration@>=
+class GSSparseTensor : public SparseTensor {
+public:@;
+	typedef SparseTensor::const_iterator const_iterator;
+private:@;
+	const TensorDimens tdims;
+public:@;
+	GSSparseTensor(const FSSparseTensor& t, const IntSequence& ss,
+				   const IntSequence& coor, const TensorDimens& td);
+	GSSparseTensor(const GSSparseTensor& t)
+		: SparseTensor(t), tdims(t.tdims) @+{}
+	void insert(const IntSequence& s, int r, double c);
+	const Symmetry& getSym() const
+		{@+ return tdims.getSym();@+}
+	const TensorDimens& getDims() const
+		{@+ return tdims;@+}
+	void print() const;
+	
+};
+
+@ End of {\tt sparse\_tensor.h} file.
diff --git a/dynare++/tl/cc/stack_container.cweb b/dynare++/tl/cc/stack_container.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..3c7753aa36174bb644ef71db8932e9b6d2663148
--- /dev/null
+++ b/dynare++/tl/cc/stack_container.cweb
@@ -0,0 +1,670 @@
+@q $Id: stack_container.cweb 1835 2008-05-19 01:54:48Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@ Start of {\tt stack\_container.cpp} file.
+
+@c
+#include "stack_container.h"
+#include "pyramid_prod2.h"
+#include "ps_tensor.h"
+
+double FoldedStackContainer::fill_threshold = 0.00005;
+double UnfoldedStackContainer::fill_threshold = 0.00005;
+@<|FoldedStackContainer::multAndAdd| sparse code@>;
+@<|FoldedStackContainer::multAndAdd| dense code@>;
+@<|WorkerFoldMAADense::operator()()| code@>;
+@<|WorkerFoldMAADense| constructor code@>;
+@<|FoldedStackContainer::multAndAddSparse1| code@>;
+@<|WorkerFoldMAASparse1::operator()()| code@>;
+@<|WorkerFoldMAASparse1| constructor code@>;
+@<|FoldedStackContainer::multAndAddSparse2| code@>;
+@<|WorkerFoldMAASparse2::operator()()| code@>;
+@<|WorkerFoldMAASparse2| constructor code@>;
+@<|FoldedStackContainer::multAndAddSparse3| code@>;
+@<|FoldedStackContainer::multAndAddSparse4| code@>;
+@<|WorkerFoldMAASparse4::operator()()| code@>;
+@<|WorkerFoldMAASparse4| constructor code@>;
+@<|FoldedStackContainer::multAndAddStacks| dense code@>;
+@<|FoldedStackContainer::multAndAddStacks| sparse code@>;
+@#
+@<|UnfoldedStackContainer::multAndAdd| sparse code@>;
+@<|UnfoldedStackContainer::multAndAdd| dense code@>;
+@<|WorkerUnfoldMAADense::operator()()| code@>;
+@<|WorkerUnfoldMAADense| constructor code@>;
+@<|UnfoldedStackContainer::multAndAddSparse1| code@>;
+@<|WorkerUnfoldMAASparse1::operator()()| code@>;
+@<|WorkerUnfoldMAASparse1| constructor code@>;
+@<|UnfoldedStackContainer::multAndAddSparse2| code@>;
+@<|WorkerUnfoldMAASparse2::operator()()| code@>;
+@<|WorkerUnfoldMAASparse2| constructor code@>;
+@<|UnfoldedStackContainer::multAndAddStacks| code@>;
+
+
+@ Here we multiply the sparse tensor with the
+|FoldedStackContainer|. We have four implementations,
+|multAndAddSparse1|, |multAndAddSparse2|, |multAndAddSparse3|, and
+|multAndAddSparse4|. The third is not threaded yet and is expected to
+be the slowest. The |multAndAddSparse4| exploits the sparsity;
+however, it still seems to be worse than |multAndAddSparse2| even for
+really sparse matrices. On the other hand, it can be more efficient
+than |multAndAddSparse2| for large problems, since it needs less
+memory and can avoid much of the swapping. Very preliminary
+examination shows that |multAndAddSparse2| is the best in terms of
+time.
+
+@s FSSparseTensor int
+@s IrregTensorHeader int
+@s IrregTensor int
+
+@<|FoldedStackContainer::multAndAdd| sparse code@>=
+void FoldedStackContainer::multAndAdd(const FSSparseTensor& t,
+									  FGSTensor& out) const
+{
+	TL_RAISE_IF(t.nvar() != getAllSize(),
+				"Wrong number of variables of tensor for FoldedStackContainer::multAndAdd");
+	multAndAddSparse2(t, out);
+}
+
+@ Here we perform the Faa Di Bruno step for a given dimension |dim|, and for
+the dense fully symmetric tensor which is scattered in the container
+of general symmetry tensors. The implementation is essentially the same as
+|@<|UnfoldedStackContainer::multAndAdd| dense code@>|.
+
+@<|FoldedStackContainer::multAndAdd| dense code@>=
+void FoldedStackContainer::multAndAdd(int dim, const FGSContainer& c, FGSTensor& out) const
+{
+	TL_RAISE_IF(c.num() != numStacks(),
+				"Wrong symmetry length of container for FoldedStackContainer::multAndAdd");
+
+	THREAD_GROUP@, gr;
+	SymmetrySet ss(dim, c.num());
+	for (symiterator si(ss); !si.isEnd(); ++si) {
+		if (c.check(*si)) {
+			THREAD* worker = new WorkerFoldMAADense(*this, *si, c, out);
+			gr.insert(worker);
+		}
+	}
+	gr.run();
+}
+
+@ This is analogous to |@<|WorkerUnfoldMAADense::operator()()|
+code@>|.
+
+@<|WorkerFoldMAADense::operator()()| code@>=
+void WorkerFoldMAADense::operator()()
+{
+	Permutation iden(dense_cont.num());
+	IntSequence coor(sym, iden.getMap());
+	const FGSTensor* g = dense_cont.get(sym);
+	cont.multAndAddStacks(coor, *g, out, &out);
+}
+
+@ 
+@<|WorkerFoldMAADense| constructor code@>=
+WorkerFoldMAADense::WorkerFoldMAADense(const FoldedStackContainer& container, 
+									   const Symmetry& s,
+									   const FGSContainer& dcontainer,
+									   FGSTensor& outten)
+	: cont(container), sym(s), dense_cont(dcontainer), out(outten)
+{}
+
+@ This is analogous to |@<|UnfoldedStackContainer::multAndAddSparse1|
+code@>|.
+@<|FoldedStackContainer::multAndAddSparse1| code@>=
+void FoldedStackContainer::multAndAddSparse1(const FSSparseTensor& t,
+											 FGSTensor& out) const
+{
+	THREAD_GROUP@, gr;
+	UFSTensor dummy(0, numStacks(), t.dimen());
+	for (Tensor::index ui = dummy.begin(); ui != dummy.end(); ++ui) {
+		THREAD* worker = new WorkerFoldMAASparse1(*this, t, out, ui.getCoor());
+		gr.insert(worker);
+	}
+	gr.run();
+}
+
+@ This is analogous to |@<|WorkerUnfoldMAASparse1::operator()()| code@>|.
+The only difference is that instead of a |UPSTensor| as the
+result of multiplying the unfolded tensor with tensors from the
+containers, we have an |FPSTensor| with partially folded permuted
+symmetry.
+
+todo: make slice vertically narrowed according to the fill of t,
+vertically narrow out accordingly.
+
+@<|WorkerFoldMAASparse1::operator()()| code@>=
+void WorkerFoldMAASparse1::operator()()
+{
+	const EquivalenceSet& eset = ebundle.get(out.dimen());
+	const PermutationSet& pset = tls.pbundle->get(t.dimen());
+	Permutation iden(t.dimen());
+
+	UPSTensor slice(t, cont.getStackSizes(), coor,
+					PerTensorDimens(cont.getStackSizes(), coor));
+	for (int iper = 0; iper < pset.getNum(); iper++) {
+		const Permutation& per = pset.get(iper);
+		IntSequence percoor(coor.size());
+		per.apply(coor, percoor);
+		for (EquivalenceSet::const_iterator it = eset.begin();
+			 it != eset.end(); ++it) {
+			if ((*it).numClasses() == t.dimen()) {
+				StackProduct<FGSTensor> sp(cont, *it, out.getSym());
+				if (! sp.isZero(percoor)) {
+					KronProdStack<FGSTensor> kp(sp, percoor);
+					kp.optimizeOrder();
+					const Permutation& oper = kp.getPer();
+					if (Permutation(oper, per) == iden) {
+						FPSTensor fps(out.getDims(), *it, slice, kp);
+						{
+							SYNCHRO@, syn(&out, "WorkerFoldMAASparse1");
+							fps.addTo(out);
+						}
+					}
+				}
+			}
+		}
+	}
+}
+
+@ 
+@<|WorkerFoldMAASparse1| constructor code@>=
+WorkerFoldMAASparse1::WorkerFoldMAASparse1(const FoldedStackContainer& container,
+										   const FSSparseTensor& ten,
+										   FGSTensor& outten, const IntSequence& c)
+	: cont(container), t(ten), out(outten), coor(c), ebundle(*(tls.ebundle)) @+{}
+
+
+@ Here is the second implementation of the sparse folded |multAndAdd|. It
+is quite similar to the implementation of
+|@<|UnfoldedStackContainer::multAndAddSparse2| code@>|. We make a
+dense folded |slice|, and then call the folded |multAndAddStacks|, which
+multiplies all the combinations compatible with the slice.
+
+@<|FoldedStackContainer::multAndAddSparse2| code@>=
+void FoldedStackContainer::multAndAddSparse2(const FSSparseTensor& t,
+											 FGSTensor& out) const
+{
+	THREAD_GROUP@, gr;
+	FFSTensor dummy_f(0, numStacks(), t.dimen());
+	for (Tensor::index fi = dummy_f.begin(); fi != dummy_f.end(); ++fi) {
+		THREAD* worker = new WorkerFoldMAASparse2(*this, t, out, fi.getCoor());
+		gr.insert(worker);
+	}
+	gr.run();
+}
+
+@ Here we make a sparse slice first and then call |multAndAddStacks|
+if the slice is not empty. If the slice is really sparse, we call the
+sparse version of |multAndAddStacks|. What ``really sparse'' means is
+given by |fill_threshold|. It is not tuned yet; practice shows that
+it must be a very low number, since the sparse |multAndAddStacks| is
+much slower than the dense version.
+ 
+Further, we take only the nonzero rows of the slice, and accordingly
+of the |out| tensor. We skip the leading zero rows and drop the
+trailing zero rows.
+
+@<|WorkerFoldMAASparse2::operator()()| code@>=
+void WorkerFoldMAASparse2::operator()()
+{
+	GSSparseTensor slice(t, cont.getStackSizes(), coor,
+						 TensorDimens(cont.getStackSizes(), coor));
+	if (slice.getNumNonZero()) {
+		if (slice.getUnfoldIndexFillFactor() > FoldedStackContainer::fill_threshold) {
+			FGSTensor dense_slice(slice);
+			int r1 = slice.getFirstNonZeroRow();
+			int r2 = slice.getLastNonZeroRow();
+			FGSTensor dense_slice1(r1, r2-r1+1, dense_slice);
+			FGSTensor out1(r1, r2-r1+1, out);
+			cont.multAndAddStacks(coor, dense_slice1, out1, &out);
+		} else
+			cont.multAndAddStacks(coor, slice, out, &out);
+	}
+}
+
+@ 
+@<|WorkerFoldMAASparse2| constructor code@>=
+WorkerFoldMAASparse2::WorkerFoldMAASparse2(const FoldedStackContainer& container,
+										   const FSSparseTensor& ten,
+										   FGSTensor& outten, const IntSequence& c)
+	: cont(container), t(ten), out(outten), coor(c)
+{}
+
+
+@ Here is the third implementation of the sparse folded
+|multAndAdd|. It is a column-wise implementation, and thus is not a good
+candidate for the best performer.
+
+We go through all columns of the output. For each column we
+calculate a folded |sumcol|, which is a sum of all appropriate columns
+over all suitable equivalences. So we go through all suitable
+equivalences; for each we construct a |StackProduct| object and
+an |IrregTensor| for the corresponding column of $z$. The
+|IrregTensor| is an abstraction for the Kronecker multiplication of
+the stacked columns of the two containers with the zeros skipped. Then
+the column is added to |sumcol|. Finally, |sumcol| is multiplied by the
+sparse tensor.
+
+@<|FoldedStackContainer::multAndAddSparse3| code@>=
+void FoldedStackContainer::multAndAddSparse3(const FSSparseTensor& t,
+											 FGSTensor& out) const
+{
+	const EquivalenceSet& eset = ebundle.get(out.dimen());
+	for (Tensor::index run = out.begin(); run != out.end(); ++run) {
+		Vector outcol(out, *run);
+		FRSingleTensor sumcol(t.nvar(), t.dimen());
+		sumcol.zeros();
+		for (EquivalenceSet::const_iterator it = eset.begin();
+			 it != eset.end(); ++it) {
+			if ((*it).numClasses() == t.dimen()) {
+				StackProduct<FGSTensor> sp(*this, *it, out.getSym());
+				IrregTensorHeader header(sp, run.getCoor());
+				IrregTensor irten(header);
+				irten.addTo(sumcol);
+			}
+		}
+		t.multColumnAndAdd(sumcol, outcol);
+	}
+}
+
+@ Here is the fourth implementation of the sparse
+|FoldedStackContainer::multAndAdd|. It is almost equivalent to
+|multAndAddSparse2| with the exception that the |FPSTensor|, which
+results from the product of a slice with the Kronecker product of the
+stack derivatives, is calculated in a sparse fashion. For further details, see
+|@<|FoldedStackContainer::multAndAddStacks| sparse code@>| and
+|@<|FPSTensor| sparse constructor@>|.
+ 
+@<|FoldedStackContainer::multAndAddSparse4| code@>=
+void FoldedStackContainer::multAndAddSparse4(const FSSparseTensor& t, FGSTensor& out) const
+{
+	THREAD_GROUP@, gr;
+	FFSTensor dummy_f(0, numStacks(), t.dimen());
+	for (Tensor::index fi = dummy_f.begin(); fi != dummy_f.end(); ++fi) {
+		THREAD* worker = new WorkerFoldMAASparse4(*this, t, out, fi.getCoor());
+		gr.insert(worker);
+	}
+	gr.run();
+}
+
+@ The |WorkerFoldMAASparse4| is the same as |WorkerFoldMAASparse2|
+with the exception that we call a sparse version of
+|multAndAddStacks|.
+
+@<|WorkerFoldMAASparse4::operator()()| code@>=
+void WorkerFoldMAASparse4::operator()()
+{
+	GSSparseTensor slice(t, cont.getStackSizes(), coor,
+						 TensorDimens(cont.getStackSizes(), coor)); 
+	if (slice.getNumNonZero())
+		cont.multAndAddStacks(coor, slice, out, &out);
+}
+
+@ 
+@<|WorkerFoldMAASparse4| constructor code@>=
+WorkerFoldMAASparse4::WorkerFoldMAASparse4(const FoldedStackContainer& container,
+										   const FSSparseTensor& ten,
+										   FGSTensor& outten, const IntSequence& c)
+	: cont(container), t(ten), out(outten), coor(c)
+{}
+
+
+@ This is almost the same as
+|@<|UnfoldedStackContainer::multAndAddStacks| code@>|. The only
+difference is that we do not construct a |UPSTensor| from the
+|KronProdStack|, but a partially folded permuted
+symmetry |FPSTensor|. Note that the tensor |g| must be unfolded
+in order to be able to multiply with the unfolded rows of the Kronecker
+product. However, the columns of such a product are partially
+folded, giving rise to the |FPSTensor|.
+
+@<|FoldedStackContainer::multAndAddStacks| dense code@>=
+void FoldedStackContainer::multAndAddStacks(const IntSequence& coor,
+											const FGSTensor& g,
+											FGSTensor& out, const void* ad) const
+{
+	const EquivalenceSet& eset = ebundle.get(out.dimen());
+
+	UGSTensor ug(g);
+	UFSTensor dummy_u(0, numStacks(), g.dimen());
+	for (Tensor::index ui = dummy_u.begin(); ui != dummy_u.end(); ++ui) {
+		IntSequence tmp(ui.getCoor());
+		tmp.sort();
+		if (tmp == coor) {
+			Permutation sort_per(ui.getCoor());
+			sort_per.inverse();
+			for (EquivalenceSet::const_iterator it = eset.begin();
+				 it != eset.end(); ++it) {
+				if ((*it).numClasses() == g.dimen()) {
+					StackProduct<FGSTensor> sp(*this, *it, sort_per, out.getSym());
+					if (! sp.isZero(coor)) {
+						KronProdStack<FGSTensor> kp(sp, coor);
+						if (ug.getSym().isFull())
+							kp.optimizeOrder();
+						FPSTensor fps(out.getDims(), *it, sort_per, ug, kp);
+						{
+							SYNCHRO@, syn(ad, "multAndAddStacks");
+							fps.addTo(out);
+						}
+					}
+				}
+			}
+		}
+	}
+}
+
+@ This is almost the same as
+|@<|FoldedStackContainer::multAndAddStacks| dense code@>|. The only
+difference is that the Kronecker product of the stacks is multiplied
+with a sparse slice |GSSparseTensor| (not a dense slice |FGSTensor|). The
+multiplication is done in |@<|FPSTensor| sparse constructor@>|.
+
+@<|FoldedStackContainer::multAndAddStacks| sparse code@>=
+void FoldedStackContainer::multAndAddStacks(const IntSequence& coor,
+											const GSSparseTensor& g,
+											FGSTensor& out, const void* ad) const
+{
+	const EquivalenceSet& eset = ebundle.get(out.dimen());
+	UFSTensor dummy_u(0, numStacks(), g.dimen());
+	for (Tensor::index ui = dummy_u.begin(); ui != dummy_u.end(); ++ui) {
+		IntSequence tmp(ui.getCoor());
+		tmp.sort();
+		if (tmp == coor) {
+			Permutation sort_per(ui.getCoor());
+			sort_per.inverse();
+			for (EquivalenceSet::const_iterator it = eset.begin();
+				 it != eset.end(); ++it) {
+				if ((*it).numClasses() == g.dimen()) {
+					StackProduct<FGSTensor> sp(*this, *it, sort_per, out.getSym());
+					if (! sp.isZero(coor)) {
+						KronProdStack<FGSTensor> kp(sp, coor);
+						FPSTensor fps(out.getDims(), *it, sort_per, g, kp);
+						{
+							SYNCHRO@, syn(ad, "multAndAddStacks");
+							fps.addTo(out);
+						}
+					}
+				}
+			}
+		}
+	}
+}
+
+@ Here we simply call one of |multAndAddSparse1| and
+|multAndAddSparse2|. The first one allows for optimization of the
+Kronecker products and might therefore seem more efficient; currently
+|multAndAddSparse2| is called.
+
+@<|UnfoldedStackContainer::multAndAdd| sparse code@>=
+void UnfoldedStackContainer::multAndAdd(const FSSparseTensor& t,
+										UGSTensor& out) const
+{
+	TL_RAISE_IF(t.nvar() != getAllSize(),
+				"Wrong number of variables of tensor for UnfoldedStackContainer::multAndAdd");
+	multAndAddSparse2(t, out);
+}
+
+@ Here we implement the formula for stacks for a fully symmetric tensor
+scattered in a number of general symmetry tensors contained in a given
+container. The implementation is much the same as in
+|multAndAddSparse2|, but we do not take slices of a sparse tensor; we
+only do a lookup in the container.
+
+This means that we do not iterate through a dummy folded tensor to
+obtain folded coordinates of the stacks; rather, we iterate through all
+symmetries contained in the container, and the coordinates of the stacks
+are obtained as an unfolded identity sequence via the symmetry. The
+reason for doing this is that we cannot calculate the symmetry from the
+stack coordinates as easily as the stack coordinates from the symmetry.
+
+@<|UnfoldedStackContainer::multAndAdd| dense code@>=
+void UnfoldedStackContainer::multAndAdd(int dim, const UGSContainer& c,
+										UGSTensor& out) const
+{
+	TL_RAISE_IF(c.num() != numStacks(),
+				"Wrong symmetry length of container for UnfoldedStackContainer::multAndAdd");
+
+	THREAD_GROUP@, gr;
+	SymmetrySet ss(dim, c.num());
+	for (symiterator si(ss); !si.isEnd(); ++si) {
+		if (c.check(*si)) {
+			THREAD* worker = new WorkerUnfoldMAADense(*this, *si, c, out);
+			gr.insert(worker);
+		}
+	}
+	gr.run();
+}
+
+@ 
+@<|WorkerUnfoldMAADense::operator()()| code@>=
+void WorkerUnfoldMAADense::operator()()
+{
+	Permutation iden(dense_cont.num());
+	IntSequence coor(sym, iden.getMap());
+	const UGSTensor* g = dense_cont.get(sym);
+	cont.multAndAddStacks(coor, *g, out, &out);
+}
+
+@ 
+@<|WorkerUnfoldMAADense| constructor code@>=
+WorkerUnfoldMAADense::WorkerUnfoldMAADense(const UnfoldedStackContainer& container,
+										   const Symmetry& s,
+										   const UGSContainer& dcontainer,
+										   UGSTensor& outten)
+	: cont(container), sym(s), dense_cont(dcontainer), out(outten)@+ {}
+
+
+@ Here we implement the formula for unfolded tensors. If, for instance,
+the variable $z$ of a tensor $\left[f_{z^2}\right]$ is partitioned as
+$z=[a, b]$, then we perform the following:
+$$
+\eqalign{
+\left[f_{z^2}\right]\left(\sum_c\left[\matrix{a_{c(x)}\cr b_{c(x)}}\right]
+\otimes\left[\matrix{a_{c(y)}\cr b_{c(y)}}\right]\right)=&
+\left[f_{aa}\right]\left(\sum_ca_{c(x)}\otimes a_{c(y)}\right)+
+\left[f_{ab}\right]\left(\sum_ca_{c(x)}\otimes b_{c(y)}\right)+\cr
+&\left[f_{ba}\right]\left(\sum_cb_{c(x)}\otimes a_{c(y)}\right)+
+ \left[f_{bb}\right]\left(\sum_cb_{c(x)}\otimes b_{c(y)}\right)\cr
+}
+$$
+This is exactly what happens here. The code is clear. It goes through
+all combinations of stacks, and each thread is responsible for the
+operation on the slice corresponding to its combination of stacks.
+
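+The following is a small standalone sketch (plain C++, hypothetical
+names, independent of the tensor library) which verifies this block
+decomposition numerically for a single term of the sum, i.e. for one
+pair of stacked columns $x=[a_x;b_x]$ and $y=[a_y;b_y]$:
+
+	// verify  f_{z^2}(x (*) y) = sum over blocks of f_{..}(x_block (*) y_block)
+	#include <vector>
+	#include <cassert>
+	#include <cmath>
+
+	int main()
+	{
+		const int p = 2, q = 3, N = p + q, r = 4;    // sizes of a, b, z=[a;b], and of the result
+		std::vector<double> f(r*N*N), x(N), y(N);    // f_{z^2} stored row-major
+		for (int k = 0; k < (int) f.size(); k++) f[k] = 0.1*k + 0.3;
+		for (int k = 0; k < N; k++) { x[k] = 0.7*k - 1.0; y[k] = 0.2*k + 0.5; }
+
+		std::vector<double> lhs(r, 0.0), rhs(r, 0.0);
+		for (int row = 0; row < r; row++)            // f_{z^2} applied to the full x (*) y
+			for (int i = 0; i < N; i++)
+				for (int j = 0; j < N; j++)
+					lhs[row] += f[row*N*N + i*N + j]*x[i]*y[j];
+
+		const int off[2] = {0, p}, len[2] = {p, q};  // offsets and lengths of a and b within z
+		for (int bi = 0; bi < 2; bi++)               // block of x: a or b
+			for (int bj = 0; bj < 2; bj++)           // block of y: a or b
+				for (int row = 0; row < r; row++)
+					for (int i = 0; i < len[bi]; i++)
+						for (int j = 0; j < len[bj]; j++)
+							rhs[row] += f[row*N*N + (off[bi]+i)*N + (off[bj]+j)]
+										*x[off[bi]+i]*y[off[bj]+j];
+
+		for (int row = 0; row < r; row++)
+			assert(std::fabs(lhs[row] - rhs[row]) < 1e-9);
+		return 0;
+	}
+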
+@<|UnfoldedStackContainer::multAndAddSparse1| code@>=
+void UnfoldedStackContainer::multAndAddSparse1(const FSSparseTensor& t,
+											   UGSTensor& out) const
+{
+	THREAD_GROUP@, gr;
+	UFSTensor dummy(0, numStacks(), t.dimen());
+	for (Tensor::index ui = dummy.begin(); ui != dummy.end(); ++ui) {
+		THREAD* worker = new WorkerUnfoldMAASparse1(*this, t, out, ui.getCoor());
+		gr.insert(worker);
+	}
+	gr.run();
+}
+
+@ This does a step of |@<|UnfoldedStackContainer::multAndAddSparse1| code@>| for
+given coordinates. First it makes the slice for the given stack coordinates.
+Then it multiplies everything that should be multiplied with the slice.
+That is, it goes through all equivalences, creates a |StackProduct|, then a
+|KronProdStack|, which is added to |out|. So far everything is clear.
+
+However, we want to use the optimized |KronProdAllOptim| to minimize
+the number of flops and the memory needed in the Kronecker product. So we go
+through all permutations |per|, permute the coordinates to get
+|percoor|, go through all equivalences, and make a |KronProdStack| and
+optimize it. The result of the optimization is a permutation |oper|. Now,
+we multiply the Kronecker product with the slice only if the slice
+has the same ordering of coordinates as the Kronecker product
+|KronProdStack|. However, this is not entirely accurate. Since we go
+through {\bf all} permutations |per|, there might be two different
+permutations leading to the same ordering in |KronProdStack| and thus
+the same ordering in the optimized |KronProdStack|. The two cases
+would be counted twice, which is wrong. That is why we do not
+condition on $\hbox{coor}\circ\hbox{oper}\circ\hbox{per} =
+\hbox{coor}$, but we condition on
+$\hbox{oper}\circ\hbox{per}=\hbox{id}$. In this way, we rule out
+permutations |per| leading to the same ordering of stacks when
+applied to |coor|.
+
+todo: vertically narrow slice and out according to the fill in t.
+
+@<|WorkerUnfoldMAASparse1::operator()()| code@>=
+void WorkerUnfoldMAASparse1::operator()()
+{
+	const EquivalenceSet& eset = ebundle.get(out.dimen());
+	const PermutationSet& pset = tls.pbundle->get(t.dimen());
+	Permutation iden(t.dimen());
+
+	UPSTensor slice(t, cont.getStackSizes(), coor,
+					PerTensorDimens(cont.getStackSizes(), coor));
+	for (int iper = 0; iper < pset.getNum(); iper++) {
+		const Permutation& per = pset.get(iper);
+		IntSequence percoor(coor.size());
+		per.apply(coor, percoor);
+		for (EquivalenceSet::const_iterator it = eset.begin();
+			 it != eset.end(); ++it) {
+			if ((*it).numClasses() == t.dimen()) {
+				StackProduct<UGSTensor> sp(cont, *it, out.getSym());
+				if (! sp.isZero(percoor)) {
+					KronProdStack<UGSTensor> kp(sp, percoor);
+					kp.optimizeOrder();
+					const Permutation& oper = kp.getPer();
+					if (Permutation(oper, per) == iden) {
+						UPSTensor ups(out.getDims(), *it, slice, kp);
+						{
+							SYNCHRO@, syn(&out, "WorkerUnfoldMAASparse1");
+							ups.addTo(out);
+						}
+					}
+				}
+			}
+		}
+	}
+}
+
+@ 
+@<|WorkerUnfoldMAASparse1| constructor code@>=
+WorkerUnfoldMAASparse1::WorkerUnfoldMAASparse1(const UnfoldedStackContainer& container,
+											   const FSSparseTensor& ten,
+											   UGSTensor& outten, const IntSequence& c)
+	: cont(container), t(ten), out(outten), coor(c), ebundle(*(tls.ebundle)) @+{}
+
+
+@ Here we implement the formula in a slightly different way. We use the
+fact, in the notation of |@<|UnfoldedStackContainer::multAndAddSparse1|
+code@>|, that
+$$
+\left[f_{ba}\right]\left(\sum_cb_{c(x)}\otimes a_{c(y)}\right)=
+\left[f_{ab}\right]\left(\sum_ca_{c(y)}\otimes b_{c(x)}\right)\cdot P
+$$
+where $P$ is a suitable permutation of columns. The permutation
+corresponds to (in this example) a swap of $a$ and $b$. An advantage
+of this approach is that we do not need |UPSTensor| for $f_{ba}$, and
+thus we decrease the number of needed slices.
+
+So we go through all folded indices of stack coordinates; for
+each such index |fi| we make a slice and call |multAndAddStacks|. This
+goes through all corresponding unfolded indices to perform the
+formula. Each unsorted (unfolded) index implies a sorting permutation
+|sort_per| which must be used to permute the stacks in |StackProduct|, and
+to permute the equivalence classes when the |UPSTensor| is formed. In this way
+the column permutation $P$ from the formula is factored into the
+permutation of the |UPSTensor|.
+ 
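+As a small standalone illustration (plain C++, hypothetical names) of
+why such a column permutation $P$ exists: for column vectors, swapping
+the factors of a Kronecker product only permutes its entries.
+
+	#include <vector>
+	#include <cassert>
+
+	int main()
+	{
+		const int m = 3, n = 2;                      // lengths of a and b
+		std::vector<double> a(m), b(n), ab(m*n), ba(n*m);
+		for (int k = 0; k < m; k++) a[k] = k + 1.0;
+		for (int k = 0; k < n; k++) b[k] = 10.0*(k + 1);
+
+		for (int i = 0; i < m; i++)
+			for (int j = 0; j < n; j++)
+				ab[i*n + j] = a[i]*b[j];             // a (*) b
+		for (int i = 0; i < n; i++)
+			for (int j = 0; j < m; j++)
+				ba[i*m + j] = b[i]*a[j];             // b (*) a
+
+		for (int i = 0; i < n; i++)
+			for (int j = 0; j < m; j++)
+				assert(ba[i*m + j] == ab[j*n + i]);  // entries are only permuted
+		return 0;
+	}
+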
+@<|UnfoldedStackContainer::multAndAddSparse2| code@>=
+void UnfoldedStackContainer::multAndAddSparse2(const FSSparseTensor& t,
+											   UGSTensor& out) const
+{
+	THREAD_GROUP@, gr;
+	FFSTensor dummy_f(0, numStacks(), t.dimen());
+	for (Tensor::index fi = dummy_f.begin(); fi != dummy_f.end(); ++fi) {
+		THREAD* worker = new WorkerUnfoldMAASparse2(*this, t, out, fi.getCoor());
+		gr.insert(worker);
+	}
+	gr.run();
+}
+
+@ This does a step of |@<|UnfoldedStackContainer::multAndAddSparse2| code@>| for
+given coordinates.
+
+todo: implement |multAndAddStacks| for sparse slice as
+|@<|FoldedStackContainer::multAndAddStacks| sparse code@>| and do this method as
+|@<|WorkerFoldMAASparse2::operator()()| code@>|.
+
+@<|WorkerUnfoldMAASparse2::operator()()| code@>=
+void WorkerUnfoldMAASparse2::operator()()
+{
+	GSSparseTensor slice(t, cont.getStackSizes(), coor,
+						 TensorDimens(cont.getStackSizes(), coor));
+	if (slice.getNumNonZero()) {
+		FGSTensor fslice(slice);
+		UGSTensor dense_slice(fslice);
+		int r1 = slice.getFirstNonZeroRow();
+		int r2 = slice.getLastNonZeroRow();
+		UGSTensor dense_slice1(r1, r2-r1+1, dense_slice);
+		UGSTensor out1(r1, r2-r1+1, out);
+		
+		cont.multAndAddStacks(coor, dense_slice1, out1, &out);
+	}
+}
+
+@ 
+@<|WorkerUnfoldMAASparse2| constructor code@>=
+WorkerUnfoldMAASparse2::WorkerUnfoldMAASparse2(const UnfoldedStackContainer& container,
+											   const FSSparseTensor& ten,
+											   UGSTensor& outten, const IntSequence& c)
+	: cont(container), t(ten), out(outten), coor(c) @+{}
+
+
+@ For given folded coordinates of stacks |fi|, and an appropriate
+tensor $g$, whose symmetry is a symmetry of |fi|, the method
+contributes to |out| all tensors in the unfolded stack formula involving
+the stacks chosen by |fi|.
+
+We go through all |ui| coordinates which yield |fi| after sorting. We
+construct a permutation |sort_per| which sorts |ui| to |fi|. We go
+through all appropriate equivalences, and construct a |StackProduct|
+from the equivalence classes permuted by |sort_per|, then a |UPSTensor|
+whose column permutation is implied by the equivalence permuted by
+|sort_per|. The |UPSTensor| is then added to |out|.
+
+We cannot use the optimized |KronProdStack| here, since the symmetry
+of |UGSTensor& g| prescribes the ordering of the stacks. However, if
+|g| is fully symmetric, we can do the optimization harmlessly.
+
+@<|UnfoldedStackContainer::multAndAddStacks| code@>=
+void UnfoldedStackContainer::multAndAddStacks(const IntSequence& fi,
+											  const UGSTensor& g,
+											  UGSTensor& out, const void* ad) const
+{
+	const EquivalenceSet& eset = ebundle.get(out.dimen());
+
+	UFSTensor dummy_u(0, numStacks(), g.dimen());
+	for (Tensor::index ui = dummy_u.begin(); ui != dummy_u.end(); ++ui) {
+		IntSequence tmp(ui.getCoor());
+		tmp.sort();
+		if (tmp == fi) {
+			Permutation sort_per(ui.getCoor());
+			sort_per.inverse();
+			for (EquivalenceSet::const_iterator it = eset.begin();
+				 it != eset.end(); ++it) {
+				if ((*it).numClasses() == g.dimen()) {
+					StackProduct<UGSTensor> sp(*this, *it, sort_per, out.getSym());
+					if (! sp.isZero(fi)) {
+						KronProdStack<UGSTensor> kp(sp, fi);
+						if (g.getSym().isFull())
+							kp.optimizeOrder();
+						UPSTensor ups(out.getDims(), *it, sort_per, g, kp);
+						{
+							SYNCHRO@, syn(ad, "multAndAddStacks");
+							ups.addTo(out);
+						}
+					}
+				}
+			}
+		}
+	}
+}
+
+@ End of {\tt stack\_container.cpp} file.
diff --git a/dynare++/tl/cc/stack_container.hweb b/dynare++/tl/cc/stack_container.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..77ca511bb12068d899c4df233a9492ee0fdab2ad
--- /dev/null
+++ b/dynare++/tl/cc/stack_container.hweb
@@ -0,0 +1,771 @@
+@q $Id: stack_container.hweb 745 2006-05-09 13:20:00Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@*2 Stack of containers. Start of {\tt stack\_container.h} file.
+
+Here we develop abstractions for stacked containers of tensors. For
+instance, in perturbation methods for SDGE we need the function
+$$z(y,u,u',\sigma)=\left[\matrix{G(y,u,u',\sigma)\cr g(y,u,\sigma)\cr y\cr u}\right]$$
+and we need to calculate one step of the Faa Di Bruno formula
+$$\left[B_{s^k}\right]_{\alpha_1\ldots\alpha_k}=\left[f_{z^l}\right]_{\beta_1\ldots\beta_l}
+\sum_{c\in M_{l,k}}\prod_{m=1}^l\left[z_{s^k(c_m)}\right]^{\beta_m}_{c_m(\alpha)}$$
+where we have containers for derivatives of $G$ and $g$.
+
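+(For orientation, a sketch of the lowest-order instance: summing the
+contributions over $l=1,2$, the second-order step reduces to the familiar
+chain rule
+$$\left[B_{s^2}\right]=\left[f_z\right]\left[z_{s^2}\right]
++\left[f_{z^2}\right]\left(\left[z_s\right]\otimes\left[z_s\right]\right),$$
+the first term coming from the single equivalence with one class and the
+second from the single equivalence with two classes.)
+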
+The main purpose of this file is to define abstractions for a stack of
+containers and possibly raw variables, and to code the |multAndAdd| method
+calculating (one step of) the Faa Di Bruno formula for folded and
+unfolded tensors. Note also that the tensors $\left[f_{z^l}\right]$ are
+sparse.
+
+The abstractions are built as follows. At the top, there is an
+interface describing a stack of columns. It contains the pure virtual
+methods needed for manipulating the container stack. For technical
+reasons it is a template. Both versions (folded and unfolded) provide
+all the interface necessary for the implementation of |multAndAdd|. The
+second branch of inheritance is first a general implementation of the
+interface, |StackContainer|, and then a specific one (|ZContainer| for
+our specific $z$). The only method which remains virtual after
+|StackContainer| is |getType|, which is implemented in the
+specialization and determines the behaviour of the stack. The complete
+classes are obtained by inheriting from both branches, as drawn below:
+
+\def\drawpenta#1#2#3#4#5{%
+\hbox{$
+\hgrid=40pt\vgrid=20pt%
+\sarrowlength=25pt%
+\gridcommdiag{%
+&&\hbox{#1}&&\cr
+&\llap{virtual}\arrow(-1,-1)&&\arrow(1,-1)\rlap{virtual}&\cr
+\hbox{#2}&&&&\hbox{#3}\cr
+\arrow(0,-1)&&&&\cr
+\hbox{#4}&&&
+{\multiply\sarrowlength by 63\divide\sarrowlength by 50\arrow(-1,-2)}&\cr
+&\arrow(1,-1)&&&\cr
+&&\hbox{#5}&&\cr
+}$}}
+
+\centerline{
+\drawpenta{|StackContainerInterface<FGSTensor>|}{|StackContainer<FGSTensor>|}%
+	      {|FoldedStackContainer|}{|ZContainer<FGSTensor>|}{|FoldedZContainer|}
+}
+
+\centerline{
+\drawpenta{|StackContainerInterface<UGSTensor>|}{|StackContainer<UGSTensor>|}%
+	      {|UnfoldedStackContainer|}{|ZContainer<UGSTensor>|}{|UnfoldedZContainer|}
+}
+
+We also have two supporting classes, |StackProduct| and |KronProdStack|,
+and a number of worker classes used as threads.
+
+@s StackContainerInterface int
+@s StackContainer int
+@s ZContainer int
+@s FoldedStackContainer int
+@s UnfoldedStackContainer int
+@s FoldedZContainer int
+@s UnfoldedZContainer int
+@s WorkerFoldMAADense int
+@s WorkerFoldMAASparse1 int
+@s WorkerFoldMAASparse2 int
+@s WorkerFoldMAASparse4 int
+@s WorkerUnfoldMAADense int
+@s WorkerUnfoldMAASparse1 int
+@s WorkerUnfoldMAASparse2 int
+@s GContainer int
+@s FoldedGContainer int
+@s UnfoldedGContainer int
+@s StackProduct int
+@s KronProdStack int
+
+@c
+#ifndef STACK_CONTAINER_H
+#define STACK_CONTAINER_H
+
+#include "int_sequence.h"
+#include "equivalence.h"
+#include "tl_static.h"
+#include "t_container.h"
+#include "kron_prod.h"
+#include "permutation.h"
+#include "sthread.h"
+
+@<|StackContainerInterface| class declaration@>;
+@<|StackContainer| class declaration@>;
+@<|FoldedStackContainer| class declaration@>;
+@<|UnfoldedStackContainer| class declaration@>;
+@<|ZContainer| class declaration@>;
+@<|FoldedZContainer| class declaration@>;
+@<|UnfoldedZContainer| class declaration@>;
+@<|GContainer| class declaration@>;
+@<|FoldedGContainer| class declaration@>;
+@<|UnfoldedGContainer| class declaration@>;
+@<|StackProduct| class declaration@>;
+@<|KronProdStack| class declaration@>;
+@<|WorkerFoldMAADense| class declaration@>;
+@<|WorkerFoldMAASparse1| class declaration@>;
+@<|WorkerFoldMAASparse2| class declaration@>;
+@<|WorkerFoldMAASparse4| class declaration@>;
+@<|WorkerUnfoldMAADense| class declaration@>;
+@<|WorkerUnfoldMAASparse1| class declaration@>;
+@<|WorkerUnfoldMAASparse2| class declaration@>;
+
+#endif
+
+@ Here is the general interface to a stack container. The subclasses
+maintain an |IntSequence| of stack sizes, i.e. the sizes of $G$, $g$, $y$,
+and $u$, then a convenience |IntSequence| of stack offsets, and a vector
+of pointers to containers, in our example $G$ and $g$.
+
+A non-virtual subclass must implement |getType|, which determines the
+dependency of the stack items on symmetries. There are three possible
+types for a symmetry: the derivative of a stack item wrt. the symmetry
+is either a matrix, a unit matrix, or zero.
+
+Method |isZero| returns true if the derivative of a given stack item
+wrt. a given symmetry is zero as defined by |getType|, or if the
+derivative is not present in the container. In this way, we can
+implement the formula as if some of the tensors were zero, which is
+not literally true (they are only missing).
+
+Method |createPackedColumn| returns the given column of the stacked
+derivatives with respect to the given symmetry, where all zeros coming
+from zero types and all unit matrices are deleted. See {\tt
+kron\_prod2.hweb} for explanation.
+
+@<|StackContainerInterface| class declaration@>=
+template <class _Ttype>@;
+class StackContainerInterface {
+public:@;
+	typedef TensorContainer<_Ttype> _Ctype;
+	typedef enum {@+ matrix, unit, zero@+} itype;
+protected:@;
+	const EquivalenceBundle& ebundle;
+public:@;
+	StackContainerInterface()
+		: ebundle(*(tls.ebundle))@+ {}
+	virtual ~StackContainerInterface()@+ {}
+	virtual const IntSequence& getStackSizes() const =0;
+	virtual IntSequence& getStackSizes() =0;
+	virtual const IntSequence& getStackOffsets() const =0;
+	virtual IntSequence& getStackOffsets() =0;
+	virtual int numConts() const =0;
+	virtual const _Ctype* getCont(int i) const =0;
+	virtual itype getType(int i, const Symmetry& s) const =0;
+	virtual int numStacks() const =0;
+	virtual bool isZero(int i, const Symmetry& s) const =0;
+	virtual const _Ttype* getMatrix(int i, const Symmetry& s) const =0;
+	virtual int getLengthOfMatrixStacks(const Symmetry& s) const =0;
+	virtual int getUnitPos(const Symmetry& s) const =0;
+	virtual Vector* createPackedColumn(const Symmetry& s,
+									   const IntSequence& coor,
+									   int& iu) const =0;
+	int getAllSize() const
+		{@+ return getStackOffsets()[numStacks()-1]
+			 + getStackSizes()[numStacks()-1];@+}
+};
+
+@ Here is |StackContainer|, which implements almost all of the
+|StackContainerInterface| interface except for one method, |getType|,
+whose implementation is left to the specializations.
+
+@<|StackContainer| class declaration@>=
+template <class _Ttype>@;
+class StackContainer : virtual public StackContainerInterface<_Ttype> {
+public:@;
+	typedef StackContainerInterface<_Ttype> _Stype;
+	typedef typename StackContainerInterface<_Ttype>::_Ctype _Ctype;
+	typedef typename StackContainerInterface<_Ttype>::itype itype;
+protected:@;
+	int num_conts;
+	IntSequence stack_sizes;
+	IntSequence stack_offsets;
+	const _Ctype** const conts;
+public:@;
+	StackContainer(int ns, int nc)
+		: num_conts(nc), stack_sizes(ns, 0), stack_offsets(ns, 0),
+		  conts(new const _Ctype*[nc])@+ {}
+	virtual ~StackContainer() @+{delete [] conts;}
+	const IntSequence& getStackSizes() const
+		{@+ return stack_sizes;@+}
+	IntSequence& getStackSizes()
+		{@+ return stack_sizes;@+}
+	const IntSequence& getStackOffsets() const
+		{@+ return stack_offsets;@+}
+	IntSequence& getStackOffsets()
+		{@+ return stack_offsets;@+}
+	int numConts() const
+		{@+ return num_conts;}
+	const _Ctype* getCont(int i) const
+		{@+ return conts[i];@+}
+	virtual itype getType(int i, const Symmetry& s) const =0;
+	int numStacks() const
+		{@+ return stack_sizes.size();@+}
+	@<|StackContainer::isZero| code@>;
+	@<|StackContainer::getMatrix| code@>;
+	@<|StackContainer::getLengthOfMatrixStacks| code@>;
+	@<|StackContainer::getUnitPos| code@>;
+	@<|StackContainer::createPackedColumn| code@>;
+protected:@;
+	@<|StackContainer::calculateOffsets| code@>;
+};
+
+@ 
+@<|StackContainer::isZero| code@>=
+bool isZero(int i, const Symmetry& s) const
+{
+	TL_RAISE_IF(i < 0 || i >= numStacks(),
+				"Wrong index to stack in StackContainer::isZero.");
+	return (getType(i, s) == _Stype::zero ||
+			(getType(i, s) == _Stype::matrix && !conts[i]->check(s)));
+}
+
+@ 
+@<|StackContainer::getMatrix| code@>=
+const _Ttype* getMatrix(int i, const Symmetry& s) const
+{
+	TL_RAISE_IF(isZero(i, s) || getType(i, s) == _Stype::unit,
+				"Matrix is not returned in StackContainer::getMatrix");
+	return conts[i]->get(s);
+}
+
+@ 
+@<|StackContainer::getLengthOfMatrixStacks| code@>=
+int getLengthOfMatrixStacks(const Symmetry& s) const
+{
+	int res = 0;
+	int i = 0;
+	while (i < numStacks() && getType(i, s) == _Stype::matrix)
+		res += stack_sizes[i++];
+	return res;
+}
+
+
+@ 
+@<|StackContainer::getUnitPos| code@>=
+int getUnitPos(const Symmetry& s) const
+{
+	if (s.dimen() != 1)
+		return -1;
+	int i = numStacks()-1; 
+	while (i >= 0 && getType(i, s) != _Stype::unit)
+		i--;
+	return i;
+}
+
+
+@ 
+@<|StackContainer::createPackedColumn| code@>=
+Vector* createPackedColumn(const Symmetry& s,
+						   const IntSequence& coor, int& iu) const
+{
+	TL_RAISE_IF(s.dimen() != coor.size(),
+				"Incompatible coordinates for symmetry in StackContainer::createPackedColumn");
+
+	int len = getLengthOfMatrixStacks(s);
+	iu = -1;
+	int i = 0;
+	if (-1 != (i = getUnitPos(s))) {
+		iu = stack_offsets[i] + coor[0];
+		len++;
+	}
+
+	Vector* res = new Vector(len);
+	i = 0;
+	while (i < numStacks() && getType(i, s) == _Stype::matrix) {
+		const _Ttype* t = getMatrix(i, s);
+		Tensor::index ind(t, coor);
+		Vector subres(*res, stack_offsets[i], stack_sizes[i]);
+		subres = ConstVector(ConstGeneralMatrix(*t), *ind);
+		i++;
+	}
+	if (iu != -1)
+		(*res)[len-1] = 1;
+
+	return res;
+}
+
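+@ For example (a hypothetical instance): if for a symmetry $s$ of
+dimension one the first two stacks are of type matrix with sizes 5 and 3,
+the third stack is the unit, and the fourth is zero, then the packed
+column has length $5+3+1=9$, its last entry is 1, and |iu| is set to the
+offset of the third stack plus |coor[0]|.
+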
+@ 
+@<|StackContainer::calculateOffsets| code@>=
+void calculateOffsets()
+{
+	stack_offsets[0] = 0;
+	for (int i = 1; i < stack_offsets.size(); i++)
+		stack_offsets[i] = stack_offsets[i-1] + stack_sizes[i-1];
+}
+
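+@ For instance, with stack sizes $(5,3,2,1)$ the computed offsets are
+$(0,5,8,10)$, and |getAllSize()| then returns $11$.
+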
+@ 
+@<|FoldedStackContainer| class declaration@>=
+class WorkerFoldMAADense;
+class WorkerFoldMAASparse1;
+class WorkerFoldMAASparse2;
+class WorkerFoldMAASparse4;
+class FoldedStackContainer : virtual public StackContainerInterface<FGSTensor> {
+	friend class WorkerFoldMAADense;
+	friend class WorkerFoldMAASparse1;
+	friend class WorkerFoldMAASparse2;
+	friend class WorkerFoldMAASparse4;
+public:@;
+	static double fill_threshold;
+	void multAndAdd(int dim, const TensorContainer<FSSparseTensor>& c ,
+					FGSTensor& out) const
+		{@+ if (c.check(Symmetry(dim))) multAndAdd(*(c.get(Symmetry(dim))), out);@+}
+	void multAndAdd(const FSSparseTensor& t, FGSTensor& out) const;
+	void multAndAdd(int dim, const FGSContainer& c, FGSTensor& out) const;
+protected:@;
+	void multAndAddSparse1(const FSSparseTensor& t, FGSTensor& out) const;
+	void multAndAddSparse2(const FSSparseTensor& t, FGSTensor& out) const;
+	void multAndAddSparse3(const FSSparseTensor& t, FGSTensor& out) const;
+	void multAndAddSparse4(const FSSparseTensor& t, FGSTensor& out) const;
+	void multAndAddStacks(const IntSequence& fi, const FGSTensor& g,
+						  FGSTensor& out, const void* ad) const;
+	void multAndAddStacks(const IntSequence& fi, const GSSparseTensor& g,
+						  FGSTensor& out, const void* ad) const;
+};
+
+
+@ 
+@<|UnfoldedStackContainer| class declaration@>=
+class WorkerUnfoldMAADense;
+class WorkerUnfoldMAASparse1;
+class WorkerUnfoldMAASparse2;
+class UnfoldedStackContainer : virtual public StackContainerInterface<UGSTensor> {
+	friend class WorkerUnfoldMAADense;
+	friend class WorkerUnfoldMAASparse1;
+	friend class WorkerUnfoldMAASparse2;
+public:@;
+	static double fill_threshold;
+	void multAndAdd(int dim, const TensorContainer<FSSparseTensor>& c ,
+					UGSTensor& out) const
+		{@+ if (c.check(Symmetry(dim))) multAndAdd(*(c.get(Symmetry(dim))), out);@+}
+	void multAndAdd(const FSSparseTensor& t, UGSTensor& out) const;
+	void multAndAdd(int dim, const UGSContainer& c, UGSTensor& out) const;
+protected:@;
+	void multAndAddSparse1(const FSSparseTensor& t, UGSTensor& out) const;
+	void multAndAddSparse2(const FSSparseTensor& t, UGSTensor& out) const;
+	void multAndAddStacks(const IntSequence& fi, const UGSTensor& g,
+						  UGSTensor& out, const void* ad) const;
+};
+
+@ Here is the specialization of the |StackContainer|. We implement
+here the $z$ needed in the SDGE context. We implement |getType| and define
+a constructor feeding in the data and sizes.
+
+Note that it has two containers: the first depends on four
+variables, $G(y^*,u,u',\sigma)$, and the second on three
+variables, $g(y^*,u,\sigma)$. In order to be able to stack them,
+we make the second container $g$ depend on four variables as well, the
+third variable $u'$ being a dummy, so that it always returns zero if the
+dimension wrt. $u'$ is positive.
+
+@<|ZContainer| class declaration@>=
+template <class _Ttype>@;
+class ZContainer : public StackContainer<_Ttype> {
+public:@;
+	typedef StackContainer<_Ttype> _Tparent;
+	typedef StackContainerInterface<_Ttype> _Stype;
+	typedef typename _Tparent::_Ctype _Ctype;
+	typedef typename _Tparent::itype itype;
+	ZContainer(const _Ctype* gss, int ngss, const _Ctype* g, int ng,
+			   int ny, int nu)
+		: _Tparent(4, 2)
+		{
+			_Tparent::stack_sizes[0] = ngss; _Tparent::stack_sizes[1] = ng;
+			_Tparent::stack_sizes[2] = ny; _Tparent::stack_sizes[3] = nu;
+			_Tparent::conts[0] = gss;
+			_Tparent::conts[1] = g;
+			_Tparent::calculateOffsets();
+		}
+
+	@<|ZContainer::getType| code@>;
+};
+
+@ Here we say what happens if we differentiate $z$. Recall from the top
+of the file how $z$ looks, and the code is clear.
+
+@<|ZContainer::getType| code@>=
+itype getType(int i, const Symmetry& s) const
+{
+	if (i == 0)
+		return _Stype::matrix;
+	if (i == 1)
+		if (s[2] > 0)
+			return _Stype::zero;
+		else
+			return _Stype::matrix;
+	if (i == 2)
+		if (s == Symmetry(1,0,0,0))
+			return _Stype::unit;
+		else
+			return _Stype::zero;
+	if (i == 3)
+		if (s == Symmetry(0,1,0,0))
+			return _Stype::unit;
+		else
+			return _Stype::zero;
+
+	TL_RAISE("Wrong stack index in ZContainer::getType");
+	return _Stype::zero;
+}
+
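+@ For illustration (a hypothetical reading of the code above, writing a
+symmetry as the number of derivatives wrt. $y^*$, $u$, $u'$ and
+$\sigma$): |getType(0, s)| is always |matrix|, since $G$ depends on all
+variables; |getType(1, s)| is |zero| whenever |s[2] > 0| (e.g. for
+$s=(1,0,1,0)$), since $g$ does not depend on $u'$, and |matrix|
+otherwise; |getType(2, s)| is |unit| only for $s=(1,0,0,0)$; and
+|getType(3, s)| is |unit| only for $s=(0,1,0,0)$.
+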
+@ 
+@<|FoldedZContainer| class declaration@>=
+class FoldedZContainer : public ZContainer<FGSTensor>,
+						 public FoldedStackContainer {
+public:@;
+	typedef TensorContainer<FGSTensor> _Ctype;
+	FoldedZContainer(const _Ctype* gss, int ngss, const _Ctype* g, int ng,
+					 int ny, int nu)
+		: ZContainer<FGSTensor>(gss, ngss, g, ng, ny, nu)@+ {}
+};
+
+@ 
+@<|UnfoldedZContainer| class declaration@>=
+class UnfoldedZContainer : public ZContainer<UGSTensor>,
+						   public UnfoldedStackContainer {
+public:@;
+	typedef TensorContainer<UGSTensor> _Ctype;
+	UnfoldedZContainer(const _Ctype* gss, int ngss, const _Ctype* g, int ng,
+					   int ny, int nu)
+		: ZContainer<UGSTensor>(gss, ngss, g, ng, ny, nu)@+ {}
+};
+
+@ Here we have another specialization of a container used in the context
+of SDGE. We define a container for
+$$G(y,u,u',\sigma)=g^{**}(g^*(y,u,\sigma),u',\sigma)$$
+
+For some reason, the symmetry of $g^{**}$ has length $4$ although it
+really depends on three variables. (To learn the reason, consult
+|@<|ZContainer| class declaration@>|.) So, it has four stacks, the
+third of which is a dummy and always returns zero. The first stack
+corresponds to a container of $g^*$.
+
+@<|GContainer| class declaration@>=
+template <class _Ttype>@;
+class GContainer : public StackContainer<_Ttype> {
+public:@;
+	typedef StackContainer<_Ttype> _Tparent;
+	typedef StackContainerInterface<_Ttype> _Stype;
+	typedef typename StackContainer<_Ttype>::_Ctype _Ctype;
+	typedef typename StackContainer<_Ttype>::itype itype;
+	GContainer(const _Ctype* gs, int ngs, int nu)
+		: StackContainer<_Ttype>(4, 1)
+		{
+			_Tparent::stack_sizes[0] = ngs; _Tparent::stack_sizes[1] = nu;
+			_Tparent::stack_sizes[2] = nu; _Tparent::stack_sizes[3] = 1;
+			_Tparent::conts[0] = gs;
+			_Tparent::calculateOffsets();
+		}
+
+	@<|GContainer::getType| code@>;
+};
+
+@ Here we define the dependencies in
+$g^{**}(g^*(y,u,\sigma),u',\sigma)$. Also note that the first derivative
+of $g^*$ wrt. $\sigma$ is always zero, so we also encode this
+information.
+
+@<|GContainer::getType| code@>=
+itype getType(int i, const Symmetry& s) const
+{
+	if (i == 0)
+		if (s[2] > 0 || s == Symmetry(0,0,0,1))
+			return _Stype::zero;
+		else
+			return _Stype::matrix;
+	if (i == 1)
+		if (s == Symmetry(0,0,1,0))
+			return _Stype::unit;
+		else
+			return _Stype::zero;
+	if (i == 2)
+		return _Stype::zero;
+	if (i == 3)
+		if (s == Symmetry(0,0,0,1))
+			return _Stype::unit;
+		else
+			return _Stype::zero;
+
+	TL_RAISE("Wrong stack index in GContainer::getType");
+	return _Stype::zero;
+}
+
+
+@ 
+@<|FoldedGContainer| class declaration@>=
+class FoldedGContainer : public GContainer<FGSTensor>,
+						 public FoldedStackContainer {
+public:@;
+	typedef TensorContainer<FGSTensor> _Ctype;
+	FoldedGContainer(const _Ctype* gs, int ngs, int nu)
+		: GContainer<FGSTensor>(gs, ngs, nu)@+ {}
+};
+
+@ 
+@<|UnfoldedGContainer| class declaration@>=
+class UnfoldedGContainer : public GContainer<UGSTensor>,
+						   public UnfoldedStackContainer {
+public:@;
+	typedef TensorContainer<UGSTensor> _Ctype;
+	UnfoldedGContainer(const _Ctype* gs, int ngs, int nu)
+		: GContainer<UGSTensor>(gs, ngs, nu)@+ {}
+};
+
+
+@ Here we have a support class for a product of |StackContainer|s. It
+only adds a dimension to |StackContainer|. It selects the symmetries
+according to the equivalence classes passed to the constructor. The
+classes of the equivalence can be permuted by some given
+permutation. Nothing else is interesting.
+
+@<|StackProduct| class declaration@>=
+template <class _Ttype>@;
+class StackProduct {
+public:@;
+	typedef StackContainerInterface<_Ttype> _Stype;
+	typedef typename _Stype::_Ctype _Ctype;
+	typedef typename _Stype::itype itype;
+protected:@;
+	const _Stype& stack_cont;
+	InducedSymmetries syms;
+	Permutation per;
+public:@;
+	StackProduct(const _Stype& sc, const Equivalence& e,
+				 const Symmetry& os)
+		: stack_cont(sc), syms(e, os), per(e)@+ {}
+	StackProduct(const _Stype& sc, const Equivalence& e,
+				 const Permutation& p, const Symmetry& os)
+		: stack_cont(sc), syms(e, p, os), per(e, p)@+ {}
+	int dimen() const
+		{@+ return syms.size();@+}
+	int getAllSize() const
+		{@+ return stack_cont.getAllSize();@+}
+	const Symmetry& getProdSym(int ip) const
+		{@+ return syms[ip];@+}
+	@<|StackProduct::isZero| code@>;
+	@<|StackProduct::getType| code@>;
+	@<|StackProduct::getMatrix| code@>;
+	@<|StackProduct::createPackedColumns| code@>;
+	@<|StackProduct::getSize| code@>;
+	@<|StackProduct::numMatrices| code@>;
+};
+
+@ 
+@<|StackProduct::isZero| code@>=
+bool isZero(const IntSequence& istacks) const
+{
+	TL_RAISE_IF(istacks.size() != dimen(),
+				"Wrong istacks coordinates for StackProduct::isZero");
+
+	bool res = false;
+	int i = 0;
+	while (i < dimen() && !(res = stack_cont.isZero(istacks[i], syms[i])))
+		i++;
+	return res;
+}
+
+@ 
+@<|StackProduct::getType| code@>=
+itype getType(int is, int ip) const
+{
+	TL_RAISE_IF(is < 0 || is >= stack_cont.numStacks(),
+				"Wrong index to stack in StackProduct::getType");
+	TL_RAISE_IF(ip < 0 || ip >= dimen(),
+				"Wrong index to stack container in StackProduct::getType");
+	return stack_cont.getType(is, syms[ip]);
+}
+
+@ 
+@<|StackProduct::getMatrix| code@>=
+const _Ttype* getMatrix(int is, int ip) const
+{
+	return stack_cont.getMatrix(is, syms[ip]);
+}
+
+@ 
+@<|StackProduct::createPackedColumns| code@>=
+void createPackedColumns(const IntSequence& coor,
+						 Vector** vs, IntSequence& iu) const
+{
+	TL_RAISE_IF(iu.size() != dimen(),
+				"Wrong storage length for unit flags in StackProduct::createPackedColumn");
+	TL_RAISE_IF(coor.size() != per.size(),
+				"Wrong size of index coor in StackProduct::createPackedColumn");
+	IntSequence perindex(coor.size());
+	per.apply(coor, perindex);
+	int off = 0;
+	for (int i = 0; i < dimen(); i++) {
+		IntSequence percoor(perindex, off, syms[i].dimen() + off);
+		vs[i] = stack_cont.createPackedColumn(syms[i], percoor, iu[i]);
+		off += syms[i].dimen();
+	}
+}
+
+@ 
+@<|StackProduct::getSize| code@>=
+int getSize(int is) const
+{
+	return stack_cont.getStackSizes()[is];
+}
+
+
+@ 
+@<|StackProduct::numMatrices| code@>=
+int numMatrices(const IntSequence& istacks) const
+{
+	TL_RAISE_IF(istacks.size() != dimen(),
+				"Wrong size of stack coordinates in StackContainer::numMatrices");
+	int ret = 0;
+	int ip = 0;
+	while (ip < dimen() && getType(istacks[ip], ip) == _Stype::matrix) {
+		ret++;
+		ip++;
+	}
+	return ret;
+}
+
+@ Here we inherit from the Kronecker product |KronProdAllOptim| only to
+allow for a constructor constructing from a |StackProduct|.
+
+@<|KronProdStack| class declaration@>=
+template <class _Ttype>
+class KronProdStack : public KronProdAllOptim {
+public:@;
+	typedef StackProduct<_Ttype> _Ptype;
+	typedef StackContainerInterface<_Ttype> _Stype;
+	@<|KronProdStack| constructor code@>;
+};
+
+@ Here we construct |KronProdAllOptim| from a |StackProduct| and a given
+selection of stack items from the stack containers in the product. We
+only decide whether to insert a matrix or a unit matrix.
+
+At this point, we do not call |KronProdAllOptim::optimizeOrder|, so
+the |KronProdStack| behaves like |KronProdAll| (i.e. no optimization
+is done).
+
+@<|KronProdStack| constructor code@>=
+KronProdStack(const _Ptype& sp, const IntSequence& istack)
+	: KronProdAllOptim(sp.dimen())
+{
+	TL_RAISE_IF(sp.dimen() != istack.size(),
+				"Wrong stack product dimension for KronProdStack constructor");
+	
+	for (int i = 0; i < sp.dimen(); i++) {
+		TL_RAISE_IF(sp.getType(istack[i], i) == _Stype::zero,
+					"Attempt to construct KronProdStack from zero matrix");
+		if (sp.getType(istack[i], i) == _Stype::unit)
+			setUnit(i, sp.getSize(istack[i]));
+		if (sp.getType(istack[i], i) == _Stype::matrix) {
+			const TwoDMatrix* m = sp.getMatrix(istack[i], i);
+			TL_RAISE_IF(m->nrows() != sp.getSize(istack[i]),
+						"Wrong size of returned matrix in KronProdStack constructor");
+			setMat(i, *m);
+		}
+	}
+}
+
+
+@ 
+@<|WorkerFoldMAADense| class declaration@>=
+class WorkerFoldMAADense : public THREAD {
+	const FoldedStackContainer& cont;
+	Symmetry sym;
+	const FGSContainer& dense_cont;
+	FGSTensor& out;
+public:@;
+	WorkerFoldMAADense(const FoldedStackContainer& container, 
+					   const Symmetry& s,
+					   const FGSContainer& dcontainer,
+					   FGSTensor& outten);
+	void operator()();
+};
+
+@ 
+@<|WorkerFoldMAASparse1| class declaration@>=
+class WorkerFoldMAASparse1 : public THREAD {
+	const FoldedStackContainer& cont;
+	const FSSparseTensor& t;
+	FGSTensor& out;
+	IntSequence coor;
+	const EquivalenceBundle& ebundle;
+public:@;
+	WorkerFoldMAASparse1(const FoldedStackContainer& container,
+						 const FSSparseTensor& ten,
+						 FGSTensor& outten, const IntSequence& c);
+	void operator()();
+};
+
+@ 
+@<|WorkerFoldMAASparse2| class declaration@>=
+class WorkerFoldMAASparse2 : public THREAD {
+	const FoldedStackContainer& cont;
+	const FSSparseTensor& t;
+	FGSTensor& out;
+	IntSequence coor;
+public:@;
+	WorkerFoldMAASparse2(const FoldedStackContainer& container,
+						 const FSSparseTensor& ten,
+						 FGSTensor& outten, const IntSequence& c);
+	void operator()();
+};
+
+@ 
+@<|WorkerFoldMAASparse4| class declaration@>=
+class WorkerFoldMAASparse4 : public THREAD {
+	const FoldedStackContainer& cont;
+	const FSSparseTensor& t;
+	FGSTensor& out;
+	IntSequence coor;
+public:@;
+	WorkerFoldMAASparse4(const FoldedStackContainer& container,
+						 const FSSparseTensor& ten,
+						 FGSTensor& outten, const IntSequence& c);
+	void operator()();
+};
+
+@ 
+@<|WorkerUnfoldMAADense| class declaration@>=
+class WorkerUnfoldMAADense : public THREAD {
+	const UnfoldedStackContainer& cont;
+	Symmetry sym;
+	const UGSContainer& dense_cont;
+	UGSTensor& out;
+public:@;
+	WorkerUnfoldMAADense(const UnfoldedStackContainer& container, 
+						 const Symmetry& s,
+						 const UGSContainer& dcontainer,
+						 UGSTensor& outten);
+	void operator()();
+};
+
+@ 
+@<|WorkerUnfoldMAASparse1| class declaration@>=
+class WorkerUnfoldMAASparse1 : public THREAD {
+	const UnfoldedStackContainer& cont;
+	const FSSparseTensor& t;
+	UGSTensor& out;
+	IntSequence coor;
+	const EquivalenceBundle& ebundle;
+public:@;
+	WorkerUnfoldMAASparse1(const UnfoldedStackContainer& container,
+						   const FSSparseTensor& ten,
+						   UGSTensor& outten, const IntSequence& c);
+	void operator()();
+};
+
+@ 
+@<|WorkerUnfoldMAASparse2| class declaration@>=
+class WorkerUnfoldMAASparse2 : public THREAD {
+	const UnfoldedStackContainer& cont;
+	const FSSparseTensor& t;
+	UGSTensor& out;
+	IntSequence coor;
+public:@;
+	WorkerUnfoldMAASparse2(const UnfoldedStackContainer& container,
+						   const FSSparseTensor& ten,
+						   UGSTensor& outten, const IntSequence& c);
+	void operator()();
+};
+
+
+@ End of {\tt stack\_container.h} file.
diff --git a/dynare++/tl/cc/sthread.cweb b/dynare++/tl/cc/sthread.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..4043f56b2a02fdf28464117218dc93af55650892
--- /dev/null
+++ b/dynare++/tl/cc/sthread.cweb
@@ -0,0 +1,224 @@
+@q $Id: sthread.cweb 2269 2008-11-23 14:33:22Z michel $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@ Start of {\tt sthread.cpp} file. We set the default values of
+|max_parallel_threads| for both the |posix| and |empty| implementations and
+for both the joinable and the detached group. For |posix| this defaults to
+2, which corresponds to a uniprocessor machine with hyper-threading.
+
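+A program using the library can override these defaults at start-up; a
+hypothetical sketch (assuming the static members are accessible from
+user code):
+
+	// use up to 4 worker threads for both group flavours
+	sthread::thread_group<sthread::posix>::max_parallel_threads = 4;
+	sthread::detach_thread_group<sthread::posix>::max_parallel_threads = 4;
+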
+@c
+#include <cstring>
+#include "sthread.h"
+
+#ifdef POSIX_THREADS
+namespace sthread {
+	template<>
+	int thread_group<posix>::max_parallel_threads = 2;
+	template<>
+	int detach_thread_group<posix>::max_parallel_threads = 2;
+	@<POSIX specializations methods@>;
+}
+#else
+namespace sthread {
+	template<>
+	int thread_group<empty>::max_parallel_threads = 1;
+	template<>
+	int detach_thread_group<empty>::max_parallel_threads = 1;
+	@<non-threading specialization methods@>;
+}
+#endif
+
+@ 
+@<POSIX specializations methods@>=
+	@<|thread_traits| method codes@>;
+	@<|mutex_traits| method codes@>;
+	@<|cond_traits| method codes@>;
+	@<|PosixSynchro| constructor@>;
+	@<|posix_thread_function| code@>;
+	@<|posix_detach_thread_function| code@>;
+
+@ 
+@<|thread_traits| method codes@>=
+void* posix_thread_function(void* c);
+template <>
+void thread_traits<posix>::run(_Ctype* c)
+{
+	pthread_create(&(c->getThreadIden()), NULL, posix_thread_function, (void*) c);
+}
+@#
+void* posix_detach_thread_function(void* c);
+
+template <>
+void thread_traits<posix>::detach_run(_Dtype* c)
+{
+	pthread_attr_t attr;
+	pthread_attr_init(&attr);
+	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
+	pthread_create(&(c->getThreadIden()), &attr, posix_detach_thread_function, (void*) c);
+	pthread_attr_destroy(&attr);
+}
+@#
+
+template <>
+void thread_traits<posix>::exit()
+{
+	pthread_exit(NULL);
+}
+@#
+template <>
+void thread_traits<posix>::join(_Ctype* c)
+{
+	pthread_join(c->getThreadIden(), NULL);
+}
+
+@ 
+@<|mutex_traits| method codes@>=
+template <>
+void mutex_traits<posix>::init(pthread_mutex_t& m)
+{
+	pthread_mutex_init(&m, NULL);
+}
+@#
+template <>
+void mutex_traits<posix>::lock(pthread_mutex_t& m)
+{
+	pthread_mutex_lock(&m);
+}
+@#
+template <>
+void mutex_traits<posix>::unlock(pthread_mutex_t& m)
+{
+	pthread_mutex_unlock(&m);
+}
+
+@ 
+@<|cond_traits| method codes@>=
+template <>
+void cond_traits<posix>::init(_Tcond& cond)
+{
+	pthread_cond_init(&cond, NULL);
+}
+@#
+template <>
+void cond_traits<posix>::broadcast(_Tcond& cond)
+{
+	pthread_cond_broadcast(&cond);
+}
+@#
+template <>
+void cond_traits<posix>::wait(_Tcond& cond, _Tmutex& mutex)
+{
+	pthread_cond_wait(&cond, &mutex);
+}
+@#
+template <>
+void cond_traits<posix>::destroy(_Tcond& cond)
+{
+	pthread_cond_destroy(&cond);
+}
+
+
+@ Here we instantiate the static map, and construct |PosixSynchro|
+using that map.
+
+@<|PosixSynchro| constructor@>=
+static posix_synchro::mutex_map_t posix_mm;
+
+PosixSynchro::PosixSynchro(const void* c, const char* id)
+	: posix_synchro(c, id, posix_mm) {}
+
+@ This function is of the type |void* function(void*)| as required by
+POSIX; it typecasts its argument and runs |operator()()|.
+@<|posix_thread_function| code@>=
+void* posix_thread_function(void* c)
+{
+	thread_traits<posix>::_Ctype* ct =
+		(thread_traits<posix>::_Ctype*)c;
+	try {
+		ct->operator()();
+	} catch (...) {
+		ct->exit();
+	}
+	return NULL;
+}
+
+@ 
+@<|posix_detach_thread_function| code@>=
+void* posix_detach_thread_function(void* c)
+{
+	thread_traits<posix>::_Dtype* ct =
+		(thread_traits<posix>::_Dtype*)c;
+	condition_counter<posix>* counter = ct->counter;
+	try {
+		ct->operator()();
+	} catch (...) {
+		ct->exit();
+	}
+	if (counter)
+		counter->decrease();
+	return NULL;
+}
+
+
+@ The only trait methods that need to do any work are |thread_traits::run| and
+|thread_traits::detach_run|, which directly call
+|operator()()|. Everything else is empty.
+
+@<non-threading specialization methods@>=
+template <>
+void thread_traits<empty>::run(_Ctype* c)
+{
+	c->operator()();
+}
+template <>
+void thread_traits<empty>::detach_run(_Dtype* c)
+{
+	c->operator()();
+}
+@#
+template <>
+void thread_traits<empty>::exit()
+{
+}
+@#
+template <>
+void thread_traits<empty>::join(_Ctype* c)
+{
+}
+@#
+template <>
+void mutex_traits<empty>::init(Empty& m)
+{
+}
+@#
+template <>
+void mutex_traits<empty>::lock(Empty& m)
+{
+}
+@#
+template <>
+void mutex_traits<empty>::unlock(Empty& m)
+{
+}
+@#
+template <>
+void cond_traits<empty>::init(_Tcond& cond)
+{
+}
+@#
+template <>
+void cond_traits<empty>::broadcast(_Tcond& cond)
+{
+}
+@#
+template <>
+void cond_traits<empty>::wait(_Tcond& cond, _Tmutex& mutex)
+{
+}
+@#
+template <>
+void cond_traits<empty>::destroy(_Tcond& cond)
+{
+}
+
+@ End of {\tt sthread.cpp} file.
diff --git a/dynare++/tl/cc/sthread.hweb b/dynare++/tl/cc/sthread.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..e8d75cf4609626c0605b07fced6f6c01d64b618f
--- /dev/null
+++ b/dynare++/tl/cc/sthread.hweb
@@ -0,0 +1,618 @@
+@q $Id: sthread.hweb 411 2005-08-11 12:26:13Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@*2 Simple threads. Start of {\tt sthreads.h} file.
+
+This file defines types providing a simple interface to
+multi-threading. It follows the classical C++ idiom of traits. We
+have three sorts of traits: |thread_traits|, which makes an interface
+to thread functions (run, exit, create and join); |mutex_traits|,
+which makes an interface to mutexes (create, lock, unlock); and
+|cond_traits|, which makes an interface to condition variables
+(create, wait, broadcast, and destroy). At present, there are two
+implementations: the first uses POSIX threads, mutexes, and
+conditions, the second is serial (no parallelization).
+
+The file provides the following interfaces templated by the types
+implementing the threading (like types |pthread_t|, and |pthread_mutex_t|
+for POSIX thread and mutex):
+\unorderedlist
+\li |thread| is a pure virtual class, which must be inherited and a
+method |operator()()| be implemented as the running code of the
+thread. This code is run as a new thread by calling |run| method.
+\li |thread_group| allows insertion of |thread|s and running all of
+them simultaneously, joining them at the end. The maximum number of
+parallel threads can be controlled. See below.
+\li a |synchro| object locks a piece of code so that, for given data
+and a specified entry-point, it is executed only serially. It holds
+the lock until it is destructed, so the typical use is to create the
+|synchro| object on the stack of a function which is to be
+synchronized. The synchronization can be restricted to specific data
+(then a pointer is passed to |synchro|'s constructor) and to a
+specific entry-point (then a |const char*| is passed to the
+constructor).
+\li |detach_thread| inherits from |thread| and models a detached
+thread in contrast to |thread| which models the joinable thread.
+\li |detach_thread_group| groups the detached threads and runs them. They
+are not joined; they are synchronized by means of a counter counting
+the running threads. A change of the counter is detected by waiting on
+an associated condition.
+\endunorderedlist
+
+What implementation is selected is governed (at present) by
+|POSIX_THREADS|. If it is defined, then POSIX threads are linked. If
+it is not defined, then the serial implementation is used. In accordance
+with this, the header file defines macros |THREAD|, |THREAD_GROUP|,
+and |SYNCHRO| as the picked specialization of |thread| (or |detach_thread|),
+|thread_group| (or |detach_thread_group|), and |synchro|.
+
+The type of implementation is controlled by the integer template
+parameter |thread_impl|, which can be |posix| or |empty|.
+
+The number of maximum parallel threads is controlled via a static
+member of |thread_group| and |detach_thread_group| classes.
+
+@s _Tthread int
+@s thread_traits int
+@s thread int
+@s thread_group int
+@s detach_thread int
+@s detach_thread_group int
+@s cond_traits int
+@s condition_counter int
+@s mutex_traits int
+@s mutex_map int
+@s synchro int
+@s _Tmutex int
+@s pthread_t int
+@s pthread_mutex_t int
+@s pthread_cond_t int
+@s pthread_attr_t int
+@s IF int
+@s Then int
+@s Else int
+@s RET int
+@s thread_impl int
+
+@c
+#ifndef STHREAD_H
+#define STHREAD_H
+
+#ifdef POSIX_THREADS
+# include <pthread.h>
+#endif
+
+#include <stdio.h>
+#include <string.h>
+#include <list>
+#include <map>
+
+namespace sthread {
+	using namespace std;
+
+	class Empty {};
+	@<classical IF template@>;
+	enum {@+ posix, empty@+};
+	template <int> class thread_traits;
+	template <int> class detach_thread;
+	@<|thread| template class declaration@>;
+	@<|thread_group| template class declaration@>;
+	@<|thread_traits| template class declaration@>;
+	@<|mutex_traits| template class declaration@>;
+	@<|mutex_map| template class declaration@>;
+	@<|synchro| template class declaration@>;
+	@<|cond_traits| template class declaration@>;
+	@<|condition_counter| template class declaration@>;
+	@<|detach_thread| template class declaration@>;
+	@<|detach_thread_group| template class declaration@>;
+#ifdef POSIX_THREADS
+	@<POSIX thread specializations@>;
+#else
+	@<No threading specializations@>;
+#endif
+};
+
+#endif
+
+@ Here is the classical IF template.
+@<classical IF template@>=
+template<bool condition, class Then, class Else>
+struct IF {
+	typedef Then RET;
+};
+
+template<class Then, class Else>
+struct IF<false, Then, Else> {
+	typedef Else RET;
+};
+
+
+
+@ The class of |thread| is clear. The user implements |operator()()|,
+the method |run| runs the user's code as joinable thread, |exit| kills the
+execution.
+
+@<|thread| template class declaration@>=
+template <int thread_impl>
+class thread {
+	typedef thread_traits<thread_impl> _Ttraits; 
+	typedef typename _Ttraits::_Tthread _Tthread;
+	_Tthread th;
+public:@;
+	virtual ~thread() {}
+	_Tthread& getThreadIden()
+		{@+ return th;@+}
+	const _Tthread& getThreadIden() const
+		{@+ return th;@+}
+	virtual void operator()() = 0;
+	void run()
+		{@+ _Ttraits::run(this);@+}
+	void detach_run()
+		{@+ _Ttraits::detach_run(this);@+}
+	void exit()
+		{@+ _Ttraits::exit();@+}
+};
+
+@ The |thread_group| is also clear. We allow a user to insert the
+|thread|s, and then launch |run|, which will run all the threads not
+allowing more than |max_parallel_threads| joining them at the
+end. This static member can be set from outside.
+
+@<|thread_group| template class declaration@>=
+template <int thread_impl>
+class thread_group {
+	typedef thread_traits<thread_impl> _Ttraits;
+	typedef thread<thread_impl> _Ctype;
+	list<_Ctype*> tlist;
+	typedef typename list<_Ctype*>::iterator iterator;
+public:@;
+	static int max_parallel_threads;
+	void insert(_Ctype* c)
+		{@+ tlist.push_back(c);@+}
+	@<|thread_group| destructor code@>;
+	@<|thread_group::run| code@>;
+private:@;
+	@<|thread_group::run_portion| code@>;
+};
+
+@ The thread group class maintains a list of pointers to threads and
+takes responsibility for deallocating them; therefore we implement the
+destructor.
+@<|thread_group| destructor code@>=
+~thread_group()
+{
+	while (! tlist.empty()) {
+		delete tlist.front();
+		tlist.pop_front();
+	}
+}
+
+@ This runs a given number of threads in parallel, starting from the
+given iterator. It returns the first iterator whose thread was not run.
+
+@<|thread_group::run_portion| code@>=
+iterator run_portion(iterator start, int n)
+{
+	int c = 0;
+	for (iterator i = start; c < n; ++i, c++) {
+		(*i)->run();
+	}
+	iterator ret;
+	c = 0;
+	for (ret = start; c < n; ++ret, c++) {
+		_Ttraits::join(*ret);
+	}
+	return ret;
+}
+
+
+@ Here we run the threads, ensuring that no more than
+|max_parallel_threads| run in parallel. Moreover, we do not want to
+run too few threads at a time, since that wastes resources (if any are
+available). Therefore, we run batches of |max_parallel_threads| in
+parallel as long as the number of remaining threads exceeds twice that
+number. The remaining batch (at most |2*max_parallel_threads| threads)
+is then run in two halves if it still exceeds |max_parallel_threads|,
+and in a single batch otherwise.
+
+@<|thread_group::run| code@>=
+void run()
+{
+	int rem = tlist.size();
+	iterator pfirst = tlist.begin();
+	while (rem > 2*max_parallel_threads) {
+		pfirst = run_portion(pfirst, max_parallel_threads);
+		rem -= max_parallel_threads;
+	}
+	if (rem > max_parallel_threads) {
+		pfirst = run_portion(pfirst, rem/2);
+		rem -= rem/2;
+	}
+	run_portion(pfirst, rem);
+}
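+
+@ For illustration (the numbers are only an example, nothing prescribed
+by the library): with |max_parallel_threads| equal to 10 and 25
+inserted threads, |run| executes a full batch of 10 (leaving 15), then,
+since 15 still exceeds 10, a batch of $15/2=7$, and finally the
+remaining 8. With only 3 inserted threads a single batch of 3 is run.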
+
+
+
+
+@ Clear. We have only |run|, |detach_run|, |exit| and |join|, since
+this is only a simple interface.
+
+@<|thread_traits| template class declaration@>=
+template <int thread_impl>
+struct thread_traits {
+	typedef typename IF<thread_impl==posix, pthread_t, Empty>::RET _Tthread;
+	typedef thread<thread_impl> _Ctype;
+	typedef detach_thread<thread_impl> _Dtype;
+	static void run(_Ctype* c);
+	static void detach_run(_Dtype* c);
+	static void exit();
+	static void join(_Ctype* c);
+};
+
+@ Clear. We have only |init|, |lock|, and |unlock|.
+@<|mutex_traits| template class declaration@>=
+struct ltmmkey;
+typedef pair<const void*, const char*> mmkey;
+@#
+template <int thread_impl>
+struct mutex_traits {
+	typedef typename IF<thread_impl==posix, pthread_mutex_t, Empty>::RET _Tmutex;
+	typedef map<mmkey, pair<_Tmutex, int>, ltmmkey> mutex_int_map;
+	static void init(_Tmutex& m);
+	static void lock(_Tmutex& m);
+	static void unlock(_Tmutex& m);
+};
+
+@ Here we define a map of mutexes keyed by a pair of an address and a
+string. The purpose of the map is that, when synchronizing, we need to
+publish the mutexes locking a piece of code (characterized by the
+string) that accesses some data (characterized by the pointer). So, if
+a thread needs to pass a |synchro| object, it creates its own with the
+same address and string, and must look into some public storage to
+find the corresponding mutex. If the |synchro| object is created for
+the first time, the mutex is created and inserted into the map. We
+count the references to the mutex (the number of waiting threads) in
+order to know when it is safe to remove the mutex from the map. This
+is the only purpose of counting the references. Recall that the mutex
+is keyed by an address of the data; without the removal, the number of
+mutexes would only grow.
+
+The map itself needs its own mutex to avoid concurrent insertions and
+deletions.
+
+@s mutex_int_map int
+
+@<|mutex_map| template class declaration@>=
+struct ltmmkey {
+	bool operator()(const mmkey& k1, const mmkey& k2) const
+		{return k1.first < k2.first ||
+			 (k1.first == k2.first && strcmp(k1.second, k2.second) < 0);} 
+};
+@#
+template <int thread_impl>
+class mutex_map
+	: public mutex_traits<thread_impl>::mutex_int_map
+{
+	typedef typename mutex_traits<thread_impl>::_Tmutex _Tmutex;
+	typedef mutex_traits<thread_impl> _Mtraits;
+	typedef pair<_Tmutex, int> mmval;
+	typedef map<mmkey, mmval, ltmmkey> _Tparent;
+	typedef typename _Tparent::iterator iterator;
+	typedef typename _Tparent::value_type _mvtype;
+	_Tmutex m;
+public:@;
+	mutex_map()
+		{@+ _Mtraits::init(m);@+}
+	void insert(const void* c, const char* id, const _Tmutex& m)
+		{@+ _Tparent::insert(_mvtype(mmkey(c,id), mmval(m,0)));@+}
+	bool check(const void* c, const char* id) const
+		{@+ return _Tparent::find(mmkey(c, id)) != _Tparent::end();@+}
+	@<|mutex_map::get| code@>;
+	@<|mutex_map::remove| code@>;
+	void lock_map()
+		{@+ _Mtraits::lock(m);@+}
+	void unlock_map()
+		{@+ _Mtraits::unlock(m);@+}
+
+};
+
+@ This returns a pointer to the pair of the mutex and its reference count.
+@<|mutex_map::get| code@>=
+mmval* get(const void* c, const char* id)
+{
+	iterator it = _Tparent::find(mmkey(c, id));
+	if (it == _Tparent::end())
+		return NULL;
+	return &((*it).second);
+}
+
+@ This unconditionally removes the mutex from the map, regardless of
+its number of references. The only user of this class should be the
+|synchro| class, whose implementation must not remove a referenced
+mutex.
+
+@<|mutex_map::remove| code@>=
+void remove(const void* c, const char* id)
+{
+	iterator it = _Tparent::find(mmkey(c, id));
+	if (it != _Tparent::end())
+		erase(it);
+}
+
+@ This is the |synchro| class. The constructor of this class tries to
+lock a mutex for a particular address (identification of data) and
+string (identification of entry-point). If the mutex is already
+locked, it waits until it is unlocked and then returns. The destructor
+releases the lock. The typical use is to construct the object on the
+stack of the code being synchronized.
+
+@<|synchro| template class declaration@>=
+template <int thread_impl>
+class synchro {
+	typedef typename mutex_traits<thread_impl>::_Tmutex _Tmutex;
+	typedef mutex_traits<thread_impl> _Mtraits;
+public:@;
+	typedef mutex_map<thread_impl> mutex_map_t; 
+private:@;
+	const void* caller;
+	const char* iden;
+	mutex_map_t& mutmap;
+public:@;
+	synchro(const void* c, const char* id, mutex_map_t& mmap)
+		: caller(c), iden(id), mutmap(mmap)
+		{@+ lock();@+}
+	~synchro()
+		{@+ unlock();@+}
+private:@;
+	@<|synchro::lock| code@>;
+	@<|synchro::unlock| code@>;
+};
+
+@ The |lock| function acquires the mutex in the map. First it gets
+exclusive access to the map. Then it increases the number of
+references to the mutex (inserting it first if it does not exist
+yet). Then it unlocks the map, and finally it locks the mutex itself.
+   
+@<|synchro::lock| code@>=
+void lock() {
+	mutmap.lock_map();
+	if (!mutmap.check(caller, iden)) {
+		_Tmutex mut;
+		_Mtraits::init(mut);
+		mutmap.insert(caller, iden, mut);
+	}
+	mutmap.get(caller, iden)->second++;
+	mutmap.unlock_map();
+	_Mtraits::lock(mutmap.get(caller, iden)->first);
+}
+
+@ The |unlock| function first locks the map. Then it releases the code
+mutex and decreases its number of references. If the count drops to
+zero, it removes the mutex from the map.
+
+@<|synchro::unlock| code@>=
+void unlock() {
+	mutmap.lock_map();
+	if (mutmap.check(caller, iden)) {
+		_Mtraits::unlock(mutmap.get(caller, iden)->first);
+		mutmap.get(caller, iden)->second--;
+		if (mutmap.get(caller, iden)->second == 0)
+			mutmap.remove(caller, iden);
+	}
+	mutmap.unlock_map();
+}
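+
+@ A minimal usage sketch; the class |Foo|, its method, and the counter
+below are made up purely for illustration. The |synchro| object is
+created on the stack, so the lock is released automatically when the
+function returns.
+
+\kern0.3cm
+\centerline{|void Foo::update() {|}
+\centerline{|	SYNCHRO s(this, "Foo::update");|}
+\centerline{|	++shared_counter;|}
+\centerline{|}|}
+\kern0.3cm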
+
+@ These are traits for conditions. We need |init|, |broadcast|, |wait|
+and |destroy|.
+
+@<|cond_traits| template class declaration@>=
+template <int thread_impl>
+struct cond_traits {
+	typedef typename IF<thread_impl==posix, pthread_cond_t, Empty>::RET _Tcond;
+	typedef typename mutex_traits<thread_impl>::_Tmutex _Tmutex;
+	static void init(_Tcond& cond);
+	static void broadcast(_Tcond& cond);
+	static void wait(_Tcond& cond, _Tmutex& mutex);
+	static void destroy(_Tcond& cond);
+};
+
+@ Here is the condition counter. It is a counter which starts at 0 and
+can be increased and decreased. A thread can wait until the counter
+changes; this is implemented by a condition variable. After the wait
+is done, another (or the same) thread, by calling |waitForChange|,
+waits for another change. This can be dangerous, since it is possible
+to wait for a change which will never happen, because all the threads
+which could cause the change (by an increase or decrease) might have
+already finished.
+
+@<|condition_counter| template class declaration@>=
+template <int thread_impl>
+class condition_counter {
+	typedef typename mutex_traits<thread_impl>::_Tmutex _Tmutex;
+	typedef typename cond_traits<thread_impl>::_Tcond _Tcond;
+	int counter;
+	_Tmutex mut;
+	_Tcond cond;
+	bool changed;
+public:@;
+	@<|condition_counter| constructor code@>;
+	@<|condition_counter| destructor code@>;
+	@<|condition_counter::increase| code@>;
+	@<|condition_counter::decrease| code@>;
+	@<|condition_counter::waitForChange| code@>;
+};
+
+@ We initialize the counter to 0, and the |changed| flag to |true|,
+since the counter has changed from an undefined value to 0.
+
+@<|condition_counter| constructor code@>=
+condition_counter()
+	: counter(0), changed(true)
+{
+	mutex_traits<thread_impl>::init(mut);
+	cond_traits<thread_impl>::init(cond);
+}
+
+@ In destructor, we only release the resources associated with the
+condition.
+
+@<|condition_counter| destructor code@>=
+~condition_counter()
+{
+	cond_traits<thread_impl>::destroy(cond);
+}
+
+@ When increasing, we lock the mutex, advance the counter, remember it
+is changed, broadcast, and release the mutex.
+
+@<|condition_counter::increase| code@>=
+void increase()
+{
+	mutex_traits<thread_impl>::lock(mut);
+	counter++;
+	changed = true;
+	cond_traits<thread_impl>::broadcast(cond);
+	mutex_traits<thread_impl>::unlock(mut);
+}
+
+@ The same as |increase|, only the counter is decremented.
+@<|condition_counter::decrease| code@>=
+void decrease()
+{
+	mutex_traits<thread_impl>::lock(mut);
+	counter--;
+	changed = true;
+	cond_traits<thread_impl>::broadcast(cond);
+	mutex_traits<thread_impl>::unlock(mut);
+}
+
+@ We lock the mutex; if there has been a change since the last call of
+|waitForChange|, we return the counter immediately, otherwise we first
+wait for a change. In either case the mutex is released before returning.
+
+@<|condition_counter::waitForChange| code@>=
+int waitForChange()
+{
+	mutex_traits<thread_impl>::lock(mut);
+	if (!changed) {
+		cond_traits<thread_impl>::wait(cond, mut);
+	}
+	changed = false;
+	int res = counter;
+	mutex_traits<thread_impl>::unlock(mut);
+	return res;
+}
+
+
+@ The detached thread is the same as the joinable |thread|. We only
+re-implement the |run| method to call |thread_traits::detach_run|, and
+add a method which installs a counter. The counter is increased and
+decreased in the body of the new thread.
+
+@<|detach_thread| template class declaration@>=
+template <int thread_impl>
+class detach_thread : public thread<thread_impl> {
+public:@;
+	condition_counter<thread_impl>* counter;
+	detach_thread() : counter(NULL) {}
+	void installCounter(condition_counter<thread_impl>* c)
+		{@+ counter = c;@+}
+	void run()
+		{@+thread_traits<thread_impl>::detach_run(this);@+}
+};
+
+@ The detach thread group is (by interface) the same as
+|thread_group|. The extra thing we have here is the |counter|. The
+implementation of |insert| and |run| is different.
+
+@<|detach_thread_group| template class declaration@>=
+template<int thread_impl>
+class detach_thread_group {	
+	typedef thread_traits<thread_impl> _Ttraits;
+	typedef cond_traits<thread_impl> _Ctraits;
+	typedef detach_thread<thread_impl> _Ctype;
+	list<_Ctype *> tlist;
+	typedef typename list<_Ctype*>::iterator iterator;
+	condition_counter<thread_impl> counter;
+public:@;
+	static int max_parallel_threads;
+	@<|detach_thread_group::insert| code@>;
+	@<|detach_thread_group| destructor code@>;
+	@<|detach_thread_group::run| code@>;
+};
+
+@ When inserting, the counter is installed to the thread.
+@<|detach_thread_group::insert| code@>=
+void insert(_Ctype* c)
+{
+	tlist.push_back(c);
+	c->installCounter(&counter);
+}
+
+@ The destructor is clear.
+@<|detach_thread_group| destructor code@>=
+~detach_thread_group()
+{
+	while (!tlist.empty()) {
+		delete tlist.front();
+		tlist.pop_front();
+	}
+}
+
+@ We cycle through all threads in the group, and in each cycle we wait
+for a change in the |counter|. If the counter indicates that fewer
+than the maximum number of parallel threads are running, a new thread
+is started and the iterator in the list is moved.
+
+At the end we have to wait for all threads to finish.
+
+@<|detach_thread_group::run| code@>=
+void run()
+{
+	int mpt = max_parallel_threads;
+	iterator it = tlist.begin();
+	while (it != tlist.end()) {
+		if (counter.waitForChange() < mpt) {
+			counter.increase();
+			(*it)->run();
+			++it;
+		}
+	}
+	while (counter.waitForChange() > 0) {}
+}
+
+
+@ Here we only define the specializations for POSIX threads. Then we
+define the macros. Note that the |PosixSynchro| class constructs itself
+from the static map defined in {\tt sthreads.cpp}.
+ 
+@<POSIX thread specializations@>=
+typedef detach_thread<posix> PosixThread;
+typedef detach_thread_group<posix> PosixThreadGroup;
+typedef synchro<posix> posix_synchro;
+class PosixSynchro : public posix_synchro {
+public:@;
+	PosixSynchro(const void* c, const char* id);
+};
+@#
+#define THREAD@, sthread::PosixThread
+#define THREAD_GROUP@, sthread::PosixThreadGroup
+#define SYNCHRO@, sthread::PosixSynchro
+
+@ Here we define an empty class and use it as both the thread and the
+mutex type. The |NoSynchro| class is also empty, but an empty
+constructor is declared. The empty destructor is declared only to
+avoid an ``unused variable'' warning.
+
+@<No threading specializations@>=
+typedef thread<empty> NoThread;
+typedef thread_group<empty> NoThreadGroup;
+typedef synchro<empty> no_synchro;
+class NoSynchro {
+public:@;
+	NoSynchro(const void* c, const char* id) {}
+	~NoSynchro() {}
+};
+@#
+#define THREAD@, sthread::NoThread
+#define THREAD_GROUP@, sthread::NoThreadGroup
+#define SYNCHRO@, sthread::NoSynchro
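+
+@ A minimal sketch of the intended use of the macros; the worker class
+and its data are made up for illustration only, and we assume that
+|max_parallel_threads| has been set elsewhere. The same code compiles
+with and without |POSIX_THREADS|; in the latter case the ``threads''
+are simply run one after another.
+
+\kern0.3cm
+\centerline{|class SumWorker : public THREAD {|}
+\centerline{|	const double* data; int n; double result;|}
+\centerline{|public:|}
+\centerline{|	SumWorker(const double* d, int nn) : data(d), n(nn), result(0.0) {}|}
+\centerline{|	void operator()()|}
+\centerline{|		{ for (int i = 0; i < n; i++) result += data[i]; }|}
+\centerline{|};|}
+\kern0.3cm
+\noindent and then
+\kern0.3cm
+\centerline{|double a[3] = {1.0, 2.0, 3.0};|}
+\centerline{|THREAD_GROUP gr;|}
+\centerline{|gr.insert(new SumWorker(a, 3));|}
+\centerline{|gr.insert(new SumWorker(a, 3));|}
+\centerline{|gr.run();|}
+\kern0.3cm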
+
+@ End of {\tt sthreads.h} file.
diff --git a/dynare++/tl/cc/symmetry.cweb b/dynare++/tl/cc/symmetry.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..01b3287667deed53bcb71f86e655baaaa3da445c
--- /dev/null
+++ b/dynare++/tl/cc/symmetry.cweb
@@ -0,0 +1,154 @@
+@q $Id: symmetry.cweb 148 2005-04-19 15:12:26Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@ Start of {\tt symmetry.cpp} file.
+
+@c
+#include "symmetry.h"
+#include "permutation.h"
+
+#include <stdio.h>
+
+@<|Symmetry| constructor code@>;
+@<|Symmetry::findClass| code@>;
+@<|Symmetry::isFull| code@>;
+@<|symiterator| constructor code@>;
+@<|symiterator| destructor code@>;
+@<|symiterator::operator++| code@>;
+@<|InducedSymmetries| constructor code@>;
+@<|InducedSymmetries| permuted constructor code@>;
+@<|InducedSymmetries::print| code@>;
+
+@ Construct the symmetry as the counts of successively equal items in the sequence.
+
+@<|Symmetry| constructor code@>=
+Symmetry::Symmetry(const IntSequence& s)
+	: IntSequence(s.getNumDistinct(), 0)
+{
+	int p = 0;
+	if (s.size() > 0)
+		operator[](p) = 1;
+	for (int i = 1; i < s.size(); i++) {
+		if (s[i] != s[i-1])
+			p++; 
+		operator[](p)++;
+	}
+}
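+
+@ For example, the ordered sequence $(5,5,7,7,7,8)$ yields the
+symmetry $(2,3,1)$: two equal items, then three, then one.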
+
+@ Find a class of the symmetry containing a given index.
+@<|Symmetry::findClass| code@>=
+int Symmetry::findClass(int i) const
+{
+	int j = 0;
+	int sum = 0;
+	do {
+		sum += operator[](j);
+		j++;
+	} while (j < size() && sum <= i);
+
+	return j-1;
+}
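+
+@ For example, for the symmetry $(3,1,2)$ the classes of indices are
+$\{0,1,2\}$, $\{3\}$ and $\{4,5\}$, so |findClass(3)| returns 1 and
+|findClass(5)| returns 2.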
+
+@ The symmetry is full if it allows for any permutation of indices.
+This means that there is at most one non-zero index.
+
+@<|Symmetry::isFull| code@>=
+bool Symmetry::isFull() const
+{
+	int count = 0;
+	for (int i = 0; i < num(); i++)
+		if (operator[](i) != 0)
+			count++;
+	return count <=1;
+}
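+
+@ For instance, the symmetries $(4)$, $(0,3)$ and $(0,0)$ are full,
+while $(2,1)$ is not.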
+
+
+@ Here we construct the beginning of the |symiterator|. The first
+symmetry index is 0. If the length is 2, the second index is the
+dimension; otherwise we create the subordinal symmetry set and its
+beginning as a subordinal |symiterator|.
+
+@<|symiterator| constructor code@>=
+symiterator::symiterator(SymmetrySet& ss)
+	: s(ss), subit(NULL), subs(NULL), end_flag(false)
+{
+	s.sym()[0] = 0;
+	if (s.size() == 2) {
+		s.sym()[1] = s.dimen();
+	} else {
+		subs = new SymmetrySet(s, s.dimen());
+		subit = new symiterator(*subs);
+	}
+}
+
+
+@ 
+@<|symiterator| destructor code@>=
+symiterator::~symiterator( )
+{
+	if (subit)
+		delete subit;
+	if (subs)
+		delete subs;
+}
+
+@ Here we move to the next symmetry. We do so only if we are not at
+the end. If the length is 2, we increase the lower index and decrease
+the upper index; otherwise we advance the subordinal iterator. If it
+has reached its end, we increase the leftmost index, recreate the
+subordinal symmetry set with the correspondingly smaller dimension,
+and reset the subordinal iterator to its beginning. At the end we test
+whether we have passed the last symmetry, which is recognized by the
+first index exceeding the dimension.
+
+@<|symiterator::operator++| code@>=
+symiterator& symiterator::operator++()
+{
+	if (!end_flag) {
+		if (s.size() == 2) {
+			s.sym()[0]++;
+			s.sym()[1]--;
+		} else {
+			++(*subit);
+			if (subit->isEnd()) {
+				delete subit;
+				delete subs;
+				s.sym()[0]++;
+				subs = new SymmetrySet(s, s.dimen()-s.sym()[0]);
+				subit = new symiterator(*subs);
+			}
+		}
+		if (s.sym()[0] == s.dimen()+1)
+			end_flag=true;
+	}
+	return *this;
+}
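+
+@ For example, for symmetries of length 3 and dimension 2 the iterator
+produces, in this order, $(0,0,2)$, $(0,1,1)$, $(0,2,0)$, $(1,0,1)$,
+$(1,1,0)$ and $(2,0,0)$; after one more increment |isEnd()| becomes
+true.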
+
+@ 
+@<|InducedSymmetries| constructor code@>=
+InducedSymmetries::InducedSymmetries(const Equivalence& e, const Symmetry& s)
+{
+	for (Equivalence::const_seqit i = e.begin(); i != e.end(); ++i) {
+		push_back(Symmetry(s, *i));
+	}
+}
+
+@ 
+@<|InducedSymmetries| permuted constructor code@>=
+InducedSymmetries::InducedSymmetries(const Equivalence& e, const Permutation& p,
+									 const Symmetry& s)
+{
+	for (int i = 0; i < e.numClasses(); i++) {
+		Equivalence::const_seqit it = e.find(p.getMap()[i]);
+		push_back(Symmetry(s, *it));
+	}
+}
+
+@ Debug print.
+@<|InducedSymmetries::print| code@>=
+void InducedSymmetries::print() const
+{
+	printf("Induced symmetries: %d\n",size());
+	for (unsigned int i = 0; i < size(); i++)
+		operator[](i).print();
+}
+
+@ End of {\tt symmetry.cpp} file.
\ No newline at end of file
diff --git a/dynare++/tl/cc/symmetry.hweb b/dynare++/tl/cc/symmetry.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..244548f92f8198b1d947ac726f31322e77da26d7
--- /dev/null
+++ b/dynare++/tl/cc/symmetry.hweb
@@ -0,0 +1,208 @@
+@q $Id: symmetry.hweb 841 2006-07-27 14:41:11Z tamas $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@*2 Symmetry. This is {\tt symmetry.h} file
+
+Symmetry is an abstraction for a term of the form $y^3u^2$. It manages
+only indices, not the variable names. So whoever uses this abstraction
+must keep in mind that $y$ is the first variable and $u$ the second.
+
+In fact, the symmetry is a special case of an equivalence, but its
+implementation is much simpler. We do not need an abstraction for the
+term $yyuyu$, since by the symmetry of mixed partial derivatives we
+can work with the term $y^3u^2$ instead. That is why the equivalence
+is too general for our purposes.
+
+One of the main purposes of the tensor library is to calculate something like:
+$$\left[B_{y^2u^3}\right]_{\alpha_1\alpha_2\beta_1\beta_2\beta_3}
+=\left[g_{y^l}\right]_{\gamma_1\ldots\gamma_l}
+\left(\sum_{c\in M_{l,5}}
+\prod_{m=1}^l\left[g_{c_m}\right]^{\gamma_m}_{c_m(\alpha,\beta)}\right)$$
+If, for instance, $l=3$, and $c=\{\{0,4\},\{1,2\},\{3\}\}$, then we
+have to calculate
+$$\left[g_{y^3}\right]_{\gamma_1\gamma_2\gamma_3}
+\left[g_{yu}\right]^{\gamma_1}_{\alpha_1\beta_3}
+\left[g_{yu}\right]^{\gamma_2}_{\alpha_2\beta_1}
+\left[g_u\right]^{\gamma_3}_{\beta_2}
+$$
+
+We must be able to calculate a symmetry induced by symmetry $y^2u^3$
+and by an equivalence class from equivalence $c$. For equivalence
+class $\{0,4\}$ the induced symmetry is $yu$, since we pick the first
+and the fifth variable from $y^2u^3$. For a given outer symmetry, the class
+|InducedSymmetries| does this for all classes of a given equivalence.
+
+We also need to cycle through all possible symmetries yielding the
+given dimension. For this purpose we define classes |SymmetrySet| and
+|symiterator|.
+
+The symmetry is implemented as an |IntSequence|; in fact, it inherits
+from it.
+
+@s Symmetry int
+@s IntSequence int
+@s SymmetrySet int
+@s symiterator int
+@s OrdSequence int
+@s InducedSymmetries int
+
+@c
+#ifndef SYMMETRY_H
+#define SYMMETRY_H
+
+#include "equivalence.h"
+#include "int_sequence.h"
+
+#include <list>
+#include <vector>
+
+@<|Symmetry| class declaration@>;
+@<|SymmetrySet| class declaration@>;
+@<|symiterator| class declaration@>;
+@<|InducedSymmetries| class declaration@>;
+
+#endif
+
+@ Clear. The method |isFull| returns true if and only if the symmetry
+allows for any permutation of indices.
+
+@<|Symmetry| class declaration@>=
+class Symmetry : public IntSequence {
+public:@/
+	@<|Symmetry| constructors@>; 
+	int num() const
+		{@+return size();@+}
+	int dimen() const
+		{@+return sum();@+}
+	int findClass(int i) const;
+	bool isFull() const;
+};
+
+@ We provide constructors for symmetries with up to four indices, that
+is, of the form $y^n$, $y^nu^m$, $y^nu^m\sigma^k$ and so on. There is
+also a copy constructor, and a constructor of the symmetry implied by
+a symmetry and an equivalence class; the latter is already implemented
+in |IntSequence|, so we only call the appropriate constructor of
+|IntSequence|. We also provide the subsymmetry constructor, which
+takes the given number of trailing indices of a symmetry.
+
+The last constructor constructs a symmetry from an integer sequence
+(supposed to be ordered) as a symmetry counting successively equal
+items. For instance the sequence $(a,a,a,b,c,c,d,d,d,d)$ produces
+symmetry $(3,1,2,4)$.
+
+@<|Symmetry| constructors@>=
+	Symmetry(int len, const char* dummy)
+		: IntSequence(len, 0)@+ {}
+	Symmetry(int i1)
+		: IntSequence(1, i1)@+ {}
+	Symmetry(int i1, int i2)
+		: IntSequence(2) {@+operator[](0) = i1;@+ operator[](1) = i2;@+}
+	Symmetry(int i1, int i2 ,int i3)
+		: IntSequence(3)
+		{@+
+			operator[](0) = i1;@+
+			operator[](1) = i2;@+
+			operator[](2) = i3;@+
+		}
+	Symmetry(int i1, int i2 ,int i3, int i4)
+		: IntSequence(4)
+		{@+
+			operator[](0) = i1;@+
+			operator[](1) = i2;@+
+			operator[](2) = i3;@+
+			operator[](3) = i4;@+
+		}
+	Symmetry(const Symmetry& s)
+		: IntSequence(s)@+ {}
+	Symmetry(const Symmetry& s, const OrdSequence& cl)
+		: IntSequence(s, cl.getData())@+ {}
+	Symmetry(Symmetry& s, int len)
+		: IntSequence(s, s.size()-len, s.size())@+ {}
+	Symmetry(const IntSequence& s);
+
+@ The class |SymmetrySet| defines a set of symmetries of the given
+length having a given dimension. It does not store all the symmetries;
+rather, it provides storage for one symmetry, which is changed as the
+associated iterator moves.
+
+The iterator class is |symiterator|. It is implemented
+recursively. The iterator object, when created, creates a subordinal
+iterator, which iterates over a symmetry set whose length is one less
+and whose dimension is the former dimension. When the subordinal
+iterator reaches its end, the superordinal iterator increases the
+leftmost index in the symmetry, resets the subordinal symmetry set
+with a different dimension, and iterates through the subordinal
+symmetry set until its end, and so on. That is why we also provide a
+|SymmetrySet| constructor for the construction of a subordinal
+symmetry set.
+
+The typical usage of the abstractions for |SymmetrySet| and
+|symiterator| is as follows:
+
+\kern0.3cm
+\centerline{|for (symiterator si(SymmetrySet(6, 4)); !si.isEnd(); ++si) {body}|}
+\kern0.3cm
+
+\noindent It goes through all symmetries of size 4 having dimension
+6. One can use |*si| as the symmetry in the body.
+
+@<|SymmetrySet| class declaration@>=
+class SymmetrySet {
+	Symmetry run;
+	int dim;
+public:@;
+	SymmetrySet(int d, int length)
+		: run(length, ""), dim(d)@+ {}
+	SymmetrySet(SymmetrySet& s, int d)
+		: run(s.run, s.size()-1), dim(d)@+ {}
+	int dimen() const
+		{@+ return dim;@+}
+	const Symmetry& sym() const
+		{@+ return run;@+}
+	Symmetry& sym()
+		{@+ return run;@+}
+	int size() const
+		{@+ return run.size();@+}
+};
+
+@ The logic of |symiterator| was described in |@<|SymmetrySet| class
+declaration@>|. Here we only note that the class has a reference to
+the |SymmetrySet| only to know the dimension and to access its
+symmetry storage. Further, we have pointers to the subordinal
+|symiterator| and its |SymmetrySet|. These are pointers, since the
+recursion ends at length 2, in which case the pointers are |NULL|.
+
+The constructor creates the iterator which initializes to the first
+symmetry (beginning).
+
+@<|symiterator| class declaration@>=
+class symiterator {
+	SymmetrySet& s;
+	symiterator* subit;
+	SymmetrySet* subs;
+	bool end_flag;
+public:@;
+	symiterator(SymmetrySet& ss);
+	~symiterator();
+	symiterator& operator++();
+	bool isEnd() const
+		{@+ return end_flag;@+}
+	const Symmetry& operator*() const
+		{@+ return s.sym();@+}
+};
+
+
+@ This simple abstraction just constructs a vector of induced
+symmetries from the given equivalence and outer symmetry. A
+permutation might optionally permute the classes of the equivalence.
+
+@<|InducedSymmetries| class declaration@>=
+class InducedSymmetries : public vector<Symmetry> {
+public:@;
+	InducedSymmetries(const Equivalence& e, const Symmetry& s);
+	InducedSymmetries(const Equivalence& e, const Permutation& p, const Symmetry& s);
+	void print() const;
+};
+
+
+
+@ End of {\tt symmetry.h} file.
diff --git a/dynare++/tl/cc/t_container.cweb b/dynare++/tl/cc/t_container.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..e4a1b3ee62a5faa103550235c182908c57b69e2c
--- /dev/null
+++ b/dynare++/tl/cc/t_container.cweb
@@ -0,0 +1,138 @@
+@q $Id: t_container.cweb 148 2005-04-19 15:12:26Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@ Start of {\tt t\_container.cpp} file.
+@s USubTensor int
+@c
+#include "t_container.h" 
+#include "kron_prod.h"
+#include "ps_tensor.h"
+#include "pyramid_prod.h"
+
+const int FGSContainer::num_one_time = 10;
+@<|UGSContainer| conversion from |FGSContainer|@>;
+@<|UGSContainer::multAndAdd| code@>;
+@<|FGSContainer| conversion from |UGSContainer|@>;
+@<|FGSContainer::multAndAdd| folded code@>;
+@<|FGSContainer::multAndAdd| unfolded code@>;
+@<|FGSContainer::getIndices| code@>;
+
+@ 
+@<|UGSContainer| conversion from |FGSContainer|@>=
+UGSContainer::UGSContainer(const FGSContainer& c)
+	: TensorContainer<UGSTensor>(c.num())
+{
+	for (FGSContainer::const_iterator it = c.begin();
+		 it != c.end(); ++it) {
+		UGSTensor* unfolded = new UGSTensor(*((*it).second));
+		insert(unfolded);
+	}
+}
+
+@ We set |l| to the dimension of |t|, which is the tensor multiplying
+the tensors from the container from the left. Also, we set |k| to the
+dimension of the resulting tensor. We go through all equivalences of
+the $k$-element set and pick up only those which have $l$ classes.
+
+In each loop, we fetch all necessary tensors for the product to the
+vector |ts|. Then we form Kronecker product |KronProdAll| and feed it
+with tensors from |ts|. Then we form unfolded permuted symmetry tensor
+|UPSTensor| as matrix product of |t| and Kronecker product |kp|. Then
+we add the permuted data to |out|. This is done by |UPSTensor| method
+|addTo|.
+
+@<|UGSContainer::multAndAdd| code@>=
+void UGSContainer::multAndAdd(const UGSTensor& t, UGSTensor& out) const
+{
+	int l = t.dimen();
+	int k = out.dimen();
+	const EquivalenceSet& eset = ebundle.get(k);
+
+	for (EquivalenceSet::const_iterator it = eset.begin();
+		 it != eset.end(); ++it) {
+		if ((*it).numClasses() == l) {
+			vector<const UGSTensor*> ts =
+				fetchTensors(out.getSym(), *it);
+			KronProdAllOptim kp(l);
+			for (int i = 0; i < l; i++)
+				kp.setMat(i, *(ts[i]));
+			kp.optimizeOrder();
+			UPSTensor ups(out.getDims(), *it, t, kp);
+			ups.addTo(out);
+		}
+	}
+}
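+
+@ For example, with |t| of dimension $l=3$ and |out| of dimension
+$k=5$, the equivalence $\{\{0,4\},\{1,2\},\{3\}\}$ contributes the
+Kronecker product of the three fetched tensors (of dimensions 2, 2
+and 1), multiplied from the left by |t| and added, suitably permuted,
+to |out|.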
+
+@ 
+@<|FGSContainer| conversion from |UGSContainer|@>=
+FGSContainer::FGSContainer(const UGSContainer& c)
+	: TensorContainer<FGSTensor>(c.num())
+{
+	for (UGSContainer::const_iterator it = c.begin();
+		 it != c.end(); ++it) {
+		FGSTensor* folded = new FGSTensor(*((*it).second));
+		insert(folded);
+	}
+}
+
+
+@ Here we perform one step of the Faa Di Bruno operation. We convert
+|t| to an unfolded tensor and call the unfolded |multAndAdd|.
+@<|FGSContainer::multAndAdd| folded code@>=
+void FGSContainer::multAndAdd(const FGSTensor& t, FGSTensor& out) const
+{
+	UGSTensor ut(t);
+	multAndAdd(ut, out);
+}
+
+@ This is the same as |@<|UGSContainer::multAndAdd| code@>|, except
+that from the Kronecker product we construct an |FPSTensor| instead of
+a |UPSTensor|.
+
+@<|FGSContainer::multAndAdd| unfolded code@>=
+void FGSContainer::multAndAdd(const UGSTensor& t, FGSTensor& out) const
+{
+	int l = t.dimen();
+	int k = out.dimen();
+	const EquivalenceSet& eset = ebundle.get(k);
+
+	for (EquivalenceSet::const_iterator it = eset.begin();
+		 it != eset.end(); ++it) {
+		if ((*it).numClasses() == l) {
+			vector<const FGSTensor*> ts =
+				fetchTensors(out.getSym(), *it);
+			KronProdAllOptim kp(l);
+			for (int i = 0; i < l; i++)
+				kp.setMat(i, *(ts[i]));
+			kp.optimizeOrder();
+			FPSTensor fps(out.getDims(), *it, t, kp);
+			fps.addTo(out);
+		}
+	}
+}
+
+
+@ This fills a given vector with the integer sequences corresponding
+to the first |num| indices in the interval from |start| (inclusive) to
+|end| (exclusive). If there are fewer than |num| such indices, a
+shorter vector is returned.
+
+@<|FGSContainer::getIndices| code@>=
+Tensor::index
+FGSContainer::getIndices(int num, vector<IntSequence>& out,
+						 const Tensor::index& start,
+						 const Tensor::index& end)
+{
+	out.clear();
+	int i = 0;
+	Tensor::index run = start;
+	while (i < num && run != end) {
+		out.push_back(run.getCoor());
+		i++;
+		++run;
+	}
+	return run;
+}
+
+
+@ End of {\tt t\_container.cpp} file.
diff --git a/dynare++/tl/cc/t_container.hweb b/dynare++/tl/cc/t_container.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..ed4334f7bb9cbc23e582c348936f14de6803945b
--- /dev/null
+++ b/dynare++/tl/cc/t_container.hweb
@@ -0,0 +1,381 @@
+@q $Id: t_container.hweb 2353 2009-09-03 19:22:36Z michel $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@*2 Tensor containers. Start of {\tt t\_container.h} file.
+
+One of primary purposes of the tensor library is to perform one step
+of the Faa Di Bruno formula:
+$$\left[B_{s^k}\right]_{\alpha_1\ldots\alpha_k}=
+[h_{y^l}]_{\gamma_1\ldots\gamma_l}\sum_{c\in M_{l,k}}
+\prod_{m=1}^l\left[g_{s^{\vert c_m\vert}}\right]^{\gamma_m}_{c_m(\alpha)}
+$$
+where $h_{y^l}$ and $g_{s^i}$ are tensors, $M_{l,k}$ is the set of all
+equivalences of the $k$-element set having $l$ classes, $c_m$ is the
+$m$-th class of equivalence $c$, and $\vert c_m\vert$ is its
+cardinality. Further, $c_m(\alpha)$ is the sequence of $\alpha$s picked
+by the equivalence class $c_m$.
+
+In order to accomplish this operation, we basically need some storage
+of all tensors of the form $\left[g_{s^i}\right]$. Note that $s$ can
+be compound, for instance $s=[y,u]$. Then we need storage for
+$\left[g_{y^3}\right]$, $\left[g_{y^2u}\right]$,
+$\left[g_{yu^5}\right]$, etc.
+ 
+We need an object holding all tensors of the same type. Here, type
+means the information about which coordinate types (for instance $y$
+or $u$) the tensors have. We group only tensors whose symmetry is
+described by the |Symmetry| class; these are of the form $y^2u^3$, not
+$yuyu^2$. So, we are going to define a class which holds tensors whose
+symmetries are of type |Symmetry| and have the same symmetry length
+(number of different coordinate types). Also, for each symmetry there
+will be at most one tensor.
+
+The class has two purposes. The first is to provide storage (insert
+and retrieve). The second is to perform the above step of Faa Di
+Bruno, that is, to go through all equivalences with $l$ classes,
+perform the tensor products, and add them to the result.
+  
+We define a template class |TensorContainer|. We will inherit from
+different instantiations of the template class to create concrete
+classes, for example a container of unfolded general symmetric
+tensors. The one step of Faa Di Bruno (we call it |multAndAdd|) is
+implemented in the concrete subclasses, because the implementation
+depends on the storage. Note that |multAndAdd| does not even have a
+common template declaration. This is because a sparse tensor $h$ is
+multiplied by folded tensors $g$ yielding a folded tensor $B$, while an
+unfolded tensor $h$ is multiplied by unfolded tensors $g$ yielding an
+unfolded tensor $B$.
+
+@c
+#ifndef T_CONTAINER_H
+#define T_CONTAINER_H
+
+#include "symmetry.h"
+#include "gs_tensor.h"
+#include "tl_exception.h"
+#include "tl_static.h"
+#include "sparse_tensor.h"
+#include "equivalence.h"
+#include "rfs_tensor.h"
+#include "Vector.h"
+
+#include <map>
+#include <string>
+#include <stdio.h>
+#include <string.h>
+
+@<|ltsym| predicate@>;
+@<|TensorContainer| class definition@>;
+@<|UGSContainer| class declaration@>;
+@<|FGSContainer| class declaration@>;
+
+#endif
+
+@ We need a predicate on strict weak ordering of symmetries.
+@<|ltsym| predicate@>=
+struct ltsym {
+	bool operator()(const Symmetry& s1, const Symmetry& s2) const
+	{@+ return s1 < s2;@+}
+};
+
+@ Here we define the template class for the tensor container. We
+implement it as an |std::map|. It is a unique container: no two tensors
+with the same symmetry can coexist. Keys of the map are symmetries,
+values are pointers to tensors. The class is responsible for
+deallocating all tensors; creation of the tensors is done outside.
+
+The class has an integer |n| as a member; it is the number of different
+coordinate types of all contained tensors. Besides the intuitive insert
+and retrieve interface, we define a method |fetchTensors| which, for a
+given symmetry and a given equivalence, calculates the symmetries
+implied by the symmetry and all equivalence classes, and fetches the
+corresponding tensors into a vector.
+
+Also, each instance of the container has a reference to
+|EquivalenceBundle| which allows an access to equivalences.
+
+@s _const_ptr int;
+@s _ptr int;
+@s _Map int;
+
+@<|TensorContainer| class definition@>=
+template<class _Ttype> class TensorContainer {
+protected:@;
+	typedef const _Ttype* _const_ptr;
+	typedef _Ttype* _ptr;
+	typedef map<Symmetry, _ptr, ltsym> _Map;@/
+	typedef typename _Map::value_type _mvtype;@/
+public:@;
+	typedef typename _Map::iterator iterator;@/
+	typedef typename _Map::const_iterator const_iterator;@/
+private:@;
+	int n;
+	_Map m;
+protected:@;
+	const EquivalenceBundle& ebundle;
+public:@;
+	TensorContainer(int nn)
+		: n(nn), ebundle(*(tls.ebundle)) @+ {}
+	@<|TensorContainer| copy constructor@>;
+	@<|TensorContainer| subtensor constructor@>;
+	@<|TensorContainer:get| code@>;
+	@<|TensorContainer::check| code@>;
+	@<|TensorContainer::insert| code@>;
+	@<|TensorContainer::remove| code@>;
+	@<|TensorContainer::clear| code@>;
+	@<|TensorContainer::fetchTensors| code@>;
+	@<|TensorContainer::getMaxDim| code@>;
+	@<|TensorContainer::print| code@>;
+	@<|TensorContainer::writeMat4| code@>;
+	@<|TensorContainer::writeMMap| code@>;
+
+	virtual ~TensorContainer()
+		{@+ clear();@+}
+
+	@<|TensorContainer| inline methods@>;
+};
+
+@ 
+@<|TensorContainer| inline methods@>=
+	int num() const
+		{@+ return n;@+}
+	const EquivalenceBundle& getEqBundle() const
+		{@+ return ebundle;@+}
+
+	const_iterator begin() const
+		{@+ return m.begin();@+}
+	const_iterator end() const
+		{@+ return m.end();@+}
+	iterator begin()
+		{@+ return m.begin();@+}
+	iterator end()
+		{@+ return m.end();@+}
+
+@ This is just a copy constructor. It makes a deep copy of all tensors.
+@<|TensorContainer| copy constructor@>=
+TensorContainer(const TensorContainer<_Ttype>& c)
+	: n(c.n), m(), ebundle(c.ebundle)
+{
+	for (const_iterator it = c.m.begin(); it != c.m.end(); ++it) {
+		_Ttype* ten = new _Ttype(*((*it).second));
+		insert(ten);
+	}
+}
+
+@ This constructor constructs a new tensor container, whose tensors
+are in-place subtensors of the given container.
+
+@<|TensorContainer| subtensor constructor@>=
+TensorContainer(int first_row, int num, TensorContainer<_Ttype>& c)
+	: n(c.n), ebundle(*(tls.ebundle))
+{
+	for (iterator it = c.m.begin(); it != c.m.end(); ++it) {
+		_Ttype* t = new _Ttype(first_row, num, *((*it).second));
+		insert(t);
+	}
+}
+
+
+@ 
+@<|TensorContainer:get| code@>=
+_const_ptr get(const Symmetry& s) const
+{
+	TL_RAISE_IF(s.num() != num(),
+				"Incompatible symmetry lookup in TensorContainer::get");
+	const_iterator it = m.find(s);
+	if (it == m.end()) {
+		TL_RAISE("Symmetry not found in TensorContainer::get");
+		return NULL;
+	} else {
+		return (*it).second;
+	}
+}
+@#
+
+_ptr get(const Symmetry& s)
+{
+	TL_RAISE_IF(s.num() != num(),
+				"Incompatible symmetry lookup in TensorContainer::get");
+	iterator it = m.find(s);
+	if (it == m.end()) {
+		TL_RAISE("Symmetry not found in TensorContainer::get");
+		return NULL;
+	} else {
+		return (*it).second;
+	}
+}
+
+@ 
+@<|TensorContainer::check| code@>=
+bool check(const Symmetry& s) const
+{
+	TL_RAISE_IF(s.num() != num(),
+				"Incompatible symmetry lookup in TensorContainer::check");
+	const_iterator it = m.find(s);
+	return it != m.end();
+}
+
+@ 
+@<|TensorContainer::insert| code@>=
+void insert(_ptr t)
+{
+	TL_RAISE_IF(t->getSym().num() != num(),
+				"Incompatible symmetry insertion in TensorContainer::insert");
+	TL_RAISE_IF(check(t->getSym()),
+				"Tensor already in container in TensorContainer::insert");
+	m.insert(_mvtype(t->getSym(),t));
+	if (! t->isFinite()) {
+		throw TLException(__FILE__, __LINE__,  "NaN or Inf asserted in TensorContainer::insert");
+	}
+}
+
+@ 
+@<|TensorContainer::remove| code@>=
+void remove(const Symmetry& s)
+{
+	iterator it = m.find(s);
+	if (it != m.end()) {
+		_ptr t = (*it).second;
+		m.erase(it);
+		delete t;
+	}
+}
+
+
+@ 
+@<|TensorContainer::clear| code@>=
+void clear()
+{
+	while (! m.empty()) {
+		delete (*(m.begin())).second;
+		m.erase(m.begin());
+	}
+}
+
+@ 
+@<|TensorContainer::getMaxDim| code@>=
+int getMaxDim() const
+{
+	int res = -1;
+	for (const_iterator run = m.begin(); run != m.end(); ++run) {
+		int dim = (*run).first.dimen();
+		if (dim > res)
+			res = dim;
+	}
+	return res;
+}
+
+
+@ Debug print.
+@<|TensorContainer::print| code@>=
+void print() const
+{
+	printf("Tensor container: nvars=%d, tensors=%d\n", n, m.size());
+	for (const_iterator it = m.begin(); it != m.end(); ++it) {
+		printf("Symmetry: ");
+		(*it).first.print();
+		((*it).second)->print();
+	}
+}
+
+@ Output to the MAT--4 file.
+@<|TensorContainer::writeMat4| code@>=
+void writeMat4(FILE* fd, const char* prefix) const
+{
+	for (const_iterator it = begin(); it != end(); ++it) {
+		char lname[100];
+		sprintf(lname, "%s_g", prefix);
+		const Symmetry& sym = (*it).first;
+		for (int i = 0; i < sym.num(); i++) {
+			char tmp[10];
+			sprintf(tmp, "_%d", sym[i]);
+			strcat(lname, tmp);
+		}
+		ConstTwoDMatrix m(*((*it).second));
+		m.writeMat4(fd, lname);
+	}
+}
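+
+@ For instance, with a hypothetical prefix |"dyn"|, a tensor with
+symmetry $(2,3)$ is written under the name |dyn_g_2_3|.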
+
+@ Output to the Memory Map.
+@<|TensorContainer::writeMMap| code@>=
+void writeMMap(map<string,ConstTwoDMatrix>* mm) const
+{
+	for (const_iterator it = begin(); it != end(); ++it) {
+		char lname[100];
+		sprintf(lname, "g");
+		const Symmetry& sym = (*it).first;
+		for (int i = 0; i < sym.num(); i++) {
+			char tmp[10];
+			sprintf(tmp, "_%d", sym[i]);
+			strcat(lname, tmp);
+		}
+		ConstTwoDMatrix mx(*((*it).second));
+		mm->insert(make_pair(string(lname),mx));
+	}
+}
+
+
+@ Here we fetch all tensors given by the symmetry and the equivalence.
+We go through all equivalence classes, calculate the implied symmetry,
+and fetch its tensor, storing them in the same order in the vector.
+
+@<|TensorContainer::fetchTensors| code@>=
+vector<_const_ptr>
+fetchTensors(const Symmetry& rsym, const Equivalence& e) const
+{
+	vector<_const_ptr> res(e.numClasses());
+	int i = 0;
+	for (Equivalence::const_seqit it = e.begin();
+		 it != e.end(); ++it, i++) {
+		Symmetry s(rsym, *it);
+		res[i] = get(s);
+	}
+	return res;
+}
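+
+@ For example, for |rsym| equal to $y^2u^3$ and the equivalence
+$\{\{0,4\},\{1,2\},\{3\}\}$, the returned vector holds pointers to the
+tensors with symmetries $yu$, $yu$ and $u$, in this order.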
+
+@ Here is a container storing |UGSTensor|s. We declare the |multAndAdd| method.
+
+@<|UGSContainer| class declaration@>=
+class FGSContainer;
+class UGSContainer : public TensorContainer<UGSTensor> {
+public:@;
+	UGSContainer(int nn)
+		: TensorContainer<UGSTensor>(nn)@+ {}
+	UGSContainer(const UGSContainer& uc)
+		: TensorContainer<UGSTensor>(uc)@+ {}
+	UGSContainer(const FGSContainer& c);
+	void multAndAdd(const UGSTensor& t, UGSTensor& out) const;
+};
+
+
+@ Here is a container storing |FGSTensor|s. We declare two versions of
+the |multAndAdd| method. The first works for folded $B$ and folded $h$
+tensors, the second works for folded $B$ and unfolded $h$. There is no
+point in doing it for unfolded $B$, since the algorithm goes through
+all the indices of $B$ and calculates the corresponding columns. So, if
+$B$ is needed unfolded, it is more efficient to calculate its folded
+version and then unfold it by conversion.
+
+The static member |num_one_time| is the number of columns formed from
+the product of $g$ tensors at one time. This is subject to change;
+probably we will have to do some tuning and decide this number based
+on symmetries and dimensions at runtime.
+
+@s FGSContainer int
+@<|FGSContainer| class declaration@>=
+class FGSContainer : public TensorContainer<FGSTensor> {
+	static const int num_one_time;
+public:@;
+	FGSContainer(int nn)
+		: TensorContainer<FGSTensor>(nn)@+ {}
+	FGSContainer(const FGSContainer& fc)
+		: TensorContainer<FGSTensor>(fc)@+ {}
+	FGSContainer(const UGSContainer& c);
+	void multAndAdd(const FGSTensor& t, FGSTensor& out) const;
+	void multAndAdd(const UGSTensor& t, FGSTensor& out) const;
+private:@;
+	static Tensor::index
+	getIndices(int num, vector<IntSequence>& out,
+			   const Tensor::index& start,
+			   const Tensor::index& end);
+};
+
+
+@ End of {\tt t\_container.h} file.
diff --git a/dynare++/tl/cc/t_polynomial.cweb b/dynare++/tl/cc/t_polynomial.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..8ad9d0070429e128a9dbb7ccda48846dc5dad658
--- /dev/null
+++ b/dynare++/tl/cc/t_polynomial.cweb
@@ -0,0 +1,80 @@
+@q $Id: t_polynomial.cweb 1210 2007-03-19 21:38:49Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@ Start of {\tt t\_polynomial.cpp} file.
+
+@c
+#include "t_polynomial.h"
+#include "kron_prod.h"
+
+@<|PowerProvider::getNext| unfolded code@>;
+@<|PowerProvider::getNext| folded code@>;
+@<|PowerProvider| destructor code@>;
+@<|UTensorPolynomial| constructor conversion code@>;
+@<|FTensorPolynomial| constructor conversion code@>;
+
+
+@ This method constructs the unfolded |ut| of one higher dimension,
+deleting the previous one.
+
+@<|PowerProvider::getNext| unfolded code@>=
+const URSingleTensor& PowerProvider::getNext(const URSingleTensor* dummy)
+{
+	if (ut) {
+		URSingleTensor* ut_new = new URSingleTensor(nv, ut->dimen()+1);
+		KronProd::kronMult(ConstVector(origv), ConstVector(ut->getData()), ut_new->getData());
+		delete ut;
+		ut = ut_new;
+	} else {
+		ut = new URSingleTensor(nv, 1);
+		ut->getData() = origv;
+	}
+	return *ut;
+}
+
+@ This method just constructs the next unfolded |ut| and creates the
+folded |ft| from it.
+ 
+@<|PowerProvider::getNext| folded code@>=
+const FRSingleTensor& PowerProvider::getNext(const FRSingleTensor* dummy)
+{
+	getNext(ut);
+	if (ft)
+		delete ft;
+	ft = new FRSingleTensor(*ut);
+	return *ft;
+}
+
+@ 
+@<|PowerProvider| destructor code@>=
+PowerProvider::~PowerProvider()
+{
+	if (ut)
+		delete ut;
+	if (ft)
+		delete ft;
+}
+
+@ Clear.
+@<|UTensorPolynomial| constructor conversion code@>=
+UTensorPolynomial::UTensorPolynomial(const FTensorPolynomial& fp)
+	: TensorPolynomial<UFSTensor, UGSTensor, URSingleTensor>(fp.nrows(), fp.nvars())
+{
+	for (FTensorPolynomial::const_iterator it = fp.begin();
+		 it != fp.end(); ++it) {
+		insert(new UFSTensor(*((*it).second)));
+	}
+}
+
+@ Clear.
+@<|FTensorPolynomial| constructor conversion code@>=
+FTensorPolynomial::FTensorPolynomial(const UTensorPolynomial& up)
+	: TensorPolynomial<FFSTensor, FGSTensor, FRSingleTensor>(up.nrows(), up.nvars())
+{
+	for (UTensorPolynomial::const_iterator it = up.begin();
+		 it != up.end(); ++it) {
+		insert(new FFSTensor(*((*it).second)));
+	}
+}
+
+@ End of {\tt t\_polynomial.cpp} file.
diff --git a/dynare++/tl/cc/t_polynomial.hweb b/dynare++/tl/cc/t_polynomial.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..e0a9c5d066b367fcab89f40e30276c4d1f04b518
--- /dev/null
+++ b/dynare++/tl/cc/t_polynomial.hweb
@@ -0,0 +1,507 @@
+@q $Id: t_polynomial.hweb 2336 2009-01-14 10:37:02Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@*2 Tensor polynomial evaluation. Start of {\tt t\_polynomial.h} file.
+
+We need to evaluate a tensor polynomial of the form:
+$$
+\left[g_{x}\right]_{\alpha_1}[x]^{\alpha_1}+
+\left[g_{x^2}\right]_{\alpha_1\alpha_2}[x]^{\alpha_1}[x]^{\alpha_2}+
+\ldots+
+\left[g_{x^n}\right]_{\alpha_1\ldots\alpha_n}\prod_{i=1}^n[x]^{\alpha_i}
+$$
+where $x$ is a column vector.
+
+We have basically two options. The first is to use the formula above,
+the second is to use a Horner-like formula:
+$$
+\left[\cdots\left[\left[\left[g_{x^{n-1}}\right]+
+\left[g_{x^n}\right]_{\alpha_1\ldots\alpha_{n-1}\alpha_n}
+[x]^{\alpha_n}\right]_{\alpha_1\ldots\alpha_{n-2}\alpha_{n-1}}
+[x]^{\alpha_{n-1}}\right]\cdots\right]_{\alpha_1}
+[x]^{\alpha_1}
+$$
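+
+In the simplest scalar case this is just the familiar
+$$g_1x+g_2x^2+g_3x^3=\left(\left(g_3x+g_2\right)x+g_1\right)x,$$
+so each step adds the tensor of the next lower dimension and then
+multiplies (contracts) the result with one more copy of $x$.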
+
+Alternatively, we can put the polynomial into a more compact form
+$$\left[g_{x}\right]_{\alpha_1}[x]^{\alpha_1}+
+\left[g_{x^2}\right]_{\alpha_1\alpha_2}[x]^{\alpha_1}[x]^{\alpha_2}+
+\ldots+
+\left[g_{x^n}\right]_{\alpha_1\ldots\alpha_n}\prod_{i=1}^n[x]^{\alpha_i}
+= [G]_{\alpha_1\ldots\alpha_n}\prod_{i=1}^n\left[\matrix{1\cr x}\right]^{\alpha_i}
+$$
+Then the polynomial evaluation becomes just a matrix multiplication of the vector power.
+
+Here we define the tensor polynomial as a container of full symmetry
+tensors and add evaluation methods. We have two sorts of containers,
+folded and unfolded. For each type we declare two methods implementing
+the above formulas. We also define classes for the compactification of
+the polynomial. Such a class derives from the tensor and has an |eval|
+method.
+
+
+@s PowerProvider int
+@s TensorPolynomial int
+@s UTensorPolynomial int
+@s FTensorPolynomial int
+@s CompactPolynomial int
+@s UCompactPolynomial int
+@s FCompactPolynomial int
+
+@c
+#include "t_container.h"
+#include "fs_tensor.h"
+#include "rfs_tensor.h"
+#include"tl_static.h"
+
+@<|PowerProvider| class declaration@>;
+@<|TensorPolynomial| class declaration@>;
+@<|UTensorPolynomial| class declaration@>;
+@<|FTensorPolynomial| class declaration@>;
+@<|CompactPolynomial| class declaration@>;
+@<|UCompactPolynomial| class declaration@>;
+@<|FCompactPolynomial| class declaration@>;
+
+@ Just to make the code nicer, we encapsulate the Kronecker power of a
+vector in the following class. It has a |getNext| method which returns
+either a folded or an unfolded row-oriented single column Kronecker
+power of the vector, according to the type of a dummy argument. This
+allows us to use type-dependent code in the templates below.
+
+The implementation maintains the last unfolded power. If the unfolded
+|getNext| is called, we Kronecker multiply the last power by the
+vector and return it. If the folded |getNext| is called, we do the
+same and in addition fold the result.
+
+|getNext| returns the vector itself on the first call (the first
+power), the second power on the second call, and so on.
+
+@<|PowerProvider| class declaration@>=
+class PowerProvider {
+	Vector origv;
+	URSingleTensor* ut;
+	FRSingleTensor* ft;
+	int nv;
+public:@;
+	PowerProvider(const ConstVector& v)
+		: origv(v), ut(NULL), ft(NULL), nv(v.length())@+ {}
+	~PowerProvider();
+	const URSingleTensor& getNext(const URSingleTensor* dummy);
+	const FRSingleTensor& getNext(const FRSingleTensor* dummy);
+};
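+
+@ A short usage sketch, assuming a |ConstVector| |v| and a bound
+|maxdim| are given. The returned reference is owned by the provider
+and is invalidated by the next call of |getNext| (and by the
+destruction of |pp|), so it should be used before the next call.
+
+\kern0.3cm
+\centerline{|PowerProvider pp(v);|}
+\centerline{|for (int d = 1; d <= maxdim; d++) {|}
+\centerline{|	const URSingleTensor& p = pp.getNext((const URSingleTensor*)NULL);|}
+\centerline{|	// p is now the d-th Kronecker power of v|}
+\centerline{|}|}
+\kern0.3cm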
+
+@ The tensor polynomial is basically a tensor container which is more
+strict about insertions. It maintains the number of rows and the
+number of variables, and allows insertion only of tensors consistent
+with these properties. The maximum dimension is maintained by the
+|insert| method.
+
+So we re-implement the |insert| method, and implement |evalTrad|
+(traditional polynomial evaluation) and the Horner-like evaluation
+|evalHorner|.
+
+In addition, we implement derivatives of the polynomial and their
+evaluation. The evaluation of a derivative is different from the
+evaluation of the whole polynomial, simply because the evaluation of
+the derivatives is a tensor, and the evaluation of the polynomial is a
+vector (zero dimensional tensor). See documentation to
+|@<|TensorPolynomial::derivative| code@>| and
+|@<|TensorPolynomial::evalPartially| code@>| for details.
+
+@s _Stype int
+@s _TGStype int
+
+@<|TensorPolynomial| class declaration@>=
+template <class _Ttype, class _TGStype, class _Stype>@;
+class TensorPolynomial : public TensorContainer<_Ttype> {
+	int nr;
+	int nv;
+	int maxdim;
+	typedef TensorContainer<_Ttype> _Tparent;
+	typedef typename _Tparent::_ptr _ptr;
+public:@;
+	TensorPolynomial(int rows, int vars)
+		: TensorContainer<_Ttype>(1),
+		  nr(rows), nv(vars), maxdim(0) {}
+	TensorPolynomial(const TensorPolynomial<_Ttype, _TGStype, _Stype>& tp, int k)
+		: TensorContainer<_Ttype>(tp),
+		  nr(tp.nr), nv(tp.nv), maxdim(0) {@+ derivative(k);@+}
+	TensorPolynomial(int first_row, int num, TensorPolynomial<_Ttype, _TGStype, _Stype>& tp)
+		: TensorContainer<_Ttype>(first_row, num, tp),
+		  nr(num), nv(tp.nv), maxdim(tp.maxdim)@+ {}
+	@<|TensorPolynomial| contract constructor code@>;
+	TensorPolynomial(const TensorPolynomial& tp)
+		: TensorContainer<_Ttype>(tp), nr(tp.nr), nv(tp.nv), maxdim(tp.maxdim)@+ {}
+	int nrows() const
+		{@+ return nr;@+}
+	int nvars() const
+		{@+ return nv;@+}
+	@<|TensorPolynomial::evalTrad| code@>;
+	@<|TensorPolynomial::evalHorner| code@>;
+	@<|TensorPolynomial::insert| code@>;
+	@<|TensorPolynomial::derivative| code@>;
+	@<|TensorPolynomial::evalPartially| code@>;
+};
+
+
+@ This constructor takes a tensor polynomial 
+$$P(x,y)=\sum^m_{k=0}[g_{(xy)^k}]_{\alpha_1\ldots\alpha_k}
+\left[\matrix{x\cr y}\right]^{\alpha_1\ldots\alpha_k}$$
+and for a given $x$ it makes a polynomial
+$$Q(y)=P(x,y).$$
+
+The algorithm for each full symmetry $(xy)^k$ works with subtensors (slices) of
+symmetry $x^iy^j$ (with $i+j=k$), and contracts these subtensors with respect to
+$x^i$ to obtain a tensor of full symmetry $y^j$. Since the column
+$x^i$ is calculated by |PowerProvider| we cycle for $i=1,...,m$. Then
+we have to add everything for $i=0$.
+
+The code works as follows: For slicing purposes we need stack sizes
+|ss| corresponding to the lengths of $x$ and $y$, and then the identity |pp|
+for unfolding a symmetry of the slice to obtain stack coordinates of
+the slice. Then we do the calculations for $i=1,\ldots,m$ and then for
+$i=0$.
+
+@<|TensorPolynomial| contract constructor code@>=
+TensorPolynomial(const TensorPolynomial<_Ttype, _TGStype, _Stype>& tp, const Vector& xval)
+	: TensorContainer<_Ttype>(1),
+	  nr(tp.nrows()), nv(tp.nvars() - xval.length()), maxdim(0)
+{
+	TL_RAISE_IF(nvars() < 0,
+				"Length of xval too big in TensorPolynomial contract constructor");
+	IntSequence ss(2);@+ ss[0] = xval.length();@+ ss[1] = nvars();
+	IntSequence pp(2);@+ pp[0] = 0;@+ pp[1] = 1;
+
+	@<do contraction for all $i>0$@>;
+	@<do contraction for $i=0$@>;
+}
+
+@ Here we set up the |PowerProvider|, and cycle through
+$i=1,\ldots,m$. Within the loop we cycle through $j=0,\ldots,m-i$. If
+there is a tensor with symmetry $(xy)^{i+j}$ in the original
+polynomial, we make its slice with symmetry $x^iy^j$, and
+|contractAndAdd| it to the tensor |ten| in the |this| polynomial with
+a symmetry $y^j$.
+
+Note three things: First, the tensor |ten| is either created and put
+to the |this| container or just taken from the container; this is done in
+|@<initialize |ten| of dimension |j|@>|. Second, the contribution to
+the |ten| tensor must be multiplied by $\left(\matrix{i+j\cr
+j}\right)$, since there are exactly that many slices of
+$(xy)^{i+j}$ with the symmetry $x^iy^j$ and all of them must be added. Third,
+the tensor |ten| is fully symmetric while |_TGStype::contractAndAdd|
+works with general symmetry, which is why we have to in-place convert
+the fully symmetric |ten| to a general symmetry tensor.
+
+@<do contraction for all $i>0$@>=
+	PowerProvider pwp(xval);
+	for (int i = 1; i <= tp.maxdim; i++) {
+		const _Stype& xpow = pwp.getNext((const _Stype*)NULL);
+		for (int j = 0; j <= tp.maxdim-i; j++) {
+			if (tp.check(Symmetry(i+j))) {
+				@<initialize |ten| of dimension |j|@>;
+				Symmetry sym(i,j);
+				IntSequence coor(sym, pp);
+				_TGStype slice(*(tp.get(Symmetry(i+j))), ss, coor, TensorDimens(sym, ss));
+				slice.mult(Tensor::noverk(i+j, j));
+				_TGStype tmp(*ten);
+				slice.contractAndAdd(0, tmp, xpow);
+			}
+		}
+	}
+
+@ This is easy. The code is the $i=0$ counterpart of |@<do contraction for
+all $i>0$@>|; the contraction here takes the form of a
+simple addition.
+
+@<do contraction for $i=0$@>=
+	for (int j = 0; j <= tp.maxdim; j++) {
+		if (tp.check(Symmetry(j))) {
+			@<initialize |ten| of dimension |j|@>;
+			Symmetry sym(0, j);
+			IntSequence coor(sym, pp);
+			_TGStype slice(*(tp.get(Symmetry(j))), ss, coor, TensorDimens(sym, ss));
+			ten->add(1.0, slice);
+		}
+	}
+
+
+@ The pointer |ten| points either to a newly created tensor or to a tensor taken from the |this| container.
+@<initialize |ten| of dimension |j|@>=
+	_Ttype* ten;
+	if (_Tparent::check(Symmetry(j))) {
+		ten = _Tparent::get(Symmetry(j));
+	} else {
+		ten = new _Ttype(nrows(), nvars(), j);
+		ten->zeros();
+		insert(ten);
+	}
+
+
+@ Here we cycle up to the maximum dimension, and if a tensor exists in
+the container, then we multiply it with the Kronecker power of the
+vector supplied by |PowerProvider|.
+
+@<|TensorPolynomial::evalTrad| code@>=
+void evalTrad(Vector& out, const ConstVector& v) const
+{
+	if (_Tparent::check(Symmetry(0)))
+		out = _Tparent::get(Symmetry(0))->getData();
+	else
+		out.zeros();
+
+	PowerProvider pp(v);
+	for (int d = 1; d <= maxdim; d++) {
+		const _Stype& p = pp.getNext((const _Stype*)NULL);
+		Symmetry cs(d);
+		if (_Tparent::check(cs)) {
+			const _Ttype* t = _Tparent::get(cs);
+			t->multaVec(out, p.getData());
+		}
+	}
+}
+
+@ Here we first construct, by contraction, the tensor of dimension |maxdim-1|, and then
+cycle downwards. The code is clear; the only messy thing is the |new| and |delete| management.
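+To make the scheme concrete, for |maxdim| equal to $3$ the code
+evaluates (writing $g_i$ for the stored tensors ${1\over i!}\left[g_{y^i}\right]$
+and suppressing the indices)
+$$\bigl((g_3y+g_2)y+g_1\bigr)y+g_0,$$
+contracting one copy of $y$ at each step, instead of forming the
+Kronecker powers of $y$ as |evalTrad| does.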
+
+@<|TensorPolynomial::evalHorner| code@>=
+void evalHorner(Vector& out, const ConstVector& v) const
+{
+	if (_Tparent::check(Symmetry(0)))
+		out = _Tparent::get(Symmetry(0))->getData();
+	else
+		out.zeros();
+
+	if (maxdim == 0)
+		return;
+
+	_Ttype* last;
+	if (maxdim == 1)
+		last = new _Ttype(*(_Tparent::get(Symmetry(1))));
+	else 
+		last = new _Ttype(*(_Tparent::get(Symmetry(maxdim))), v);
+	for (int d = maxdim-1; d >=1; d--) {
+		Symmetry cs(d);
+		if (_Tparent::check(cs)) {
+			const _Ttype* nt = _Tparent::get(cs);
+			last->add(1.0, ConstTwoDMatrix(*nt));
+		}
+		if (d > 1) {
+			_Ttype* new_last = new _Ttype(*last, v);
+			delete last;
+			last = new_last;
+		}
+	}
+	last->multaVec(out, v);
+	delete last;
+}
+
+@ Before a tensor is inserted, we check the number of rows and the
+number of variables. Then we insert it and update |maxdim|.
+
+@<|TensorPolynomial::insert| code@>=
+void insert(_ptr t)
+{
+	TL_RAISE_IF(t->nrows() != nr,
+				"Wrong number of rows in TensorPolynomial::insert");
+	TL_RAISE_IF(t->nvar() != nv,
+				"Wrong number of variables in TensorPolynomial::insert");
+	TensorContainer<_Ttype>::insert(t);
+	if (maxdim < t->dimen())
+		maxdim = t->dimen();
+}
+
+@ The polynomial takes the form
+$$\sum_{i=0}^n{1\over i!}\left[g_{y^i}\right]_{\alpha_1\ldots\alpha_i}
+\left[y\right]^{\alpha_1}\ldots\left[y\right]^{\alpha_i},$$ where
+$\left[g_{y^i}\right]$ are $i$-order derivatives of the polynomial. We
+assume that ${1\over i!}\left[g_{y^i}\right]$ are items in the tensor
+container.  This method differentiates the polynomial by one order to
+yield:
+$$\sum_{i=1}^n{1\over i!}\left[i\cdot g_{y^i}\right]_{\alpha_1\ldots\alpha_i}
+\left[y\right]^{\alpha_1}\ldots\left[y\right]^{\alpha_{i-1}},$$
+where $\left[i\cdot{1\over i!}\cdot g_{y^i}\right]$ are put to the container.
+
+A polynomial can itself be a derivative of some order, and that order
+cannot be recognized from the object. That is why the order has to be passed in.
+
+@<|TensorPolynomial::derivative| code@>=
+void derivative(int k)
+{
+	for (int d = 1; d <= maxdim; d++) {
+		if (_Tparent::check(Symmetry(d))) {
+			_Ttype* ten = _Tparent::get(Symmetry(d));
+			ten->mult((double) max((d-k), 0));
+		}
+	}
+}
+
+@ Now let us suppose that we have an |s|-order derivative of a
+polynomial whose $i$-order derivatives are $\left[g_{y^i}\right]$, so
+we have
+$$\sum_{i=s}^n{1\over i!}\left[g_{y^i}\right]_{\alpha_1\ldots\alpha_i}
+\prod_{k=1}^{i-s}\left[y\right]^{\alpha_k},$$
+where ${1\over i!}\left[g_{y^i}\right]$ are tensors in the container.
+
+This method performs this evaluation. The result is an |s|-dimensional
+tensor. Note that, combined with the method |derivative|, it
+evaluates a derivative of some order. For example, the sequence of calls
+|g.derivative(0)|, |g.derivative(1)| and |der=g.evalPartially(2, v)|
+calculates the $2!$ multiple of the second derivative of |g| at |v|.
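+As a minimal usage sketch (the polynomial and the point of evaluation
+are assumed to have been prepared elsewhere, for instance by the
+testing |Factory|; the names |nr|, |nv| and |v| are placeholders):
+
+	UTensorPolynomial g(nr, nv);             // placeholder sizes
+	// ... fill g with the tensors (1/i!)[g_{y^i}], e.g. via Factory::makePoly ...
+	g.derivative(0);                         // scales the tensor of dimension i by i
+	g.derivative(1);                         // scales it further by i-1
+	UFSTensor* der = g.evalPartially(2, v);  // 2! times the second derivative at v
+	delete der;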
+
+@<|TensorPolynomial::evalPartially| code@>=
+_Ttype* evalPartially(int s, const ConstVector& v)
+{
+	TL_RAISE_IF(v.length() != nvars(),
+				"Wrong length of vector for TensorPolynomial::evalPartially");
+
+	_Ttype* res = new _Ttype(nrows(), nvars(), s);
+	res->zeros();
+
+	if (_Tparent::check(Symmetry(s)))
+		res->add(1.0, *(_Tparent::get(Symmetry(s))));
+
+	for (int d = s+1; d <= maxdim; d++) {
+		if (_Tparent::check(Symmetry(d))) {
+			const _Ttype& ltmp = *(_Tparent::get(Symmetry(d)));
+			_Ttype* last = new _Ttype(ltmp);
+			for (int j = 0; j < d - s; j++) {
+				_Ttype* newlast = new _Ttype(*last, v);
+				delete last;
+				last = newlast;
+			}
+			res->add(1.0, *last);
+			delete last;
+		}
+	}
+
+	return res;
+}
+
+@ This just gives a name to unfolded tensor polynomial.
+@<|UTensorPolynomial| class declaration@>=
+class FTensorPolynomial;
+class UTensorPolynomial : public TensorPolynomial<UFSTensor, UGSTensor, URSingleTensor> {
+public:@;
+	UTensorPolynomial(int rows, int vars)
+		: TensorPolynomial<UFSTensor, UGSTensor, URSingleTensor>(rows, vars)@+ {}
+	UTensorPolynomial(const UTensorPolynomial& up, int k)
+		: TensorPolynomial<UFSTensor, UGSTensor, URSingleTensor>(up, k)@+ {}
+	UTensorPolynomial(const FTensorPolynomial& fp);
+	UTensorPolynomial(const UTensorPolynomial& tp, const Vector& xval)
+		: TensorPolynomial<UFSTensor, UGSTensor, URSingleTensor>(tp, xval)@+ {}
+	UTensorPolynomial(int first_row, int num, UTensorPolynomial& tp)
+		: TensorPolynomial<UFSTensor, UGSTensor, URSingleTensor>(first_row, num, tp)@+ {}
+};
+
+@ This just gives a name to folded tensor polynomial.
+@<|FTensorPolynomial| class declaration@>=
+class FTensorPolynomial : public TensorPolynomial<FFSTensor, FGSTensor, FRSingleTensor> {
+public:@;
+	FTensorPolynomial(int rows, int vars)
+		: TensorPolynomial<FFSTensor, FGSTensor, FRSingleTensor>(rows, vars)@+ {}
+	FTensorPolynomial(const FTensorPolynomial& fp, int k)
+		: TensorPolynomial<FFSTensor, FGSTensor, FRSingleTensor>(fp, k)@+ {}
+	FTensorPolynomial(const UTensorPolynomial& up);
+	FTensorPolynomial(const FTensorPolynomial& tp, const Vector& xval)
+		: TensorPolynomial<FFSTensor, FGSTensor, FRSingleTensor>(tp, xval)@+ {}
+	FTensorPolynomial(int first_row, int num, FTensorPolynomial& tp)
+		: TensorPolynomial<FFSTensor, FGSTensor, FRSingleTensor>(first_row, num, tp)@+ {}
+};
+
+@ The compact form of |TensorPolynomial| is in fact a full symmetry
+tensor whose number of variables equals the number of variables
+of the polynomial plus one extra variable standing for the constant $1$.
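+In other words, writing $d$ for the maximum dimension, the whole
+polynomial is folded into a single term
+$$P(x)=\left[G\right]_{\alpha_1\ldots\alpha_d}
+\left[\matrix{1\cr x}\right]^{\alpha_1\ldots\alpha_d},$$
+where the tensor $[G]$ collects the tensors of all dimensions of the
+original polynomial (the padding with the variable $1$ accounts for the
+terms of lower dimension).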
+
+@<|CompactPolynomial| class declaration@>=
+template <class _Ttype, class _TGStype, class _Stype>@;
+class CompactPolynomial : public _Ttype {
+public:@;
+	@<|CompactPolynomial| constructor code@>;
+	@<|CompactPolynomial::eval| method code@>;
+};
+
+@ This constructor copies matrices from the given tensor polynomial to
+the appropriate locations in this matrix. It creates a dummy tensor
+|dum| with two variables (one corresponds to $1$, the other to
+$x$). An index goes through this dummy tensor, and for each index we
+compute the number of columns of the folded/unfolded general symmetry
+tensor corresponding to the selection of $1$'s and $x$'s given by the
+index. The length of $1$ is one, and the length of $x$ is
+|pol.nvars()|; this nvs information is stored in |dumnvs|. The
+symmetry of the general symmetry dummy tensor |dumgs| is given by the
+number of $1$'s and $x$'s in the index. We then copy the matrix, if it
+exists in the polynomial, and increase |offset| for the following
+cycle.
+
+@<|CompactPolynomial| constructor code@>=
+CompactPolynomial(const TensorPolynomial<_Ttype, _TGStype, _Stype>& pol)
+	: _Ttype(pol.nrows(), pol.nvars()+1, pol.getMaxDim())
+{
+	_Ttype::zeros();
+
+	IntSequence dumnvs(2);
+	dumnvs[0] = 1;
+	dumnvs[1] = pol.nvars();
+
+	int offset = 0;
+	_Ttype dum(0, 2, _Ttype::dimen());
+	for (Tensor::index i = dum.begin(); i != dum.end(); ++i) {
+		int d = i.getCoor().sum();
+		Symmetry symrun(_Ttype::dimen()-d, d);
+		_TGStype dumgs(0, TensorDimens(symrun, dumnvs));
+		if (pol.check(Symmetry(d))) {
+			TwoDMatrix subt(*this, offset, dumgs.ncols());
+			subt.add(1.0, *(pol.get(Symmetry(d))));	
+		}
+		offset += dumgs.ncols();
+	}
+}
+
+
+@ We create |x1| as a concatenation of $1$ and $x$, then use a
+|PowerProvider| to make the corresponding power |xpow| of |x1|, and
+finally multiply this matrix with that power.
+
+@<|CompactPolynomial::eval| method code@>=
+void eval(Vector& out, const ConstVector& v) const
+{
+	TL_RAISE_IF(v.length()+1 != _Ttype::nvar(),
+				"Wrong input vector length in CompactPolynomial::eval");
+	TL_RAISE_IF(out.length() != _Ttype::nrows(),
+				"Wrong output vector length in CompactPolynomial::eval");
+
+	Vector x1(v.length()+1);
+	Vector x1p(x1, 1, v.length());
+	x1p = v;
+	x1[0] = 1.0;
+
+	if (_Ttype::dimen() == 0)
+		out = ConstVector(*this, 0);
+	else {
+		PowerProvider pp(x1);
+		// keep a pointer, since a const reference could not be re-seated in the loop
+		const _Stype* xpow = &pp.getNext((const _Stype*)NULL);
+		for (int i = 1; i < _Ttype::dimen(); i++)
+			xpow = &pp.getNext((const _Stype*)NULL);
+		multVec(0.0, out, 1.0, *xpow);
+	}
+}
+
+@ Specialization of the |CompactPolynomial| for unfolded tensor.
+@<|UCompactPolynomial| class declaration@>=
+class UCompactPolynomial : public CompactPolynomial<UFSTensor, UGSTensor, URSingleTensor> {
+public:@;
+	UCompactPolynomial(const UTensorPolynomial& upol)
+		: CompactPolynomial<UFSTensor, UGSTensor, URSingleTensor>(upol)@+ {}
+};
+
+@ Specialization of the |CompactPolynomial| for folded tensor.
+@<|FCompactPolynomial| class declaration@>=
+class FCompactPolynomial : public CompactPolynomial<FFSTensor, FGSTensor, FRSingleTensor> {
+public:@;
+	FCompactPolynomial(const FTensorPolynomial& fpol)
+		: CompactPolynomial<FFSTensor, FGSTensor, FRSingleTensor>(fpol)@+ {}
+};
+
+
+
+@ End of {\tt t\_polynomial.h} file.
diff --git a/dynare++/tl/cc/tensor.cweb b/dynare++/tl/cc/tensor.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..a6f5438fcf216a4a0fc19ca4ff468c1d381c28dc
--- /dev/null
+++ b/dynare++/tl/cc/tensor.cweb
@@ -0,0 +1,229 @@
+@q $Id: tensor.cweb 429 2005-08-16 15:20:09Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@ Start of {\tt tensor.cpp} file.
+
+@c
+#include "tensor.h"
+#include "tl_exception.h"
+#include "tl_static.h"
+
+@<|Tensor| static methods@>;
+@<|Tensor::noverseq_ip| static method@>;
+@<|UTensor::increment| code 1@>;
+@<|UTensor::decrement| code 1@>;
+@<|UTensor::increment| code 2@>;
+@<|UTensor::decrement| code 2@>;
+@<|UTensor::getOffset| code 1@>;
+@<|UTensor::getOffset| code 2@>;
+@<|FTensor::decrement| code@>;
+@<|FTensor::getOffsetRecurse| code@>;
+
+@ Here we implement calculation of $\pmatrix{n\cr k}$ where $n-k$ is
+usually bigger than $k$.
+
+Also we implement $a^b$.
+
+@<|Tensor| static methods@>=
+int Tensor::noverk(int n, int k)
+{
+	return tls.ptriang->noverk(n,k);
+}
+@#
+int Tensor::power(int a, int b)
+{
+	int res = 1;
+	for (int i = 0; i < b; i++)
+		res *= a;
+	return res;
+}
+
+@ Here we calculate a generalized combination number
+$\left(\matrix{a\cr b_1,\ldots,b_n}\right)$, where $a=b_1+\ldots+
+b_n$. We use the identity
+$$\left(\matrix{a\cr b_1,\ldots,b_n}\right)=\left(\matrix{b_1+b_2\cr b_1}\right)\cdot
+\left(\matrix{a\cr b_1+b_2,b_3,\ldots,b_n}\right)$$
+
+This number is exactly the number of unfolded indices corresponding to
+one folded index whose symmetry is given by the sequence
+$b_1,\ldots,b_n$.
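+For instance, for the sequence $(1,1,2)$ we get
+$$\left(\matrix{4\cr 1,1,2}\right)=\left(\matrix{2\cr 1}\right)\cdot
+\left(\matrix{4\cr 2,2}\right)=2\cdot\left(\matrix{4\cr 2}\right)=12,$$
+which is indeed $4!/(1!\,1!\,2!)$.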
+
+@<|Tensor::noverseq_ip| static method@>=
+int Tensor::noverseq_ip(IntSequence& s)
+{
+	if (s.size() == 0 || s.size() == 1)
+		return 1;
+	s[1] += s[0];
+	return noverk(s[1],s[0]) * noverseq(IntSequence(s, 1, s.size()));
+}
+
+@ Here we increment a given sequence within the full symmetry given by
+|nv|, which is the number of variables in each dimension. The underlying
+tensor is unfolded, so we increase the rightmost item by one, and if it
+reaches |nv| we zero it and increase the next item to the left.
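+For instance, with |nv| equal to $3$, incrementing $(0,2,2)$ yields
+$(1,0,0)$, exactly as when adding one in base $3$.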
+
+@<|UTensor::increment| code 1@>=
+void UTensor::increment(IntSequence& v, int nv)
+{
+	if (v.size() == 0)
+		return;
+	int i = v.size()-1;
+	v[i]++;
+	while (i > 0 && v[i] == nv) {
+		v[i] = 0;
+		v[--i]++;
+	}
+}
+
+@ This is dual to |UTensor::increment(IntSequence& v, int nv)|.
+
+@<|UTensor::decrement| code 1@>=
+void UTensor::decrement(IntSequence& v, int nv)
+{
+	if (v.size() == 0)
+		return;
+	int i = v.size()-1;
+	v[i]--;
+	while (i > 0 && v[i] == -1) {
+		v[i] = nv -1;
+		v[--i]--;
+	}
+}
+
+@ Here we increment an index of a general symmetry tensor with unfolded
+storage. The sequence |nvmx| assigns to each coordinate its number of
+variables. Since the storage is unfolded, we do not need information
+about which variables are symmetric; everything necessary is given by
+|nvmx|.
+
+@<|UTensor::increment| code 2@>=
+void UTensor::increment(IntSequence& v, const IntSequence& nvmx)
+{
+	if (v.size() == 0)
+		return;
+	int i = v.size()-1;
+	v[i]++;
+	while (i > 0 && v[i] == nvmx[i]) {
+		v[i] = 0;
+		v[--i]++;
+	}
+}
+
+@ This is dual to |UTensor::increment(IntSequence& v, const
+IntSequence& nvmx)|.
+
+@<|UTensor::decrement| code 2@>=
+void UTensor::decrement(IntSequence& v, const IntSequence& nvmx)
+{
+	if (v.size() == 0)
+		return;
+	int i = v.size()-1;
+	v[i]--;
+	while (i > 0 && v[i] == -1) {
+		v[i] = nvmx[i] -1;
+		v[--i]--;
+	}
+}
+
+@ Here we return the offset for given coordinates of an unfolded full
+symmetry tensor. This is easy.
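+The coordinates are simply the digits of the offset written in base
+|nv|; for instance, with |nv| equal to $3$ the coordinates $(1,0,2)$
+give the offset $1\cdot 9+0\cdot 3+2=11$.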
+
+@<|UTensor::getOffset| code 1@>=
+int UTensor::getOffset(const IntSequence& v, int nv)
+{
+	int pow = 1;
+	int res = 0;
+	for (int i = v.size()-1; i >= 0; i--) {
+		res += v[i]*pow;
+		pow *= nv;
+	}
+	return res;
+}
+
+@ Also easy.
+@<|UTensor::getOffset| code 2@>=
+int UTensor::getOffset(const IntSequence& v, const IntSequence& nvmx)
+{
+	int pow = 1;
+	int res = 0;
+	for (int i = v.size()-1; i >= 0; i--) {
+		res += v[i]*pow;
+		pow *= nvmx[i];
+	}
+	return res;
+}
+
+ 
+@ Decrementing the coordinates of a folded index is not that easy. Note
+that if a trailing part of the coordinates is, for instance, $(b, a, a, a)$
+with $b<a$, then the preceding coordinates are $(b, a-1, n-1,
+n-1)$, where $n$ is the number of variables |nv|. So we find the
+leftmost element of the trailing run of equal elements, decrease it by
+one, and then set all elements to its right to $n-1$.
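+For instance, with |nv| equal to $4$, decrementing $(1,3,3)$ gives
+$(1,2,3)$, in agreement with the listing of folded indices in the next
+section.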
+
+@<|FTensor::decrement| code@>=
+void FTensor::decrement(IntSequence& v, int nv)
+{
+	int i = v.size()-1;
+	while (i > 0 && v[i-1]==v[i])
+		i--;
+	v[i]--;
+	for (int j = i+1; j < v.size(); j++)
+		v[j] = nv-1;
+}
+
+@ This calculates the order (offset) of the given index within our
+ordering of indices. In order to understand how it works, let us take
+the number of variables $n$ and the dimension $k$, and write down all
+possible indices in our ordering. For example, for $n=4$ and
+$k=3$ the sequence looks as follows:
+
+\def\tr#1#2#3{\hbox{\rlap{#1}\hskip 0.7em\rlap{#2}\hskip 0.7em\rlap{#3}\hskip 0.7em}}
+\halign{\tabskip=3em \hskip2cm #&#&#&#\cr
+\tr 000 &\tr 111 &\tr 222 &\tr 333\cr
+\tr 001 &\tr 112 &\tr 223 \cr
+\tr 002 &\tr 113 &\tr 233 \cr 
+\tr 003 &\tr 122 \cr
+\tr 011 &\tr 123\cr
+\tr 012 &\tr 133\cr
+\tr 013\cr
+\tr 022\cr
+\tr 023\cr
+\tr 033\cr
+}
+
+Now observe that the number of sequences starting with zero is the same
+as the total number of sequences with the same number of variables but
+with dimension smaller by one. More generally, if $S_{n,k}$ denotes the number
+of indices of $n$ variables and dimension $k$, then the number of
+indices beginning with $m$ is exactly $S_{n-m,k-1}$. This is because $m$
+can be subtracted from all items, and we obtain a sequence of indices of
+$n-m$ variables. So we have the formula:
+$$S_{n,k}=S_{n,k-1}+S_{n-1,k-1}+\ldots+S_{1,k-1}$$
+
+Now it is easy to calculate the offset of an index of the form
+$(m,\ldots,m)$. It is the number of all indices above it, that is
+$S_{n,k-1}+\ldots+S_{n-m+1,k-1}$, which equals $S_{n,k}-S_{n-m,k}$. We
+know that $S_{n,k}=\pmatrix{n+k-1\cr k}$. Using the above formula, we
+can therefore calculate the offset of $(m,\ldots,m)$ as
+$$\pmatrix{n+k-1\cr k}-\pmatrix{n-m+k-1\cr k}$$
+
+The offset of a general index $(m_1,m_2,\ldots,m_k)$ is calculated
+recursively, since it is the offset of $(m_1,\ldots,m_1)$ for $n$
+variables plus the offset of $(m_2-m_1,m_3-m_1,\ldots,m_k-m_1)$ for
+$n-m_1$ variables.
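+As a check against the table above (where $n=4$ and $k=3$), take the
+index $(0,1,3)$, which is the seventh in the listing and hence has
+offset $6$. The recursion gives $\pmatrix{6\cr 3}-\pmatrix{6\cr 3}=0$
+for the leading $0$, then $\pmatrix{5\cr 2}-\pmatrix{4\cr 2}=4$ for
+$(1,3)$ in $4$ variables, and finally $\pmatrix{3\cr 1}-\pmatrix{1\cr 1}=2$
+for $(2)$ in $3$ variables, which sums to $0+4+2=6$ as expected.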
+
+@<|FTensor::getOffsetRecurse| code@>=
+int FTensor::getOffsetRecurse(IntSequence& v, int nv)
+{
+	if (v.size() == 0) return 0;
+	int prefix = v.getPrefixLength();
+	int m = v[0];
+	int k = v.size();
+	int s1 = noverk(nv+k-1,k) - noverk(nv-m+k-1,k);
+	IntSequence subv(v, prefix, k);
+	subv.add(-m);
+	int s2 = getOffsetRecurse(subv, nv-m);
+	return s1+s2;
+}
+
+@ End of {\tt tensor.cpp} file.
diff --git a/dynare++/tl/cc/tensor.hweb b/dynare++/tl/cc/tensor.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..0594a23b7ab28cead51475b5061f514be8d26565
--- /dev/null
+++ b/dynare++/tl/cc/tensor.hweb
@@ -0,0 +1,252 @@
+@q $Id: tensor.hweb 741 2006-05-09 11:12:46Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@*2 Tensor concept. Start of {\tt tensor.h} file.
+
+Here we define a tensor class. A tensor is a mathematical object
+corresponding to an $(n+1)$-dimensional array. An element of such an array
+is denoted $[B]_{\alpha_1\ldots\alpha_n}^\beta$, where $\beta$ is a
+special index and $\alpha_1\ldots\alpha_n$ are the other indices. The
+class |Tensor| and its subclasses view such an array as a 2D matrix,
+where $\beta$ corresponds to one dimension, and
+$\alpha_1\ldots\alpha_n$ unfold to the other dimension. Whether
+$\beta$ corresponds to rows or columns is decided by the tensor subclasses;
+however, most of our tensors will have rows indexed by $\beta$, and
+$\alpha_1\ldots\alpha_n$ will unfold column-wise.
+
+There might be some symmetries in the tensor data. For instance, if
+the elements are unchanged when $\alpha_1$ is interchanged with
+$\alpha_3$, for all possible values of the remaining $\alpha_i$ and of
+$\beta$, then there is a symmetry of $\alpha_1$ and $\alpha_3$.
+
+For any symmetry, there are basically two possible storages of the
+data. The first is unfolded storage, which stores all elements
+regardless of the symmetry. The other storage type is folded, which
+stores only the elements which do not repeat. We declare abstract classes
+for the unfolded tensor and the folded tensor.
+
+Here we also define the concept of a tensor index, which is the
+$n$-tuple $\alpha_1\ldots\alpha_n$. It is an iterator whose movement
+depends on the symmetry and storage of the underlying tensor.
+
+Although we do not decide about possible symmetries at this point, it
+is worth noting that we implement two kinds of symmetries. The first
+one is a full symmetry where all indices are interchangeable. The
+second one is a generalization of the first: we define a tensor of a
+symmetry where there are a few groups of indices interchangeable
+within a group but not across groups. Moreover, the groups are required to be
+contiguous partitions of the index $n$-tuple. That is, we do not allow
+$\alpha_1$ to be interchangeable with $\alpha_3$ and not with $\alpha_2$
+at the same time.
+
+However, some intermediate results are, in fact, tensors of a symmetry
+not fitting our concept. We develop the tensor abstraction for them,
+but these objects are not used very often. They have limited usage
+due to their specialized constructors.
+
+@c
+
+#ifndef TENSOR_H
+#define TENSOR_H
+
+#include "int_sequence.h"
+#include "twod_matrix.h"
+
+@<index class definition@>;
+@<|Tensor| class declaration@>;
+@<|UTensor| class declaration@>;
+@<|FTensor| class declaration@>;
+
+#endif
+
+@ The index represents the $n$-tuple $\alpha_1\ldots\alpha_n$. Since its
+movement depends on the underlying tensor (its storage and
+symmetry), we maintain a pointer to that tensor, we maintain the
+$n$-tuple (or coordinates) as an |IntSequence|, and we also maintain the
+offset number (column, or row) of the index in the tensor. The pointer
+is const, since we do not need to change data through the index.
+
+Here we require the |tensor| to implement the |increment| and |decrement|
+methods, which calculate the following and preceding $n$-tuples. Also, we
+need to calculate the offset number from given coordinates, so the
+tensor must implement the method |getOffset|. This method is used only in
+the construction of an index from given coordinates; once the index is
+created, the offset is incremented and decremented
+together with the index. The |getOffset| method can be relatively
+computationally complex; this must be kept in mind. Also we generally
+suppose that the $n$-tuple of all zeros has the first offset (first column
+or row).
+
+What follows is the definition of the index class. The only
+interesting point is |operator==|, which compares only the
+offsets, not the coordinates. This is useful since there
+can be more than one coordinate representation of the past-the-end
+index.
+
+@s _Tptr int
+@s _Self int
+
+@<index class definition@>=
+template<class _Tptr> class _index {
+	typedef _index<_Tptr> _Self;
+	_Tptr tensor;
+	int offset;
+	IntSequence coor;
+public:@;
+	_index(_Tptr t, int n) 
+		: tensor(t), offset(0), coor(n, 0)@+ {}
+	_index(_Tptr t, const IntSequence& cr, int c)
+		: tensor(t), offset(c), coor(cr)@+ {}
+	_index(_Tptr t, const IntSequence& cr)
+		: tensor(t), offset(tensor->getOffset(cr)), coor(cr)@+ {}
+	_index(const _index& ind)
+		: tensor(ind.tensor), offset(ind.offset), coor(ind.coor)@+ {}
+	const _Self& operator=(const _Self& in)
+		{@+ tensor = in.tensor;@+ offset = in.offset;@+ coor = in.coor;
+		return *this;@+}
+	_Self& operator++()
+		{@+ tensor->increment(coor);@+ offset++;@+ return *this;@+}
+	_Self& operator--()
+		{@+ tensor->decrement(coor);@+ offset--;@+ return *this;@+}
+	int operator*() const
+		{@+ return offset;@+}
+	bool operator==(const _index& n) const
+		{@+ return offset == n.offset;@+}
+	bool operator!=(const _index& n) const
+		{@+ return offset != n.offset;@+}
+	const IntSequence& getCoor() const
+		{@+ return coor;@+}
+	void print() const
+		{@+ printf("%4d: ", offset);@+  coor.print();@+}
+};
+
+@ Here is the |Tensor| class, which is nothing else than a simple subclass
+of |TwoDMatrix|. The only semantically new member is |dim|, which is the tensor
+dimension (the length of $\alpha_1\ldots\alpha_n$). We also declare the
+|increment|, |decrement| and |getOffset| methods as pure virtual.
+
+We also add members for the index begin and the index end. This is useful,
+since the |begin| and |end| methods then return only references rather than
+new instances, which avoids making an additional copy of the index (for example
+in loops testing |in != end()|, which would otherwise copy the index in each
+cycle). The index begin |in_beg| is constructed as a sequence of all
+zeros, and |in_end| is constructed from the sequence |last| passed to
+the constructor, since it depends on the subclass. Also we have to say
+along which dimension (rows or columns) the multidimensional index runs;
+this is used only for the initialization of |in_end|.
+
+Also, we declare static auxiliary functions: |noverk| for $\pmatrix{n\cr k}$
+and |power| for $a^b$.
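+As a small usage sketch (with |t| standing for any concrete tensor, for
+instance an |FGSTensor|), the typical loop over all columns of a tensor
+looks like this:
+
+	double sum = 0.0;
+	for (Tensor::index it = t.begin(); it != t.end(); ++it)
+		sum += t.get(0, *it);  // *it is the column offset of the n-tuple it.getCoor()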
+
+@s indor int
+
+@<|Tensor| class declaration@>=
+class Tensor : public TwoDMatrix {
+public:@;
+	enum indor {along_row, along_col};
+	typedef _index<const Tensor*> index;
+protected:@;
+	const index in_beg;
+	const index in_end;
+	int dim;
+public:@;
+	Tensor(indor io, const IntSequence& last, int r, int c, int d)
+		: TwoDMatrix(r, c),
+		  in_beg(this, d),
+		  in_end(this, last, (io == along_row)? r:c),
+		  dim(d)@+ {}
+	Tensor(indor io, const IntSequence& first, const IntSequence& last,
+		   int r, int c, int d)
+		: TwoDMatrix(r, c),
+		  in_beg(this, first, 0),
+		  in_end(this, last, (io == along_row)? r:c),
+		  dim(d)@+ {}
+	Tensor(int first_row, int num, Tensor& t)
+		: TwoDMatrix(first_row, num, t),
+		  in_beg(t.in_beg),
+		  in_end(t.in_end),
+		  dim(t.dim)@+ {}
+	Tensor(const Tensor& t)
+		: TwoDMatrix(t),
+		  in_beg(this, t.in_beg.getCoor(), *(t.in_beg)),
+		  in_end(this, t.in_end.getCoor(), *(t.in_end)),
+		  dim(t.dim)@+ {}
+	virtual ~Tensor()@+ {}
+	virtual void increment(IntSequence& v) const =0;
+	virtual void decrement(IntSequence& v) const =0;
+	virtual int getOffset(const IntSequence& v) const =0;
+	int dimen() const
+		{@+ return dim;@+}
+
+	const index& begin() const
+		{@+ return in_beg;@+}
+	const index& end() const
+		{@+ return in_end;@+}
+
+	static int noverk(int n, int k);
+	static int power(int a, int b);
+	static int noverseq(const IntSequence& s)
+		{
+			IntSequence seq(s);
+			return noverseq_ip(seq);
+		}
+private:@;
+	static int noverseq_ip(IntSequence& s);
+};
+
+@ Here is an abstraction for an unfolded tensor. We provide a pure
+virtual method |fold| which returns a new instance of the folded tensor of
+the same symmetry. Also we provide static methods for incrementing and
+decrementing an index with full symmetry and with general symmetry as
+defined above.
+
+@<|UTensor| class declaration@>=
+class FTensor;
+class UTensor : public Tensor {
+public:@;
+	UTensor(indor io, const IntSequence& last, int r, int c, int d)
+		: Tensor(io, last, r, c, d)@+ {}
+	UTensor(const UTensor& ut)
+		: Tensor(ut)@+ {}
+	UTensor(int first_row, int num, UTensor& t)
+		: Tensor(first_row, num, t)@+ {}
+	virtual ~UTensor()@+ {}
+	virtual FTensor& fold() const =0;
+
+	static void increment(IntSequence& v, int nv);
+	static void decrement(IntSequence& v, int nv);
+	static void increment(IntSequence& v, const IntSequence& nvmx);
+	static void decrement(IntSequence& v, const IntSequence& nvmx);
+	static int getOffset(const IntSequence& v, int nv);
+	static int getOffset(const IntSequence& v, const IntSequence& nvmx);
+};
+
+@ This is an abstraction for a folded tensor. It provides a method
+|unfold|, which returns the unfolded version of the same symmetry.
+
+We also provide static methods for decrementing an |IntSequence| in
+the folded fashion and for calculating the offset of a given
+|IntSequence|. However, the offset calculation is relatively complex,
+so it should be avoided if possible.
+
+@<|FTensor| class declaration@>=
+class FTensor : public Tensor {
+public:@;
+	FTensor(indor io, const IntSequence& last, int r, int c, int d)
+		: Tensor(io, last, r, c, d)@+ {}
+	FTensor(const FTensor& ft)
+		: Tensor(ft)@+ {}
+	FTensor(int first_row, int num, FTensor& t)
+		: Tensor(first_row, num, t)@+ {}
+	virtual ~FTensor()@+ {}
+	virtual UTensor& unfold() const =0;
+
+	static void decrement(IntSequence& v, int nv);
+	static int getOffset(const IntSequence& v, int nv)
+		{@+IntSequence vtmp(v);@+ return getOffsetRecurse(vtmp, nv);@+}
+private:@;
+	static int getOffsetRecurse(IntSequence& v, int nv);
+};
+
+@ End of {\tt tensor.h} file.
diff --git a/dynare++/tl/cc/tl_exception.hweb b/dynare++/tl/cc/tl_exception.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..731b10b7addd29a66c3ea8a1c408b2ab08f3e1a5
--- /dev/null
+++ b/dynare++/tl/cc/tl_exception.hweb
@@ -0,0 +1,79 @@
+@q $Id: tl_exception.hweb 332 2005-07-15 13:41:48Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@*2 Exception. Start of {\tt tl\_exception.h} file.
+
+Within the code we often check the state of variables, typically
+preconditions or postconditions. If the state is not as required, it
+is pointless to continue, since this means a fatal error in the
+algorithms. In this case we raise an exception which can be caught at
+some higher level. This header file defines a simple infrastructure
+for this.
+
+@s TLException int
+@c
+#ifndef TL_EXCEPTION_H
+#define TL_EXCEPTION_H
+
+#include <string.h>
+#include <stdio.h>
+
+@<body of tl\_exception header@>;
+
+#endif
+
+@ The basic idea of raising an exception if some condition fails is
+that the condition is checked only when required. We define a global
+|TL_DEBUG| macro, an integer which says how many debug messages
+the program has to emit. We also define |TL_DEBUG_EXCEPTION|, which
+says for which values of |TL_DEBUG| we check the conditions of
+the exceptions. If |TL_DEBUG| is equal to or higher than
+|TL_DEBUG_EXCEPTION|, the exception conditions are checked.
+
+We define the |TL_RAISE| and |TL_RAISE_IF| macros, which throw an instance
+of |TLException| if |TL_DEBUG >= TL_DEBUG_EXCEPTION|. The first is an
+unconditional throw, the second is conditioned on a given
+expression. Note that if |TL_DEBUG < TL_DEBUG_EXCEPTION| then the code
+is compiled but the evaluation of the condition is skipped. If the code is
+optimized, the optimizer also removes the comparison of |TL_DEBUG| with
+|TL_DEBUG_EXCEPTION| (I hope).
+
+We provide default values for |TL_DEBUG| and |TL_DEBUG_EXCEPTION|.
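+As an illustration only (the function and the message are made up), a
+caller enabling the checks and catching the exception could look like
+this:
+
+	#define TL_DEBUG 2              // turn the checks on before including the header
+	#include "tl_exception.h"
+
+	void check_dim(int dim)
+	{
+		TL_RAISE_IF(dim < 0, "Negative dimension");
+	}
+
+	int main()
+	{
+		try {
+			check_dim(-1);
+		} catch (const TLException& e) {
+			e.print();              // prints the file, line and message
+			return 1;
+		}
+		return 0;
+	}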
+
+@<body of tl\_exception header@>=
+#ifndef TL_DEBUG_EXCEPTION
+#define TL_DEBUG_EXCEPTION 1
+#endif
+
+#ifndef TL_DEBUG
+#define TL_DEBUG 0
+#endif
+
+#define TL_RAISE(mes) \
+if (TL_DEBUG >= TL_DEBUG_EXCEPTION) throw TLException(__FILE__, __LINE__, mes);
+
+#define TL_RAISE_IF(expr, mes) \
+if (TL_DEBUG >= TL_DEBUG_EXCEPTION && (expr)) throw TLException(__FILE__, __LINE__, mes);
+
+@<|TLException| class definition@>;
+
+@ Primitive exception class containing file name, line number and message.
+@<|TLException| class definition@>=
+class TLException {
+	char fname[50];
+	int lnum;
+	char message[500];
+public:@;
+	TLException(const char* f, int l, const char* mes)
+		{
+			strncpy(fname, f, 50);@+ fname[49] = '\0';
+			strncpy(message, mes, 500);@+ message[499] = '\0';
+			lnum = l;
+		}
+	virtual ~TLException()@+ {}
+	virtual void print() const
+		{@+ printf("At %s:%d:%s\n", fname, lnum, message);@+}
+};
+
+
+@ End of {\tt tl\_exception.h} file.
diff --git a/dynare++/tl/cc/tl_static.cweb b/dynare++/tl/cc/tl_static.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..7787154657845995faf6592317009fc5fa28ecc8
--- /dev/null
+++ b/dynare++/tl/cc/tl_static.cweb
@@ -0,0 +1,89 @@
+@q $Id: tl_static.cweb 200 2005-05-12 12:28:19Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@ Start of {\tt tl\_static.cpp} file.
+@c
+#include "tl_static.h"
+#include "tl_exception.h"
+
+TLStatic tls;
+@<|TLStatic| methods@>;
+@<|PascalTriangle| constructor code@>;
+@<|PascalTriangle::noverk| code@>;
+
+@ Note that we allow repeated calls of |init|. This is not normal usage;
+the only purpose of allowing it is the test suite.
+
+@<|TLStatic| methods@>=
+TLStatic::TLStatic()
+{
+	ebundle = NULL;
+	pbundle = NULL;
+	ptriang = NULL;
+}
+
+TLStatic::~TLStatic()
+{
+	if (ebundle)
+		delete ebundle;
+	if (pbundle)
+		delete pbundle;
+	if (ptriang)
+		delete ptriang;
+}
+
+void TLStatic::init(int dim, int nvar)
+{
+	if (ebundle)
+		ebundle->generateUpTo(dim);
+	else
+		ebundle = new EquivalenceBundle(dim);
+
+	if (pbundle)
+		pbundle->generateUpTo(dim);
+	else
+		pbundle = new PermutationBundle(dim);
+
+	if (ptriang)
+		delete ptriang;
+	ptriang = new PascalTriangle(nvar, dim);
+}
+
+@ The coefficients are stored in |data| row by row, where a row holds
+the coefficients with the same $k$.
+
+We first initialize the first row (for $k=0$) with ones. Then for each
+further row we initialize the first item to one, and every other item
+is a sum of two coefficients with the upper index smaller by one,
+which in the code is |i+j-1|.
+
+@<|PascalTriangle| constructor code@>=
+PascalTriangle::PascalTriangle(int n, int k)
+	: data(new int[(n+1)*(k+1)]), kmax(k), nmax(n)
+{
+	for (int i = 0; i <= n; i++)
+		data[i] = 1;
+	for (int j = 1; j <= k; j++) {
+		data[j*(nmax+1)] = 1;
+		for (int i = 1; i <= n; i++)
+			data[j*(nmax+1)+i] = noverk(i+j-1,j) + noverk(i+j-1,j-1);
+	}
+}
+
+@ Recall that there are |nmax+1| items in a row, and that the item at
+position $i$ of row $j$ stores $\pmatrix{i+j\cr j}$. Hence
+$\pmatrix{n\cr k}$ is read at position $n-k$ of row $k$, or, using the
+symmetry of the binomial coefficient, at position $k$ of row $n-k$.
+@<|PascalTriangle::noverk| code@>=
+int PascalTriangle::noverk(int n, int k) const
+{
+	TL_RAISE_IF(k > n || n < 0,
+				"Wrong arguments for PascalTriangle::noverk");
+
+	if (k <= kmax && n-k <= nmax)
+		return data[k*(nmax+1)+n-k];
+
+	if (n-k <= kmax && k <= nmax)
+		return data[(n-k)*(nmax+1)+k];
+
+	TL_RAISE("n or k out of range in PascalTriangle::noverk");
+	return 0;
+}
+
+@ End of {\tt tl\_static.cpp} file.
diff --git a/dynare++/tl/cc/tl_static.hweb b/dynare++/tl/cc/tl_static.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..83d37004d7212f8490ecd5ced0fc13c68280fe4a
--- /dev/null
+++ b/dynare++/tl/cc/tl_static.hweb
@@ -0,0 +1,62 @@
+@q $Id: tl_static.hweb 148 2005-04-19 15:12:26Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@*2 Tensor library static data. Start of {\tt tl\_static.h} file.
+
+The purpose of this file is to make a unique static variable which
+contains all other static variables and is responsible for their
+correct initialization and destruction. The variables include an
+equivalence bundle, a permutation bundle, and a Pascal triangle for
+binomial coefficients. They depend on the dimension of the problem and
+the maximum number of variables.
+
+So we declare a static |tls| variable of type |TLStatic| encapsulating
+these variables. The |tls| must be initialized at the beginning of
+the program, as soon as the dimension and the number of variables are known.
+
+Also we define a class for the Pascal triangle.
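+For illustration (a minimal sketch with made-up sizes), a program
+working with tensors of dimension up to $3$ in $10$ variables would
+start with:
+
+	#include "tl_static.h"
+
+	int main()
+	{
+		tls.init(3, 10);  // maximum dimension and number of variables
+		// the equivalence bundle, permutation bundle and Pascal triangle are now ready
+		return 0;
+	}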
+
+@c
+#ifndef TL_STATIC_H
+#define TL_STATIC_H
+
+#include "equivalence.h"
+#include "permutation.h"
+
+@<|PascalTriangle| class declaration@>;
+@<|TLStatic| class declaration@>;
+extern TLStatic tls;
+
+#endif
+
+@ The Pascal triangle is a storage for binomial coefficients. In the
+|data| array we store the coefficients of the rectangle starting at $\pmatrix{0\cr
+0}$ and ending at $\pmatrix{nmax+kmax\cr kmax}$.
+
+@<|PascalTriangle| class declaration@>=
+class PascalTriangle {
+	int* data;
+	int kmax;
+	int nmax;
+public:@;
+	PascalTriangle(int n, int k);
+	~PascalTriangle()
+		{@+ delete [] data;@+}
+	int noverk(int n, int k) const;
+};
+
+
+@  
+@<|TLStatic| class declaration@>=
+struct TLStatic {
+	EquivalenceBundle* ebundle;
+	PermutationBundle* pbundle;
+	PascalTriangle* ptriang;
+
+	TLStatic();
+	~TLStatic();
+	void init(int dim, int nvar);
+};
+
+
+@ End of {\tt tl\_static.h} file.
diff --git a/dynare++/tl/cc/twod_matrix.cweb b/dynare++/tl/cc/twod_matrix.cweb
new file mode 100644
index 0000000000000000000000000000000000000000..f7e1eb3c8a3d1981614e8de891869da2e422dc42
--- /dev/null
+++ b/dynare++/tl/cc/twod_matrix.cweb
@@ -0,0 +1,137 @@
+@q $Id: twod_matrix.cweb 148 2005-04-19 15:12:26Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@ Start of {\tt twod\_matrix.cpp} file.
+@c
+#include "twod_matrix.h"
+#include "tl_exception.h"
+
+
+@<|ConstTwoDMatrix| constructors@>;
+@<|ConstTwoDMatrix::writeMat4| code@>;
+@<|TwoDMatrix| row methods code@>;
+@<|TwoDMatrix| column methods code@>;
+@<|TwoDMatrix::save| code@>;
+@<|Mat4Header| constructor 1 code@>;
+@<|Mat4Header| constructor 2 code@>;
+@<|Mat4Header::write| code@>;
+
+@ 
+@<|ConstTwoDMatrix| constructors@>=
+ConstTwoDMatrix::ConstTwoDMatrix(const TwoDMatrix& m)
+	: ConstGeneralMatrix(m)@+ {}
+
+ConstTwoDMatrix::ConstTwoDMatrix(const TwoDMatrix& m, int first_col, int num)
+	: ConstGeneralMatrix(m, 0, first_col, m.nrows(), num)@+ {}
+
+ConstTwoDMatrix::ConstTwoDMatrix(const ConstTwoDMatrix& m, int first_col, int num)
+	: ConstGeneralMatrix(m, 0, first_col, m.nrows(), num)@+ {}
+
+ConstTwoDMatrix::ConstTwoDMatrix(int first_row, int num, const TwoDMatrix& m)
+	: ConstGeneralMatrix(m, first_row, 0, num, m.ncols())@+ {}
+
+ConstTwoDMatrix::ConstTwoDMatrix(int first_row, int num, const ConstTwoDMatrix& m)
+	: ConstGeneralMatrix(m, first_row, 0, num, m.ncols())@+ {}
+
+@ 
+@<|ConstTwoDMatrix::writeMat4| code@>=
+void ConstTwoDMatrix::writeMat4(FILE* fd, const char* vname) const
+{
+	Mat4Header header(*this, vname);
+	header.write(fd);
+	for (int j = 0; j < ncols(); j++)
+		for (int i = 0; i < nrows(); i++)
+			fwrite(&(get(i,j)), sizeof(double), 1, fd);
+}
+
+@ 
+@<|TwoDMatrix| row methods code@>=
+void TwoDMatrix::copyRow(int from, int to)
+{
+	if (from != to)
+		copyRow(ConstTwoDMatrix(*this), from, to);
+}
+
+void TwoDMatrix::copyRow(const ConstTwoDMatrix& m, int from, int to)
+{
+	ConstVector fr_row(from, m);
+	Vector to_row(to, *this);
+	to_row = fr_row;
+}
+
+void TwoDMatrix::addRow(double d, const ConstTwoDMatrix& m, int from, int to)
+{
+	ConstVector fr_row(from, m);
+	Vector to_row(to, *this);
+	to_row.add(d, fr_row);
+}
+
+
+@ 
+@<|TwoDMatrix| column methods code@>=
+void TwoDMatrix::copyColumn(int from, int to)
+{
+	if (from != to)
+		copyColumn(ConstTwoDMatrix(*this), from, to);
+}
+
+void TwoDMatrix::copyColumn(const ConstTwoDMatrix& m, int from, int to)
+{
+	ConstVector fr_col(m, from);
+	Vector to_col(*this, to);
+	to_col = fr_col;
+}
+
+void TwoDMatrix::addColumn(double d, const ConstTwoDMatrix& m, int from, int to)
+{
+	ConstVector fr_col(m, from);
+	Vector to_col(*this, to);
+	to_col.add(d, fr_col);
+}
+
+@ 
+@<|TwoDMatrix::save| code@>=
+void TwoDMatrix::save(const char* fname) const
+{
+	FILE* fd;
+	if (NULL==(fd = fopen(fname,"w"))) {
+		TL_RAISE("Cannot open file for writing in TwoDMatrix::save");
+	}
+	for (int row = 0; row < nrows(); row++) {
+		for (int col = 0; col < ncols(); col++)
+			fprintf(fd, " %20.10g", get(row, col));
+		fprintf(fd, "\n");
+	}
+	fclose(fd);	
+}
+
+@ This constructs a MAT-4 header for a little-endian dense real double matrix.
+@<|Mat4Header| constructor 1 code@>=
+Mat4Header::Mat4Header(const ConstTwoDMatrix& m, const char* vn)
+	: type(0), rows(m.nrows()), cols(m.ncols()), imagf(0), namelen(strlen(vn)+1),
+	  vname(vn)
+{}
+
+
+@ This constructs a MAT-4 header for a text matrix.
+@<|Mat4Header| constructor 2 code@>=
+Mat4Header::Mat4Header(const ConstTwoDMatrix& m, const char* vn, const char* dummy)
+	: type(1), rows(m.nrows()), cols(m.ncols()), imagf(0), namelen(strlen(vn)+1),
+	  vname(vn)
+{}
+
+
+@ 
+@<|Mat4Header::write| code@>=
+void Mat4Header::write(FILE* fd) const
+{
+	fwrite(&type, sizeof(int), 1, fd);
+	fwrite(&rows, sizeof(int), 1, fd);
+	fwrite(&cols, sizeof(int), 1, fd);
+	fwrite(&imagf, sizeof(int), 1, fd);
+	fwrite(&namelen, sizeof(int), 1, fd);
+	fwrite(vname, 1, namelen, fd);
+}
+
+
+@ End of {\tt twod\_matrix.cpp} file.
\ No newline at end of file
diff --git a/dynare++/tl/cc/twod_matrix.hweb b/dynare++/tl/cc/twod_matrix.hweb
new file mode 100644
index 0000000000000000000000000000000000000000..57e07cdeb9f6c3777475d26d9ff22c6967124e87
--- /dev/null
+++ b/dynare++/tl/cc/twod_matrix.hweb
@@ -0,0 +1,157 @@
+@q $Id: twod_matrix.hweb 376 2005-07-21 15:48:05Z kamenik $ @>
+@q Copyright 2004, Ondra Kamenik @>
+
+@*2 Matrix interface. Start of {\tt twod\_matrix.h} file.
+
+Here we make an interface to the 2-dimensional matrix defined in the
+Sylvester module. That abstraction provides an interface to BLAS. The
+main purpose of this file is only to subclass it in order to
+keep the tensor library and the Sylvester module independent. So it
+mainly renames methods.
+
+Similarly to the Sylvester module, we declare two classes,
+|TwoDMatrix| and |ConstTwoDMatrix|. The only purpose of the latter is
+to allow submatrix construction from const reference arguments.
+
+@s GeneralMatrix int
+@s ConstGeneralMatrix int
+@s Vector int
+@s ConstVector int
+@s TwoDMatrix int
+@s ConstTwoDMatrix int
+
+@c
+#ifndef TWOD_MATRIX_H
+#define TWOD_MATRIX_H
+
+#include "GeneralMatrix.h"
+
+#include <stdio.h>
+
+class TwoDMatrix;
+@<|ConstTwoDMatrix| class declaration@>;
+@<|TwoDMatrix| class declaration@>;
+@<|Mat4Header| class declaration@>;
+
+#endif
+
+
+@ We make two obvious constructors, and then constructors making
+submatrices of consecutive columns or rows. We also rename
+|GeneralMatrix::numRows()| and |GeneralMatrix::numCols()|.
+
+@<|ConstTwoDMatrix| class declaration@>=
+class ConstTwoDMatrix : public ConstGeneralMatrix {
+public:@/
+	ConstTwoDMatrix(int m, int n, const double* d)
+		: ConstGeneralMatrix(d, m, n)@+ {}@;
+	ConstTwoDMatrix(const TwoDMatrix& m);
+	ConstTwoDMatrix(const TwoDMatrix& m, int first_col, int num);
+	ConstTwoDMatrix(const ConstTwoDMatrix& m, int first_col, int num);
+	ConstTwoDMatrix(int first_row, int num, const TwoDMatrix& m);
+	ConstTwoDMatrix(int first_row, int num, const ConstTwoDMatrix& m);
+	ConstTwoDMatrix(const ConstTwoDMatrix& m, int first_row, int first_col, int rows, int cols)
+		: ConstGeneralMatrix(m, first_row, first_col, rows, cols)@+ {}
+	virtual ~ConstTwoDMatrix()@+ {}
+@#
+	int nrows() const
+		{@+ return numRows();@+}
+	int ncols() const
+		{@+ return numCols();@+}
+	void writeMat4(FILE* fd, const char* vname) const;
+};
+
+@ Here we do the same as for |ConstTwoDMatrix|, plus we define
+methods for copying and adding rows and columns.
+
+We also have a |save| method which dumps the matrix to a file with a
+given name; the file can be read by the Scilab {\tt fscanfMat} function.
+
+@<|TwoDMatrix| class declaration@>=
+class TwoDMatrix : public GeneralMatrix {
+public:@/
+	TwoDMatrix(int r, int c)
+		: GeneralMatrix(r, c)@+ {}@;
+	TwoDMatrix(int r, int c, double* d)
+		: GeneralMatrix(d, r, c)@+ {}@;
+	TwoDMatrix(int r, int c, const double* d)
+		: GeneralMatrix(d, r, c)@+ {}@;
+	TwoDMatrix(const GeneralMatrix& m)
+		: GeneralMatrix(m)@+ {}@;
+	TwoDMatrix(const GeneralMatrix& m, char* dummy)
+		: GeneralMatrix(m, dummy)@+ {}@;
+	TwoDMatrix(const TwoDMatrix& m, int first_col, int num)
+		: GeneralMatrix(m, 0, first_col, m.numRows(), num)@+ {}@;
+	TwoDMatrix(TwoDMatrix& m, int first_col, int num)
+		: GeneralMatrix(m, 0, first_col, m.numRows(), num)@+ {}@;
+	TwoDMatrix(int first_row, int num, const TwoDMatrix& m)
+		: GeneralMatrix(m, first_row, 0, num, m.ncols())@+ {}
+	TwoDMatrix(int first_row, int num, TwoDMatrix& m)
+		: GeneralMatrix(m, first_row, 0, num, m.ncols())@+ {} 
+	TwoDMatrix(TwoDMatrix& m, int first_row, int first_col, int rows, int cols)
+		: GeneralMatrix(m, first_row, first_col, rows, cols)@+ {}
+	TwoDMatrix(const TwoDMatrix& m, int first_row, int first_col, int rows, int cols)
+		: GeneralMatrix(m, first_row, first_col, rows, cols)@+ {}
+	TwoDMatrix(const ConstTwoDMatrix& a, const ConstTwoDMatrix& b)
+		: GeneralMatrix(a, b)@+ {}
+	virtual ~TwoDMatrix()@+ {}
+@#
+	int nrows() const
+		{@+ return numRows();@+}
+	int ncols() const
+		{@+ return numCols();@+}
+@#
+	@<|TwoDMatrix| row methods declarations@>;
+	@<|TwoDMatrix| column methods declarations@>;
+	void save(const char* fname) const;
+	void writeMat4(FILE* fd, const char* vname) const
+		{@+ ConstTwoDMatrix(*this).writeMat4(fd, vname);@+}
+};
+
+@ 
+@<|TwoDMatrix| row methods declarations@>=
+	void copyRow(int from, int to);
+	void copyRow(const ConstTwoDMatrix& m, int from, int to);
+	void copyRow(const TwoDMatrix& m, int from, int to)
+		{@+ copyRow(ConstTwoDMatrix(m), from, to);@+}
+	void addRow(const ConstTwoDMatrix& m, int from, int to)
+		{@+ addRow(1.0, m, from, to);@+}
+	void addRow(const TwoDMatrix& m, int from, int to)
+		{@+ addRow(1.0, ConstTwoDMatrix(m), from, to);@+}
+	void addRow(double d, const ConstTwoDMatrix& m, int from, int to);
+	void addRow(double d, const TwoDMatrix& m, int from, int to)
+		{@+ addRow(d, ConstTwoDMatrix(m), from, to);@+}
+
+
+@ 
+@<|TwoDMatrix| column methods declarations@>=
+	void copyColumn(int from, int to);
+	void copyColumn(const ConstTwoDMatrix& m, int from, int to);
+	void copyColumn(const TwoDMatrix& m, int from, int to)
+		{@+ copyColumn(ConstTwoDMatrix(m), from, to);@+}
+	void addColumn(const ConstTwoDMatrix& m, int from, int to)
+		{@+ addColumn(1.0, ConstTwoDMatrix(m), from, to);@+}
+	void addColumn(const TwoDMatrix& m, int from, int to)
+		{@+ addColumn(1.0, ConstTwoDMatrix(m), from, to);@+}
+	void addColumn(double d, const ConstTwoDMatrix& m, int from, int to);
+	void addColumn(double d, const TwoDMatrix& m, int from, int to)
+		{@+ addColumn(d, ConstTwoDMatrix(m), from, to);@+}
+
+@ 
+@<|Mat4Header| class declaration@>=
+class Mat4Header {
+	int type;
+	int rows;
+	int cols;
+	int imagf;
+	int namelen;
+	const char* vname;
+public:@;
+	Mat4Header(const ConstTwoDMatrix& m, const char* vname);
+	Mat4Header(const ConstTwoDMatrix& m, const char* vname, const char* dummy);
+	void write(FILE* fd) const;
+};
+
+
+
+@ End of {\tt twod\_matrix.h} file.
\ No newline at end of file
diff --git a/dynare++/tl/testing/Makefile b/dynare++/tl/testing/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..40ce96a242d11a58ec0b6e53a6ce071babcc67bd
--- /dev/null
+++ b/dynare++/tl/testing/Makefile
@@ -0,0 +1,43 @@
+# $Id: Makefile 843 2006-07-28 08:54:19Z tamas $
+# Copyright 2004, Ondra Kamenik
+
+
+LD_LIBS := -llapack -lcblas -lf77blas -latlas -lg2c -lpthread
+CC_FLAGS := -Wall -I../cc -I../../sylv/cc
+ifeq ($(DEBUG),yes)
+	CC_FLAGS := $(CC_FLAGS) -g -DTL_DEBUG=2
+else
+	CC_FLAGS := $(CC_FLAGS) -O2 -DPOSIX_THREADS
+endif
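+# Typical invocations: `make tests' builds the optimized test binary,
+# `make DEBUG=yes tests' builds it with debugging symbols and the
+# tensor-library checks enabled.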
+
+matrix_interface := GeneralMatrix Vector SylvException 
+matobjs := $(patsubst %, ../../sylv/cc/%.o, $(matrix_interface))
+cwebsource := $(wildcard ../cc/*.cweb)
+cppsource := $(patsubst %.cweb,%.cpp,$(cwebsource)) 
+objects := $(patsubst %.cweb,%.o,$(cwebsource))
+hwebsource := $(wildcard ../cc/*.hweb)
+hsource := $(patsubst %.hweb,%.h,$(hwebsource))
+
+../cc/dummy.ch:
+	make -C ../cc dummy.ch
+
+../cc/%.cpp: ../cc/%.cweb ../cc/dummy.ch
+	make -C ../cc $*.cpp
+
+../cc/%.h: ../cc/%.hweb ../cc/dummy.ch
+	make -C ../cc $*.h
+
+../cc/%.o: ../cc/%.cpp $(hsource)
+	make -C ../cc $*.o
+
+%.o: %.cpp factory.h monoms.h $(hwebsource) $(hsource)
+	$(CC) $(CC_FLAGS) -c $*.cpp
+
+tests: $(hwebsource) $(cwebsource) $(hsource) $(cppsource) \
+       tests.o factory.o monoms.o $(objects) 
+	$(CC) $(CC_FLAGS) $(objects) $(matobjs) tests.o factory.o monoms.o -o tests $(LD_LIBS) 
+
+clear:
+	rm -f *.o
+	rm -f tests
+	make -C ../cc clear
diff --git a/dynare++/tl/testing/factory.cpp b/dynare++/tl/testing/factory.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3eca85bf6d0b44c8fd055e4770530a28dec31581
--- /dev/null
+++ b/dynare++/tl/testing/factory.cpp
@@ -0,0 +1,48 @@
+/* $Id: factory.cpp 148 2005-04-19 15:12:26Z kamenik $ */
+/* Copyright 2004, Ondra Kamenik */
+
+#include "factory.h"
+
+#include <math.h>
+
+void Factory::init(const Symmetry& s, const IntSequence& nvs)
+{
+	IntSequence sym(s);
+	long int seed = sym[0];
+	seed = 256*seed + nvs[0];
+	if (sym.size() > 1)
+		seed = 256*seed + sym[1];
+	if (nvs.size() > 1)
+		seed = 256*seed + nvs[0];
+	srand48(seed);
+}
+
+void Factory::init(int dim, int nv)
+{
+	long int seed = dim;
+	seed = 256*seed + nv;
+	srand48(seed);
+}
+
+double Factory::get() const
+{
+	return 1.0*(drand48()-0.5);
+}
+
+void Factory::fillMatrix(TwoDMatrix& m) const
+{
+	Vector& d = m.getData();
+	for (int i = 0; i < d.length(); i++)
+		d[i] = get();
+}
+
+Vector* Factory::makeVector(int n)
+{
+	init(n, n*n);
+
+	Vector* v = new Vector(n);
+	for (int i = 0; i < n; i++)
+		(*v)[i] = get();
+
+	return v;
+}
diff --git a/dynare++/tl/testing/factory.h b/dynare++/tl/testing/factory.h
new file mode 100644
index 0000000000000000000000000000000000000000..fea2230110c80709aecdcd01d7484b410a6f7bee
--- /dev/null
+++ b/dynare++/tl/testing/factory.h
@@ -0,0 +1,81 @@
+/* $Id: factory.h 148 2005-04-19 15:12:26Z kamenik $ */
+/* Copyright 2004, Ondra Kamenik */
+
+#ifndef FACTORY_H
+#define FACTORY_H 
+
+#include "symmetry.h"
+#include "int_sequence.h"
+#include "twod_matrix.h"
+#include "equivalence.h"
+#include "rfs_tensor.h"
+#include "t_container.h"
+
+class Factory {
+	void init(const Symmetry& s, const IntSequence& nvs);
+	void init(int dim, int nv);
+	void fillMatrix(TwoDMatrix& m) const;
+public:
+	double get() const;
+	// this can be used with UGSTensor, FGSTensor
+	template <class _Ttype>
+	_Ttype* make(int r, const Symmetry& s, const IntSequence& nvs)
+		{
+			_Ttype* res = new _Ttype(r, TensorDimens(s, nvs));
+			init(s, nvs);
+			fillMatrix(*res);
+			return res;
+		}
+
+	// this can be used with FFSTensor, UFSTensor, FRTensor, URTensor
+	template <class _Ttype>
+	_Ttype* make(int r, int nv, int dim)
+		{
+			_Ttype* res = new _Ttype(r, nv, dim);
+			init(dim, nv);
+			fillMatrix(*res);
+			return res;
+		}
+
+	template <class _Ttype, class _Ctype>
+	_Ctype* makeCont(int r, const IntSequence& nvs, int maxdim)
+		{
+			int symnum = nvs.size();
+			_Ctype* res = new _Ctype(symnum);
+			for (int dim = 1; dim <= maxdim; dim++) {
+				if (symnum == 1) {
+					// full symmetry
+					Symmetry sym(dim);
+					_Ttype* t = make<_Ttype>(r, sym, nvs);
+					res->insert(t);
+				} else {
+					// general symmetry
+					for (int i = 0; i <= dim; i++) {
+						Symmetry sym(i, dim-i);
+						_Ttype* t = make<_Ttype>(r, sym, nvs);
+						res->insert(t);
+					}
+				}
+			}
+			return res;
+		}
+
+	template <class _Ttype, class _Ptype>
+	_Ptype* makePoly(int r, int nv, int maxdim)
+		{
+			_Ptype* p = new _Ptype(r, nv);
+			for (int d = 1; d <= maxdim; d++) {
+				_Ttype* t = make<_Ttype>(r, nv, d);
+				p->insert(t);
+			}
+			return p;
+		}
+
+	Vector* makeVector(int n);
+};
+
+#endif
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/tl/testing/monoms.cpp b/dynare++/tl/testing/monoms.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b0b0961567ad8412f46b976ee0a720e6ceaf39d3
--- /dev/null
+++ b/dynare++/tl/testing/monoms.cpp
@@ -0,0 +1,490 @@
+/* $Id: monoms.cpp 148 2005-04-19 15:12:26Z kamenik $ */
+/* Copyright 2004, Ondra Kamenik */
+
+#include "monoms.h"
+#include "tl_exception.h"
+#include "fs_tensor.h"
+
+#include <math.h>
+#include <stdio.h>
+
+IntGenerator intgen;
+
+void IntGenerator::init(int nf, int ny, int nv, int nw, int nu,
+						int mx, double prob)
+{
+	maxim = mx;
+	probab = prob;
+	long int seed = nf;
+	seed = 256*seed + ny;
+	seed = 256*seed + nv;
+	seed = 256*seed + nw;
+	seed = 256*seed + nu;
+	srand48(seed);
+}
+
+int IntGenerator::get() const
+{
+	double d = drand48();
+	int num_inter = (int)( ((double)2*maxim)/(1.0-probab));
+	int num_zero_inter = num_inter - 2*maxim;
+	if (d < ((double)num_zero_inter)/num_inter)
+		return 0;
+	return (int)(d*num_inter)-num_zero_inter-maxim;
+}
+
+Monom::Monom(int len)
+	: IntSequence(len)
+{
+	for (int i = 0; i < len; i++)
+		operator[](i) = intgen.get();
+}
+
+Monom::Monom(int len, int item)
+	: IntSequence(len, item)
+{
+}
+
+double Monom::deriv(const IntSequence& vars) const
+{
+	double res = 1.0;
+	int first_same_i = 0;
+	for (int i = 0; i < vars.size(); i++) {
+		TL_RAISE_IF(vars[i] < 0 || vars[i] >= size(),
+					"Wrong variable index in Monom::deriv");
+		if (vars[i] != vars[first_same_i])
+			first_same_i = i;
+		int mult = operator[](vars[i]) - (i-first_same_i);
+		if (mult == 0)
+			return 0;
+		res *= mult;
+	}
+	return res;
+}
+
+void Monom::multiplyWith(int ex, const Monom& m)
+{
+	TL_RAISE_IF(size() != m.size(),
+				"Wrong sizes of monoms in Monom::multiplyWith");
+	if (ex == 0)
+		return;
+	for (int i = 0; i < size(); i++)
+		operator[](i) += m[i]*ex;
+}
+
+void Monom::print() const
+{
+	printf("[");
+	for (int i = 0; i < size(); i++)
+		printf("%3d", operator[](i));
+	printf("]");
+}
+
+Monom1Vector::Monom1Vector(int nxx, int l)
+	: nx(nxx), len(l), x(new (Monom*)[len])
+{
+	for (int i = 0; i < len; i++) {
+		x[i] = new Monom(nx);
+	}
+}
+
+Monom1Vector::~Monom1Vector()
+{
+	for (int i = 0; i < len; i++) {
+		delete x[i];
+	}
+	delete [] x;
+}
+
+void Monom1Vector::deriv(const IntSequence& c, Vector& out) const
+{
+	TL_RAISE_IF(out.length() != len,
+				"Wrong length of output vector in Monom1Vector::deriv");
+
+	for (int i = 0; i < len; i++) {
+		out[i] = x[i]->deriv(c);
+	}
+}
+
+
+FGSTensor* Monom1Vector::deriv(int dim) const
+{
+	FGSTensor* res =
+		new FGSTensor(len, TensorDimens(Symmetry(dim), IntSequence(1, nx)));
+	for (Tensor::index it = res->begin(); it != res->end(); ++it) {
+		Vector outcol(*res, *it);
+		deriv(it.getCoor(), outcol);
+	}
+	return res;
+}
+
+void Monom1Vector::print() const
+{
+	printf("Variables: x(%d)\n", nx);
+	printf("Rows: %d\n", len);
+	for (int i = 0; i < len; i++) {
+		printf("%2d: ", i);
+		x[i]->print();
+		printf("\n");
+	}
+}
+
+Monom2Vector::Monom2Vector(int nyy, int nuu, int l)
+	: ny(nyy), nu(nuu), len(l), y(new (Monom*)[len]), u(new (Monom*)[len])
+{
+	for (int i = 0; i < len; i++) {
+		y[i] = new Monom(ny);
+		u[i] = new Monom(nu);
+	}
+}
+
+Monom2Vector::Monom2Vector(const Monom1Vector& g, const Monom2Vector& xmon)
+	: ny(xmon.ny), nu(xmon.nu), len(g.len),
+	  y(new (Monom*)[len]), u(new (Monom*)[len])	
+{
+	TL_RAISE_IF(xmon.len != g.nx,
+				"Wrong number of x's in Monom2Vector constructor");
+
+	for (int i = 0; i < len; i++) {
+		y[i] = new Monom(ny, 0);
+		u[i] = new Monom(nu, 0);
+	}
+
+	for (int i = 0; i < len; i++) {
+		// multiply from xmon
+		for (int j = 0; j < g.nx; j++) {
+			int ex = g.x[i]->operator[](j);
+			y[i]->multiplyWith(ex, *(xmon.y[j]));
+			u[i]->multiplyWith(ex, *(xmon.u[j]));
+		}
+	}
+}
+
+Monom2Vector::~Monom2Vector()
+{
+	for (int i = 0; i < len; i++) {
+		delete y[i];
+		delete u[i];
+	}
+	delete [] y;
+	delete [] u;
+}
+
+void Monom2Vector::deriv(const Symmetry& s, const IntSequence& c,
+						 Vector& out) const
+{
+	TL_RAISE_IF(out.length() != len,
+				"Wrong length of output vector in Monom2Vector::deriv");
+	TL_RAISE_IF(s.num() != 2,
+				"Wrong symmetry for Monom2Vector::deriv");
+	TL_RAISE_IF(s.dimen() != c.size(),
+				"Incompatible symmetry and coordinates in Monom2Vector::deriv");
+	IntSequence cy(c, 0, s[0]);
+	IntSequence cu(c, s[0], s.dimen());
+	for (int i = 0; i < len; i++) {
+		out[i] = y[i]->deriv(cy) * u[i]->deriv(cu);
+	}
+}
+
+FGSTensor* Monom2Vector::deriv(const Symmetry& s) const
+{
+	IntSequence nvs(2); nvs[0] = ny; nvs[1] = nu;
+	FGSTensor* t = new FGSTensor(len, TensorDimens(s, nvs));
+	for (Tensor::index it = t->begin(); it != t->end(); ++it) {
+		Vector col(*t, *it);
+		deriv(s, it.getCoor(), col);
+	}
+	return t;
+}
+
+FGSContainer* Monom2Vector::deriv(int maxdim) const
+{
+	FGSContainer* res = new FGSContainer(2);
+	for (int dim = 1; dim <= maxdim; dim++) {
+		for (int ydim = 0; ydim <= dim; ydim++) {
+			int udim = dim - ydim;
+			Symmetry s(ydim, udim);
+			res->insert(deriv(s));
+		}
+	}
+	return res;
+}
+
+void Monom2Vector::print() const
+{
+	printf("Variables: y(%d) u(%d)\n", ny, nu);
+	printf("Rows: %d\n", len);
+	for (int i = 0; i < len; i++) {
+		printf("%2d: ", i);
+		y[i]->print();
+		printf("    ");
+		u[i]->print();
+		printf("\n");
+	}
+}
+
+Monom4Vector::~Monom4Vector()
+{
+	for (int i = 0; i < len; i++) {
+		delete x1[i];
+		delete x2[i];
+		delete x3[i];
+		delete x4[i];
+	}
+	delete [] x1;
+	delete [] x2;
+	delete [] x3;
+	delete [] x4;
+}
+
+void Monom4Vector::init_random()
+{
+	for (int i = 0; i < len; i++) {
+		x1[i] = new Monom(nx1);
+		x2[i] = new Monom(nx2);
+		x3[i] = new Monom(nx3);
+		x4[i] = new Monom(nx4);
+	}
+}
+
+Monom4Vector::Monom4Vector(int l, int ny, int nu)
+	: len(l), nx1(ny), nx2(nu), nx3(0), nx4(1),
+	  x1(new (Monom*)[len]),
+	  x2(new (Monom*)[len]),
+	  x3(new (Monom*)[len]),
+	  x4(new (Monom*)[len])	  
+{
+	init_random();
+}
+
+Monom4Vector::Monom4Vector(int l, int ny, int nu, int nup)
+	: len(l), nx1(ny), nx2(nu), nx3(nup), nx4(1),
+	  x1(new (Monom*)[len]),
+	  x2(new (Monom*)[len]),
+	  x3(new (Monom*)[len]),
+	  x4(new (Monom*)[len])
+{
+	init_random();
+}
+
+Monom4Vector::Monom4Vector(int l, int nbigg, int ng, int ny, int nu)
+	: len(l), nx1(nbigg), nx2(ng), nx3(ny), nx4(nu),
+	  x1(new (Monom*)[len]),
+	  x2(new (Monom*)[len]),
+	  x3(new (Monom*)[len]),
+	  x4(new (Monom*)[len])
+{
+	init_random();
+}
+
+Monom4Vector::Monom4Vector(const Monom4Vector& f, const Monom4Vector& bigg,
+						   const Monom4Vector& g)
+	: len(f.len), nx1(bigg.nx1), nx2(bigg.nx2), nx3(bigg.nx3), nx4(1),
+	  x1(new Monom*[len]),
+	  x2(new Monom*[len]),
+	  x3(new Monom*[len]),
+	  x4(new Monom*[len])
+{
+	TL_RAISE_IF(!(bigg.nx1 == g.nx1 && bigg.nx2 == g.nx2 && g.nx3 == 0 &&
+				  bigg.nx4 == 1 && g.nx4 == 1),
+				"Incompatible g with G");
+	TL_RAISE_IF(!(bigg.len == f.nx1 && g.len == f.nx2 &&
+				  bigg.nx1 == f.nx3 && bigg.nx2 == f.nx4),
+				"Incompatible g or G with f");
+
+	for (int i = 0; i < len; i++) {
+		x1[i] = new Monom(nx1, 0);
+		x2[i] = new Monom(nx2, 0);
+		x3[i] = new Monom(nx3, 0);
+		x4[i] = new Monom(nx4, 0);
+	}
+
+	for (int i = 0; i < len; i++) {
+		// multiply from G (first argument)
+		for (int j = 0; j < f.nx1; j++) {
+			int ex = f.x1[i]->operator[](j);
+			x1[i]->multiplyWith(ex, *(bigg.x1[j]));
+			x2[i]->multiplyWith(ex, *(bigg.x2[j]));
+			x3[i]->multiplyWith(ex, *(bigg.x3[j]));
+			x4[i]->multiplyWith(ex, *(bigg.x4[j]));
+		}
+		// multiply from g (second argument)
+		for (int j = 0; j < f.nx2; j++) {
+			int ex = f.x2[i]->operator[](j);
+			x1[i]->multiplyWith(ex, *(g.x1[j]));
+			x2[i]->multiplyWith(ex, *(g.x2[j]));
+			x4[i]->multiplyWith(ex, *(g.x4[j]));
+		}
+		// add y as third argument of f
+		x1[i]->add(1, *(f.x3[i]));
+		// add u as fourth argument of f
+		x2[i]->add(1, *(f.x4[i]));
+	}
+}
+
+void Monom4Vector::deriv(const Symmetry& s, const IntSequence& coor,
+						 Vector& out) const
+{
+	TL_RAISE_IF(out.length() != len,
+				"Wrong length of output vector in Monom4Vector::deriv");
+	TL_RAISE_IF(s.num() != 4,
+				"Wrong symmetry for Monom4Vector::deriv");
+	TL_RAISE_IF(s.dimen() != coor.size(),
+				"Incompatible symmetry and coordinates in Monom4Vector::deriv");
+
+	for (int i = 0; i < len; i++) {
+		out[i] = 1;
+		int off = 0;
+		out[i] *= x1[i]->deriv(IntSequence(coor, off, off+s[0]));
+		off += s[0];
+		out[i] *= x2[i]->deriv(IntSequence(coor, off, off+s[1]));
+		off += s[1];
+		out[i] *= x3[i]->deriv(IntSequence(coor, off, off+s[2]));
+		off += s[2];
+		out[i] *= x4[i]->deriv(IntSequence(coor, off, off+s[3]));
+	}
+}
+
+FGSTensor* Monom4Vector::deriv(const Symmetry& s) const
+{
+	IntSequence nvs(4);
+	nvs[0] = nx1; nvs[1] = nx2; 
+	nvs[2] = nx3; nvs[3] = nx4;
+
+	FGSTensor* res = new FGSTensor(len, TensorDimens(s, nvs));
+	for (Tensor::index run = res->begin(); run != res->end(); ++run) {
+		Vector col(*res, *run);
+		deriv(s, run.getCoor(), col);
+	}
+	return res;
+}
+
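+// Full-symmetry sparse derivative of the given dimension. Each coordinate of
+// a full-symmetry index is classified into one of the four variable groups
+// via the cumulative offsets in cum, yielding the general symmetry and the
+// within-group coordinates used to evaluate the rows.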
+FSSparseTensor* Monom4Vector::deriv(int dim) const
+{
+	IntSequence cum(4);
+	cum[0] = 0; cum[1] = nx1; cum[2] = nx1+nx2; cum[3] = nx1+nx2+nx3;
+
+	FSSparseTensor* res = new FSSparseTensor(dim, nx1+nx2+nx3+nx4, len);
+
+	FFSTensor dummy(0, nx1+nx2+nx3+nx4, dim);
+	for (Tensor::index run = dummy.begin(); run != dummy.end(); ++run) {
+		Symmetry ind_sym(0,0,0,0);
+		IntSequence ind(run.getCoor());
+		for (int i = 0; i < ind.size(); i++) {
+			int j = 3;
+			while (j >= 0 && ind[i] < cum[j]) j--;
+			ind_sym[j]++;
+			ind[i] -= cum[j];
+		}
+
+		Vector col(len);
+		deriv(ind_sym, ind, col);
+		for (int i = 0; i < len; i++) {
+			if (col[i] != 0.0) {
+				res->insert(run.getCoor(), i, col[i]);
+			}
+		}
+	}
+
+	return res;
+}
+
+void Monom4Vector::print() const
+{
+	printf("Variables: x1(%d) x2(%d) x3(%d) x4(%d)\n",
+		   nx1, nx2, nx3, nx4);
+	printf("Rows: %d\n", len);
+	for (int i = 0; i < len; i++) {
+		printf("%2d: ", i);
+		x1[i]->print();
+		printf("    ");
+		x2[i]->print();
+		printf("    ");
+		x3[i]->print();
+		printf("    ");
+		x4[i]->print();
+		printf("\n");
+	}
+}
+
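+// Generate random test data for the z-container: sparse full-symmetry
+// derivatives of f, dense derivatives of G (bigg) and g, and dense
+// derivatives of the composition r = f(G,g,y,u), all up to maxdim.
+// Derivatives of g are stored only for symmetries without the u' dimension.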
+SparseDerivGenerator::SparseDerivGenerator(
+	int nf, int ny, int nu, int nup, int nbigg, int ng,
+	int mx, double prob, int maxdim)
+	: maxdimen(maxdim), ts(new FSSparseTensor*[maxdimen])
+{
+	intgen.init(nf, ny, nu, nup, nbigg, mx, prob);
+
+	Monom4Vector bigg_m(nbigg, ny, nu, nup);
+	Monom4Vector g_m(ng, ny, nu);
+	Monom4Vector f(nf, nbigg, ng, ny, nu);
+	Monom4Vector r(f, bigg_m, g_m);
+	bigg = new FGSContainer(4);
+	g = new FGSContainer(4);
+	rcont = new FGSContainer(4);
+
+	for (int dim = 1; dim <= maxdimen; dim++) {
+		SymmetrySet ss(dim, 4);
+		for (symiterator si(ss); !si.isEnd(); ++si) {
+			bigg->insert(bigg_m.deriv(*si));
+			rcont->insert(r.deriv(*si));
+			if ((*si)[2] == 0)
+				g->insert(g_m.deriv(*si));
+		}
+
+		ts[dim-1] = f.deriv(dim);
+	}
+}
+
+SparseDerivGenerator::~SparseDerivGenerator()
+{
+	delete bigg;
+	delete g;
+	delete rcont;
+	for(int i = 0; i < maxdimen; i++)
+		delete ts[i];
+	delete [] ts;
+}
+
+
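+// Generate random dense derivatives of g, of x(y,u), and of the composition
+// r(y,u) = g(x(y,u)), up to maxdim; unfolded copies are created by unfold().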
+DenseDerivGenerator::DenseDerivGenerator(int ng, int nx, int ny, int nu,
+										 int mx, double prob, int maxdim)
+	: maxdimen(maxdim), ts(new FGSTensor*[maxdimen]),
+	  uts(new UGSTensor*[maxdimen])
+{
+	intgen.init(ng, nx, ny, nu, nu, mx, prob);
+	Monom1Vector g(nx, ng);
+	Monom2Vector x(ny, nu, nx);
+	Monom2Vector r(g, x);
+	xcont = x.deriv(maxdimen);
+	rcont = r.deriv(maxdimen);
+	uxcont = NULL;
+	for (int d = 1; d <= maxdimen; d++) {
+		ts[d-1] = g.deriv(d);
+		uts[d-1] = NULL;
+	}
+}
+
+void DenseDerivGenerator::unfold()
+{
+	uxcont = new UGSContainer(*xcont);
+	for (int i = 0; i < maxdimen; i++) {
+		uts[i] = new UGSTensor(*(ts[i]));
+	}
+}
+
+DenseDerivGenerator::~DenseDerivGenerator()
+{
+	delete xcont;
+	delete rcont;
+	for (int i = 0; i < maxdimen; i++) {
+		delete ts[i];
+		if (uts[i])
+			delete uts[i];
+	}
+	delete [] ts;	
+	delete [] uts;	
+}
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/tl/testing/monoms.h b/dynare++/tl/testing/monoms.h
new file mode 100644
index 0000000000000000000000000000000000000000..94b34a3da002ae4ec786ecfa9e784bf5298989f8
--- /dev/null
+++ b/dynare++/tl/testing/monoms.h
@@ -0,0 +1,128 @@
+/* $Id: monoms.h 148 2005-04-19 15:12:26Z kamenik $ */
+/* Copyright 2004, Ondra Kamenik */
+
+#ifndef MONOMS_H
+#define MONOMS_H
+
+#include "int_sequence.h"
+#include "gs_tensor.h"
+#include "t_container.h"
+#include "sparse_tensor.h"
+#include "Vector.h"
+
+class IntGenerator {
+	int maxim;
+	double probab;
+public:
+	IntGenerator()
+		: maxim(5), probab(0.3) {}
+	void init(int nf, int ny, int nv, int nw, int nu, int mx, double prob);
+	int get() const;
+};
+
+extern IntGenerator intgen;
+
+
+class Monom : public IntSequence {
+public:
+	Monom(int len); // generate a random monom
+	Monom(int len, int item); // generate a monom with all items equal to the given value
+	double deriv(const IntSequence& vars) const;
+	// this = this*m^ex (in monomial sense)
+	void multiplyWith(int ex, const Monom& m);
+	void print() const;
+};
+
+class Monom2Vector;
+class Monom1Vector {
+	friend class Monom2Vector;
+	int nx;
+	int len;
+	Monom** const x;
+public:
+	Monom1Vector(int nxx, int l);
+	~Monom1Vector();
+	void deriv(const IntSequence& c, Vector& out) const;
+	FGSTensor* deriv(int dim) const;
+	void print() const;
+};
+
+//class Monom3Vector;
+class Monom2Vector {
+	int ny;
+	int nu;
+	int len;
+	Monom** const y;
+	Monom** const u;
+public:
+	// generate a random vector of monomials in y and u
+	Monom2Vector(int nyy, int nuu, int l);
+	// calculate g(x(y,u))
+	Monom2Vector(const Monom1Vector& g, const Monom2Vector& xmon);
+	~Monom2Vector();
+	void deriv(const Symmetry& s, const IntSequence& c, Vector& out) const;
+	FGSTensor* deriv(const Symmetry& s) const;
+	FGSContainer* deriv(int maxdim) const;
+	void print() const;
+};
+
+class Monom4Vector {
+	int len;
+	int nx1;
+	int nx2;
+	int nx3;
+	int nx4;
+	Monom** const x1;
+	Monom** const x2;
+	Monom** const x3;
+	Monom** const x4;
+public:
+	/* random for g(y,u,sigma) */
+	Monom4Vector(int l, int ny, int nu);
+	/* random for G(y,u,u',sigma) */
+	Monom4Vector(int l, int ny, int nu, int nup);
+	/* random for f(y+,y,y-,u) */
+	Monom4Vector(int l, int nbigg, int ng, int ny, int nu);
+	/* substitution f(G(y,u,u',sigma),g(y,u,sigma),y,u) */
+	Monom4Vector(const Monom4Vector& f, const Monom4Vector& bigg,
+				 const Monom4Vector& g);
+	~Monom4Vector();
+	FSSparseTensor* deriv(int dim) const;
+	FGSTensor* deriv(const Symmetry& s) const;
+	void deriv(const Symmetry& s, const IntSequence& coor, Vector& out) const;
+	void print() const;
+protected:
+	void init_random();
+};
+
+
+struct SparseDerivGenerator {
+	int maxdimen;
+	FGSContainer* bigg;
+	FGSContainer* g;
+	FGSContainer* rcont;
+	FSSparseTensor** const ts;
+	SparseDerivGenerator(int nf, int ny, int nu, int nup, int nbigg, int ng,
+						 int mx, double prob, int maxdim);
+	~SparseDerivGenerator();
+};
+
+
+struct DenseDerivGenerator {
+	int maxdimen;
+	FGSContainer* xcont;
+	FGSContainer* rcont;
+	FGSTensor** const ts;
+	UGSContainer* uxcont;
+	UGSTensor** const uts;
+	DenseDerivGenerator(int ng, int nx, int ny, int nu,
+						int mx, double prob, int maxdim);
+	void unfold();
+	~DenseDerivGenerator();
+};
+
+#endif
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/tl/testing/tests.cpp b/dynare++/tl/testing/tests.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c05dfb8566a1437a9dfd7440744a80932b53ebce
--- /dev/null
+++ b/dynare++/tl/testing/tests.cpp
@@ -0,0 +1,1019 @@
+/* $Id: tests.cpp 148 2005-04-19 15:12:26Z kamenik $ */
+/* Copyright 2004, Ondra Kamenik */
+
+#include "SylvException.h"
+#include "tl_exception.h"
+#include "gs_tensor.h"
+#include "factory.h"
+#include "monoms.h"
+#include "t_container.h"
+#include "stack_container.h"
+#include "t_polynomial.h"
+#include "rfs_tensor.h"
+#include "ps_tensor.h"
+#include "tl_static.h"
+
+#include <stdio.h>
+#include <string.h>
+#include <time.h>
+
+
+class TestRunnable {
+	char name[100];
+public:
+	int dim; // dimension of the solved problem
+	int nvar; // number of variables in the solved problem
+	TestRunnable(const char* n, int d, int nv)
+		: dim(d), nvar(nv)
+		{strncpy(name, n, 99); name[99] = '\0';}
+	bool test() const;
+	virtual bool run() const =0;
+	const char* getName() const
+		{return name;}
+protected:
+	template<class _Ttype>
+	static bool index_forward(const Symmetry& s, const IntSequence& nvs);
+
+	template <class _Ttype>
+	static bool index_backward(const Symmetry& s, const IntSequence& nvs);
+
+	template <class _Ttype>
+	static bool index_offset(const Symmetry& s, const IntSequence& nvs);
+
+	static bool fold_unfold(const FTensor* folded);
+	static bool fs_fold_unfold(int r, int nv, int dim)
+		{
+			Factory f;
+			FTensor* folded = f.make<FFSTensor>(r, nv, dim);
+			return fold_unfold(folded); // folded deallocated in fold_unfold
+		}
+	static bool r_fold_unfold(int r, int nv, int dim)
+		{
+			Factory f;
+			FTensor* folded = f.make<FRTensor>(r, nv, dim);
+			return fold_unfold(folded); // folded deallocated in fold_unfold
+		}
+	static bool gs_fold_unfold(int r, const Symmetry& s, const IntSequence& nvs)
+		{
+			Factory f;
+			FTensor* folded = f.make<FGSTensor>(r, s, nvs);
+			return fold_unfold(folded); // folded deallocated in fold_unfold
+		}
+
+	static bool dense_prod(const Symmetry& bsym, const IntSequence& bnvs,
+						   int hdim, int hnv, int rows);
+
+	static bool folded_monomial(int ng, int nx, int ny, int nu, int dim);
+
+	static bool unfolded_monomial(int ng, int nx, int ny, int nu, int dim);
+
+	static bool fold_zcont(int nf, int ny, int nu, int nup, int nbigg,
+						   int ng, int dim);
+
+	static bool unfold_zcont(int nf, int ny, int nu, int nup, int nbigg,
+							 int ng, int dim);
+
+	static bool folded_contraction(int r, int nv, int dim);
+
+	static bool unfolded_contraction(int r, int nv, int dim);
+
+	static bool poly_eval(int r, int nv, int maxdim);
+
+
+};
+
+bool TestRunnable::test() const
+{
+	printf("Running test <%s>\n",name);
+	clock_t start = clock();
+	bool passed = run();
+	clock_t end = clock();
+	printf("CPU time %8.4g (CPU seconds)..................",
+		   ((double)(end-start))/CLOCKS_PER_SEC);
+	if (passed) {
+		printf("passed\n\n");
+		return passed;
+	} else {
+		printf("FAILED\n\n");
+		return passed;
+	}
+}
+
+
+/****************************************************/
+/*     definition of TestRunnable static methods    */
+/****************************************************/
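+// For every index reached by decrementing from end(), check that incrementing
+// begin() by the index's offset (*run) arrives at the same index.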
+template <class _Ttype>
+bool TestRunnable::index_forward(const Symmetry& s, const IntSequence& nvs)
+{
+	int fails = 0;
+	int ndecr = 0;
+	int nincr = 0;
+	_Ttype dummy(0, TensorDimens(s, nvs));
+	typename _Ttype::index run = dummy.end();
+	do {
+		--run;
+		ndecr++;
+		typename _Ttype::index run2 = dummy.begin();
+		for (int i = 0; i < *run; i++) {
+			++run2;
+			nincr++;
+		}
+		if (! (run == run2))
+			fails++;
+	} while (run != dummy.begin());
+
+	printf("\tnumber of columns    = %d\n",dummy.ncols());
+	printf("\tnumber of increments = %d\n",nincr);
+	printf("\tnumber of decrements = %d\n",ndecr);
+	printf("\tnumber of failures   = %d\n",fails);
+
+	return fails == 0;
+}
+
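+// For every index reached by incrementing from begin(), check that
+// decrementing end() by (ncols - offset) arrives at the same index.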
+template <class _Ttype>
+bool TestRunnable::index_backward(const Symmetry& s, const IntSequence& nvs)
+{
+	int fails = 0;
+	int ndecr = 0;
+	int nincr = 0;
+	_Ttype dummy(0, TensorDimens(s, nvs));
+	typename _Ttype::index run = dummy.begin();
+	while (run != dummy.end()) {
+		typename _Ttype::index run2 = dummy.end();
+		for (int i = 0; i < dummy.ncols() - *run; i++) {
+			--run2;
+			ndecr++;
+		}
+		if (! (run == run2))
+			fails++;
+		++run;
+		nincr++;
+	}
+
+	printf("\tnumber of columns    = %d\n",dummy.ncols());
+	printf("\tnumber of increments = %d\n",nincr);
+	printf("\tnumber of decrements = %d\n",ndecr);
+	printf("\tnumber of failures   = %d\n",fails);
+
+	return fails == 0;
+}
+
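+// For every index, rebuild an index from its coordinates and check that it
+// equals the original, i.e. offsets and coordinates are consistent.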
+template <class _Ttype>
+bool TestRunnable::index_offset(const Symmetry& s, const IntSequence& nvs)
+{
+	int fails = 0;
+	int nincr = 0;
+	_Ttype dummy(0, TensorDimens(s, nvs));
+	for (typename _Ttype::index run = dummy.begin();
+		 run != dummy.end(); ++run, nincr++) {
+		typename _Ttype::index run2(&dummy, run.getCoor());
+		if (! (run == run2))
+			fails++;
+	}
+
+	printf("\tnumber of columns    = %d\n",dummy.ncols());
+	printf("\tnumber of increments = %d\n",nincr);
+	printf("\tnumber of failures   = %d\n",fails);
+
+	return fails == 0;
+}
+
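+// Unfold the given folded tensor, fold it back, and check that the round trip
+// reproduces the original up to numerical precision; deallocates folded.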
+bool TestRunnable::fold_unfold(const FTensor* folded)
+{
+	UTensor* unfolded = &(folded->unfold());
+	FTensor* folded2 = &(unfolded->fold());
+	folded2->add(-1.0, *folded);
+	double normInf = folded2->getNormInf();
+	double norm1 = folded2->getNorm1();
+	printf("\tfolded size:       (%d, %d)\n",folded->nrows(), folded->ncols());
+	printf("\tunfolded size:     (%d, %d)\n",unfolded->nrows(), unfolded->ncols());
+	printf("\tdifference normInf: %8.4g\n", normInf);
+	printf("\tdifference norm1:   %8.4g\n", norm1);
+
+	delete folded;
+	delete unfolded;
+	delete folded2;
+
+	return normInf < 1.0e-15;
+}
+
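+// Multiply a random full-symmetry tensor into a target tensor once via the
+// folded container and once via the unfolded container, and report timings
+// and the maximum difference between the two results.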
+bool TestRunnable::dense_prod(const Symmetry& bsym, const IntSequence& bnvs,
+							  int hdim, int hnv, int rows)
+{
+	Factory f;
+	FGSContainer* cont =
+		f.makeCont<FGSTensor,FGSContainer>(hnv, bnvs, bsym.dimen()-hdim+1);
+	FGSTensor* fh =
+		f.make<FGSTensor>(rows, Symmetry(hdim), IntSequence(1, hnv));
+	UGSTensor uh(*fh);
+	FGSTensor fb(rows, TensorDimens(bsym, bnvs));
+	fb.getData().zeros();
+	clock_t s1 = clock();
+	cont->multAndAdd(uh, fb);
+	clock_t s2 = clock();
+	UGSContainer ucont(*cont);
+	clock_t s3 = clock();
+	UGSTensor ub(rows, fb.getDims());
+	ub.getData().zeros();
+	clock_t s4 = clock();
+	ucont.multAndAdd(uh, ub);
+	clock_t s5 = clock();
+
+	UGSTensor btmp(fb);
+	btmp.add(-1, ub);
+	double norm = btmp.getData().getMax();
+	double norm1 = btmp.getNorm1();
+	double normInf = btmp.getNormInf();
+
+	printf("\ttime for folded product:     %8.4g\n",
+		   ((double)(s2-s1))/CLOCKS_PER_SEC);
+	printf("\ttime for unfolded product:   %8.4g\n",
+		   ((double)(s5-s4))/CLOCKS_PER_SEC);
+	printf("\ttime for container convert:  %8.4g\n",
+		   ((double)(s3-s2))/CLOCKS_PER_SEC);
+	printf("\tunfolded difference normMax: %10.6g\n", norm);
+	printf("\tunfolded difference norm1:   %10.6g\n", norm1);
+	printf("\tunfolded difference normInf: %10.6g\n", normInf);
+
+	delete cont;
+	delete fh;
+
+	return norm < 1.e-13;
+}
+
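+// Check that accumulating xcont->multAndAdd over all derivative orders of g
+// reproduces the derivatives of the composition r(y,u) = g(x(y,u)),
+// symmetry by symmetry, using folded tensors.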
+bool TestRunnable::folded_monomial(int ng, int nx, int ny, int nu, int dim)
+{
+	clock_t gen_time = clock();
+	DenseDerivGenerator gen(ng, nx, ny, nu, 5, 0.3, dim);
+	gen_time = clock()-gen_time;
+	printf("\ttime for monom generation: %8.4g\n",
+		   ((double)gen_time)/CLOCKS_PER_SEC);
+	IntSequence nvs(2); nvs[0] = ny; nvs[1] = nu;
+	double maxnorm = 0;
+	for (int ydim = 0; ydim <= dim; ydim++) {
+		Symmetry s(ydim, dim-ydim);
+		printf("\tSymmetry: ");s.print();
+		FGSTensor res(ng, TensorDimens(s, nvs));
+		res.getData().zeros();
+		clock_t stime = clock();
+		for (int d = 1; d <= dim; d++) {
+			gen.xcont->multAndAdd(*(gen.ts[d-1]), res);
+		}
+		stime = clock() - stime;
+		printf("\t\ttime for symmetry: %8.4g\n",
+			   ((double)stime)/CLOCKS_PER_SEC);
+		const FGSTensor* mres = gen.rcont->get(s);
+		res.add(-1.0, *mres);
+		double normtmp = res.getData().getMax();
+		printf("\t\terror normMax:     %10.6g\n", normtmp);
+		if (normtmp > maxnorm)
+			maxnorm = normtmp;
+	}
+	return maxnorm < 1.0e-10;
+}
+
+bool TestRunnable::unfolded_monomial(int ng, int nx, int ny, int nu, int dim)
+{
+	clock_t gen_time = clock();
+	DenseDerivGenerator gen(ng, nx, ny, nu, 5, 0.3, dim);
+	gen_time = clock()-gen_time;
+	printf("\ttime for monom generation: %8.4g\n",
+		   ((double)gen_time)/CLOCKS_PER_SEC);
+	clock_t u_time = clock();
+	gen.unfold();
+	u_time = clock() - u_time;
+	printf("\ttime for monom unfolding:  %8.4g\n",
+		   ((double)u_time)/CLOCKS_PER_SEC);
+	IntSequence nvs(2); nvs[0] = ny; nvs[1] = nu;
+	double maxnorm = 0;
+	for (int ydim = 0; ydim <= dim; ydim++) {
+		Symmetry s(ydim, dim-ydim);
+		printf("\tSymmetry: ");s.print();
+		UGSTensor res(ng, TensorDimens(s, nvs));
+		res.getData().zeros();
+		clock_t stime = clock();
+		for (int d = 1; d <= dim; d++) {
+			gen.uxcont->multAndAdd(*(gen.uts[d-1]), res);
+		}
+		stime = clock() - stime;
+		printf("\t\ttime for symmetry: %8.4g\n",
+			   ((double)stime)/CLOCKS_PER_SEC);
+		const FGSTensor* mres = gen.rcont->get(s);
+		FGSTensor foldres(res);
+		foldres.add(-1.0, *mres);
+		double normtmp = foldres.getData().getMax();
+		printf("\t\terror normMax:     %10.6g\n", normtmp);
+		if (normtmp > maxnorm)
+			maxnorm = normtmp;
+	}
+	return maxnorm < 1.0e-10;
+}
+
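+// Check the folded Z-container: accumulate multAndAdd of the sparse
+// derivatives of f over the container built from G and g, and compare with
+// the precomputed derivatives of r = f(G,g,y,u) for every symmetry.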
+bool TestRunnable::fold_zcont(int nf, int ny, int nu, int nup, int nbigg,
+							  int ng, int dim)
+{
+	clock_t gen_time = clock();
+	SparseDerivGenerator dg(nf, ny, nu, nup, nbigg, ng,
+							5, 0.55, dim);
+	gen_time = clock()-gen_time;
+	for (int d = 1; d <= dim; d++) {
+		printf("\tfill of dim=%d tensor:     %3.2f %%\n",
+			   d, 100*dg.ts[d-1]->getFillFactor());
+	}
+	printf("\ttime for monom generation: %8.4g\n",
+		   ((double)gen_time)/CLOCKS_PER_SEC);
+
+	IntSequence nvs(4);
+	nvs[0] = ny; nvs[1] = nu; nvs[2] = nup; nvs[3] = 1;
+	double maxnorm = 0.0;
+
+	// form ZContainer
+	FoldedZContainer zc(dg.bigg, nbigg, dg.g, ng, ny, nu);
+
+	for (int d = 2; d <= dim; d++) {
+		SymmetrySet ss(d, 4);
+		for (symiterator si(ss); !si.isEnd(); ++si) {
+			printf("\tSymmetry: ");(*si).print();
+			FGSTensor res(nf, TensorDimens(*si, nvs));
+			res.getData().zeros();
+			clock_t stime = clock();
+			for (int l = 1; l <= (*si).dimen(); l++) {
+				zc.multAndAdd(*(dg.ts[l-1]), res);
+			}
+			stime = clock() - stime;
+			printf("\t\ttime for symmetry: %8.4g\n",
+				   ((double)stime)/CLOCKS_PER_SEC);
+			const FGSTensor* mres = dg.rcont->get(*si);
+			res.add(-1.0, *mres);
+			double normtmp = res.getData().getMax();
+			printf("\t\terror normMax:     %10.6g\n", normtmp);
+			if (normtmp > maxnorm)
+				maxnorm = normtmp;
+		}
+	}
+	return maxnorm < 1.0e-10;
+}
+
+bool TestRunnable::unfold_zcont(int nf, int ny, int nu, int nup, int nbigg,
+								int ng, int dim)
+{
+	clock_t gen_time = clock();
+	SparseDerivGenerator dg(nf, ny, nu, nup, nbigg, ng,
+							5, 0.55, dim);
+	gen_time = clock()-gen_time;
+	for (int d = 1; d <= dim; d++) {
+		printf("\tfill of dim=%d tensor:     %3.2f %%\n",
+			   d, 100*dg.ts[d-1]->getFillFactor());
+	}
+	printf("\ttime for monom generation: %8.4g\n",
+		   ((double)gen_time)/CLOCKS_PER_SEC);
+
+	clock_t con_time = clock();
+	UGSContainer uG_cont(*(dg.bigg));
+	UGSContainer ug_cont(*(dg.g));
+	con_time = clock()-con_time;
+	printf("\ttime for container unfold: %8.4g\n",
+		   ((double)con_time)/CLOCKS_PER_SEC);
+
+	IntSequence nvs(4);
+	nvs[0] = ny; nvs[1] = nu; nvs[2] = nup; nvs[3] = 1;
+	double maxnorm = 0.0;
+
+	// form ZContainer
+	UnfoldedZContainer zc(&uG_cont, nbigg, &ug_cont, ng, ny, nu);
+
+	for (int d = 2; d <= dim; d++) {
+		SymmetrySet ss(d, 4);
+		for (symiterator si(ss); !si.isEnd(); ++si) {
+			printf("\tSymmetry: ");(*si).print();
+			UGSTensor res(nf, TensorDimens(*si, nvs));
+			res.getData().zeros();
+			clock_t stime = clock();
+			for (int l = 1; l <= (*si).dimen(); l++) {
+				zc.multAndAdd(*(dg.ts[l-1]), res);
+			}
+			stime = clock() - stime;
+			printf("\t\ttime for symmetry: %8.4g\n",
+				   ((double)stime)/CLOCKS_PER_SEC);
+			FGSTensor fold_res(res);
+			const FGSTensor* mres = dg.rcont->get(*si);
+			fold_res.add(-1.0, *mres);
+			double normtmp = fold_res.getData().getMax();
+			printf("\t\terror normMax:     %10.6g\n", normtmp);
+			if (normtmp > maxnorm)
+				maxnorm = normtmp;
+		}
+	}
+	return maxnorm < 1.0e-10;
+}
+
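+// Repeatedly contract a random folded tensor with the vector x and compare
+// the resulting vector with the unfolded tensor applied to the power of x
+// built by URSingleTensor.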
+bool TestRunnable::folded_contraction(int r, int nv, int dim)
+{
+	Factory fact;
+	Vector* x = fact.makeVector(nv);
+
+	FFSTensor* forig = fact.make<FFSTensor>(r, nv, dim);
+	FFSTensor* f = new FFSTensor(*forig);
+	clock_t ctime = clock();
+	for (int d = dim-1; d > 0; d--) {
+		FFSTensor* fnew = new FFSTensor(*f, ConstVector(*x));
+		delete f;
+		f = fnew;
+	}
+	ctime = clock() - ctime;
+	Vector res(forig->nrows());
+	res.zeros();
+	f->multaVec(res, *x);
+
+	UFSTensor u(*forig);
+	clock_t utime = clock();
+	URSingleTensor ux(*x, dim);
+	Vector v(u.nrows());
+	v.zeros();
+	u.multaVec(v, ux.getData());
+	utime = clock() - utime;
+
+	v.add(-1.0, res);
+	printf("\ttime for folded contraction: %8.4g\n",
+		   ((double)ctime)/CLOCKS_PER_SEC);
+	printf("\ttime for unfolded power:     %8.4g\n",
+		   ((double)utime)/CLOCKS_PER_SEC);
+	printf("\terror normMax:     %10.6g\n", v.getMax());
+	printf("\terror norm1:       %10.6g\n", v.getNorm1());
+
+	delete f;
+	delete x;
+
+	return (v.getMax() < 1.e-10);
+}
+
+bool TestRunnable::unfolded_contraction(int r, int nv, int dim)
+{
+	Factory fact;
+	Vector* x = fact.makeVector(nv);
+
+	FFSTensor* forig = fact.make<FFSTensor>(r, nv, dim);
+	UFSTensor uorig(*forig);
+	delete forig;
+	UFSTensor* u = new UFSTensor(uorig);
+	clock_t ctime = clock();
+	for (int d = dim-1; d > 0; d--) {
+		UFSTensor* unew = new UFSTensor(*u, ConstVector(*x));
+		delete u;
+		u = unew;
+	}
+	ctime = clock() - ctime;
+	Vector res(uorig.nrows());
+	res.zeros();
+	u->multaVec(res, *x);
+
+	clock_t utime = clock();
+	URSingleTensor ux(*x, dim);
+	Vector v(uorig.nrows());
+	v.zeros();
+	uorig.multaVec(v, ux.getData());
+	utime = clock() - utime;
+
+	v.add(-1.0, res);
+	printf("\ttime for unfolded contraction: %8.4g\n",
+		   ((double)ctime)/CLOCKS_PER_SEC);
+	printf("\ttime for unfolded power:       %8.4g\n",
+		   ((double)utime)/CLOCKS_PER_SEC);
+	printf("\terror normMax:     %10.6g\n", v.getMax());
+	printf("\terror norm1:       %10.6g\n", v.getNorm1());
+
+	delete u;
+	delete x;
+
+	return (v.getMax() < 1.e-10);
+}
+
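+// Evaluate a random tensor polynomial at x four ways (folded/unfolded,
+// traditional power evaluation vs. Horner) and check that all results agree.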
+bool TestRunnable::poly_eval(int r, int nv, int maxdim)
+{
+	Factory fact;
+	Vector* x = fact.makeVector(nv);
+
+	Vector out_ft(r); out_ft.zeros();
+	Vector out_fh(r); out_fh.zeros();
+	Vector out_ut(r); out_ut.zeros();
+	Vector out_uh(r); out_uh.zeros();
+
+	UTensorPolynomial* up;
+	{
+		FTensorPolynomial* fp = fact.makePoly<FFSTensor, FTensorPolynomial>(r, nv, maxdim);
+
+		clock_t ft_cl = clock();
+		fp->evalTrad(out_ft, *x);
+		ft_cl = clock() - ft_cl;
+		printf("\ttime for folded power eval:    %8.4g\n",
+			   ((double)ft_cl)/CLOCKS_PER_SEC);
+		
+		clock_t fh_cl = clock();
+		fp->evalHorner(out_fh, *x);
+		fh_cl = clock() - fh_cl;
+		printf("\ttime for folded horner eval:   %8.4g\n",
+			   ((double)fh_cl)/CLOCKS_PER_SEC);
+
+		up = new UTensorPolynomial(*fp);
+		delete fp;
+	}
+
+	clock_t ut_cl = clock();
+	up->evalTrad(out_ut, *x);
+	ut_cl = clock() - ut_cl;
+	printf("\ttime for unfolded power eval:  %8.4g\n",
+		   ((double)ut_cl)/CLOCKS_PER_SEC);
+
+	clock_t uh_cl = clock();
+	up->evalHorner(out_uh, *x);
+	uh_cl = clock() - uh_cl;
+	printf("\ttime for unfolded horner eval: %8.4g\n",
+		   ((double)uh_cl)/CLOCKS_PER_SEC);
+
+	out_ft.add(-1.0, out_ut);
+	double max_ft = out_ft.getMax();
+	out_fh.add(-1.0, out_ut);
+	double max_fh = out_fh.getMax();
+	out_uh.add(-1.0, out_ut);
+	double max_uh = out_uh.getMax();
+
+	printf("\tfolded power error norm max:     %10.6g\n", max_ft);
+	printf("\tfolded horner error norm max:    %10.6g\n", max_fh);
+	printf("\tunfolded horner error norm max:  %10.6g\n", max_uh);
+
+	delete up;
+	delete x;
+	return (max_ft+max_fh+max_uh < 1.0e-10);
+}
+
+
+/****************************************************/
+/*     definition of TestRunnable subclasses        */
+/****************************************************/
+class SmallIndexForwardFold : public TestRunnable {
+public:
+	SmallIndexForwardFold()
+		: TestRunnable("small index forward for fold (44)(222)", 5, 4) {}
+	bool run() const
+		{
+			Symmetry s(2,3);
+			IntSequence nvs(2); nvs[0] = 4; nvs[1] = 2;
+			return index_forward<FGSTensor>(s, nvs);
+		}
+};
+
+class SmallIndexForwardUnfold : public TestRunnable {
+public:
+	SmallIndexForwardUnfold()
+		: TestRunnable("small index forward for unfold (44)(222)", 5, 4) {}
+	bool run() const
+		{
+			Symmetry s(2,3);
+			IntSequence nvs(2); nvs[0] = 4; nvs[1] = 2;
+			return index_forward<UGSTensor>(s, nvs);
+		}
+};
+
+class IndexForwardFold : public TestRunnable {
+public:
+	IndexForwardFold()
+		: TestRunnable("index forward for fold (55)(222)(22)", 7, 5) {}
+	bool run() const
+		{
+			Symmetry s(2,3,2);
+			IntSequence nvs(3); nvs[0] = 5; nvs[1] = 2; nvs[2] = 2;
+			return index_forward<FGSTensor>(s, nvs);
+		}
+};
+
+class IndexForwardUnfold : public TestRunnable {
+public:
+	IndexForwardUnfold()
+		: TestRunnable("index forward for unfold (55)(222)(22)", 7, 5) {}
+	bool run() const
+		{
+			Symmetry s(2,3,2);
+			IntSequence nvs(3); nvs[0] = 5; nvs[1] = 2; nvs[2] = 2;
+			return index_forward<UGSTensor>(s, nvs);
+		}
+};
+
+class SmallIndexBackwardFold : public TestRunnable {
+public:
+	SmallIndexBackwardFold()
+		: TestRunnable("small index backward for fold (3)(3)(222)", 5, 3) {}
+	bool run() const
+		{
+			Symmetry s(1,1,3);
+			IntSequence nvs(3); nvs[0] = 3; nvs[1] = 3; nvs[2] = 2;
+			return index_backward<FGSTensor>(s, nvs);
+		}
+};
+
+class IndexBackwardFold : public TestRunnable {
+public:
+	IndexBackwardFold()
+		: TestRunnable("index backward for fold (44)(222)(44)", 7, 4) {}
+	bool run() const
+		{
+			Symmetry s(2,3,2);
+			IntSequence nvs(3); nvs[0] = 4; nvs[1] = 2; nvs[2] = 4;
+			return index_backward<FGSTensor>(s, nvs);
+		}
+};
+
+class SmallIndexBackwardUnfold : public TestRunnable {
+public:
+	SmallIndexBackwardUnfold()
+		: TestRunnable("small index backward for unfold (3)(3)(222)", 5, 3) {}
+	bool run() const
+		{
+			Symmetry s(1,1,3);
+			IntSequence nvs(3); nvs[0] = 3; nvs[1] = 3; nvs[2] = 2;
+			return index_backward<UGSTensor>(s, nvs);
+		}
+};
+
+class IndexBackwardUnfold : public TestRunnable {
+public:
+	IndexBackwardUnfold()
+		: TestRunnable("index backward for unfold (44)(222)(44)", 7, 4) {}
+	bool run() const
+		{
+			Symmetry s(2,3,2);
+			IntSequence nvs(3); nvs[0] = 4; nvs[1] = 2; nvs[2] = 4;
+			return index_backward<UGSTensor>(s, nvs);
+		}
+};
+
+class SmallIndexOffsetFold : public TestRunnable {
+public:
+	SmallIndexOffsetFold()
+		: TestRunnable("small index offset for fold (44)(222)", 5, 4) {}
+	bool run() const
+		{
+			Symmetry s(2,3);
+			IntSequence nvs(2); nvs[0] = 4; nvs[1] = 2;
+			return index_offset<FGSTensor>(s, nvs);
+		}
+};
+
+class SmallIndexOffsetUnfold : public TestRunnable {
+public:
+	SmallIndexOffsetUnfold()
+		: TestRunnable("small index offset for unfold (44)(222)", 5, 4) {}
+	bool run() const
+		{
+			Symmetry s(2,3);
+			IntSequence nvs(2); nvs[0] = 4; nvs[1] = 2;
+			return index_offset<UGSTensor>(s, nvs);
+		}
+};
+
+class IndexOffsetFold : public TestRunnable {
+public:
+	IndexOffsetFold()
+		: TestRunnable("index offset for fold (55)(222)(22)", 7, 5) {}
+	bool run() const
+		{
+			Symmetry s(2,3,2);
+			IntSequence nvs(3); nvs[0] = 5; nvs[1] = 2; nvs[2] = 2;
+			return index_offset<FGSTensor>(s, nvs);
+		}
+};
+
+class IndexOffsetUnfold : public TestRunnable {
+public:
+	IndexOffsetUnfold()
+		: TestRunnable("index offset for unfold (55)(222)(22)", 7, 5) {}
+	bool run() const
+		{
+			Symmetry s(2,3,2);
+			IntSequence nvs(3); nvs[0] = 5; nvs[1] = 2; nvs[2] = 2;
+			return index_offset<UGSTensor>(s, nvs);
+		}
+};
+
+class SmallFoldUnfoldFS : public TestRunnable {
+public:
+	SmallFoldUnfoldFS()
+		: TestRunnable("small fold-unfold for full symmetry (444)", 3, 4) {}
+	bool run() const
+		{
+			return fs_fold_unfold(5, 4, 3);
+		}
+};
+
+
+class SmallFoldUnfoldGS : public TestRunnable {
+public:
+	SmallFoldUnfoldGS()
+		: TestRunnable("small fold-unfold for gen symmetry (3)(33)(22)", 5, 3) {}
+	bool run() const
+		{
+			Symmetry s(1,2,2);
+			IntSequence nvs(3); nvs[0] = 3; nvs[1] = 3; nvs[2] = 2;
+			return gs_fold_unfold(5, s, nvs);
+		}
+};
+
+class FoldUnfoldFS : public TestRunnable {
+public:
+	FoldUnfoldFS()
+		: TestRunnable("fold-unfold for full symmetry (9999)", 4, 9) {}
+	bool run() const
+		{
+			return fs_fold_unfold(5, 9, 4);
+		}
+};
+
+
+class FoldUnfoldGS : public TestRunnable {
+public:
+	FoldUnfoldGS()
+		: TestRunnable("fold-unfold for gen symmetry (66)(2)(66)", 5, 6) {}
+	bool run() const
+		{
+			Symmetry s(2,1,2);
+			IntSequence nvs(3); nvs[0] = 6; nvs[1] = 2; nvs[2] = 6;
+			return gs_fold_unfold(5, s, nvs);
+		}
+};
+
+class SmallFoldUnfoldR : public TestRunnable {
+public:
+	SmallFoldUnfoldR()
+		: TestRunnable("small fold-unfold for row full symmetry (333)", 3, 3) {}
+	bool run() const
+		{
+			return r_fold_unfold(5, 3, 3);
+		}
+};
+
+class FoldUnfoldR : public TestRunnable {
+public:
+	FoldUnfoldR()
+		: TestRunnable("fold-unfold for row full symmetry (66666)", 5, 6) {}
+	bool run() const
+		{
+			return r_fold_unfold(5, 6, 5);
+		}
+};
+
+class SmallDenseProd : public TestRunnable {
+public:
+	SmallDenseProd()
+		: TestRunnable("small dense prod bsym=1-2,nvs=3-2,h=2-3,r=2",3,3) {}
+	bool run() const
+		{
+			IntSequence bnvs(2); bnvs[0]=3; bnvs[1]=2;
+			return dense_prod(Symmetry(1,2), bnvs, 2, 3, 2);
+		}
+};
+
+class DenseProd : public TestRunnable {
+public:
+	DenseProd()
+		: TestRunnable("dense prod bsym=2-3,nvs=10-7,h=3-15,r=10",5,15) {}
+	bool run() const
+		{
+			IntSequence bnvs(2); bnvs[0]=10; bnvs[1]=7;
+			return dense_prod(Symmetry(2,3), bnvs, 3, 15, 10);
+		}
+};
+
+class BigDenseProd : public TestRunnable {
+public:
+	BigDenseProd()
+		: TestRunnable("dense prod bsym=3-2,nvs=13-11,h=3-20,r=20",6,20) {}
+	bool run() const
+		{
+			IntSequence bnvs(2); bnvs[0]=13; bnvs[1]=11;
+			return dense_prod(Symmetry(3,2), bnvs, 3, 20, 20);
+		}
+};
+
+class SmallFoldedMonomial : public TestRunnable {
+public:
+	SmallFoldedMonomial()
+		: TestRunnable("folded vs. monoms (g,x,y,u)=(10,4,5,3), dim=4", 4, 8) {}
+	bool run() const
+		{
+			return folded_monomial(10, 4, 5, 3, 4);
+		}
+};
+
+class FoldedMonomial : public TestRunnable {
+public:
+	FoldedMonomial()
+		: TestRunnable("folded vs. monoms (g,x,y,u)=(20,12,10,5), dim=4", 4, 15) {}
+	bool run() const
+		{
+			return folded_monomial(20, 12, 10, 5, 4);
+		}
+};
+
+class SmallUnfoldedMonomial : public TestRunnable {
+public:
+	SmallUnfoldedMonomial()
+		: TestRunnable("unfolded vs. monoms (g,x,y,u)=(10,4,5,3), dim=4", 4, 8) {}
+	bool run() const
+		{
+			return unfolded_monomial(10, 4, 5, 3, 4);
+		}
+};
+
+class UnfoldedMonomial : public TestRunnable {
+public:
+	UnfoldedMonomial()
+		: TestRunnable("unfolded vs. monoms (g,x,y,u)=(20,12,10,5), dim=4", 4, 15) {}
+	bool run() const
+		{
+			return unfolded_monomial(20, 12, 10, 5, 4);
+		}
+};
+
+class FoldedContractionSmall : public TestRunnable {
+public:
+	FoldedContractionSmall()
+		: TestRunnable("folded contraction small (r=5, nv=4, dim=3)", 3, 4) {}
+	bool run() const
+		{
+			return folded_contraction(5, 4, 3);
+		}
+};
+
+class FoldedContractionBig : public TestRunnable {
+public:
+	FoldedContractionBig()
+		: TestRunnable("folded contraction big (r=20, nv=12, dim=5)", 5, 12) {}
+	bool run() const
+		{
+			return folded_contraction(20, 12, 5);
+		}
+};
+
+class UnfoldedContractionSmall : public TestRunnable {
+public:
+	UnfoldedContractionSmall()
+		: TestRunnable("unfolded contraction small (r=5, nv=4, dim=3)", 3, 4) {}
+	bool run() const
+		{
+			return unfolded_contraction(5, 4, 3);
+		}
+};
+
+class UnfoldedContractionBig : public TestRunnable {
+public:
+	UnfoldedContractionBig()
+		: TestRunnable("unfolded contraction big (r=20, nv=12, dim=5)", 5, 12) {}
+	bool run() const
+		{
+			return unfolded_contraction(20, 12, 5);
+		}
+};
+
+class PolyEvalSmall : public TestRunnable {
+public:
+	PolyEvalSmall()
+		: TestRunnable("polynomial evaluation small (r=4, nv=5, maxdim=4)", 4, 5) {}
+	bool run() const
+		{
+			return poly_eval(4, 5, 4);
+		}
+};
+
+class PolyEvalBig : public TestRunnable {
+public:
+	PolyEvalBig()
+		: TestRunnable("polynomial evaluation big (r=244, nv=97, maxdim=2)", 2, 97) {}
+	bool run() const
+		{
+			return poly_eval(244, 97, 2);
+		}
+};
+
+class FoldZContSmall : public TestRunnable {
+public:
+	FoldZContSmall()
+		: TestRunnable("folded Z container (r=3,ny=2,nu=2,nup=1,G=2,g=2,dim=3)",
+					   3, 8) {}
+	bool run() const
+		{
+			return fold_zcont(3, 2, 2, 1, 2, 2, 3);
+		}
+};
+
+class FoldZCont : public TestRunnable {
+public:
+	FoldZCont()
+		: TestRunnable("folded Z container (r=13,ny=5,nu=7,nup=4,G=6,g=7,dim=4)",
+					   4, 25) {}
+	bool run() const
+		{
+			return fold_zcont(13, 5, 7, 4, 6, 7, 4);
+		}
+};
+
+class UnfoldZContSmall : public TestRunnable {
+public:
+	UnfoldZContSmall()
+		: TestRunnable("unfolded Z container (r=3,ny=2,nu=2,nup=1,G=2,g=2,dim=3)",
+					   3, 8) {}
+	bool run() const
+		{
+			return unfold_zcont(3, 2, 2, 1, 2, 2, 3);
+		}
+};
+
+class UnfoldZCont : public TestRunnable {
+public:
+	UnfoldZCont()
+		: TestRunnable("unfolded Z container (r=13,ny=5,nu=7,nup=4,G=6,g=7,dim=4)",
+					   4, 25) {}
+	bool run() const
+		{
+			return unfold_zcont(13, 5, 7, 4, 6, 7, 4);
+		}
+};
+
+
+
+int main()
+{
+	TestRunnable* all_tests[50];
+	// fill in vector of all tests
+	int num_tests = 0;
+	all_tests[num_tests++] = new SmallIndexForwardFold();
+	all_tests[num_tests++] = new SmallIndexForwardUnfold();
+	all_tests[num_tests++] = new IndexForwardFold();
+	all_tests[num_tests++] = new IndexForwardUnfold();
+	all_tests[num_tests++] = new SmallIndexBackwardFold();
+	all_tests[num_tests++] = new IndexBackwardFold();
+	all_tests[num_tests++] = new SmallIndexBackwardUnfold();
+	all_tests[num_tests++] = new IndexBackwardUnfold();
+	all_tests[num_tests++] = new SmallIndexOffsetFold();
+	all_tests[num_tests++] = new SmallIndexOffsetUnfold();
+	all_tests[num_tests++] = new IndexOffsetFold();
+	all_tests[num_tests++] = new IndexOffsetUnfold();
+	all_tests[num_tests++] = new SmallFoldUnfoldFS();
+	all_tests[num_tests++] = new SmallFoldUnfoldGS();
+	all_tests[num_tests++] = new FoldUnfoldFS();
+	all_tests[num_tests++] = new FoldUnfoldGS();
+	all_tests[num_tests++] = new SmallFoldUnfoldR();
+	all_tests[num_tests++] = new FoldUnfoldR();
+	all_tests[num_tests++] = new SmallDenseProd();
+	all_tests[num_tests++] = new DenseProd();
+	all_tests[num_tests++] = new BigDenseProd();
+	all_tests[num_tests++] = new SmallFoldedMonomial();
+	all_tests[num_tests++] = new FoldedMonomial();
+	all_tests[num_tests++] = new SmallUnfoldedMonomial();
+	all_tests[num_tests++] = new UnfoldedMonomial();
+	all_tests[num_tests++] = new FoldedContractionSmall();
+	all_tests[num_tests++] = new FoldedContractionBig();
+	all_tests[num_tests++] = new UnfoldedContractionSmall();
+	all_tests[num_tests++] = new UnfoldedContractionBig();
+	all_tests[num_tests++] = new PolyEvalSmall();
+	all_tests[num_tests++] = new PolyEvalBig();
+	all_tests[num_tests++] = new FoldZContSmall();
+	all_tests[num_tests++] = new FoldZCont();
+	all_tests[num_tests++] = new UnfoldZContSmall();
+	all_tests[num_tests++] = new UnfoldZCont();
+
+	// find maximum dimension and maximum nvar
+	int dmax=0;
+	int nvmax = 0;
+	for (int i = 0; i < num_tests; i++) {
+		if (dmax < all_tests[i]->dim)
+			dmax = all_tests[i]->dim;
+		if (nvmax < all_tests[i]->nvar)
+			nvmax = all_tests[i]->nvar;
+	}
+	tls.init(dmax, nvmax); // initialize library
+
+	// launch the tests
+	int success = 0;
+	for (int i = 0; i < num_tests; i++) {
+		try {
+			if (all_tests[i]->test())
+				success++;
+		} catch (const TLException& e) {
+			printf("Caught TL exception in <%s>:\n", all_tests[i]->getName());
+			e.print();
+		} catch (SylvException& e) {
+			printf("Caught Sylv exception in <%s>:\n", all_tests[i]->getName());
+			e.printMessage();
+		}
+	}
+
+	printf("There were %d tests that failed out of %d tests run.\n",
+		   num_tests - success, num_tests);
+
+	// destroy
+	for (int i = 0; i < num_tests; i++) {
+		delete all_tests[i];
+	}
+
+	return 0;
+}
diff --git a/dynare++/utils/cc/Makefile b/dynare++/utils/cc/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..7a43a15592677b5e2a64a447873138d2a45576ad
--- /dev/null
+++ b/dynare++/utils/cc/Makefile
@@ -0,0 +1,26 @@
+# Copyright (C) 2005, Ondra Kamenik
+
+# $Id: Makefile 843 2006-07-28 08:54:19Z tamas $
+
+CC_FLAGS := -Wall -I../../
+
+ifeq ($(DEBUG),yes)
+	CC_FLAGS := $(CC_FLAGS) -g
+else
+	CC_FLAGS := $(CC_FLAGS) -O3
+endif
+
+objects := $(patsubst %.cpp,%.o,$(wildcard *.cpp))
+headers := $(wildcard *.h)
+source  := $(wildcard *.cpp)
+
+%.o: %.cpp $(headers)
+	$(CC) $(CC_FLAGS) -c $*.cpp
+
+utils.a: $(objects) $(headers) $(source)
+	ar cr utils.a $(objects)
+
+clear:
+	rm -f *~
+	rm -f *.o
+	rm -f utils.a
diff --git a/dynare++/utils/cc/exception.h b/dynare++/utils/cc/exception.h
new file mode 100644
index 0000000000000000000000000000000000000000..42c1f08a90abacd77c2b68d3c5a66693ca8b9f4d
--- /dev/null
+++ b/dynare++/utils/cc/exception.h
@@ -0,0 +1,55 @@
+// Copyright (C) 2005, Ondra Kamenik
+
+// $Id: exception.h 1367 2007-07-11 14:21:57Z kamenik $
+
+#ifndef OGU_EXCEPTION_H
+#define OGU_EXCEPTION_H
+
+#include <stdio.h>
+#include <string.h>
+
+#include <string>
+#include <algorithm>
+
+namespace ogu {
+
+	/** A primitive exception. */
+	class Exception {
+		static const int file_length = 100;
+		static const int mes_length = 500;
+	protected:
+		char file[file_length];
+		int line;
+		char mes[mes_length];
+	public:
+		Exception(const char* f, int l, const char* m)
+			{
+				strncpy(file, f, file_length-1);
+				file[file_length-1] = '\0';
+				line = l;
+				strncpy(mes, m, mes_length-1);
+				mes[mes_length-1] = '\0';
+			}
+		Exception(const char* f, int l, const std::string& m)
+			{
+				strncpy(file, f, file_length-1);
+				file[file_length-1] = '\0';
+				line = l;
+				strncpy(mes, m.c_str(), mes_length-1);
+				mes[mes_length-1] = '\0';
+			}
+		virtual ~Exception() {}
+		void print(FILE* fd) const
+			{ fprintf(fd, "%s:%d: %s\n", file, line, mes); }
+		void print() const
+			{ print(stdout); }
+		const char* message() const
+			{ return mes; }
+	};
+};
+
+#endif
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/utils/cc/memory_file.cpp b/dynare++/utils/cc/memory_file.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e0072bfa5fbf4e8c79f1d32869ca2cbc3cd23e0f
--- /dev/null
+++ b/dynare++/utils/cc/memory_file.cpp
@@ -0,0 +1,62 @@
+// Copyright (C) 2005, Ondra Kamenik
+
+// $Id: memory_file.cpp 987 2006-10-17 14:39:19Z kamenik $
+
+#include "memory_file.h"
+
+#include <stdio.h>
+
+using namespace ogu;
+
+int ogu::calc_pos_offset(int length, const char* str, int line, int col)
+{
+	int i = 0;
+	int il = 1;
+	int ic = 1;
+	// advance character by character until the requested line and column
+	// (both 1-based) are reached or the string ends
+	while (i < length && (il < line || (il == line && ic < col))) {
+		if (str[i] == '\n') {
+			il++;
+			ic = 1;
+		} else {
+			ic++;
+		}
+		i++;
+	}
+	return i;
+}
+
+void ogu::calc_pos_line_and_col(int length, const char* str, int offset,
+						   int& line, int& col)
+{
+	line = 1;
+	col = 0;
+	int i = 0;
+	while (i < length && i < offset) {
+		if (str[i] == '\n') {
+			line++;
+			col = 0;
+		}
+		i++;
+		col++;
+	}
+}
+
+MemoryFile::MemoryFile(const char* fname)
+	: len(-1), data(NULL)
+{
+	FILE* fd = fopen(fname, "rb");
+	if (fd) {
+		// get the file size
+		fseek(fd, 0, SEEK_END);
+		len = ftell(fd);
+		// allocate space for the file plus ending '\0' character
+		data = new char[len+1];
+		// read file and set data
+		fseek(fd, 0, SEEK_SET);
+		int i = 0;
+		int c;
+		while (EOF != (c = fgetc(fd)))
+			data[i++] = (unsigned char)c;
+		data[len] = '\0';
+		fclose(fd);
+	}
+}
diff --git a/dynare++/utils/cc/memory_file.h b/dynare++/utils/cc/memory_file.h
new file mode 100644
index 0000000000000000000000000000000000000000..79f08717ebc140c9b08a15ba5fe5fa055df04cb6
--- /dev/null
+++ b/dynare++/utils/cc/memory_file.h
@@ -0,0 +1,57 @@
+// Copyright (C) 2005, Ondra Kamenik
+
+// $Id: memory_file.h 762 2006-05-22 13:00:07Z kamenik $
+
+#ifndef OGU_MEMORY_FILE
+#define OGU_MEMORY_FILE
+
+namespace ogu {
+	/** This function calculates an offset of a given position in a
+	 * given string. The position is given by the line number and by
+	 * the offset in the line (both starting from 1). */
+	int calc_pos_offset(int length, const char* str, int line, int col);
+	/** This function calculates a line number and column number of a
+	 * character given by the offset in the string. It is inverse to
+	 * calc_pos_offset. */
+	void calc_pos_line_and_col(int length, const char* str, int offset,
+							   int& line, int& col);
+
+	/** This class opens a given file, makes a copy of it in memory and
+	 * appends a '\0' character to it. Since the length is stored in an
+	 * int, it can hold files of at most INT_MAX bytes (about 2GB with a
+	 * 32-bit int). If the file could not be opened for reading, data is
+	 * NULL and length is -1. If the file exists but is empty, len is zero
+	 * and data points to newly allocated memory containing only the '\0'
+	 * character. */
+	class MemoryFile {
+	protected:
+		int len;
+		char* data;
+	public:
+		MemoryFile(const char* fname);
+		virtual ~MemoryFile()
+			{if (data) delete [] data;}
+		int length() const
+			{return len;}
+		const char* base() const
+			{return data;}
+		bool exists() const
+			{return len != -1;}
+		/** Return the offset of a character in the given line
+		 * (starting from 1) with the given offset in the line. */
+		int offset(int line, int lineoff) const
+			{return calc_pos_offset(len, data, line, lineoff);}
+		/** Return the line number and column number of the character
+		 * defined by the offset. */
+		void line_and_col(int offset, int& line, int& col) const
+			{calc_pos_line_and_col(len, data, offset, line, col);}
+	};
+
+	
+};
+
+
+#endif
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/dynare++/utils/cc/pascal_triangle.cpp b/dynare++/utils/cc/pascal_triangle.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1dc4c09d65fa98f1d699e789129360e76f6b8b01
--- /dev/null
+++ b/dynare++/utils/cc/pascal_triangle.cpp
@@ -0,0 +1,88 @@
+#include "pascal_triangle.h"
+#include <cstdio>
+
+using namespace ogu;
+
+PascalTriangle ptriang;
+
+void PascalRow::setFromPrevious(const PascalRow& prev)
+{
+	k = prev.k + 1;
+	clear();
+	prolong(prev);
+}
+
+/** This prolongs the PascalRow. If the row is empty, the first item is set to
+ * k+1, i.e. noverk(k+1,k), the second item of the full Pascal row (which
+ * starts with noverk(k,k)=1). The remaining items are then calculated from
+ * the provided row, which must be the row for k-1. */
+void PascalRow::prolong(const PascalRow& prev)
+{
+	if (size() == 0)
+		push_back(k+1);
+	int last = back();
+	for (unsigned int i = size(); i < prev.size(); i++) {
+		last += prev[i];
+		push_back(last);
+	}
+}
+
+void PascalRow::prolongFirst(int n)
+{
+	// todo: check n = 1;
+	for (int i = (int)size()+2; i <= n; i++)
+		push_back(i);
+}
+
+void PascalRow::print() const
+{
+	printf("k=%d\n",k);
+	for (unsigned int i = 0; i < size(); i++)
+		printf("%d ",operator[](i));
+	printf("\n");
+}
+
+int PascalTriangle::max_n() const
+{
+	return (int)(tr[0].size()+1);
+}
+
+int PascalTriangle::max_k() const
+{
+	return (int)tr.size();
+}
+
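+// Make sure the triangle covers binomial coefficients at least up to the
+// given n (row length) and k (number of rows), extending existing rows and
+// adding new ones as needed.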
+void PascalTriangle::ensure(int n, int k)
+{
+	// add along n
+	if (n > max_n()) {
+		tr[0].prolongFirst(n);
+		for (int i = 2; i <= max_k(); i++)
+			tr[i-1].prolong(tr[i-2]);
+	}
+
+	if (k > max_k()) {
+		for (int i = max_k()+1; i <= k; i++) {
+			PascalRow r;
+			tr.push_back(r);
+			tr.back().setFromPrevious(tr[i-2]);
+		}
+	}
+}
+
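+// Binomial coefficient n over k: uses the symmetry noverk(n,k) = noverk(n,n-k)
+// and extends the triangle on demand.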
+int PascalTriangle::noverk(int n, int k)
+{
+	// todo: raise an exception if out of bounds
+	if (n-k < k)
+		k = n-k;
+	if (k == 0)
+		return 1;
+	ensure(n, k);
+	return (tr[k-1])[n-1-k];
+}
+
+void PascalTriangle::print() const
+{
+	for (unsigned int i = 0; i < tr.size(); i++)
+		tr[i].print();
+}
diff --git a/dynare++/utils/cc/pascal_triangle.h b/dynare++/utils/cc/pascal_triangle.h
new file mode 100644
index 0000000000000000000000000000000000000000..2e989aa5723cf21dd042f2bffc021884cf8ffb51
--- /dev/null
+++ b/dynare++/utils/cc/pascal_triangle.h
@@ -0,0 +1,51 @@
+// Copyright (C) 2005, Ondra Kamenik
+
+// $Id: pascal_triangle.h 762 2006-05-22 13:00:07Z kamenik $
+
+#ifndef PASCAL_TRIANGLE_H
+#define PASCAL_TRIANGLE_H
+
+#include <vector>
+
+namespace ogu {
+
+	using std::vector;
+
+	class PascalRow : public vector<int> {
+		int k;
+	public:
+		PascalRow()
+			: vector<int>(), k(1)
+			{ push_back(2); }
+		void setFromPrevious(const PascalRow& prev);
+		void prolong(const PascalRow& prev);
+		void prolongFirst(int n);
+		void print() const;
+	};
+
+	class PascalTriangle {
+		vector<PascalRow> tr;
+	public:
+		PascalTriangle()
+			{tr.push_back(PascalRow());}
+		PascalTriangle(const PascalTriangle& triang)
+			: tr(triang.tr) {}
+		const PascalTriangle& operator=(const PascalTriangle& triang)
+			{ tr = triang.tr; return *this;}
+		int noverk(int n, int k);
+		void print() const;
+	protected:
+		void ensure(int n, int k);
+		int max_n() const;
+		int max_k() const;
+	};
+};
+
+extern ogu::PascalTriangle ptriang;
+
+
+#endif
+
+// Local Variables:
+// mode:C++
+// End: