\documentclass{beamer} \usepackage[utf8]{inputenc} \usepackage[T1]{fontenc} \usepackage{lmodern} \usepackage{amsmath} \usepackage[copyright]{ccicons} \usepackage{pstricks} \usetheme{Boadilla} \title{Deterministic Models} \subtitle{Perfect foresight, nonlinearities and occasionally binding constraints} \author{Sébastien Villemot} \pgfdeclareimage[height=0.6cm]{logo}{cepremap} \institute[CEPREMAP]{\pgfuseimage{logo}} \date{June 4, 2019} \AtBeginSection[] { \begin{frame} \frametitle{Outline} \tableofcontents[currentsection] \end{frame} } \AtBeginSubsection[] { \begin{frame} \frametitle{Outline} \tableofcontents[currentsection,currentsubsection] \end{frame} } \begin{document} \begin{frame} \titlepage \end{frame} \begin{frame} \frametitle{Introduction} \begin{itemize} \item Perfect foresight = agents perfectly anticipate all future shocks \item Concretely, at period 1: \begin{itemize} \item agents learn the value of all future shocks; \item since there is shared knowledge of the model and of future shocks, agents can compute their optimal plans for all future periods; \item optimal plans are not adjusted in periods 2 and later \\ $\Rightarrow$ the model behaves as if it were deterministic. \end{itemize} \item Cost of this approach: the effect of future uncertainty is not taken into account (\textit{e.g.} no precautionary motive) \item Advantage: numerical solution can be computed exactly (up to rounding errors), contrarily to perturbation or global solution methods for rational expectations models \item In particular, nonlinearities fully taken into account (\textit{e.g.} occasionally binding constraints) \end{itemize} \end{frame} \begin{frame} \frametitle{Outline} \tableofcontents \end{frame} \section{Presentation of the problem} \begin{frame} \frametitle{The (deterministic) neoclassical growth model} \begin{equation*} \max_{\{c_t\}_{t=1}^\infty}\sum_{t=1}^\infty \beta^{t-1}\frac{c_t^{1-\sigma}}{1-\sigma} \end{equation*} s.t. 
\begin{equation*} c_t+k_t = A_t k_{t-1}^\alpha + (1-\delta)k_{t-1} \end{equation*} First order conditions: \begin{align*} c_t^{-\sigma} &= \beta c_{t+1}^{-\sigma}\left(\alpha A_{t+1}k_t^{\alpha-1}+1-\delta\right)\\ c_t+k_t &= A_t k_{t-1}^\alpha + (1-\delta)k_{t-1} \end{align*} Steady state: \begin{align*} \bar k &= \left(\frac{1-\beta(1-\delta)}{\beta\alpha\bar A}\right)^{\frac{1}{\alpha-1}}\\ \bar c &= \bar A \bar k^\alpha -\delta\bar k \end{align*} Note the absence of stochastic elements! \\ No expectancy term, no probability distribution \end{frame} \begin{frame}[fragile] \frametitle{Dynare code (1/3)} \framesubtitle{\texttt{rcb\_basic.mod}} \begin{verbatim} var c k; varexo A; parameters alpha beta gamma delta; alpha=0.5; beta=0.95; gamma=0.5; delta=0.02; model; c + k = A*k(-1)^alpha + (1-delta)*k(-1); c^(-gamma) = beta*c(+1)^(-gamma)*(alpha*A(+1)*k^(alpha-1) + 1 - delta); end; \end{verbatim} \end{frame} \begin{frame}[fragile] \frametitle{Dynare code (2/3)} \framesubtitle{\texttt{rcb\_basic.mod}} \begin{verbatim} // Steady state (analytically solved) initval; A = 1; k = ((1-beta*(1-delta))/(beta*alpha*A))^(1/(alpha-1)); c = A*k^alpha-delta*k; end; // Check that this is indeed the steady state steady; \end{verbatim} \end{frame} \begin{frame}[fragile] \frametitle{Dynare code (3/3)} \framesubtitle{\texttt{rcb\_basic.mod}} \begin{verbatim} // Declare a positive technological shock in period 1 shocks; var A; periods 1; values 1.2; end; // Prepare the deterministic simulation over 100 periods perfect_foresight_setup(periods=100); // Perform the simulation perfect_foresight_solver; // Display the path of consumption rplot c; \end{verbatim} \end{frame} \begin{frame} \frametitle{Simulated consumption path} \includegraphics[width=16cm]{rplot_c.pdf} \end{frame} \begin{frame} \frametitle{The general problem} Deterministic, perfect foresight, case: \begin{equation*} f(y_{t+1},y_t,y_{t-1},u_t)=0 \end{equation*} \begin{description} \item[$y$]: vector of endogenous variables 
\item[$u$]: vector of exogenous shocks \end{description} \bigskip Identification rule: as many endogenous ($y$) as equations ($f$) \end{frame} \begin{frame} \frametitle{Return to the neoclassical growth model} \begin{center} $$y_t = \left(\begin{array}{c} c_t \\ k_t \end{array} \right)$$ $$u_t = A_t$$ \bigskip $$f(y_{t+1}, y_t, y_{t-1}, u_t) = \left(\begin{array}{l} c_t^{-\sigma} - \beta c_{t+1}^{-\sigma}\left(\alpha A_{t+1}k_t^{\alpha-1}+1-\delta\right)\\ c_t+k_t - A_t k_{t-1}^\alpha - (1-\delta)k_{t-1} \end{array}\right)$$ \end{center} \end{frame} \begin{frame} \frametitle{What if more than one lead or one lag?} \begin{itemize} \item A model with more than one lead or lag can be transformed in the form with one lead and one lag using \textit{auxiliary} variables \item Transformation done automatically by Dynare \item For example, if there is a variable with two leads $x_{t+2}$: \begin{itemize} \item create a new auxiliary variable $a$ \item replace all occurrences of $x_{t+2}$ by $a_{t+1}$ \item add a new equation: $a_t = x_{t+1}$ \end{itemize} \item Symmetric process for variables with more than one lag \item With future uncertainty, the transformation is more elaborate (but still possible) on variables with leads \end{itemize} \end{frame} \begin{frame} \frametitle{Steady state} \begin{itemize} \item A steady state, $\bar y$, for the model satisfies $f(\bar y, \bar y, \bar y, \bar u) = 0$ \item Note that a steady state is conditional to: \begin{itemize} \item The steady state values of exogenous variables $\bar{u}$ \item The value of parameters (implicit in the above definition) \end{itemize} \item Even for a given set of exogenous and parameter values, some (nonlinear) models have several steady states \item The steady state is computed by Dynare with the \texttt{steady} command \item That command internally uses a nonlinear solver \end{itemize} \end{frame} \begin{frame} \frametitle{A two-boundary value problem} \begin{itemize} \item Stacked system for a 
perfect foresight simulation over $T$ periods: \begin{equation*} \left\{\begin{array}{c} f(y_2,y_1,{\red y_0},u_1)=0\\ f(y_3,y_2,y_1,u_2)=0\\ \vdots\\ f({\red y_{T+1}},y_T,y_{T-1},u_T)=0 \end{array}\right. \end{equation*} for ${\red y_0}$ and ${\red y_{T+1}}$ given. \bigskip \item Compact representation: \begin{equation*} F(Y) = 0 \end{equation*} where $Y = \left[\begin{array}{llll}y_1'&y_2'&\ldots&y_T'\end{array}\right]'$ \\ and $y_0$, $y_{T+1}$, $u_1\ldots u_T$ are implicit \item Resolution uses a Newton-type method on the stacked system \end{itemize} \end{frame} \begin{frame}{Approximating infinite horizon problems} \begin{itemize} \item The above technique numerically computes trajectories for given shocks over a \emph{finite} number of periods \item Suppose you are rather interested in solving an \emph{infinite} horizon problem \item One option consists in computing the recursive policy function (as with perturbation methods), but this is challenging \begin{itemize} \item in the general case, this function is defined over an infinite-dimensional space (because all future shocks are state variables) \item in the particular case of a return to equilibrium, the state-space is finite (starting from the date where all shocks are zero), but a projection method would still be needed \item in any case, Dynare does not do that \end{itemize} \item An easier solution, in the case of a return to equilibrium, is to approximate it by a finite horizon problem \begin{itemize} \item consists in computing the trajectory with $y_{T+1}=\bar y$ and $T$ large enough \item drawback compared to the policy function approach: the solution is specific to a given sequence of shock, and not generic \end{itemize} \end{itemize} \end{frame} \section{Solution techniques} \begin{frame} \frametitle{The Newton method (unidimensional)} \medskip 
\only<1>{\includegraphics[width=11cm]{NewtonIteration_Ani-0.png}}\only<2>{\includegraphics[width=11cm]{NewtonIteration_Ani-1.png}}\only<3>{\includegraphics[width=11cm]{NewtonIteration_Ani-2.png}}\only<4>{\includegraphics[width=11cm]{NewtonIteration_Ani-3.png}}\only<5>{\includegraphics[width=11cm]{NewtonIteration_Ani-4.png}}\only<6>{\includegraphics[width=11cm]{NewtonIteration_Ani-5.png}}\only<7>{\includegraphics[width=11cm]{NewtonIteration_Ani-6.png}}\only<8>{\includegraphics[width=11cm]{NewtonIteration_Ani-7.png}}\only<9>{\includegraphics[width=11cm]{NewtonIteration_Ani-8.png}}\only<10>{\includegraphics[width=11cm]{NewtonIteration_Ani-9.png}}\only<11>{\includegraphics[width=11cm]{NewtonIteration_Ani-10.png}}\only<12>{\includegraphics[width=11cm]{NewtonIteration_Ani-11.png}}\only<13>{\includegraphics[width=11cm]{NewtonIteration_Ani-12.png}}\only<14>{\includegraphics[width=11cm]{NewtonIteration_Ani-13.png}}\only<15>{\includegraphics[width=11cm]{NewtonIteration_Ani-14.png}}\only<16>{\includegraphics[width=11cm]{NewtonIteration_Ani-15.png}}\only<17>{\includegraphics[width=11cm]{NewtonIteration_Ani-16.png}}\only<18>{\includegraphics[width=11cm]{NewtonIteration_Ani-17.png}} {\vspace*{-2mm} \tiny Copyright © 2005 Ralf Pfeifer / \href{http://creativecommons.org/licenses/by-sa/3.0/}{Creative Commons Attribution-ShareAlike 3.0}} \end{frame} \begin{frame}\frametitle{The Newton method (multidimensional)} \begin{itemize} \item Start from an initial guess $Y^{(0)}$ \item Iterate. 
Updated solutions $Y^{(k+1)}$ are obtained by solving a linear system: \begin{equation*} F(Y^{(k)}) + \left[\frac{\partial F}{\partial Y}\right]\left(Y^{(k+1)} -Y^{(k)}\right) = 0 \end{equation*} \item Terminal condition: \begin{equation*} ||Y^{(k+1)}-Y^{(k)}|| < \varepsilon_Y \end{equation*} or \begin{equation*} ||F(Y^{(k)})|| < \varepsilon_F \end{equation*} \item Convergence may never happen if function is ill-behaved or initial guess $Y^{(0)}$ too far from a solution \\ $\Rightarrow$ to avoid an infinite loop, abort after a given number of iterations \end{itemize} \end{frame} \begin{frame} \frametitle{Controlling the Newton algorithm from Dynare} The following options to the \texttt{perfect\_foresight\_solver} can be used to control the Newton algorithm: \begin{description} \item[\texttt{maxit}] Maximum number of iterations before aborting (default: 50) \item[\texttt{tolf}] Convergence criterion based on function value ($\varepsilon_F$) (default: $10^{-5}$) \item[\texttt{tolx}] Convergence criterion based on change in the function argument ($\varepsilon_Y$) (default: $10^{-5}$) \item[\texttt{stack\_solve\_algo}] select between the different flavors of Newton algorithms (see thereafter) \end{description} \end{frame} \begin{frame} \frametitle{A practical difficulty} The Jacobian can be very large: for a simulation over $T$ periods of a model with $n$ endogenous variables, it is a matrix of dimension $nT\times nT$. 
\bigskip Three alternative ways of dealing with the large problem size: \begin{itemize} \item Exploit the particular structure of the Jacobian using a relaxation technique developed by Laffargue, Boucekkine and Juillard (was the default method in Dynare $\leq$ 4.2) \item Handle the Jacobian as one large, sparse, matrix (now the default method) \item Block decomposition, which is a divide-and-conquer method, implemented by Mihoubi \end{itemize} \end{frame} \begin{frame} \frametitle{Shape of the Jacobian} $$\frac{\partial F}{\partial Y} = \begin{pmatrix} B_1 & C_1 & & & & & \\ A_2 & B_2 & C_2 & & & & \\ & \ddots & \ddots & \ddots & & & \\ & & A_t & B_t & C_t & & \\ & & & \ddots & \ddots & \ddots & \\ & & & & A_{T-1} & B_{T-1} & C_{T-1} \\ & & & & & A_T & B_T \end{pmatrix}$$ where $$A_s = \frac{\partial f}{\partial y_{t-1}}(y_{s+1},y_s,y_{s-1})$$ $$B_s = \frac{\partial f}{\partial y_t}(y_{s+1},y_s,y_{s-1})$$ $$C_s = \frac{\partial f}{\partial y_{t+1}}(y_{s+1},y_s,y_{s-1})$$ \end{frame} \begin{frame} \frametitle{Relaxation (1/5)} The idea is to triangularize the stacked system: \begin{small} \begin{equation*} \begin{pmatrix} B_1 & C_1 & & & & \\ A_2 & B_2 & C_2 & & & \\ & \ddots & \ddots & \ddots & & \\ & & \ddots & \ddots & \ddots & \\ & & & A_{T-1} & B_{T-1} & C_{T-1} \\ & & & & A_T & B_T \end{pmatrix} \Delta Y = -\begin{pmatrix} f(y_2,y_1,y_0,u_1)\\ f(y_3,y_2,y_1,u_2)\\ \vdots\\ \vdots\\ f(y_{T},y_{T-1},y_{T-2},u_{T-1}) \\ f(y_{T+1},y_T,y_{T-1},u_T) \end{pmatrix} \end{equation*} \end{small} \end{frame} \begin{frame} \frametitle{Relaxation (2/5)} First period is special: \begin{small} \begin{equation*} \begin{pmatrix} I & D_1 & & & & \\ & B_2-A_2D_1 & C_2 & & & \\ & A_3 & B_3 & C_3 & & \\ & & \ddots & \ddots & \ddots & \\ & & & A_{T-1} & B_{T-1} & C_{T-1} \\ & & & & A_T & B_T \end{pmatrix} \Delta Y = -\begin{pmatrix} d_1 \\ f(y_3,y_2,y_1,u_2)+A_2 d_1\\ f(y_4,y_3,y_2,u_3)\\ \vdots\\ f(y_{T},y_{T-1},y_{T-2},u_{T-1}) \\ f(y_{T+1},y_T,y_{T-1},u_T) \end{pmatrix} 
\end{equation*} \end{small} where \begin{itemize} \item $D_1 = B_1^{-1}C_1$ \item $d_1 = B_1^{-1}f(y_2,y_1,y_0,u_1)$ \end{itemize} \end{frame} \begin{frame} \frametitle{Relaxation (3/5)} Normal iteration: \begin{small} \begin{equation*} \begin{pmatrix} I & D_1 & & & & \\ & I & D_2 & & & \\ & & B_3-A_3D_2 & C_3 & & \\ & & \ddots & \ddots & \ddots & \\ & & & A_{T-1} & B_{T-1} & C_{T-1} \\ & & & & A_T & B_T \end{pmatrix} \Delta Y = -\begin{pmatrix} d_1 \\ d_2\\ f(y_4,y_3,y_2,u_3) + A_3d_2\\ \vdots\\ f(y_{T},y_{T-1},y_{T-2},u_{T-1}) \\ f(y_{T+1},y_T,y_{T-1},u_T) \end{pmatrix} \end{equation*} \end{small} where \begin{itemize} \item $D_2 = (B_2-A_2D_1)^{-1}C_2$ \item $d_2 = (B_2-A_2D_1)^{-1}(f(y_3,y_2,y_1,u_2)+A_2d_1)$ \end{itemize} \end{frame} \begin{frame} \frametitle{Relaxation (4/5)} Final iteration: \begin{small} \begin{equation*} \begin{pmatrix} I & D_1 & & & & \\ & I & D_2 & & & \\ & & I & D_3 & & \\ & & & \ddots & \ddots & \\ & & & & I & D_{T-1} \\ & & & & & I \end{pmatrix} \Delta Y = -\begin{pmatrix} d_1 \\ d_2\\ d_3\\ \vdots\\ d_{T-1} \\ d_T \end{pmatrix} \end{equation*} \end{small} where $$d_T = (B_T-A_TD_{T-1})^{-1}(f(y_{T+1},y_T,y_{T-1},u_T)+A_Td_{T-1})$$ \end{frame} \begin{frame} \frametitle{Relaxation (5/5)} \begin{itemize} \item The system is then solved by backward iteration: \begin{gather*} y_T^{k+1} = y_T^k - d_T \\ y_{T-1}^{k+1} = y_{T-1}^k - d_{T-1} - D_{T-1}(y_T^{k+1} - y_T^k) \\ \vdots \\ y_1^{k+1} = y_1^k - d_1 - D_1(y_2^{k+1} - y_2^k) \end{gather*} \item No need to ever store the whole Jacobian: only the $D_s$ and $d_s$ have to be stored \item This technique is memory efficient (was the default method in Dynare $\leq$ 4.2 for this reason) \item Still available as option \texttt{stack\_solve\_algo=6} of \texttt{perfect\_foresight\_solver} command \end{itemize} \end{frame} \begin{frame} \frametitle{Sparse matrices (1/3)} \begin{itemize} \item Consider the following matrix with most elements equal to zero: \begin{equation*} A = \left(\begin{matrix} 0 
& 0 & 2.5 \\ -3 & 0 & 0 \\ 0 & 0 & 0 \end{matrix}\right) \end{equation*} \item Dense matrix storage (in column-major order) treats it as a one-dimensional array: $$[0, -3, 0, 0, 0, 0, 2.5, 0, 0]$$ \item Sparse matrix storage: \begin{itemize} \item views it as a list of triplets $(i,j,v)$ where $(i,j)$ is a matrix coordinate and $v$ a non-zero value \item $A$ would be stored as $$\{(2,1,-3), (1,3,2.5)\}$$ \end{itemize} \end{itemize} \end{frame} \begin{frame} \frametitle{Sparse matrices (2/3)} \begin{itemize} \item In the general case, given an $m\times n$ matrix with $k$ non-zero elements: \begin{itemize} \item dense matrix storage = $8mn$ bytes \item sparse matrix storage = $16k$ bytes \item sparse storage more memory-efficient as soon as $k < mn/2$ \end{itemize} (assuming 32-bit integers and 64-bit floating point numbers) \item In practice, sparse storage becomes interesting if $k \ll mn/2$, because linear algebra algorithms are vectorized \end{itemize} \end{frame} \begin{frame} \frametitle{Sparse matrices (3/3)} \begin{itemize} \item The Jacobian of the deterministic problem is a sparse matrix: \begin{itemize} \item Lots of zero blocks \item The $A_s$, $B_s$ and $C_s$ usually are themselves sparse \end{itemize} \item Family of optimized algorithms for sparse matrices (including matrix inversion for our Newton algorithm) \item Available as native objects in MATLAB/Octave (see the \texttt{sparse} command) \item Works well for medium size deterministic models \item Nowadays more efficient than relaxation, even though it does not exploit the particular structure of the Jacobian \\ $\Rightarrow$ now the default method in Dynare (\texttt{stack\_solve\_algo=0}) \end{itemize} \end{frame} \begin{frame} \frametitle{Block decomposition (1/3)} \begin{itemize} \item Idea: apply a divide-and-conquer technique to model simulation \item Principle: identify recursive and simultaneous blocks in the model structure \item First block (prologue): equations that only involve variables 
determined by previous equations; example: AR(1) processes \item Last block (epilogue): pure output/reporting equations \item In between: simultaneous blocks, that depend recursively on each other \item The identification of the blocks is performed through a matching between variables and equations (normalization), then a reordering of both \end{itemize} \end{frame} \begin{frame} \frametitle{Block decomposition (2/3)} \framesubtitle{Form of the reordered Jacobian (equations in lines, variables in columns)} \includegraphics[width=11cm]{block.png} \end{frame} \begin{frame} \frametitle{Block decomposition (3/3)} \begin{itemize} \item Can provide a significant speed-up on large models \item Implemented in Dynare by Ferhat Mihoubi \item Available as option \texttt{block} to the \texttt{model} command \item Bigger gains when used in conjunction with \texttt{bytecode} option \end{itemize} \end{frame} \begin{frame} \frametitle{Homotopy} \begin{itemize} \item Another divide-and-conquer method, but in the shocks dimension \item Useful if shocks so large that convergence does not occur \item Idea: achieve convergence on smaller shock size, then use the result as starting point for bigger shock size \item Algorithm: \begin{enumerate} \item Starting point for simulation path: steady state at all $t$ \item $\lambda\leftarrow 0$: scaling factor of shocks (simulation succeeds when $\lambda=1$) \item $s \leftarrow 1$: step size \item \label{loop} Try to compute simulation with shocks scaling factor equal to $\lambda + s$ (using last successful computation as starting point) \begin{itemize} \item If success: $\lambda \leftarrow \lambda+s$. Stop if $\lambda=1$. Otherwise possibly increase $s$. \item If failure: diminish $s$. 
\end{itemize} \item Go to \ref{loop} \end{enumerate} \item Can be combined with any deterministic solver \item Used by default by \texttt{perfect\_foresight\_solver} (can be disabled with option \texttt{no\_homotopy}) \end{itemize} \end{frame} \section{Shocks: temporary/permanent, unexpected/pre-announced} \begin{frame} \frametitle{Example: neoclassical growth model with investment} The social planner problem is as follows: \begin{equation*} \label{eq:social-planner-problem:1} \max_{\{c_{t+j},\ell_{t+j},k_{t+j}\}_{j=0}^{\infty}} \mathbb{E}_t \sum_{j=0}^{\infty}\beta^ju(c_{t+j},\ell_{t+j}) \end{equation*} s.t. \begin{gather*} y_t = c_t + i_t\\ y_t = A_tf(k_{t-1},\ell_t)\\ k_t = i_t + (1-\delta)k_{t-1}\\ A_{t} = {A^{\star}}e^{a_{t}}\\ a_{t} = \rho\, a_{t-1} + \varepsilon_t \end{gather*} where $\varepsilon_t$ is an exogenous shock. \end{frame} \begin{frame} \frametitle{Specifications} \begin{itemize} \item Utility function: \begin{equation*} u(c_t,\ell_t) = \frac{\left(c_t^{\theta}(1-\ell_t)^{1-\theta}\right)^{1-\tau}}{1-\tau} \end{equation*} \item Production function: \begin{equation*} f(k_{t-1},\ell_t) = \left(\alpha k_{t-1}^{\psi} + (1-\alpha)\ell_t^{\psi}\right)^{\frac{1}{\psi}} \end{equation*} \end{itemize} \end{frame} \begin{frame} \frametitle{First order conditions} \begin{itemize} \item Euler equation: \begin{equation*} u_c(c_t,\ell_t) = \beta\, \mathbb E_t\left[ u_c(c_{t+1},\ell_{t+1})\Bigl(A_{t+1}f_k(k_t,\ell_{t+1}) + 1 -\delta\Bigr) \right] \end{equation*} \item Arbitrage between consumption and leisure: \begin{equation*} \frac{u_{\ell}(c_t,\ell_t)}{u_c(c_t,\ell_t)} + A_t f_{\ell}(k_{t-1},\ell_t) = 0 \end{equation*} \item Resource constraint: \begin{equation*} c_t + k_t = A_tf(k_{t-1},\ell_t) + (1-\delta)k_{t-1} \end{equation*} \end{itemize} \end{frame} \begin{frame}[fragile,fragile] \frametitle{Dynare code (1/3)} \begin{verbatim} var k, y, L, c, A, a; varexo epsilon; parameters beta, theta, tau, alpha, psi, delta, rho, Astar; beta = 0.9900; theta = 0.3570; 
tau = 2.0000; alpha = 0.4500; psi = -0.1000; delta = 0.0200; rho = 0.8000; Astar = 1.0000; \end{verbatim} \end{frame} \begin{frame}[fragile,fragile] \frametitle{Dynare code (2/3)} \begin{verbatim} model; a = rho*a(-1) + epsilon; A = Astar*exp(a); y = A*(alpha*k(-1)^psi+(1-alpha)*L^psi)^(1/psi); k = y-c + (1-delta)*k(-1); (1-theta)/theta*c/(1-L) - (1-alpha)*(y/L)^(1-psi); (c^theta*(1-L)^(1-theta))^(1-tau)/c = beta*(c(+1)^theta*(1-L(+1))^(1-theta))^(1-tau)/c(+1) *(alpha*(y(+1)/k)^(1-psi)+1-delta); end; \end{verbatim} \end{frame} \begin{frame}[fragile,fragile] \frametitle{Dynare code (3/3)} \scriptsize \begin{verbatim} steady_state_model; a = epsilon/(1-rho); A = Astar*exp(a); Output_per_unit_of_Capital=((1/beta-1+delta)/alpha)^(1/(1-psi)); Consumption_per_unit_of_Capital=Output_per_unit_of_Capital-delta; Labour_per_unit_of_Capital=(((Output_per_unit_of_Capital/A)^psi-alpha) /(1-alpha))^(1/psi); Output_per_unit_of_Labour=Output_per_unit_of_Capital/Labour_per_unit_of_Capital; Consumption_per_unit_of_Labour=Consumption_per_unit_of_Capital /Labour_per_unit_of_Capital; % Compute steady state of the endogenous variables. L=1/(1+Consumption_per_unit_of_Labour/((1-alpha)*theta/(1-theta) *Output_per_unit_of_Labour^(1-psi))); c=Consumption_per_unit_of_Labour*L; k=L/Labour_per_unit_of_Capital; y=Output_per_unit_of_Capital*k; end; \end{verbatim} \end{frame} \begin{frame}[fragile] \frametitle{Scenario 1: Return to equilibrium} Return to equilibrium starting from $k_0 = 0.5 \bar k$. \begin{block}{Fragment from \texttt{rbc\_det1.mod}} \begin{verbatim} ... 
steady; ik = varlist_indices('k',M_.endo_names); kstar = oo_.steady_state(ik); histval; k(0) = kstar/2; end; perfect_foresight_setup(periods=300); perfect_foresight_solver; \end{verbatim} \end{block} \end{frame} \begin{frame}[fragile] \frametitle{Scenario 2: A temporary shock to TFP} \begin{itemize} \item The economy starts from the steady state \item There is an unexpected negative shock at the beginning of period 1: $\varepsilon_1=-0.1$ \end{itemize} \begin{block}{Fragment from \texttt{rbc\_det2.mod}} \begin{verbatim} ... steady; shocks; var epsilon; periods 1; values -0.1; end; perfect_foresight_setup(periods=300); perfect_foresight_solver; \end{verbatim} \end{block} \end{frame} \begin{frame}[fragile] \frametitle{Scenario 3: Pre-announced favorable shocks in the future} \begin{itemize} \item The economy starts from the steady state \item There is a sequence of positive shocks to $A_t$: 4\% in period 5 and an additional 1\% during the 4 following periods \end{itemize} \begin{block}{Fragment from \texttt{rbc\_det3.mod}} \begin{verbatim} ... steady; shocks; var epsilon; periods 4, 5:8; values 0.04, 0.01; end; perfect_foresight_setup(periods=300); perfect_foresight_solver; \end{verbatim} \end{block} \end{frame} \begin{frame}[fragile] \frametitle{Scenario 4: A permanent shock} \begin{itemize} \item The economy starts from the initial steady state ($a_0=0$) \item In period 1, TFP increases by 5\% permanently (and this was unexpected) \end{itemize} \begin{block}{Fragment from \texttt{rbc\_det4.mod}} \begin{verbatim} ... 
initval; epsilon = 0; end; steady; endval; epsilon = (1-rho)*log(1.05); end; steady; \end{verbatim} \end{block} \end{frame} \begin{frame}[fragile] \frametitle{Scenario 5: A pre-announced permanent shock} \begin{itemize} \item The economy starts from the initial steady state ($a_0=0$) \item In period 6, TFP increases by 5\% permanently \item A \texttt{shocks} block is used to maintain TFP at its initial level during periods 1--5 \end{itemize} \begin{block}{Fragment from \texttt{rbc\_det5.mod}} \begin{verbatim} ... // Same initval and endval blocks as in Scenario 4 ... shocks; var epsilon; periods 1:5; values 0; end; \end{verbatim} \end{block} \end{frame} \begin{frame} \frametitle{Summary of commands} \begin{description} \item[\texttt{initval}] for the initial steady state (followed by \texttt{steady}) \item[\texttt{endval}] for the terminal steady state (followed by \texttt{steady}) \item[\texttt{histval}] for initial or terminal conditions out of steady state \item[\texttt{shocks}] for shocks along the simulation path \item[\texttt{perfect\_foresight\_setup}] prepare the simulation \item[\texttt{perfect\_foresight\_solver}] compute the simulation \item[\texttt{simul}] do both operations at the same time (deprecated syntax, alias for \texttt{perfect\_foresight\_setup} + \texttt{perfect\_foresight\_solver}) \end{description} \end{frame} \begin{frame} \frametitle{Under the hood} \begin{itemize} \item The paths for exogenous and endogenous variables are stored in two MATLAB/Octave matrices: \begin{equation*} \begin{matrix} \texttt{oo\_.endo\_simul} & = & ( & y_0 & y_1 & \ldots & y_T & y_{T+1} & ) \\ \texttt{oo\_.exo\_simul'} & = & ( & \boxtimes & u_1 & \ldots & u_T & \boxtimes & ) \end{matrix} \end{equation*} \item \texttt{perfect\_foresight\_setup} initializes those matrices, given the \texttt{shocks}, \texttt{initval}, \texttt{endval} and \texttt{histval} blocks \begin{itemize} \item $y_0$, $y_{T+1}$ and $u_1 \ldots u_T$ are the constraints of the problem \item 
$y_1\ldots y_T$ are the initial guess for the Newton algorithm \end{itemize} \item \texttt{perfect\_foresight\_solver} replaces $y_1\ldots y_T$ in \texttt{oo\_.endo\_simul} by the solution \item Notes: \begin{itemize} \scriptsize \item for historical reasons, dates are in columns in \texttt{oo\_.endo\_simul} and in lines in \texttt{oo\_.exo\_simul}, hence the transpose (\texttt{'}) above \item this is the setup for no lead and no lag on exogenous \item if one lead and/or one lag, $u_0$ and/or $u_{T+1}$ would become relevant \item if more than one lead and/or lag, matrices would be larger \end{itemize} \end{itemize} \end{frame} \begin{frame} \frametitle{Initial guess} The Newton algorithm needs an initial guess $Y^{(0)}= [ {y_1^{(0)}}'\;\ldots\; {y_T^{(0)}}' ]$. \bigskip What is Dynare using for this? \begin{itemize} \item By default, if there is no \texttt{endval} block, it is the steady state as specified by \texttt{initval} (repeated for all simulations periods) \item Or, if there is an \texttt{endval} block, then it is the final steady state declared within this block \item Possibility of customizing this default by manipulating \texttt{oo\_.endo\_simul} after \texttt{perfect\_foresight\_setup} (but of course before \texttt{perfect\_foresight\_solver}!) 
\item If homotopy is triggered, the initial guess of subsequent iterations is the result of the previous iteration \end{itemize} \end{frame} \begin{frame} \frametitle{Alternative way of specifying terminal conditions} \begin{itemize} \item With the \texttt{differentiate\_forward\_vars} option of the \texttt{model} block, Dynare will substitute forward variables using new auxiliary variables: \begin{itemize} \item Substitution: $x_{t+1} \rightarrow x_t + a_{t+1}$ \item New equation: $a_t = x_{t+1} - x_t$ \end{itemize} \item If the terminal condition is a steady state, the new auxiliary variables have obvious \emph{zero} terminal condition \item Useful when: \begin{itemize} \item the final steady state is hard to compute (this transformation actually provides a way to find it) \item the model is very persistent and takes time to go back to steady state (this transformation avoids a kink at the end of the simulation if $T$ is not large enough) \end{itemize} \end{itemize} \end{frame} \section{Occasionally binding constraints} \begin{frame} \frametitle{Zero nominal interest rate lower bound} \begin{itemize} \item Implemented by writing the law of motion under the following form in Dynare: \begin{equation*} i_t = \max\left\{0, (1-\rho_i)i^* + \rho_i i_{t-1}+ \rho_\pi(\pi_t-\pi^*) + \varepsilon^i_t\right\} \end{equation*} \item \textit{Warning:} this form will be accepted in a stochastic model, but the constraint will not be enforced in that case! \end{itemize} \end{frame} \begin{frame} \frametitle{Irreversible investment} Same model as above, but the social planner is constrained to positive investment paths: \begin{equation*} \max_{\{c_{t+j},\ell_{t+j},k_{t+j}\}_{j=0}^{\infty}} \sum_{j=0}^{\infty}\beta^ju(c_{t+j},\ell_{t+j}) \end{equation*} s.t. 
\begin{gather*} y_t = c_t + i_t\\ y_t = A_tf(k_{t-1},\ell_t)\\ k_t = i_t + (1-\delta)k_{t-1}\\ {\red i_t \geq 0}\\ A_{t} = {A^{\star}}e^{a_{t}}\\ a_{t} = \rho\, a_{t-1} + \varepsilon_t \end{gather*} where the technology ($f$) and the preferences ($u$) are as above. \end{frame} \begin{frame}[fragile] \frametitle{First order conditions} \begin{multline*} u_c(c_t,\ell_t) - \mu_t = \beta\, \mathbb E_t\big[ u_c(c_{t+1},\ell_{t+1})\left(A_{t+1}f_k(k_t,\ell_{t+1}) + 1 -\delta\right) \\ - \mu_{t+1}(1-\delta)\big] \end{multline*} \begin{gather*} \frac{u_{\ell}(c_t,\ell_t)}{u_c(c_t,\ell_t)} + A_tf_{\ell}(k_{t-1},\ell_t) = 0 \\ c_t + k_t = A_tf(k_{t-1},\ell_t) + (1-\delta)k_{t-1} \end{gather*} Slackness condition: \begin{equation*} \mu_t = 0 \text{ and } i_t \geq 0 \end{equation*} \begin{center} or \end{center} \begin{equation*} \mu_t > 0 \text{ and } i_t = 0 \end{equation*} where $\mu_t\geq 0$ is the Lagrange multiplier associated to the non-negativity constraint for investment. \end{frame} \begin{frame} \frametitle{Mixed complementarity problems} \begin{itemize} \item A mixed complementarity problem (MCP) is given by: \begin{itemize} \item function $F(x) : \mathbb{R}^n \rightarrow \mathbb{R}^n$ \item lower bounds $\ell_i \in \mathbb{R} \cup \{-\infty\}$ \item upper bounds $u_i \in \mathbb{R} \cup \{+\infty\}$ \end{itemize} \item A solution of the MCP is a vector $x\in\mathbb{R}^n$ such that for each $i\in\{1\ldots n\}$, one of the following alternatives holds: \begin{itemize} \item $\ell_i < x_i < u_i$ and $F_i(x) = 0$ \item $x_i = \ell_i$ and $F_i(x) \geq 0$ \item $x_i = u_i$ and $F_i(x) \leq 0$ \end{itemize} \item Notation: $$\ell \leq x \leq u\;\bot\;F(x)$$ \item Solving a square system of nonlinear equations is a particular case (with $\ell_i = -\infty$ and $u_i = +\infty$ for all $i$) \item Optimality problems with inequality constraints are naturally expressed as MCPs (finite bounds are imposed on Lagrange multipliers) \end{itemize} \end{frame} \begin{frame}[fragile] 
\frametitle{The irreversible investment model in Dynare} \begin{itemize} \item MCP solver triggered with option \texttt{lmmcp} of \texttt{perfect\_foresight\_solver} \item Slackness condition described by equation tag \texttt{mcp} \end{itemize} \begin{block}{Fragment from \texttt{rbcii.mod}} \begin{verbatim} (c^theta*(1-L)^(1-theta))^(1-tau)/c - mu = beta*((c(+1)^theta*(1-L(+1))^(1-theta))^(1-tau)/c(+1) *(alpha*(y(+1)/k)^(1-psi)+1-delta)-mu(+1)*(1-delta)); ... [ mcp = 'i > 0' ] mu = 0; ... perfect_foresight_setup(periods=400); perfect_foresight_solver(lmmcp, maxit=200); \end{verbatim} \end{block} \end{frame} \section{More unexpected shocks, extended path} \begin{frame} \frametitle{Simulating unexpected shocks} With a perfect foresight solver: \begin{itemize} \item shocks are unexpected in period 1 \item but in subsequent periods they are anticipated \end{itemize} \bigskip How to simulate an unexpected shock at a period $t > 1$? \begin{itemize} \item Do a perfect foresight simulation from periods $0$ to $T$ \emph{without the shock} \item Do another perfect foresight simulation from periods $t$ to $T$ \begin{itemize} \item applying the shock in $t$, \item and using the results of the first simulation as initial condition \end{itemize} \item Combine the two simulations: \begin{itemize} \item use the first one for periods $1$ to $t-1$, \item and the second one for $t$ to $T$ \end{itemize} \end{itemize} \end{frame} \begin{frame}[fragile] \frametitle{A Dynare example (1/2)} Simulation of a scenario with: \begin{itemize} \item Pre-announced (negative) shocks in periods 5 and 15 \item Unexpected (positive) shock in period 10 \end{itemize} \bigskip From \texttt{rbc\_unexpected.mod}: \begin{verbatim} ... 
// Declare pre-announced shocks shocks; var epsilon; periods 5, 15; values -0.1, -0.1; end; perfect_foresight_setup(periods=300); perfect_foresight_solver; \end{verbatim} \end{frame} \begin{frame}[fragile] \frametitle{A Dynare example (2/2)} \begin{verbatim} // Declare unexpected shock (after first simulation!) oo_.exo_simul(11, 1) = 0.1; // Period 10 has index 11! // Strip first 9 periods and save them saved_endo = oo_.endo_simul(:, 1:9); // Save periods 0 to 8 saved_exo = oo_.exo_simul(1:9, :); oo_.endo_simul = oo_.endo_simul(:, 10:end); // Keep periods 9 to 301 oo_.exo_simul = oo_.exo_simul(10:end, :); periods 291; perfect_foresight_solver; // Combine the two simulations oo_.endo_simul = [ saved_endo oo_.endo_simul ]; oo_.exo_simul = [ saved_exo; oo_.exo_simul ]; \end{verbatim} \end{frame} \begin{frame} \frametitle{Consumption path} \includegraphics[width=11cm]{consumption_unexpected.png} \end{frame} \begin{frame} \frametitle{Extended path (EP) algorithm} \begin{itemize} \item Generalization of the previous method, where unexpected shocks may happen in all periods \item At every period, compute endogenous variables by running a deterministic simulation with: \begin{itemize} \item the previous period as initial condition \item the steady state as terminal condition \item a random shock drawn for the current period \item but no shock in the future \end{itemize} \item Advantages: \begin{itemize} \item shocks are unexpected \emph{at every period} \item nonlinearities fully taken into account \end{itemize} \item Drawback: solution under certainty equivalence (Jensen's inequality is violated) \item Method introduced by Fair and Taylor (1983) \item Implemented under the command \texttt{extended\_path} (with option \texttt{order = 0}, which is the default) \end{itemize} \end{frame} \begin{frame}[fragile] \frametitle{Extended path in Dynare} From \texttt{rbc\_ep.mod}: \begin{verbatim} ...
// Declare shocks as in a stochastic setup shocks; var epsilon; stderr 0.02; end; extended_path(periods=300); // Plot 20 first periods of consumption ic = varlist_indices('c',M_.endo_names); plot(oo_.endo_simul(ic, 1:21)); \end{verbatim} \end{frame} \begin{frame} \frametitle{$k$-step ahead extended path} \begin{itemize} \item Accuracy can be improved by computing the conditional expectation by quadrature, using the previous algorithm to compute next-period endogenous variables \item Approximation: at date $t$, agents assume that there will be no more shocks after period $t+k$ (hence $k$ measures the degree of future uncertainty taken into account) \item If $k=1$: one-step ahead EP; no more certainty equivalence \item By induction, one can compute a $k$-step ahead EP: even more uncertainty taken into account \item Difficulty: computational complexity grows exponentially with $k$ \item $k$-step ahead EP triggered with option \texttt{order = k} of \texttt{extended\_path} command \end{itemize} \end{frame} \section{Dealing with nonlinearities using higher order approximation of stochastic models} \begin{frame} \frametitle{Local approximation of stochastic models} The general problem: \begin{equation*} \mathbb{E}_t f(y_{t+1},y_t,y_{t-1},u_t)=0 \end{equation*} \begin{description} \item[$y$]: vector of endogenous variables \item[$u$]: vector of exogenous shocks \end{description} with: \begin{align*} \mathbb{E}(u_t) &= 0\\ \mathbb{E}(u_tu_t') &= \Sigma_u\\ \mathbb{E}(u_tu_s') &= 0\;\text{ for } t\neq s \end{align*} \end{frame} \begin{frame} \frametitle{What is a solution to this problem?} \begin{itemize} \item A solution is a policy function of the form: \begin{equation*} y_t = g\left(y_{t-1},u_t,\sigma\right) \end{equation*} where $\sigma$ is the \emph{stochastic scale} of the problem and: \begin{equation*} u_{t+1} = \sigma\,\varepsilon_{t+1} \end{equation*} \item The policy function must satisfy: \begin{equation*} \mathbb{E}_t f\left(g\left(
g\left(y_{t-1},u_t,\sigma\right),u_{t+1},\sigma\right), g\left(y_{t-1},u_t,\sigma\right),y_{t-1},u_t\right)=0 \end{equation*} \end{itemize} \end{frame} \begin{frame} \frametitle{Local approximations} \begin{align*} \hat{g}^{(1)}\left(y_{t-1},u_t,\sigma\right) &= \bar y + g_y\hat y_{t-1} + g_u u_t\\ \hat{g}^{(2)}\left(y_{t-1},u_t,\sigma\right) &= \bar y + {\red \frac{1}{2} g_{\sigma\sigma}}+g_y\hat y_{t-1} + g_u u_t\\ &\quad + \frac{1}{2}\left(g_{yy}\left(\hat y_{t-1}\otimes\hat y_{t-1}\right)+g_{uu}\left(u_t\otimes u_t\right)\right)\\ &\quad +g_{yu}\left(\hat y_{t-1}\otimes u_t\right)\\ \hat{g}^{(3)}\left(y_{t-1},u_t,\sigma\right) &= \bar y + {\red \frac{1}{2} g_{\sigma\sigma}+ \frac{1}{6} g_{\sigma\sigma\sigma}+\frac{1}{2}g_{\sigma\sigma y}\hat y_{t-1}+\frac{1}{2}g_{\sigma\sigma u} u_t}\\ & \quad +g_y\hat y_{t-1} + g_u u_t + \ldots \end{align*} \end{frame} \begin{frame}[fragile] \frametitle{Breaking certainty equivalence (1/2)} The combination of future uncertainty (future shocks) and nonlinear relationships makes for precautionary motives or risk premia.
\begin{itemize} \item 1\textsuperscript{st} order: certainty equivalence; today's decisions don't depend on future uncertainty \item 2\textsuperscript{nd} order: \begin{align*} \hat{g}^{(2)}\left(y_{t-1},u_t,\sigma\right) &= \bar y + {\red \frac{1}{2} g_{\sigma\sigma}}+g_y\hat y_{t-1} + g_u u_t\\ &\quad + \frac{1}{2}\left(g_{yy}\left(\hat y_{t-1}\otimes\hat y_{t-1}\right)+g_{uu}\left(u_t\otimes u_t\right)\right)\\ &\quad +g_{yu}\left(\hat y_{t-1}\otimes u_t\right) \end{align*} Risk premium is a constant: ${\red \frac{1}{2} g_{\sigma\sigma}}$ \end{itemize} \end{frame} \begin{frame} \frametitle{Breaking certainty equivalence (2/2)} \begin{itemize} \item 3\textsuperscript{rd} order: \begin{multline*} \hat{g}^{(3)}\left(y_{t-1},u_t,\sigma\right) = \bar y + {\red \frac{1}{2} g_{\sigma\sigma}+ \frac{1}{6} g_{\sigma\sigma\sigma}+\frac{1}{2}g_{\sigma\sigma y}\hat y_{t-1}+\frac{1}{2}g_{\sigma\sigma u} u_t}\\ +g_y\hat y_{t-1} + g_u u_t + \ldots \end{multline*} Risk premium is linear in the state variables: \begin{equation*} {\red \frac{1}{2} g_{\sigma\sigma}+ \frac{1}{6} g_{\sigma\sigma\sigma}+\frac{1}{2}g_{\sigma\sigma y}\hat y_{t-1}+\frac{1}{2}g_{\sigma\sigma u} u_t} \end{equation*} \end{itemize} \end{frame} \begin{frame}[fragile] \frametitle{The cost of local approximations} \begin{enumerate} \item High order approximations are accurate around the steady state, and more so than lower order approximations \item But can be totally wrong far from the steady state (and may be more so than lower order approximations) \item Error of approximation of a solution $\hat{g}$, at a given point of the state space $(y_{t-1}, u_t)$: \begin{multline*} \mathcal{E}\left(y_{t-1},u_t\right) =\\ \mathbb{E}_t f\left(\hat g\left(\hat g\left(y_{t-1},u_t,\sigma\right),u_{t+1},\sigma\right),\hat g\left(y_{t-1},u_t,\sigma\right),y_{t-1},u_t\right) \end{multline*} \item Necessity for pruning \end{enumerate} \end{frame} \begin{frame} \frametitle{Approximation of occasionally binding constraints with
penalty functions} The investment positivity constraint is translated into a penalty on the welfare: \begin{equation*} \max_{\{c_{t+j},\ell_{t+j},k_{t+j}\}_{j=0}^{\infty}} \sum_{j=0}^{\infty}\beta^j \left[u(c_{t+j},\ell_{t+j})+h\cdot\log(i_{t+j})\right] \end{equation*} s.t. \begin{gather*} y_t = c_t + i_t\\ y_t = A_tf(k_{t-1},\ell_t)\\ k_t = i_t + (1-\delta)k_{t-1}\\ A_{t} = {A^{\star}}e^{a_{t}}\\ a_{t} = \rho\, a_{t-1} + \varepsilon_t \end{gather*} where the technology ($f$) and the preferences ($u$) are as before, and $h$ governs the strength of the penalty (\emph{barrier parameter}) \end{frame} \begin{frame} \begin{center} \vfill {\LARGE Thanks for your attention!} \vfill {\LARGE Questions?} \vfill \end{center} \vfill \begin{columns}[T] \column{0.2\textwidth} \column{0.09\textwidth} \ccbysa \column{0.71\textwidth} \tiny Copyright © 2015-2019 Dynare Team \\ License: \href{http://creativecommons.org/licenses/by-sa/4.0/}{Creative Commons Attribution-ShareAlike 4.0} \end{columns} \end{frame} \end{document}