# -*- coding: utf-8 -*-
# Copyright © 2018-2024 Dynare Team
#
# This file is part of Dynare.
#
# Dynare is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dynare is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Dynare. If not, see <https://www.gnu.org/licenses/>.
"""
sphinx.domains.dynare
~~~~~~~~~~~~~~~~~~~~~
The Dynare domain.
Loosely based on the JavaScript domain from the Sphinx team and the CoffeeScript domain
by Stephen Sugden (available at sphinx-contrib)
"""
import re
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from sphinx import addnodes
from sphinx.domains import Domain, ObjType
from sphinx.locale import _
from sphinx.directives import ObjectDescription
from sphinx.roles import XRefRole
from sphinx.util.nodes import make_refnode
from sphinx.util.docfields import Field, GroupedField, TypedField
############### Dynare Object types #######################
class DynObject(ObjectDescription):
    has_arguments = True
    display_prefix = None
    allow_nesting = False

    def handle_signature(self, sig, signode):
        sig = sig.strip()
        # Some variable/parameter declarations combine spaces and parentheses
        # in a strange way, so they are treated separately
        if sig.startswith(('var V','varexo V','varexo_det V','parameters P','model_comparison F')):
            member = sig[:sig.index(' ')]
            arglist = sig[sig.index(' '):]
        # General cases
        elif '(' in sig:
            member = sig[:sig.index('(')]
            arglist = sig[sig.index('('):]
            arglist = arglist.strip()
        elif ' ' in sig:
            member = sig[:sig.index(' ')]
            arglist = sig[sig.index(' '):]
        else:
            member = sig
            arglist = None
        prefix = self.env.ref_context.get('dynare:object', None)
        name = member.strip()
        fullname = name
        signode['object'] = prefix
        signode['fullname'] = fullname
        if self.display_prefix:
            signode += addnodes.desc_annotation(self.display_prefix, self.display_prefix)
        signode += addnodes.desc_name(name, name)
        if self.has_arguments:
            if arglist:
                signode += addnodes.desc_addname(arglist, arglist)
        return fullname, prefix

    def add_target_and_index(self, name_obj, sig, signode):
        fullname = name_obj[0]
        if fullname not in self.state.document.ids:
            signode['names'].append(fullname)
            signode['ids'].append(fullname)
            signode['first'] = not self.names
            self.state.document.note_explicit_target(signode)
            objects = self.env.domaindata['dynare']['objects']
            if fullname in objects:
                self.state_machine.reporter.warning(
                    'duplicate object description of %s, ' % fullname +
                    'other instance in ' +
                    str(self.env.doc2path(objects[fullname][0])), line=self.lineno)
            objects[fullname] = (self.env.docname, self.objtype)
        indextext = self.get_index_text(fullname, name_obj)
        if indextext:
            self.indexnode['entries'].append(('single', indextext, fullname, '', None))

    def get_index_text(self, objectname, name_obj):
        name, obj = name_obj
        regexp = re.compile(r'\s*=\s*')
        if regexp.search(name):
            aux, name = regexp.split(name)
        if self.objtype == 'function':
            return _('%s (function)') % name
        elif self.objtype == 'class':
            return _('%s (class)') % name
        elif self.objtype == 'datesmethod':
            return _('%s (dates method)') % name
        elif self.objtype == 'dseriesmethod':
            return _('%s (dseries method)') % name
        elif self.objtype == 'x13method':
            return _('%s (x13 method)') % name
        elif self.objtype == 'reportingmethod':
            return _('%s (reporting method)') % name
        elif self.objtype == 'matcomm':
            return _('%s (MATLAB command)') % name
        elif self.objtype == 'command':
            return _('%s (command)') % name
        elif self.objtype == 'block':
            return _('%s (block)') % name
        elif self.objtype == 'confblock':
            name = name[1:-1]
            return _('%s (config block)') % name
        elif self.objtype == 'macrodir':
            name = name[2:]
            return _('%s (macro directive)') % name
class DynCallable(DynObject):
    has_arguments = True

    doc_field_types = [
        TypedField('arguments', label=_('Arguments'),
                   names=('argument', 'arg', 'parameter', 'param'),
                   typerolename='func', typenames=('paramtype', 'type')),
        Field('returnvalue', label=_('Returns'), has_arg=False,
              names=('returns', 'return')),
        Field('returntype', label=_('Return type'), has_arg=False,
              names=('rtype',)),
        Field('example', label=_('Example'), has_arg=False,
              names=('ex',)),
    ]

class DynClass(DynObject):
    has_arguments = False
    display_prefix = 'Dynare class: '
    allow_nesting = True

    doc_field_types = [
        TypedField('members', label=_('Members'),
                   names=('argument', 'arg', ),
                   typerolename='func', typenames=('type', )),
        Field('example', label=_('Example'), has_arg=False,
              names=('ex',)),
    ]

class DynFunction(DynCallable):
    display_prefix = 'Function: '
    allow_nesting = True

class DatesMethod(DynCallable):
    display_prefix = 'Method: '
    allow_nesting = True

class DseriesMethod(DynCallable):
    display_prefix = 'Method: '
    allow_nesting = True

class X13Method(DynCallable):
    display_prefix = 'Method: '
    allow_nesting = True

class ReportingMethod(DynCallable):
    display_prefix = 'Method: '
    allow_nesting = True

class MatComm(DynCallable):
    display_prefix = 'MATLAB/Octave command: '
    allow_nesting = False

class DynComm(DynCallable):
    display_prefix = 'Command: '
    allow_nesting = False

class DynBlock(DynCallable):
    display_prefix = 'Block: '
    allow_nesting = False

class DynConfBlock(DynCallable):
    display_prefix = 'Configuration block: '
    has_arguments = False
    allow_nesting = False

class DynMacroDir(DynCallable):
    display_prefix = 'Macro directive: '
    allow_nesting = False

class Constructor(DynCallable):
    display_prefix = 'Constructor: '
    allow_nesting = False
class DynSimpleObject(ObjectDescription):
    has_arguments = False
    allow_nesting = False

    def handle_signature(self, sig, signode):
        sig = sig.strip()
        member = sig
        arglist = None
        prefix = self.env.ref_context.get('dynare:object', None)
        name = member
        fullname = name
        signode['object'] = prefix
        signode['fullname'] = fullname
        if self.display_prefix:
            signode += addnodes.desc_annotation(self.display_prefix, self.display_prefix)
        signode += addnodes.desc_name(name, name)
        return fullname, prefix

    def add_target_and_index(self, name_obj, sig, signode):
        fullname = name_obj[0]
        if fullname not in self.state.document.ids:
            signode['names'].append(fullname)
            signode['ids'].append(fullname)
            signode['first'] = not self.names
            self.state.document.note_explicit_target(signode)
            objects = self.env.domaindata['dynare']['objects']
            if fullname in objects:
                self.state_machine.reporter.warning(
                    'duplicate object description of %s, ' % fullname +
                    'other instance in ' +
                    str(self.env.doc2path(objects[fullname][0])), line=self.lineno)
            objects[fullname] = self.env.docname, self.objtype
        indextext = self.get_index_text(fullname, name_obj)
        if indextext:
            self.indexnode['entries'].append(('single', indextext, fullname, '', None))

    def get_index_text(self, objectname, name_obj):
        name, obj = name_obj
        if self.objtype == 'construct':
            name, rest = name.split(' ', 1)
            return _('%s (constructor)') % name
        elif self.objtype == 'matvar':
            return _('%s (MATLAB variable)') % name
        elif self.objtype == 'specvar':
            return _('%s (special variable)') % name
        elif self.objtype == 'operator':
            endsig = name.find(' ')
            name = name[0:endsig]
            return _('%s (operator)') % name
        elif self.objtype == 'constant':
            return _('%s (constant)') % name

class MatlabVar(DynSimpleObject):
    display_prefix = 'MATLAB/Octave variable: '
    allow_nesting = False

class SpecialVar(MatlabVar):
    display_prefix = 'Special variable: '

class Operator(MatlabVar):
    display_prefix = 'Operator: '

class Constant(MatlabVar):
    display_prefix = 'Constant: '

class Option(MatlabVar):
    display_prefix = None
############## Cross-referencing ####################
class DynareXRefRole(XRefRole):
    def process_link(self, env, refnode, has_explicit_title, title, target):
        refnode['dynare:object'] = env.ref_context.get('dynare:object')
        return title, target
############### Dynare domain #######################
class DynareDomain(Domain):
    name = 'dynare'
    label = 'Dynare'
    object_types = {
        'function': ObjType(_('function'), 'func'),
        'datesmethod': ObjType(_('method'), 'datmeth'),
        'dseriesmethod': ObjType(_('method'), 'dsermeth'),
        'x13method': ObjType(_('method'), 'x13meth'),
        'reportingmethod': ObjType(_('method'), 'repmeth'),
        'matcomm': ObjType(_('matlab command'), 'mcomm'),
        'command': ObjType(_('command'), 'comm'),
        'class': ObjType(_('class'), 'class'),
        'block': ObjType(_('block'), 'bck'),
        'confblock': ObjType(_('config block'), 'cbck'),
        'macrodir': ObjType(_('macro directive'), 'mdir'),
        'construct': ObjType(_('constructor'), 'cstr'),
        'matvar': ObjType(_('matlab variable'), 'mvar'),
        'specvar': ObjType(_('special variable'), 'svar'),
        'operator': ObjType(_('operator'), 'op'),
        'constant': ObjType(_('constant'), 'const'),
        'option': ObjType(_('option'), 'opt'),
    }
    directives = {
        'function': DynFunction,
        'datesmethod': DatesMethod,
        'dseriesmethod': DseriesMethod,
        'x13method': X13Method,
        'reportingmethod': ReportingMethod,
        'matcomm': MatComm,
        'command': DynComm,
        'class': DynClass,
        'block': DynBlock,
        'confblock': DynConfBlock,
        'macrodir': DynMacroDir,
        'construct': Constructor,
        'matvar': MatlabVar,
        'specvar': SpecialVar,
        'operator': Operator,
        'constant': Constant,
        'option': Option,
    }
    roles = {
        'func': DynareXRefRole(),
        'datmeth': DynareXRefRole(),
        'dsermeth': DynareXRefRole(),
        'x13meth': DynareXRefRole(),
        'repmeth': DynareXRefRole(),
        'mcomm': DynareXRefRole(),
        'comm': DynareXRefRole(),
        'class': DynareXRefRole(),
        'bck': DynareXRefRole(),
        'cbck': DynareXRefRole(),
        'mdir': DynareXRefRole(),
        'cstr': DynareXRefRole(),
        'mvar': DynareXRefRole(),
        'svar': DynareXRefRole(),
        'op': DynareXRefRole(),
        'const': DynareXRefRole(),
        'opt': DynareXRefRole(),
    }
    initial_data = {
        'objects': {},
    }

    def clear_doc(self, docname):
        for fullname, (fn, _l) in list(self.data['objects'].items()):
            if fn == docname:
                del self.data['objects'][fullname]

    def find_obj(self, env, obj, name, typ, searchorder=0):
        objects = self.data['objects']
        newname = name  # None
        return newname, objects.get(newname)

    def merge_domaindata(self, docnames, otherdata):
        for fullname, (fn, objtype) in otherdata['objects'].items():
            if fn in docnames:
                self.data['objects'][fullname] = (fn, objtype)

    def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
        objectname = node.get('dynare:object')
        searchorder = node.hasattr('refspecific') and 1 or 0
        name, obj = self.find_obj(env, objectname, target, typ, searchorder)
        if not obj:
            return None
        return make_refnode(builder, fromdocname, obj[0], name, contnode, name)

    def get_objects(self):
        for refname, (docname, type) in list(self.data['objects'].items()):
            yield refname, refname, type, docname, refname, 1
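
# Illustrative sketch, not part of the original file: a Sphinx extension module
# usually exposes a setup() hook so that conf.py can load it via its
# `extensions` list; registration of the domain itself goes through the
# standard app.add_domain() API. The metadata dict returned below is an
# optional, commonly used convention.
def setup(app):
    app.add_domain(DynareDomain)
    return {'parallel_read_safe': True, 'parallel_write_safe': True}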
# Copyright © 2018-2021 Dynare Team
#
# This file is part of Dynare.
#
# Dynare is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dynare is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Dynare. If not, see <https://www.gnu.org/licenses/>.
import re
from pygments.lexer import Lexer, RegexLexer, bygroups, do_insertions, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
    Number, Punctuation, Generic, Whitespace
# Configuration block :BOLD
#conf_blocks = ('hooks','paths','cluster','node')
__all__ = ['DynareLexer']
class DynareLexer(RegexLexer):

    name = 'Dynare'
    aliases = ['dynare']
    filenames = ['*.mod']

    commands = (
        "dynare","var","varexo","varexo_det","parameters","change_type","model_local_variable",
        "predetermined_variables","trend_var","log_trend_var","external_function",
        "write_latex_original_model","write_latex_dynamic_model",
        "write_latex_static_model","write_latex_steady_state_model","resid","initval_file","histval_file","dsample",
        "periods","values","scales","corr","stderr","steady","check","model_diagnostics","model_info",
        "print_bytecode_dynamic_model","print_bytecode_static_model",
        "perfect_foresight_setup","perfect_foresight_solver","simul","stoch_simul",
        "extended_path","varobs","estimation","unit_root_vars","bvar_density",
        "model_comparison","shock_decomposition","realtime_shock_decomposition",
        "plot_shock_decomposition","calib_smoother","forecast",
        "conditional_forecast","plot_conditional_forecast","bvar_forecast",
        "smoother2histval","osr","osr_params","ramsey_model","ramsey_policy",
        "discretionary_policy","planner_objective","dynare_sensitivity",
        "markov_switching","svar","sbvar","ms_estimation","ms_simulation",
        "ms_compute_mdd","ms_compute_probabilities","ms_irf","ms_forecast",
        "ms_variance_decomposition","rplot","dynatype","dynasave","set_dynare_seed",
        "save_params_and_steady_state","load_params_and_steady_state",
        "dynare_version","write_latex_definitions","write_latex_parameter_table",
        "write_latex_prior_table","collect_latex_files","prior_function",
        "posterior_function","generate_trace_plots","evaluate_planner_objective",
        "occbin_setup","occbin_solver","occbin_write_regimes","occbin_graph","method_of_moments",
        "var_model","trend_component_model","var_expectation_model","pac_model")

    report_commands = ("report","addPage","addSection","addGraph","addTable",
                       "addSeries","addParagraph","addVspace","write","compile")

    operators = (
        "STEADY_STATE","EXPECTATION","var_expectation","pac_expectation","pac_target_nonstationary")

    macro_dirs = (
        "@#includepath", "@#include", "@#define", "@#if",
        "@#ifdef", "@#ifndef", "@#else","@#endif",
        "@#for", "@#endfor", "@#echo", "@#error")

    builtin_constants = (
        "inf", "nan")
    tokens = {
        'root': [
            (r'\s*(%|//).*$', Comment.Single),
            (r'/(\\\n)?[*][\w\W]*?[*](\\\n)?/', Comment.Multiline),
            (words((
                'model','steady_state_model','initval','endval','histval','epilogue',
                'shocks','mshocks','homotopy_setup','observation_trends',
                'estimated_params','estimated_params_init','estimated_params_bounds',
                'shock_groups','conditional_forecast_paths','optim_weights',
                'osr_params_bounds','ramsey_constraints','irf_calibration',
                'moment_calibration','identification','svar_identification',
                'matched_moments','occbin_constraints','surprise','overwrite','bind','relax',
                'verbatim','end','node','cluster','paths','hooks','target','pac_target_info','auxname_target_nonstationary',
                'component', 'growth', 'auxname', 'kind'), prefix=r'\b', suffix=r'\s*\b'), Keyword.Reserved),
            # FIXME: Commands following multiline comments are not highlighted properly.
            (words(commands + report_commands,
                   prefix=r'\b', suffix=r'\s*\b'), Name.Entity),
            (words(operators), Operator.Word),
            (words(macro_dirs, suffix=r'\s*'), Name.Function),
            (words(builtin_constants), Name.Constant),
            (r'\s*[a-zA-Z_]\s*', Name),
            (r'\s*(\d+\.\d+|\d*\.\d+)([eEf][+-]?[0-9]+)?\s*', Number.Float),
            (r'\s*\d+[eEf][+-]?[0-9]+\s*', Number.Float),
            (r'\s*\d+\s*', Number.Integer),
            (r'"[^"]*"', String),
            (r"`[^`]*'", String),
            (r"'[^']*'", String),
            (r'\s*(-|\+|\*|\/|\^)\s*', Operator),
            (r'\s*(==|<=|>=|~=|<|>|&&|!)\s*', Operator),
            (r'\s*[\[\](){}:@.,\|]\s*', Punctuation),
            (r'\s*(=|:|;|>>|#|\$)\s*', Punctuation),
        ]
    }
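
# Usage sketch, not part of the original file: the lexer can be exercised
# directly through the standard Pygments API, for instance to highlight a
# small .mod snippet on the terminal. The snippet below is only illustrative.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter

    code = "var y c k;\nparameters alpha beta;\n// a comment\nsteady;\n"
    print(highlight(code, DynareLexer(), TerminalFormatter()))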
if HAVE_PDFLATEX
if HAVE_BIBTEX
pdf-local: parallel.pdf
endif
endif
SRC = AvenueParadigm.pdf iVaNo_gain.pdf iVaNo_time_comp.pdf marco.bib \
      netbook_complete_comp.pdf netbook_complete_openclose.pdf \
      netbook_partial_comp.pdf netbook_partial_openclose.pdf parallel.tex \
      quest_complete_comp.pdf quest_complete_openclose.pdf quest_partial_comp.pdf \
      quest_partial_openclose.pdf RWMH_quest1_PriorsAndPosteriors1Comp.pdf \
      RWMH_quest1_PriorsAndPosteriors2Comp.pdf \
      RWMH_quest1_PriorsAndPosteriors3Comp.pdf \
      RWMH_quest1_PriorsAndPosteriors4Comp.pdf \
      RWMH_quest1_PriorsAndPosteriors5Comp.pdf \
      RWMH_quest1_PriorsAndPosteriors6Comp.pdf \
      RWMH_quest1_PriorsAndPosteriors7Comp.pdf waitbars1.pdf waitbars2.pdf \
      waitbarsP.pdf

EXTRA_DIST = $(SRC)

parallel.pdf: $(SRC)
	$(PDFLATEX) parallel
	$(BIBTEX) parallel
	$(PDFLATEX) parallel
	$(PDFLATEX) parallel
	$(PDFLATEX) parallel

clean-local:
	rm -f *.log *.aux *.toc *.blg *.bbl *.out
	rm -f parallel.pdf
% ----------------------------------------------------------------
% AMS-LaTeX Paper ************************************************
% **** -----------------------------------------------------------
\documentclass[12pt,a4paper,pdftex]{article}
\usepackage[margin=2.5cm]{geometry}
\usepackage[utf8]{inputenc}
\usepackage{amssymb,amsmath}
\usepackage{graphicx}
\usepackage{epstopdf}
\usepackage{natbib}
\usepackage{verbatim}
\usepackage{xcolor}
\usepackage{psfrag}
\usepackage{setspace}
\usepackage{rotating}
@@ -48,6 +50,15 @@
\def \supp{{\rm supp}}
\def \var{{\rm var}}
\usepackage[pdfpagelabels]{hyperref}
\hypersetup{
pdfproducer = {LaTeX},
colorlinks,
linkcolor=blue,
filecolor=yellow,
urlcolor=green,
citecolor=green}
% ----------------------------------------------------------------
\begin{document}
@@ -178,7 +189,7 @@ We have considered the following DYNARE components suitable to be parallelized u
\item the Random Walk- (and the analogous Independent-)-Metropolis-Hastings algorithm with multiple chains: the different chains are completely independent and do not require any communication between them, so they can easily be executed on different cores/CPUs/computers on a network;
\item a number of procedures performed after the completion of Metropolis, that use the posterior MC sample:
\begin{enumerate}
\item the diagnostic tests for the convergence of the Markov Chain \\(\texttt{mcmc\_diagnostics.m});
\item the function that computes posterior IRF's (\texttt{posteriorIRF.m}).
\item the function that computes posterior statistics for filtered and smoothed variables, forecasts, smoothed shocks, etc. \\ (\verb"prior_posterior_statistics.m").
\item the utility function that loads matrices of results and produces plots for posterior statistics (\texttt{pm3.m}).
@@ -247,7 +258,7 @@ The configuration file is designed as follows:
The list of slave options includes:
\begin{description}
\item[Name]: name of the node;
\item[CPUnbr]: this is the number of CPU's to be used on that computer; if \verb"CPUnbr" is a vector of integers, the syntax is \verb"[s:d]", with \verb"d>=s" (\verb"d, s" are integers); the first core has number 1 so that, on a quad-core, use \verb"4" to use all cores, but use \verb"[3:4]" to specify just the last two cores (this is particularly relevant for Windows where it is possible to assign jobs to specific processors);
\item[ComputerName]: computer name on the network or IP address; use the NETBIOS name under Windows\footnote{In Windows XP it is possible to find this name in 'My Computer' $->$ mouse right click $->$ 'Property' $->$ 'Computer Name'.}, or the DNS name under Unix;
\item[UserName]: required for remote login; in order to assure proper communications between the master and the slave threads, it must be the same user name actually logged on the `master' machine. On a Windows network, this is in the form \verb"DOMAIN\username", like \verb"DEPT\JohnSmith", i.e. user JohnSmith in Windows group DEPT;
\item[Password]: required for remote login (only under Windows): it is the user password on \verb"DOMAIN" and \verb"ComputerName";
@@ -348,7 +359,7 @@ Finally, the DYNARE command line options are:
\item \verb"parallel": trigger the parallel computation using the first cluster specified in the config file
\item \verb"parallel=<clustername>": trigger the parallel computation, using the given cluster
\item \verb"parallel_slave_open_mode": use the leaveSlaveOpen mode in the cluster
\item \verb"parallel_test": just test the cluster, don't actually run the MOD file
\end{itemize}
@@ -615,10 +626,10 @@ MatlabOctavePath = matlab
In this section we describe what happens when the user omits a mandatory entry or provides bad values for them and how DYNARE reacts in these cases. In the parallel package there is a utility (\verb"AnalyseComputationalEnvironment.m") devoted to this task (this is triggered by the command line option \verb"parallel_test"). When necessary during the discussion, we use the \verb"parallel" entries used in the previous examples.
%The pink parts are a possible reaction to an error that may occur; they still have to be agreed upon and therefore perhaps revised.
\begin{description}
%\item[Local:] if no value is given for this variable the execution is stopped when DYNARE starts. More serious if we give a bad value (i.e. for example -1, 3), DYNARE will be stopped after some time with no error message!
\item[ComputerName:] If \verb"Local=0", DYNARE checks if the computer \verb"vonNeumann" exists and if it is possible to communicate with it. If this is not the case, an error message is generated and the computation is stopped.
\item[CPUnbr:] a value for this variable must be in the form \verb"[s:d]" with \verb"d>=s". If the user types values \verb"s>d", their order is flipped and a warning message is sent. When the user provides a correct value for this field, DYNARE checks if \verb"d" CPUs (or cores) are available on the computer. Suppose that this check returns an integer \verb"nC". We can have three possibilities:
\begin{enumerate}
@@ -633,7 +644,8 @@ In this section we describe what happens when the user omits a mandatory entry o
\subsection{The Developers perspective}
%The exposition that follows (and also some points above) depends a lot on how the parallel package is shipped with Dynare, i.e. whether PsTools is installed during the Dynare installation or not, whether the directories needed for the computation are created during the Dynare installation or not. And so on ...
In this section we describe the DYNARE parallel routines in some detail.
\begin{description}
@@ -714,8 +726,8 @@ So far, we have parallelized the following functions, by selecting the most comp
\verb"independent_metropolis_hastings.m", \\
\verb"independent_metropolis_hastings_core.m";
\item the cycle looping over estimated parameters computing univariate diagnostics:\\
\verb"mcmc_diagnostics.m", \\
\verb"mcmc_diagnostics_core.m";
\item the Monte Carlo cycle looping over posterior parameter subdraws performing the IRF simulations (\verb"<*>_core1") and the cycle looping over exogenous shocks plotting IRF's charts (\verb"<*>_core2"):\\
\verb"posteriorIRF.m", \\\verb"posteriorIRF_core1.m", \verb"posteriorIRF_core2.m";
\item the Monte Carlo cycle looping over posterior parameter subdraws, that computes filtered, smoothed, forecasted variables and shocks:\\
@@ -827,7 +839,7 @@ The modified \verb"random_walk_metropolis_hastings.m" is therefore:
\noindent\begin{tabular}[b]{| p{\linewidth} |}
\hline
\begin{verbatim}
function random_walk_metropolis_hastings(TargetFun,ProposalFun,...,varargin)
[...]
% here we wrap all local variables needed by the <*>_core function
localVars = struct('TargetFun', TargetFun, ...
@@ -895,7 +907,7 @@ waitbarString = [ '(' int2str(b) '/' int2str(mh_nblck) ') ...
if mod(j, 3)==0 & ~whoiam
% serial computation
waitbar(prtfrc,hh_fig,waitbarString);
elseif mod(j,50)==0 & whoiam,
% parallel computation
@@ -960,20 +972,20 @@ On the other hand, under the parallel implementation, a parallel monitoring plot
%[...]
%\end{verbatim}
%I do not know whether it is acceptable to set it up this way; if it is fine, I will finish it.
%
%2.
%Let us take all the work done to get to parallelizing PosteriorIRF,
%trace plots, computational analysis; let us document carefully all the functions involved and use them here.
\section{Parallel DYNARE: testing}
We checked the new parallel platform for DYNARE by performing a number of tests, using different models and computer architectures. We present here all tests performed with Windows XP/MATLAB. However, similar tests were performed successfully under the Linux/Ubuntu environment.
In the Bayesian estimation of DSGE models with DYNARE, most of the computing time is devoted to the posterior parameter estimation with the Metropolis algorithm. The first and second tests are therefore focused on the parallelization of the Random Walk Metropolis-Hastings algorithm (Sections \ref{s:test1}-\ref{s:test2}). In addition, further tests (Sections \ref{s:test3}-\ref{s:test4}) are devoted to testing all the parallelized functions in DYNARE. %Finally, we compare the two parallel implementations of the Metropolis Hastings algorithms, available in DYNARE: the Independent and the Random Walk (Section \ref{s:test5}).
\subsection{Test 1.}\label{s:test1}
The main goal here was to evaluate the parallel package on a \emph{fixed hardware platform} and using chains of \emph{variable length}. The model used for testing is a modification of \cite{Hradisky_etal_2006}. This is a small scale open economy DSGE model with 6 observed variables, 6 endogenous variables and 19 parameters to be estimated.
We estimated the model on a bi-processor machine (Fujitsu Siemens, Celsius R630) powered with an Intel\textsuperscript{\textregistered} Xeon\texttrademark CPU 2.80GHz with Hyper-Threading Technology; first with the original serial Metropolis and subsequently using the parallel solution, to take advantage of the two processors. We ran chains of increasing length: 2500, 5000, 10,000, 50,000, 100,000, 250,000, 1,000,000.
\begin{figure}[!ht]
\begin{centering}
@@ -996,8 +1008,8 @@ Overall results are given in Figure \ref{fig:test_time_comp}, showing the comput
The scope of the second test was to verify if results were robust over different hardware platforms.
We estimated the model with chain lengths of 1,000,000 runs on the following hardware platforms:
\begin{itemize}
\item Single processor machine: Intel\textsuperscript{\textregistered} Pentium4\textsuperscript{\textregistered} CPU 3.40GHz with Hyper-Threading Technology (Fujitsu-Siemens Scenic Esprimo);
\item Bi-processor machine: two Intel\textsuperscript{\textregistered} Xeon\texttrademark 2.80GHz CPU's with Hyper-Threading Technology (Fujitsu-Siemens, Celsius R630);
\item Dual core machine: Intel Centrino T2500 2.00GHz Dual Core (Fujitsu-Siemens, LifeBook S Series).
\end{itemize}
@@ -1041,7 +1053,7 @@ Unplugged network cable. &
Given the excellent results reported above, we have parallelized many other DYNARE functions. This implies that parallel instances can be invoked many times during a single DYNARE session. Under the basic parallel toolbox implementation, which we call the `Open/Close' strategy, this implies that MATLAB instances are opened and closed many times by system calls, possibly slowing down the computation, especially for `entry-level' computer resources. As mentioned before, this suggested implementing an alternative strategy for the parallel toolbox, which we call the `Always-Open' strategy, where the slave MATLAB threads, once opened, stay alive and wait for new tasks assigned by the master until the full DYNARE procedure is completed. We show next the tests of these latest implementations.
\subsection{Test 3}\label{s:test3}
In this Section we use the \cite{Lubik2003} model as test function\footnote{The \cite{Lubik2003} model is also selected as the `official' test model for the parallel toolbox in DYNARE.} and a very simple computer class, quite widespread nowadays: the netbook personal computer. In particular we used a Dell Mini 10 with an Intel\textsuperscript{\textregistered} Atom\texttrademark Z520 processor (1.33 GHz, 533 MHz) and 1 GB of RAM (with Hyper-Threading). First, we tested the computational gain of running a full Bayesian estimation: Metropolis (two parallel chains), MCMC diagnostics, posterior IRF's and filtered, smoothed, forecasts, etc. In other words, we designed DYNARE sessions that invoke all parallelized functions. Results are shown in Figures \ref{fig:netbook_complete_openclose}-\ref{fig:netbook_partial_openclose}.
\begin{figure}[p]
\begin{centering}
% Requires \usepackage{graphicx}
@@ -1142,49 +1154,49 @@ The methodology identified for parallelizing MATLAB codes within DYNARE proved t
\begin{figure}
\begin{centering}
% Requires \usepackage{graphicx}
\epsfxsize=300pt \epsfbox{RWMH_quest1_PriorsAndPosteriors1Comp.pdf}
\caption{Prior (grey lines) and posterior density of estimated parameters (black = 100,000 runs; red = 1,000,000 runs) using the RWMH algorithm \citep[QUEST III model][]{Ratto_et_al_EconModel2009}.}\label{fig:quest_RWMH_comp1}
\end{centering}
\end{figure}
\begin{figure}
\begin{centering}
% Requires \usepackage{graphicx}
\epsfxsize=300pt \epsfbox{RWMH_quest1_PriorsAndPosteriors2Comp.pdf}
\caption{Prior (grey lines) and posterior density of estimated parameters (black = 100,000 runs; red = 1,000,000 runs) using the RWMH algorithm \citep[QUEST III model][]{Ratto_et_al_EconModel2009}.}\label{fig:quest_RWMH_comp2}
\end{centering}
\end{figure}
\begin{figure}
\begin{centering}
% Requires \usepackage{graphicx}
\epsfxsize=300pt \epsfbox{RWMH_quest1_PriorsAndPosteriors3Comp.pdf}
\caption{Prior (grey lines) and posterior density of estimated parameters (black = 100,000 runs; red = 1,000,000 runs) using the RWMH algorithm \citep[QUEST III model][]{Ratto_et_al_EconModel2009}.}\label{fig:quest_RWMH_comp3}
\end{centering}
\end{figure}
\begin{figure}
\begin{centering}
% Requires \usepackage{graphicx}
\epsfxsize=300pt \epsfbox{RWMH_quest1_PriorsAndPosteriors4Comp.pdf}
\caption{Prior (grey lines) and posterior density of estimated parameters (black = 100,000 runs; red = 1,000,000 runs) using the RWMH algorithm \citep[QUEST III model][]{Ratto_et_al_EconModel2009}.}\label{fig:quest_RWMH_comp4}
\end{centering}
\end{figure}
\begin{figure}
\begin{centering}
% Requires \usepackage{graphicx}
\epsfxsize=300pt \epsfbox{RWMH_quest1_PriorsAndPosteriors5Comp.pdf}
\caption{Prior (grey lines) and posterior density of estimated parameters (black = 100,000 runs; red = 1,000,000 runs) using the RWMH algorithm \citep[QUEST III model][]{Ratto_et_al_EconModel2009}.}\label{fig:quest_RWMH_comp5}
\end{centering}
\end{figure}
\begin{figure}
\begin{centering}
% Requires \usepackage{graphicx}
\epsfxsize=300pt \epsfbox{RWMH_quest1_PriorsAndPosteriors6Comp.pdf}
\caption{Prior (grey lines) and posterior density of estimated parameters (black = 100,000 runs; red = 1,000,000 runs) using the RWMH algorithm \citep[QUEST III model][]{Ratto_et_al_EconModel2009}.}\label{fig:quest_RWMH_comp6}
\end{centering}
\end{figure}
\begin{figure}
\begin{centering}
% Requires \usepackage{graphicx}
\epsfxsize=300pt \epsfbox{RWMH_quest1_PriorsAndPosteriors7Comp.pdf}
\caption{Prior (grey lines) and posterior density of estimated parameters (black = 100,000 runs; red = 1,000,000 runs) using the RWMH algorithm \citep[QUEST III model][]{Ratto_et_al_EconModel2009}.}\label{fig:quest_RWMH_comp7}
\end{centering}
\end{figure}
......
if HAVE_PDFLATEX
if HAVE_BEAMER
pdf-local: preprocessor.pdf
endif
endif
SRC = preprocessor.tex expr.png expr-sharing.png matrices.png overview.png
EXTRA_DIST = $(SRC)
preprocessor.pdf: $(SRC)
	$(PDFLATEX) preprocessor
	$(PDFLATEX) preprocessor

clean-local:
	rm -f *.pdf *.toc *.aux *.log *.nav *.snm *.vrb *.out *~
File deleted: doc/preprocessor/expr-sharing.png (26.9 KiB)
File deleted: doc/preprocessor/expr.png (11 KiB)
File deleted: doc/preprocessor/matrices.png (25.8 KiB)
File deleted: doc/preprocessor/overview.png (47.8 KiB)
\documentclass{beamer}
%\documentclass[draft]{beamer}
%\documentclass[handout]{beamer}
\mode<handout>
{
\usepackage{pgfpages}
\pgfpagesuselayout{4 on 1}[a4paper,border shrink=3mm,landscape]
\usetheme{Madrid}
\usecolortheme{seagull}
}
\mode<beamer>
{
\usetheme{Madrid}
\setbeamercovered{transparent}
}
\usepackage[english]{babel}
\usepackage[utf8]{inputenc}
\usepackage{times}
\title{The Dynare Preprocessor}
\author[S. Villemot]{Sébastien Villemot}
\institute{CEPREMAP}
\date{October 19, 2007}
\AtBeginSection[]
{
\begin{frame}{Outline}
\tableofcontents[currentsection]
\end{frame}
}
\begin{document}
\begin{frame}
\titlepage
\end{frame}
\begin{frame}
\frametitle{General overview}
\begin{center}
\includegraphics[width=11cm]{overview.png}
\end{center}
\end{frame}
\begin{frame}{Outline}
\tableofcontents
\end{frame}
\section{Introduction to object-oriented programming in C++}
\begin{frame}
\frametitle{Object-oriented programming (OOP)}
\begin{itemize}
\item Traditional way of programming: a program is a list of instructions (organized in functions) which manipulate data
\item OOP is an alternative programming paradigm that uses \alert{objects} and their interactions to design programs
\pause
\item With OOP, programming becomes a kind of modelling: each object of the program should model a real-world object, or a mathematical object (\textit{e.g.} a matrix, an equation, a model...)
\item Each object can be viewed as an independent little machine with a distinct role or responsibility
\item Each object is capable of receiving messages, processing data, and sending messages to other objects
\pause
\item Main advantage of OOP is \alert{modularity}, which leads to greater reusability, flexibility and maintainability
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Object}
\framesubtitle{Definition and example}
\begin{itemize}
\item An \alert{object} is a bundle of:
\begin{itemize}
\item several variables (called its \alert{attributes}), which model the characteristics (or the state) of the object
\item several functions (called its \alert{methods}), which operate on the attributes and model the behaviour of the object (the actions it can perform)
\end{itemize}
\pause
\item Example: suppose we want to model a coffee machine
\begin{itemize}
\item The coffee machine (in real life) is a box, with an internal counter for the credit balance, a slot to put coins in, and a button to get a coffee
\item The corresponding object will have one attribute (the current credit balance) and two methods (one which models the insertion of money, and the other the making of a coffee)
\end{itemize}
\end{itemize}
\end{frame}
\begin{frame}[fragile]
\frametitle{A coffee machine}
\framesubtitle{Class definition}
\begin{block}{C++ header file (\texttt{CoffeeMachine.hh})}
\begin{scriptsize}
\begin{verbatim}
class CoffeeMachine {
public:
  int credit;
  CoffeeMachine();
  void put_coin(int coin_value);
  void get_coffee();
};
\end{verbatim}
\end{scriptsize}
\end{block}
\begin{itemize}
\item A \alert{class} is a template (or a blueprint) of an object
\item Collectively, the attributes and methods defined by a class are called \alert{members}
\item A class definition creates a new \alert{type} (\texttt{CoffeeMachine}) that can be used like other C++ types (\textit{e.g.} \texttt{int}, \texttt{string}, ...)
\item In C++, class definitions are put in header files (\texttt{.hh} extension)
\end{itemize}
\end{frame}
\begin{frame}[fragile]
\frametitle{A coffee machine}
\framesubtitle{Method bodies}
\begin{block}{C++ source file (\texttt{CoffeeMachine.cc})}
\begin{scriptsize}
\begin{verbatim}
void CoffeeMachine::put_coin(int coin_value)
{
  credit += coin_value;
  cout << "Credit is now " << credit << endl;
}

void CoffeeMachine::get_coffee()
{
  if (credit == 0)
    cout << "No credit!" << endl;
  else {
    credit--;
    cout << "Your coffee is ready, credit is now " << credit << endl;
  }
}
\end{verbatim}
\end{scriptsize}
\end{block}
\begin{itemize}
\item Methods can refer to other members (here the two methods modify the \texttt{credit} attribute)
\item Method bodies are put in source files (\texttt{.cc} extension)
\end{itemize}
\end{frame}
\begin{frame}[fragile]
\frametitle{Constructors and destructors}
\begin{itemize}
\item In our class header, there is a special method called \texttt{CoffeeMachine()} (same name as the class)
\item It is a \alert{constructor}: called when the object is created, used to initialize the attributes of the class
\end{itemize}
\begin{block}{C++ source file (\texttt{CoffeeMachine.cc}, continued)}
\begin{scriptsize}
\begin{verbatim}
CoffeeMachine::CoffeeMachine()
{
  credit = 0;
}
\end{verbatim}
\end{scriptsize}
\end{block}
\begin{itemize}
\item It is possible to create constructors with arguments
\item It is also possible to define a \alert{destructor} (the method name is the class name preceded by a tilde, like \texttt{$\sim$CoffeeMachine}): called when the object is destroyed, used to do cleaning tasks (\textit{e.g.} freeing memory)
\end{itemize}
\end{frame}
\begin{frame}[fragile]
\frametitle{Instantiation and method invocation}
\begin{block}{Program main function}
\begin{scriptsize}
\begin{verbatim}
#include "CoffeeMachine.hh"
int main()
{
CoffeeMachine A, B;
A.put_coin(2);
A.get_coffee();
B.put_coin(1);
B.get_coffee();
B.get_coffee();
}
\end{verbatim}
\end{scriptsize}
\end{block}
\begin{itemize}
\item Creates two machines: at the end, \texttt{A} has 1 credit, \texttt{B} has no credit and refused the last coffee
\item \texttt{A} and \texttt{B} are called \alert{instances} of class \texttt{CoffeeMachine}
\item Methods are invoked by appending a dot and the method name to the instance variable name
\end{itemize}
\end{frame}
\begin{frame}[fragile]
\frametitle{Dynamic instantiation with \texttt{new}}
\begin{block}{Program main function}
\begin{scriptsize}
\begin{verbatim}
#include "CoffeeMachine.hh"
void main()
{
CoffeeMachine *A;
A = new CoffeeMachine();
A->put_coin(2);
A->get_coffee();
delete A;
}
\end{verbatim}
\end{scriptsize}
\end{block}
\begin{itemize}
\item Here \texttt{A} is a pointer to an instance of class \texttt{CoffeeMachine}
\item Dynamic creation of instances is done with \texttt{new}, dynamic deletion with \texttt{delete} (analogous to \texttt{malloc} and \texttt{free})
\item Since \texttt{A} is a pointer, methods are called with \texttt{->} instead of a dot
\end{itemize}
\end{frame}
\begin{frame}[fragile]
\frametitle{Access modifiers}
\begin{itemize}
\item In our coffee machine example, all attributes and methods were marked as \texttt{public}
\item Means that those attributes and methods can be accessed from anywhere in the program
\item Here, one can gain credit without putting money in the machine, with something like \texttt{A.credit = 1000;}
\item The solution is to declare it \alert{private}: such members can only be accessed from methods within the class
\end{itemize}
\begin{block}{C++ header file (\texttt{CoffeeMachine.hh})}
\begin{scriptsize}
\begin{verbatim}
class CoffeeMachine {
private:
  int credit;
public:
  CoffeeMachine();
  void put_coin(int coin_value);
  void get_coffee();
};
\end{verbatim}
\end{scriptsize}
\end{block}
\end{frame}
\begin{frame}
\frametitle{Interface}
\begin{itemize}
\item The public members of a class form its \alert{interface}: they describe how the class interacts with its environment
\item Seen from outside, an object is a ``black box'', receiving and sending messages through its interface
\item Particular attention should be given to the interface design: an external programmer should be able to work with a class by studying only its interface, not its internals
\item A good design practice is to limit the set of public members to the strict minimum:
\begin{itemize}
\item enhances code understandability by making the interface clear
\item limits the risk that an internal change in the object requires a change in the rest of the program: \alert{loose coupling}
\item prevents the disruption of the coherence of the object by an external action: principle of \alert{isolation}
\end{itemize}
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Why isolation is important}
\begin{itemize}
\item Consider a class \texttt{Circle} with the following attributes:
\begin{itemize}
\item coordinates of the center
\item radius
\item surface
\end{itemize}
\item If all members are public, it is possible to modify the radius but not the surface, therefore disrupting internal coherence
\item The solution is to make radius and surface private, and to create a public method \texttt{changeRadius} which modifies both simultaneously
\item \textit{Conclusion:} Creating a clear interface and isolating the rest diminishes the risk of introducing bugs
\end{itemize}
\end{frame}
\begin{frame}[fragile]
\frametitle{Inheritance (1/2)}
\begin{block}{Matrices and positive definite matrices}
\begin{scriptsize}
\begin{columns}[t]
\begin{column}{4.8cm}
\begin{verbatim}
class Matrix
{
protected:
  int height, width;
  double *elements;
public:
  Matrix(int n, int p,
         double *e);
  virtual ~Matrix();
  double det();
};
\end{verbatim}
\end{column}
\begin{column}{6cm}
\begin{verbatim}
class PositDefMatrix : public Matrix
{
public:
  PositDefMatrix(int n, int p,
                 double *e);
  Matrix cholesky();
};
\end{verbatim}
\end{column}
\end{columns}
\end{scriptsize}
\end{block}
\begin{itemize}
\item \texttt{PositDefMatrix} is a \alert{subclass} (or \alert{derived class}) of \texttt{Matrix}
\item Conversely \texttt{Matrix} is the \alert{superclass} of \texttt{PositDefMatrix}
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Inheritance (2/2)}
\begin{itemize}
\item \texttt{PositDefMatrix} inherits \texttt{width}, \texttt{height}, \texttt{elements} and \texttt{det} from \texttt{Matrix}
\item Method \texttt{cholesky} can be called on an instance of \texttt{PositDefMatrix}, but not of \texttt{Matrix}
\item The keyword \texttt{protected} means: public for subclasses, but private for other classes
\item \alert{Type casts} are legal when going upward in the derivation tree:
\begin{itemize}
\item a pointer to \texttt{PositDefMatrix} can be safely cast to a \texttt{Matrix*}
\item the converse is faulty and leads to unpredictable results
\end{itemize}
\end{itemize}
\end{frame}
\begin{frame}[fragile]
\frametitle{Constructors and destructors (bis)}
\begin{block}{C++ code snippet}
\begin{scriptsize}
\begin{verbatim}
Matrix::Matrix(int n, int p, double *e) : height(n), width(p)
{
elements = new double[n*p];
memcpy(elements, e, n*p*sizeof(double));
}
Matrix::~Matrix()
{
delete[] elements;
}
PositDefMatrix::PositDefMatrix(int n, int p, double *e) :
Matrix(n, p, e)
{
// Check that matrix is really positive definite
}
\end{verbatim}
\end{scriptsize}
\end{block}
\begin{itemize}
\item Constructor of \texttt{PositDefMatrix} calls constructor of \texttt{Matrix}
\item Note the abbreviated syntax with colon
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Possible derivation tree for real matrices}
\framesubtitle{Arrow means \textit{...is a subclass of...}}
\begin{center}
\includegraphics[width=10cm]{matrices.png}
\end{center}
\end{frame}
\begin{frame}
\frametitle{Polymorphism (1/3)}
\begin{itemize}
\item In the previous example, the determinant computation method uses the same algorithm for both classes
\item But for positive definite matrices, a faster algorithm exists (using the Cholesky factorization)
\item \alert{Polymorphism} offers an elegant solution:
\begin{itemize}
\item declare \texttt{det} as a \alert{virtual method} in class \texttt{Matrix}
\item \alert{override} it in \texttt{PositDefMatrix}, and provide the corresponding implementation
\end{itemize}
\item When method \texttt{det} is invoked, the correct implementation is selected at run time, depending on the actual type of the instance (dynamic dispatch)
\end{itemize}
\end{frame}
\begin{frame}[fragile]
\frametitle{Polymorphism (2/3)}
\begin{block}{Class headers}
\begin{scriptsize}
\begin{columns}[t]
\begin{column}{4.8cm}
\begin{verbatim}
class Matrix
{
protected:
int height, width;
  double *elements;
public:
Matrix(int n, int p,
         double *e);
virtual ~Matrix();
virtual double det();
bool is_invertible();
};
\end{verbatim}
\end{column}
\begin{column}{6cm}
\begin{verbatim}
class PositDefMatrix : public Matrix
{
public:
PositDefMatrix(int n, int p,
                 double *e);
Matrix cholesky();
virtual double det();
};
\end{verbatim}
\end{column}
\end{columns}
\end{scriptsize}
\end{block}
\begin{itemize}
\item Note the \texttt{virtual} keyword
\item A method has been added to determine whether the matrix is invertible
\end{itemize}
\end{frame}
\begin{frame}[fragile]
\frametitle{Polymorphism (3/3)}
\begin{block}{C++ code snippet}
\begin{scriptsize}
\begin{verbatim}
bool Matrix::is_invertible()
{
return(det() != 0);
}
double PositDefMatrix::det()
{
// Square product of diagonal terms of cholesky decomposition
}
\end{verbatim}
\end{scriptsize}
\end{block}
\begin{itemize}
\item A call to \texttt{is\_invertible} on an instance of \texttt{Matrix} will use the generic determinant computation
\item The same call on an instance of \texttt{PositDefMatrix} will call the specialized determinant computation
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Abstract classes}
\begin{itemize}
\item It is possible to create classes which don't provide an implementation for some virtual methods
\item Syntax in the header: \\
\texttt{virtual int method\_name() = 0;}
\item As a consequence, such classes can never be instantiated
\item Generally used as the root of a derivation tree, when classes of the tree share behaviours but not implementations
\item Such classes are called \alert{abstract classes}
\end{itemize}
\end{frame}
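\begin{frame}[fragile]
\frametitle{Abstract classes: an illustrative sketch}
A toy example of the syntax (unrelated to the preprocessor classes):
\begin{block}{C++ code snippet}
\begin{scriptsize}
\begin{verbatim}
class Shape
{
public:
  virtual ~Shape() {}
  virtual double area() = 0;  // pure virtual: no implementation provided
};

class Rectangle : public Shape
{
private:
  double width, height;
public:
  Rectangle(double w, double h) : width(w), height(h) {}
  virtual double area() { return width*height; }
};

// Shape s;                        // error: Shape is abstract
// Shape *p = new Rectangle(2,3);  // fine: p->area() returns 6
\end{verbatim}
\end{scriptsize}
\end{block}
\end{frame}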
\begin{frame}
\frametitle{Some programming rules (1/2)}
\begin{itemize}
\item Don't repeat yourself (DRY): if several functions contain similar portions of code, \alert{factorize} that code into a new function
\begin{itemize}
\item makes code shorter
\item reduces the risk of introducing inconsistencies
\item makes it easier to propagate enhancements and bug fixes
\end{itemize}
\item Make short functions
\begin{itemize}
\item often difficult to grasp what a long function does
\item structuring the code by dividing it into short functions makes the logical structure more apparent
\item enhances code readability and maintainability
\end{itemize}
\item Use explicit variable names (except for loop indexes)
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Some programming rules (2/2)}
\begin{itemize}
\item Global variables are evil
\begin{itemize}
\item a global variable can be modified from anywhere in the code (nonlocality problem)
\item creates a potentially unlimited number of dependencies between all portions of the code
\item makes bugs difficult to localize (any part of the code could have created the trouble)
\item to summarize, goes against the principle of modularity
\item in addition, global variables are not thread safe (unless used with locks/mutexes)
\end{itemize}
\item Document your code when it doesn't speak by itself
\begin{itemize}
\item Dynare preprocessor code is documented using Doxygen
\item done through special comments beginning with an exclamation mark
\item run \texttt{doxygen} from the source directory to create a bunch of HTML files documenting the code
\end{itemize}
\end{itemize}
\end{frame}
\section{Parsing}
\begin{frame}
\frametitle{Parsing overview}
\begin{itemize}
\item Parsing is the action of transforming an input text (a \texttt{mod} file in our case) into a data structure suitable for computation
\item The parser consists of three components:
\begin{itemize}
\item the \alert{lexical analyzer}, which recognizes the ``words'' of the \texttt{mod} file (analog to the \textit{vocabulary} of a language)
\item the \alert{syntax analyzer}, which recognizes the ``sentences'' of the \texttt{mod} file (analog to the \textit{grammar} of a language)
\item the \alert{parsing driver}, which coordinates the whole process and constructs the data structure using the results of the lexical and syntax analyses
\end{itemize}
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Lexical analysis}
\begin{itemize}
\item The lexical analyzer recognizes the ``words'' (or \alert{lexemes}) of the language
\item Lexical analyzer is described in \texttt{DynareFlex.ll}. This file is transformed into C++ source code by the program \texttt{flex}
\item This file gives the list of the known lexemes (described by regular expressions), and gives the associated \alert{token} for each of them
\item For punctuation (semicolon, parentheses, ...), operators (+, -, ...) or fixed keywords (\textit{e.g.} \texttt{model}, \texttt{varexo}, ...), the token is simply an integer uniquely identifying the lexeme
\item For variable names or numbers, the token also contains the associated string for further processing
%\item \textit{Note:} the list of tokens can be found at the beginning of \texttt{DynareBison.yy}
\item When invoked, the lexical analyzer reads the next characters of the input, tries to recognize a lexeme, and either produces an error or returns the associated token
\end{itemize}
\end{frame}
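\begin{frame}[fragile]
\frametitle{Lexical analysis}
\framesubtitle{A hand-written sketch}
A toy tokenizer illustrating what the generated lexer does (purely illustrative: the real lexer is generated by \texttt{flex} and also recognizes the Dynare keywords):
\begin{block}{C++ code snippet}
\begin{scriptsize}
\begin{verbatim}
#include <cctype>
#include <iostream>
#include <string>

// Reads and prints one token starting at position pos of input
void next_token(const std::string &input, std::string::size_type &pos)
{
  while (pos < input.size() && isspace((unsigned char) input[pos]))
    pos++;                                    // skip white space
  if (pos >= input.size())
    return;
  if (input[pos] == ';')
    { std::cout << "SEMICOLON" << std::endl; pos++; }
  else if (isdigit((unsigned char) input[pos]))
    {
      std::string num;
      while (pos < input.size()
             && (isdigit((unsigned char) input[pos]) || input[pos] == '.'))
        num += input[pos++];
      std::cout << "FLOAT_NUMBER " << num << std::endl;
    }
  else if (isalpha((unsigned char) input[pos]))
    {
      // a real lexer would first look the name up in a keyword table
      std::string name;
      while (pos < input.size()
             && (isalnum((unsigned char) input[pos]) || input[pos] == '_'))
        name += input[pos++];
      std::cout << "NAME " << name << std::endl;
    }
  else
    { std::cout << "UNKNOWN" << std::endl; pos++; }
}
\end{verbatim}
\end{scriptsize}
\end{block}
\end{frame}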
\begin{frame}[fragile]
\frametitle{Lexical analysis}
\framesubtitle{An example}
\begin{itemize}
\item Suppose the \texttt{mod} file contains the following:
\begin{verbatim}
model;
x = log(3.5);
end;
\end{verbatim}
\item Before lexical analysis, it is only a sequence of characters
\item The lexical analysis produces the following stream of tokens:
\begin{footnotesize}
\begin{verbatim}
MODEL
SEMICOLON
NAME "x"
EQUAL
LOG
LEFT_PARENTHESIS
FLOAT_NUMBER "3.5"
RIGHT_PARENTHESIS
SEMICOLON
END
SEMICOLON
\end{verbatim}
\end{footnotesize}
\end{itemize}
\end{frame}
\begin{frame}[fragile]
\frametitle{Syntax analysis}
Using the list of tokens produced by lexical analysis, the syntax analyzer determines which ``sentences'' are valid in the language, according to a \alert{grammar} composed of \alert{rules}.
\begin{block}{A grammar for lists of additive and multiplicative expressions}
\begin{footnotesize}
\begin{verbatim}
%start expression_list;
expression_list := expression SEMICOLON
| expression_list expression SEMICOLON;
expression := expression PLUS expression
| expression TIMES expression
| LEFT_PAREN expression RIGHT_PAREN
| INT_NUMBER;
\end{verbatim}
\end{footnotesize}
\end{block}
\begin{itemize}
\item \texttt{(1+3)*2; 4+5;} will pass the syntax analysis without error
\item \texttt{1++2;} will fail the syntax analysis, even though it has passed the lexical analysis
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Syntax analysis}
\framesubtitle{In Dynare}
\begin{itemize}
\item The \texttt{mod} file grammar is described in \texttt{DynareBison.yy}
\item The grammar is transformed into C++ source code by the program \texttt{bison}
\item The grammar tells a story which looks like:
\begin{itemize}
\item A \texttt{mod} file is a list of statements
\item A statement can be a \texttt{var} statement, a \texttt{varexo} statement, a \texttt{model} block, an \texttt{initval} block, ...
\item A \texttt{var} statement begins with the token \texttt{VAR}, then a list of \texttt{NAME}s, then a semicolon
\item A \texttt{model} block begins with the token \texttt{MODEL}, then a semicolon, then a list of equations separated by semicolons, then an \texttt{END} token
\item An equation can be either an expression, or an expression followed by an \texttt{EQUAL} token and another expression
\item An expression can be a \texttt{NAME}, or a \texttt{FLOAT\_NUMBER}, or an expression followed by a \texttt{PLUS} and another expression, ...
\end{itemize}
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Semantic actions}
\begin{itemize}
\item So far we have only described how to accept valid \texttt{mod} files and to reject others
\item But validating is not enough: one needs to do something with what has been parsed
\item Each rule of the grammar can have a \alert{semantic action} associated to it: C/C++ code enclosed in curly braces
\item Each rule can return a semantic value (referred to as \texttt{\$\$} in the action)
\item In the action, it is possible to refer to semantic values returned by components of the rule (using \texttt{\$1}, \texttt{\$2}, ...)
\end{itemize}
\end{frame}
\begin{frame}[fragile]
\frametitle{Semantic actions}
\framesubtitle{An example}
\begin{block}{A simple calculator which prints its results}
\begin{footnotesize}
\begin{verbatim}
%start expression_list
%type <int> expression
expression_list := expression SEMICOLON
{ cout << $1; }
| expression_list expression SEMICOLON
{ cout << $2; };
expression := expression PLUS expression
{ $$ = $1 + $3; }
| expression TIMES expression
{ $$ = $1 * $3; }
| LEFT_PAREN expression RIGHT_PAREN
{ $$ = $2; }
| INT_NUMBER
{ $$ = $1; };
\end{verbatim}
\end{footnotesize}
\end{block}
\end{frame}
\begin{frame}
\frametitle{Parsing driver}
The class \texttt{ParsingDriver} has the following roles:
\begin{itemize}
\item Given the \texttt{mod} filename, it opens the file and launches the lexical and syntax analyzers on it
\item It implements most of the semantic actions of the grammar
\item By doing so, it creates an object of type \texttt{ModFile}, which is the data structure representing the \texttt{mod} file
\item Or, if there is a parsing error (unknown keyword, undeclared symbol, syntax error), it displays the line and column numbers where the error occurred, and exits
\end{itemize}
\end{frame}
\section{Data structure representing a \texttt{mod} file}
\begin{frame}
\frametitle{The \texttt{ModFile} class}
\begin{itemize}
\item This class is the internal data structure used to store all the information contained in a \texttt{mod} file
\item One instance of the class represents one \texttt{mod} file
\item The class contains the following elements (as class members):
\begin{itemize}
\item a symbol table
\item a numerical constants table
\item two trees of expressions: one for the model, and one for the expressions outside the model
\item the list of the statements (parameter initializations, shocks block, \texttt{check}, \texttt{steady}, \texttt{simul}, ...)
\item an evaluation context
\end{itemize}
\item An instance of \texttt{ModFile} is the output of the parsing process (return value of \texttt{ParsingDriver::parse()})
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{The symbol table (1/3)}
\begin{itemize}
\item A \alert{symbol} is simply the name of a variable, parameter or function unknown to the preprocessor: in practice, everything that is not recognized as a Dynare keyword
\item The \alert{symbol table} is a simple structure used to maintain the list of the symbols used in the \texttt{mod} file
\item For each symbol, stores:
\begin{itemize}
\item its name (a string)
\item its type (an integer)
\item a unique integer identifier (unique for a given type, but not across types)
\end{itemize}
\end{itemize}
\end{frame}
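\begin{frame}[fragile]
\frametitle{The symbol table}
\framesubtitle{A simplified sketch}
A stripped-down illustration of the data structure (the real class has a richer interface and error checking):
\begin{block}{C++ code snippet}
\begin{scriptsize}
\begin{verbatim}
#include <map>
#include <string>

class SymbolTable
{
private:
  std::map<std::string, int> type;   // name -> type
  std::map<std::string, int> id;     // name -> id (unique within a type)
  std::map<int, int> next_id;        // next free id, for each type
public:
  // Registers the symbol if needed, and returns its identifier
  int addSymbol(const std::string &name, int symbol_type)
  {
    if (id.find(name) == id.end())
      {
        type[name] = symbol_type;
        id[name] = next_id[symbol_type]++;
      }
    return id[name];
  }
  int getType(const std::string &name) { return type[name]; }
  int getID(const std::string &name) { return id[name]; }
};
\end{verbatim}
\end{scriptsize}
\end{block}
\end{frame}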
\begin{frame}
\frametitle{The symbol table (2/3)}
Existing types of symbols:
\begin{itemize}
\item Endogenous variables
\item Exogenous variables
\item Exogenous deterministic variables
\item Parameters
\item Local variables inside model: declared with a pound sign (\#) construction
\item Local variables outside model: no declaration needed, not interpreted by the preprocessor (\textit{e.g.} Matlab loop indexes)
\item Names of functions unknown to the preprocessor: no declaration needed, not interpreted by the preprocessor, only allowed outside model (until we create an interface for providing custom functions with their derivatives)
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{The symbol table (3/3)}
\begin{itemize}
\item Symbol table filled in:
\begin{itemize}
\item using the \texttt{var}, \texttt{varexo}, \texttt{varexo\_det}, \texttt{parameter} declarations
\item using pound sign (\#) constructions in the model block
\item on the fly during parsing: an undeclared symbol is registered as a local variable outside the model or as an unknown function
\end{itemize}
\item Roles of the symbol table:
\begin{itemize}
\item permits a parsimonious and more efficient representation of expressions (no need to duplicate or compare strings, only handle a pair of integers)
\item ensures that a given symbol is used with only one type
\end{itemize}
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Expression trees (1/2)}
\begin{itemize}
\item The data structure used to store expressions is essentially a \alert{tree}
\item Graphically, the tree representation of $(1+z)*\log(y)$ is:
\begin{center}
\includegraphics[width=4cm]{expr.png}
\end{center}
\item No need to store parentheses
\item Each circle represents a \alert{node}
\item A node has at most one parent and at most two children
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Expression trees (2/2)}
\begin{itemize}
\item In the Dynare preprocessor, a tree node is represented by an instance of the abstract class \texttt{ExprNode}
\item This class has 5 sub-classes, corresponding to the 5 types of nodes:
\begin{itemize}
\item \texttt{NumConstNode} for constant nodes: contains the identifier of the numerical constants it represents
\item \texttt{VariableNode} for variable/parameters nodes: contains the identifier of the variable or parameter it represents
\item \texttt{UnaryOpNode} for unary operators (\textit{e.g.} unary minus, $\log$, $\sin$): contains an integer representing the operator, and a pointer to its child
\item \texttt{BinaryOpNode} for binary operators (\textit{e.g.} $+$, $*$, pow): contains an integer representing the operator, and pointers to its two children
\item \texttt{UnknownFunctionNode} for functions unknown to the parser (\textit{e.g.} user defined functions): contains the identifier of the function name, and a vector containing an arbitrary number of children (the function arguments)
\end{itemize}
\end{itemize}
\end{frame}
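\begin{frame}[fragile]
\frametitle{Expression trees}
\framesubtitle{A simplified view of the node classes}
A sketch of the hierarchy just described, keeping only the data members mentioned above (the real classes contain many more members and methods; \texttt{UnknownFunctionNode} is omitted):
\begin{block}{C++ code snippet}
\begin{scriptsize}
\begin{verbatim}
class ExprNode
{
public:
  virtual ~ExprNode() {}
};

class NumConstNode : public ExprNode
{
public:
  int id;            // identifier in the numerical constants table
  NumConstNode(int id_arg) : id(id_arg) {}
};

class VariableNode : public ExprNode
{
public:
  int symb_id;       // identifier in the symbol table
  VariableNode(int symb_id_arg) : symb_id(symb_id_arg) {}
};

class UnaryOpNode : public ExprNode
{
public:
  int op_code;       // which operator (unary minus, log, sin, ...)
  ExprNode *arg;
  UnaryOpNode(int op, ExprNode *a) : op_code(op), arg(a) {}
};

class BinaryOpNode : public ExprNode
{
public:
  int op_code;       // which operator (+, *, pow, ...)
  ExprNode *arg1, *arg2;
  BinaryOpNode(int op, ExprNode *a1, ExprNode *a2)
    : op_code(op), arg1(a1), arg2(a2) {}
};
\end{verbatim}
\end{scriptsize}
\end{block}
\end{frame}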
\begin{frame}
\frametitle{Classes \texttt{DataTree} and \texttt{ModelTree}}
\begin{itemize}
\item Class \texttt{DataTree} is a container for storing a set of expression trees
\item Class \texttt{ModelTree} is a sub-class of \texttt{DataTree}, specialized for storing a set of model equations (among other things, contains symbolic derivation algorithm)
\item Class \texttt{ModFile} contains:
\begin{itemize}
\item one instance of \texttt{ModelTree} for storing the equations of model block
\item one instance of \texttt{DataTree} for storing all expressions outside model block
\end{itemize}
\item Expression storage is optimized through three mechanisms:
\begin{itemize}
\item pre-computing of numerical constants
\item symbolic simplification rules
\item sub-expression sharing
\end{itemize}
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Constructing expression trees}
\begin{itemize}
\item Class \texttt{DataTree} contains a set of methods for constructing expression trees
\item Construction is done bottom-up, node by node:
\begin{itemize}
\item one method for adding a constant node (\texttt{AddPossiblyNegativeConstant(double)})
\item one method for a log node (\texttt{AddLog(arg)})
\item one method for a plus node (\texttt{AddPlus(arg1, arg2)})
\end{itemize}
\item These methods take pointers to \texttt{ExprNode}, allocate the memory for the node, construct it, and return its pointer
\item These methods are called:
\begin{itemize}
\item from \texttt{ParsingDriver} in the semantic actions associated to the parsing of expressions
\item during symbolic derivation, to create derivatives expressions
\end{itemize}
\item Note that \texttt{NodeID} is an alias (typedef) for \texttt{ExprNode*}
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Reduction of constants and symbolic simplifications}
\begin{itemize}
\item The construction methods compute constants whenever it is possible
\begin{itemize}
\item Suppose you ask to construct the node $1+1$
\item The \texttt{AddPlus()} method will return a pointer to a constant node containing 2
\end{itemize}
\item The construction methods also apply a set of simplification rules, such as:
\begin{itemize}
\item $0+0=0$
\item $x+0 = x$
\item $0-x = -x$
\item $-(-x) = x$
\item $x*0 = 0$
\item $x/1 = x$
\item $x^0 = 1$
\end{itemize}
\item When a simplification rule applies, no new node is created
\end{itemize}
\end{frame}
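\begin{frame}[fragile]
\frametitle{Constant folding and simplification}
\framesubtitle{A sketch}
A toy version of a construction method (for this sketch, the constant node is assumed to store its value directly, \texttt{AddConstant} stands in for the actual constant-creating method, and \texttt{PLUS} is an assumed operator code; the real method also goes through the sharing mechanism described on the following slides):
\begin{block}{C++ code snippet}
\begin{scriptsize}
\begin{verbatim}
// Toy version: NumConstNode is assumed here to hold a double `value'
ExprNode *DataTree::AddPlus(ExprNode *arg1, ExprNode *arg2)
{
  NumConstNode *c1 = dynamic_cast<NumConstNode *>(arg1);
  NumConstNode *c2 = dynamic_cast<NumConstNode *>(arg2);

  // Constant folding: 1 + 1 becomes the constant 2
  if (c1 && c2)
    return AddConstant(c1->value + c2->value);

  // Simplification rules x + 0 = x and 0 + x = x: no new node is created
  if (c2 && c2->value == 0)
    return arg1;
  if (c1 && c1->value == 0)
    return arg2;

  return new BinaryOpNode(PLUS, arg1, arg2);
}
\end{verbatim}
\end{scriptsize}
\end{block}
\end{frame}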
\begin{frame}
\frametitle{Sub-expression sharing (1/2)}
\begin{itemize}
\item Consider the two following expressions: $(1+z)*\log(y)$ and $2^{(1+z)}$
\item Expressions share a common sub-expression: $1+z$
\item The internal representation of these expressions is:
\begin{center}
\includegraphics[width=6cm]{expr-sharing.png}
\end{center}
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Sub-expression sharing (2/2)}
\begin{itemize}
\item Construction methods implement a simple algorithm which achieves maximal expression sharing
\item Algorithm uses the fact that each node has a unique memory address (pointer to the corresponding instance of \texttt{ExprNode})
\item It maintains 5 tables which keep track of the already constructed nodes: one table by type of node (constants, variables, unary ops, binary ops, unknown functions)
\item Suppose you want to create the node $e_1+e_2$ (where $e_1$ and $e_2$ are sub-expressions):
\begin{itemize}
\item the algorithm searches the binary ops table for the tuple equal to (address of $e_1$, address of $e_2$, op code of +) (it is the \alert{search key})
\item if the tuple is found in the table, the node already exists, and its memory address is returned
\item otherwise, the node is created, and is added to the table with its search key
\end{itemize}
\item Maximum sharing is achieved, because expression trees are constructed bottom-up
\end{itemize}
\end{frame}
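\begin{frame}[fragile]
\frametitle{Sub-expression sharing}
\framesubtitle{A sketch of the lookup}
The idea of the search key, in a toy form (using the node classes sketched earlier; the actual container types and method signatures differ):
\begin{block}{C++ code snippet}
\begin{scriptsize}
\begin{verbatim}
#include <map>

// Search key for binary operator nodes:
// (address of arg1, address of arg2, operator code)
typedef std::pair<std::pair<ExprNode *, ExprNode *>, int> binary_key;

std::map<binary_key, ExprNode *> binary_op_node_map;

ExprNode *AddBinaryOp(ExprNode *arg1, ExprNode *arg2, int op_code)
{
  binary_key key(std::make_pair(arg1, arg2), op_code);

  std::map<binary_key, ExprNode *>::iterator it
    = binary_op_node_map.find(key);
  if (it != binary_op_node_map.end())
    return it->second;                  // node already exists: share it

  ExprNode *node = new BinaryOpNode(op_code, arg1, arg2);
  binary_op_node_map[key] = node;       // remember it for future lookups
  return node;
}
\end{verbatim}
\end{scriptsize}
\end{block}
\end{frame}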
\begin{frame}
\frametitle{Final remarks about expressions}
\begin{itemize}
\item Storage of negative constants
\begin{itemize}
\item class \texttt{NumConstNode} only accepts positive constants
\item a negative constant is stored as a unary minus applied to a positive constant
\item this is a kind of identification constraint to avoid having two ways of representing negative constants: $(-2)$ and $-(2)$
\end{itemize}
\item Widely used constants
\begin{itemize}
\item class \texttt{DataTree} has attributes containing pointers to one, zero, and minus one constants
\item these constants are used in many places (in simplification rules, in derivation algorithm...)
\item sub-expression sharing algorithm ensures that those constants will never be duplicated
\end{itemize}
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{List of statements}
\begin{itemize}
\item A statement is represented by an instance of a subclass of the abstract class \texttt{Statement}
\item Three groups of statements:
\begin{itemize}
\item initialization statements (parameter initialization with $p = \ldots$, \texttt{initval}, \texttt{histval} or \texttt{endval} block)
\item shocks blocks
\item computing tasks (\texttt{check}, \texttt{simul}, ...)
\end{itemize}
\item Each type of statement has its own class (\textit{e.g.} \texttt{InitValStatement}, \texttt{SimulStatement}, ...)
\item The class \texttt{ModFile} stores a list of pointers of type \texttt{Statement*}, corresponding to the statements of the \texttt{mod} file, in their order of declaration
\item Heavy use of polymorphism in the check pass, computing pass, and when writing outputs: abstract class \texttt{Statement} provides a virtual method for these 3 actions
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Evaluation context}
\begin{itemize}
\item The \texttt{ModFile} class contains an \alert{evaluation context}
\item It is a map associating a numerical value to some symbols
\item Filled in from the \texttt{initval} block and from parameter initializations
\item Used during equation normalization (in the block decomposition), for finding non-zero entries in the jacobian
\end{itemize}
\end{frame}
\section{Check pass}
\begin{frame}
\frametitle{Error checking during parsing}
\begin{itemize}
\item Some errors in the \texttt{mod} file can be detected during the parsing:
\begin{itemize}
\item syntax errors
\item use of undeclared symbol in model block, initval block...
\item use of a symbol incompatible with its type (\textit{e.g.} parameter in initval, local variable used both in model and outside model)
\item multiple shocks declaration for the same variable
\end{itemize}
\item But some other checks can only be done when parsing is completed
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Check pass}
\begin{itemize}
\item The check pass is implemented through method \texttt{ModFile::checkPass()}
\item Does the following checks:
\begin{itemize}
\item check there is at least one equation in the model (except if doing a standalone BVAR estimation)
\item check there is not both a \texttt{simul} and a \texttt{stoch\_simul} (or another command triggering local approximation)
\end{itemize}
\item Other checks could be added in the future, for example:
\begin{itemize}
\item check that every endogenous variable is used at least once in current period
\item check there is a single \texttt{initval} (or \texttt{histval}, \texttt{endval}) block
\item check that \texttt{varobs} is used if there is an estimation
\end{itemize}
\end{itemize}
\end{frame}
\section{Computing pass}
\begin{frame}
\frametitle{Overview of the computing pass}
\begin{itemize}
\item Computing pass implemented in \texttt{ModFile::computingPass()}
\item Begins with a determination of which derivatives to compute
\item Then, calls \texttt{ModelTree::computingPass()}, which computes:
\begin{itemize}
\item lead/lag variable incidence matrix
\item symbolic derivatives
\item equation normalization + block decomposition (only in \texttt{sparse\_dll} mode)
\item temporary terms
\item symbolic gaussian elimination (only in \texttt{sparse\_dll} mode) \textit{(actually this is done in the output writing pass, but should be moved to the computing pass)}
\end{itemize}
\item Finally, calls \texttt{Statement::computingPass()} on all statements
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{The variable table}
\begin{itemize}
\item In the context of class \texttt{ModelTree}, a \alert{variable} is a pair (symbol, lead/lag)
\item The symbol must correspond to an endogenous or exogenous variable (in the sense of the model)
\item The class \texttt{VariableTable} keeps track of those pairs
\item An instance of \texttt{ModelTree} contains an instance of \texttt{VariableTable}
\item Each pair (\texttt{symbol\_id}, lead/lag) is given a unique \texttt{variable\_id}
\item After the computing pass, the class \texttt{VariableTable} writes the lead/lag incidence matrix:
\begin{itemize}
\item endogenous symbols in row
\item leads/lags in column
\item an element is 0 if the corresponding pair (symbol, lead/lag) does not appear in the model, and the variable ID of that pair otherwise
\end{itemize}
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Static versus dynamic model}
\begin{itemize}
\item The static model is simply the (dynamic) model from which the leads/lags have been omitted
\item Static model used to characterize the steady state
\item The jacobian of the static model is used in the (Matlab) solver for determining the steady state
\item No need to derive static and dynamic models independently: \\
static derivatives can be easily deduced from dynamic derivatives
\end{itemize}
\begin{block}{Example}
\begin{itemize}
\item suppose dynamic model is $2x \cdot x_{-1} = 0$
\item static model is $2x^2 = 0$, whose derivative w.r. to $x$ is $4x$
\item dynamic derivative w.r. to $x$ is $2x_{-1}$, and w.r. to $x_{-1}$ is $2x$
\item removing leads/lags from dynamic derivatives and summing over the two partial derivatives w.r. to $x$ and $x_{-1}$ gives $4x$
\end{itemize}
\end{block}
\end{frame}
\begin{frame}
\frametitle{Which derivatives to compute?}
\begin{itemize}
\item In deterministic mode:
\begin{itemize}
\item static jacobian (w.r. to endogenous variables only)
\item dynamic jacobian (w.r. to endogenous variables only)
\end{itemize}
\item In stochastic mode:
\begin{itemize}
\item static jacobian (w.r. to endogenous variables only)
\item dynamic jacobian (w.r. to all variables)
\item possibly dynamic hessian (if \texttt{order} option $\geq 2$)
\item possibly dynamic 3rd derivatives (if \texttt{order} option $\geq 3$)
\end{itemize}
\item For ramsey policy: the same as above, but with one further order of derivation than declared by the user with \texttt{order} option (the derivation order is determined in the check pass, see \texttt{RamseyPolicyStatement::checkPass()})
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Derivation algorithm (1/2)}
\begin{itemize}
\item Derivation of the model implemented in \texttt{ModelTree::derive()}
\item Simply calls \texttt{ExprNode::getDerivative(varID)} on each equation node
\item Use of polymorphism:
\begin{itemize}
\item for a constant or variable node, derivative is straightforward (0 or 1)
\item for a unary or binary op node, recursively calls method \texttt{getDerivative()} on children to construct derivative of parent, using usual derivation rules, such as:
\begin{itemize}
\item $(\log(e))' = \frac{e'}{e}$
\item $(e_1 + e_2)' = e'_1 + e'_2$
\item $(e_1 \cdot e_2)' = e'_1\cdot e_2 + e_1\cdot e'_2$
\item $\ldots$
\end{itemize}
\end{itemize}
\end{itemize}
\end{frame}
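\begin{frame}[fragile]
\frametitle{Derivation algorithm}
\framesubtitle{A sketch of the recursion}
How the recursion could look for two binary operators (illustrative only: the operator codes, the \texttt{datatree} member and \texttt{AddTimes} are assumptions of this sketch, and the actual implementation differs in many details):
\begin{block}{C++ code snippet}
\begin{scriptsize}
\begin{verbatim}
ExprNode *BinaryOpNode::getDerivative(int var_id)
{
  ExprNode *d1 = arg1->getDerivative(var_id);
  ExprNode *d2 = arg2->getDerivative(var_id);
  switch (op_code)
    {
    case oPlus:    // (e1 + e2)' = e1' + e2'
      return datatree.AddPlus(d1, d2);
    case oTimes:   // (e1 * e2)' = e1'*e2 + e1*e2'
      return datatree.AddPlus(datatree.AddTimes(d1, arg2),
                              datatree.AddTimes(arg1, d2));
    default:
      // other operators follow the usual derivation rules
      return NULL;
    }
}
\end{verbatim}
\end{scriptsize}
\end{block}
\end{frame}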
\begin{frame}
\frametitle{Derivation algorithm (2/2)}
\framesubtitle{Optimizations}
\begin{itemize}
\item Caching of derivation results
\begin{itemize}
\item method \texttt{ExprNode::getDerivative(varID)} memorizes its result in a member attribute the first time it is called
\item so that subsequent calls (with the same argument) simply return the cached value without recomputation
\item caching is useful because of sub-expression sharing
\end{itemize}
\pause
\item Symbolic \textit{a priori}
\begin{itemize}
\item consider the expression $x+y^2$
\item without any computation, you know its derivative w.r. to $z$ is zero
\item each node stores in an attribute the set of variables which appear in the expression it represents ($\{x,y\}$ in the example)
\item that set is computed in the constructor (straightforwardly for a variable or a constant, recursively for other nodes, using the sets of the children)
\item when \texttt{getDerivative(varID)} is called, immediately returns zero if \texttt{varID} is not in that set
\end{itemize}
\end{itemize}
\end{frame}
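\begin{frame}[fragile]
\frametitle{Derivation algorithm}
\framesubtitle{A sketch of the two optimizations}
A possible shape of the entry point (member and method names here are illustrative, not necessarily those of the actual class):
\begin{block}{C++ code snippet}
\begin{scriptsize}
\begin{verbatim}
ExprNode *ExprNode::getDerivative(int var_id)
{
  // Symbolic a priori: if the variable does not appear in this
  // sub-expression, the derivative is zero, no computation needed
  if (variables_in_expr.find(var_id) == variables_in_expr.end())
    return datatree.Zero;

  // Caching: compute each derivative only once...
  std::map<int, ExprNode *>::iterator it = derivative_cache.find(var_id);
  if (it != derivative_cache.end())
    return it->second;

  // ...otherwise do the real work and remember the result
  ExprNode *d = computeDerivative(var_id);
  derivative_cache[var_id] = d;
  return d;
}
\end{verbatim}
\end{scriptsize}
\end{block}
\end{frame}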
\begin{frame}[fragile]
\frametitle{Temporary terms (1/2)}
\begin{itemize}
\item When the preprocessor writes equations and derivatives in its outputs, it takes advantage of sub-expression sharing
\item In Matlab static and dynamic output files, equations are preceded by a list of \alert{temporary terms}
\item Those terms are temporary variables containing expressions shared by several equations or derivatives
\item Doing so greatly enhances the computing speed of model residual, jacobian or hessian
\end{itemize}
\begin{block}{Example}
\begin{columns}[t]
\begin{column}{6cm}
The equations:
\begin{verbatim}
residual(0)=x+y^2-z^3;
residual(1)=3*(x+y^2)+1;
\end{verbatim}
\end{column}
\begin{column}{4.8cm}
Can be optimized in:
\begin{verbatim}
T01=x+y^2;
residual(0)=T01-z^3;
residual(1)=3*T01+1;
\end{verbatim}
\end{column}
\end{columns}
\end{block}
\end{frame}
\begin{frame}
\frametitle{Temporary terms (2/2)}
\begin{itemize}
\item Expression storage in the preprocessor implements maximal sharing...
\item ...but it is not optimal for the Matlab output files, because creating a temporary variable also has a cost (in terms of CPU and memory)
\item Computation of temporary terms implements a trade-off between:
\begin{itemize}
\item cost of duplicating sub-expressions
\item cost of creating new variables
\end{itemize}
\item Algorithm uses a recursive cost calculation, which marks some nodes as being ``temporary''
\item \textit{Problem}: redundant with optimizations done by the C/C++ compiler (when Dynare is in DLL mode) $\Rightarrow$ compilation very slow on big models
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{The special case of Ramsey policy}
\begin{itemize}
\item For most statements, the method \texttt{computingPass()} is a no-op...
\item ...except for \texttt{planner\_objective} statement, which serves to declare planner objective when doing optimal policy under commitment
\item Class \texttt{PlannerObjectiveStatement} contains an instance of \texttt{ModelTree}: used to store the objective (only one equation in the tree)
\item During the computing pass, triggers the computation of the first and second order (static) derivatives of the objective
\end{itemize}
\end{frame}
\section{Writing outputs}
\begin{frame}
\frametitle{Output overview}
\begin{itemize}
\item Implemented in \texttt{ModFile::writeOutputFiles()}
\item If \texttt{mod} file is \texttt{model.mod}, all created filenames will begin with \texttt{model}
\item Main output file is \texttt{model.m}, containing:
\begin{itemize}
\item general initialization commands
\item symbol table output (from \texttt{SymbolTable::writeOutput()})
\item lead/lag incidence matrix (from \texttt{ModelTree::writeOutput()})
\item call to Matlab functions corresponding to the statements of the \texttt{mod} file (written by calling \texttt{Statement::writeOutput()} on all statements through polymorphism)
\end{itemize}
\item Subsidiary output files:
\begin{itemize}
\item one for the static model
\item one for the dynamic model
\item and one for the planner objective (if relevant)
\item written through \texttt{ModelTree} methods: \texttt{writeStaticFile()} and \texttt{writeDynamicFile()}
\end{itemize}
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Model output files}
Three possible modes for \texttt{ModelTree} (see \texttt{mode} attribute):
\begin{itemize}
\item Standard mode: static and dynamic files in Matlab
\item DLL mode:
\begin{itemize}
\item static and dynamic files in C++ source code (with corresponding headers)
\item compiled through \texttt{mex} to allow execution from within Matlab
\end{itemize}
\item Sparse DLL mode:
\begin{itemize}
\item static file in Matlab
\item two possibilities for dynamic file:
\begin{itemize}
\item by default, a C++ source file (with header) and a binary file, to be read from the C++ code
\item or, with \texttt{no\_compiler} option, a binary file in custom format, executed from Matlab through \texttt{simulate} DLL
\item the second option serves to bypass compilation of C++ file which can be very slow
\end{itemize}
\end{itemize}
\end{itemize}
\end{frame}
\section{Conclusion}
\begin{frame}
\frametitle{Future work (1/2)}
\framesubtitle{Enhancements, optimizations}
\begin{itemize}
\item Refactor and reorganize some portions of the code
\item Create a testsuite (with unit tests)
\item Separate computation of temporary terms between static and dynamic outputs
\item Enhance sub-expression sharing algorithm (using associativity, commutativity and factorization rules)
\item Add many checks on the structure of the \texttt{mod} file
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Future work (2/2)}
\framesubtitle{Features}
\begin{itemize}
\item Add precompiler macros (\#include, \#define, \#if)
\item Add handling for several (sub-)models
\item Add indexed variables and control statements (if, loops) both in models and command language
\item Add sum, diff, prod operators
\item For unknown functions in the model: let user provide a derivative, or trigger numerical derivation
\item Generalize binary code output
\item Generalize block decomposition?
\end{itemize}
\end{frame}
\end{document}
\documentclass[11pt,a4paper]{article}
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{fullpage}

\begin{document}

\title{Solution of Specialized Sylvester Equation}
\author{Ondra Kamenik}
\date{2004}
\maketitle

\renewcommand{\vec}{\mathop{\hbox{vec}}}
\newcommand{\otimesi}{\mathop{\overset{i}{\otimes}}}
\newcommand{\iF}{\,^i\!F}
\newcommand{\imF}{\,^{i-1}\!F}
\newcommand{\solvi}{\mathbf{solv1}}
\newcommand{\solvii}{\mathbf{solv2}}
\newcommand{\solviip}{\mathbf{solv2p}}
\newtheorem{lemma}{Lemma}

Given the following matrix equation
$$AX+BX\left(\otimesi C\right)=D,$$ where $A$ is a regular $n\times n$
matrix, $X$ is an $n\times m^i$ matrix of unknowns, $B$ is singular
@@ -36,173 +43,168 @@
and vectorized
$$\left(I+\otimesi F^T\otimes K\right)\vec(Y)=\vec(\widehat{D})$$
Let $\iF$ denote $\otimesi F^T$ for the rest of the text.

\section{Preliminary results}

\begin{lemma}
\label{sylv:first-lemma}
For any $n\times n$ matrix $A$ and $\beta_1\beta_2>0$, if there is
exactly one solution of
$$\left(I_2\otimes I_n +
\begin{pmatrix}\alpha&\beta_1\cr-\beta_2&\alpha\end{pmatrix}
\otimes A\right)\begin{pmatrix} x_1\cr x_2\end{pmatrix} =
\begin{pmatrix} d_1\cr d_2\end{pmatrix},$$
then it can be obtained as solution of
\begin{align*}
\left(I_n + 2\alpha A+(\alpha^2+\beta^2)A^2\right)x_1 & =
\widehat{d_1}\\
\left(I_n + 2\alpha A+(\alpha^2+\beta^2)A^2\right)x_2 & =
\widehat{d_2}
\end{align*}
where $\beta=\sqrt{\beta_1\beta_2}$, and
$$
\begin{pmatrix} \widehat{d_1}\cr\widehat{d_2}\end{pmatrix} =
\left(I_2\otimes I_n +
\begin{pmatrix}\alpha&-\beta_1\cr\beta_2&\alpha\end{pmatrix}
\otimes A\right)\begin{pmatrix} d_1\cr d_2\end{pmatrix}$$
\end{lemma}

\begin{proof} Since
$$
\begin{pmatrix} \alpha&\beta_1\cr-\beta_2&\alpha\end{pmatrix}
\begin{pmatrix} \alpha&-\beta_1\cr\beta_2&\alpha\end{pmatrix}
=
\begin{pmatrix} \alpha&-\beta_1\cr\beta_2&\alpha\end{pmatrix}
\begin{pmatrix} \alpha&\beta_1\cr-\beta_2&\alpha\end{pmatrix}
=
\begin{pmatrix} \alpha^2+\beta^2&0\cr 0&\alpha^2+\beta^2\end{pmatrix},
$$
it is easy to see that if the equation is multiplied by
$$I_2\otimes I_n +
\begin{pmatrix}\alpha&-\beta_1\cr\beta_2&\alpha\end{pmatrix}
\otimes A$$
we obtain the result. We only need to prove that the matrix is
regular. But this is clear because the matrix
$$\begin{pmatrix} \alpha&-\beta_1\cr\beta_2&\alpha\end{pmatrix}$$
collapses an eigenvalue of $A$ to $-1$ iff the matrix
$$\begin{pmatrix} \alpha&\beta_1\cr-\beta_2&\alpha\end{pmatrix}$$
does.
\end{proof}
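For instance, in the scalar case $n=1$, $A=(a)$, the system is
$$\begin{pmatrix} 1+\alpha a&\beta_1 a\cr -\beta_2 a&1+\alpha a\end{pmatrix}
\begin{pmatrix} x_1\cr x_2\end{pmatrix}=
\begin{pmatrix} d_1\cr d_2\end{pmatrix},$$
and multiplying it by
$$\begin{pmatrix} 1+\alpha a&-\beta_1 a\cr \beta_2 a&1+\alpha a\end{pmatrix}$$
yields the two decoupled equations
$$\left(1+2\alpha a+(\alpha^2+\beta^2)a^2\right)x_1=\widehat{d_1},\qquad
\left(1+2\alpha a+(\alpha^2+\beta^2)a^2\right)x_2=\widehat{d_2},$$
since $(1+\alpha a)^2+\beta_1\beta_2a^2=1+2\alpha a+(\alpha^2+\beta^2)a^2$.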
\begin{lemma}
\label{sylv:second-lemma}
For any $n\times n$ matrix $A$ and $\delta_1\delta_2>0$, if there is
exactly one solution of
$$\left(I_2\otimes I_n +
2\alpha\begin{pmatrix}\gamma&\delta_1\cr-\delta_2&\gamma\end{pmatrix}\otimes A
+(\alpha^2+\beta^2)
\begin{pmatrix}\gamma&\delta_1\cr-\delta_2&\gamma\end{pmatrix}^2\otimes A^2\right)
\begin{pmatrix} x_1\cr x_2\end{pmatrix}=
\begin{pmatrix} d_1\cr d_2\end{pmatrix}
$$
it can be obtained as
\begin{align*}
\left(I_n+2a_1A+(a_1^2+b_1^2)A^2\right)\left(I_n+2a_2A+(a_2^2+b_2^2)A^2\right)
x_1 & =\widehat{d_1}\\
\left(I_n+2a_1A+(a_1^2+b_1^2)A^2\right)\left(I_n+2a_2A+(a_2^2+b_2^2)A^2\right)
x_2 & =\widehat{d_2}
\end{align*}
where
$$
\begin{pmatrix} \widehat{d_1}\cr\widehat{d_2}\end{pmatrix} =
\left(I_2\otimes I_n +
2\alpha\begin{pmatrix}\gamma&-\delta_1\cr\delta_2&\gamma\end{pmatrix}\otimes A
+(\alpha^2+\beta^2)
\begin{pmatrix}\gamma&-\delta_1\cr\delta_2&\gamma\end{pmatrix}^2\otimes A^2\right)
\begin{pmatrix} d_1\cr d_2\end{pmatrix}
$$
and
\begin{align*}
a_1 & = \alpha\gamma - \beta\delta\\
b_1 & = \alpha\delta + \gamma\beta\\
a_2 & = \alpha\gamma + \beta\delta\\
b_2 & = \alpha\delta - \gamma\beta\\
\delta & = \sqrt{\delta_1\delta_2}
\end{align*}
\end{lemma}
\begin{proof}
The matrix can be written as
$$\left(I_2\otimes I_n+(\alpha+\mathrm i\beta)
\begin{pmatrix}\gamma&\delta_1\cr-\delta_2&\gamma\end{pmatrix}\otimes A\right)
\left(I_2\otimes I_n +(\alpha-\mathrm i\beta)
\begin{pmatrix}\gamma&\delta_1\cr-\delta_2&\gamma\end{pmatrix}\otimes A\right).
$$
Note that both matrices are regular since their product is
regular. For the same reason as in the previous proof, the following
matrix is also regular
$$\left(I_2\otimes I_n+(\alpha+\mathrm i\beta)
\begin{pmatrix}\gamma&-\delta_1\cr\delta_2&\gamma\end{pmatrix}\otimes A\right)
\left(I_2\otimes I_n +(\alpha-\mathrm i\beta)
\begin{pmatrix}\gamma&-\delta_1\cr\delta_2&\gamma\end{pmatrix}\otimes A\right),
$$
and we may multiply the equation by this matrix obtaining
$\widehat{d_1}$ and $\widehat{d_2}$. Note that the four matrices
commute, that is why we can write the whole product as
\begin{align*}
\left(I_2\otimes I_n + (\alpha+\mathrm i\beta)
\begin{pmatrix}\gamma&\delta_1\cr-\delta_2&\gamma\end{pmatrix}\otimes A\right)\cdot
\left(I_2\otimes I_n + (\alpha+\mathrm i\beta)
\begin{pmatrix}\gamma&-\delta_1\cr\delta_2&\gamma\end{pmatrix}\otimes A\right)&\cdot\\
\left(I_2\otimes I_n + (\alpha-\mathrm i\beta)
\begin{pmatrix}\gamma&\delta_1\cr-\delta_2&\gamma\end{pmatrix}\otimes A\right)\cdot
\left(I_2\otimes I_n + (\alpha-\mathrm i\beta)
\begin{pmatrix}\gamma&-\delta_1\cr\delta_2&\gamma\end{pmatrix}\otimes A\right)&=\\
\left(I_2\otimes I_n + 2(\alpha + \mathrm i\beta)
\begin{pmatrix}\gamma&0\cr 0&\gamma\end{pmatrix}\otimes A +
(\alpha + \mathrm i\beta)^2
\begin{pmatrix}\gamma^2+\delta^2&0\cr 0&\gamma^2+\delta^2\end{pmatrix}\otimes A^2
\right)&\cdot\\
\left(I_2\otimes I_n + 2(\alpha - \mathrm i\beta)
\begin{pmatrix}\gamma&0\cr 0&\gamma\end{pmatrix}\otimes A +
(\alpha - \mathrm i\beta)^2
\begin{pmatrix}\gamma^2+\delta^2&0\cr 0&\gamma^2+\delta^2\end{pmatrix}\otimes A^2
\right)&
\end{align*}
The product is block diagonal, consisting of two $n\times n$ blocks, which are the
same. The block can be rewritten as the product:
\begin{align*}
(I_n+(\alpha+\mathrm i\beta)(\gamma+\mathrm i\delta)A)\cdot
(I_n+(\alpha+\mathrm i\beta)(\gamma-\mathrm i\delta)A)&\cdot\\
(I_n+(\alpha-\mathrm i\beta)(\gamma+\mathrm i\delta)A)\cdot
(I_n+(\alpha-\mathrm i\beta)(\gamma-\mathrm i\delta)A)&
\end{align*}
and after reordering
\begin{align*}
(I_n+(\alpha+\mathrm i\beta)(\gamma+\mathrm i\delta)A)\cdot
(I_n+(\alpha-\mathrm i\beta)(\gamma-\mathrm i\delta)A)&\cdot\\
(I_n+(\alpha+\mathrm i\beta)(\gamma-\mathrm i\delta)A)\cdot
(I_n+(\alpha-\mathrm i\beta)(\gamma+\mathrm i\delta)A)&=\\
(I_n+2(\alpha\gamma-\beta\delta)A+
(\alpha^2+\beta^2)(\gamma^2+\delta^2)A^2)&\cdot\\
(I_n+2(\alpha\gamma+\beta\delta)A+
(\alpha^2+\beta^2)(\gamma^2+\delta^2)A^2)&
\end{align*}
Now it suffices to compare $a_1=\alpha\gamma-\beta\delta$ and verify
that
\begin{align*}
b_1^2 & = (\alpha^2+\beta^2)(\gamma^2+\delta^2)-a_1^2 =\\
& = \alpha^2\gamma^2+\beta^2\gamma^2+\alpha^2\delta^2+\beta^2\delta^2-
(\alpha\gamma)^2+2\alpha\beta\gamma\delta-(\beta\delta)^2=\\
& = (\beta\gamma)^2 + (\alpha\delta)^2 + 2\alpha\beta\gamma\delta=\\
& = (\beta\gamma +\alpha\delta)^2
\end{align*}
For $b_2$ it is done in the same way.
\end{proof}
\section{The Algorithm}

Below we define three functions of which
$\vec(Y)=\solvi(1,\vec(\widehat{D}),i)$ provides the solution $Y$. $X$
is then obtained as $X=U^TY\left(\otimesi V\right)$.

\subsection{Synopsis}

$F^T$ is an $m\times m$ lower quasi-triangular matrix. Let $m_r$ be the
number of real eigenvalues, $m_c$ the number of complex pairs. Thus
@@ -215,7 +217,7 @@
vectors of appropriate dimensions, and $x_{\bar j}$ is the $\bar j$-th
partition of $x$, and $x_j=(x_{\bar j}\quad x_{\bar j+1})^T$ if the $j$-th
eigenvalue is complex, and $x_j=x_{\bar j}$ if the $j$-th eigenvalue is real.

\subsection{Function $\solvi$}

The function $x=\solvi(r,d,i)$ solves equation
$$\left(I_{m^i}\otimes I_n+r\iF\otimes K\right)x = d.$$
@@ -226,7 +228,7 @@
quasi-triangular matrix, so this is easy.
If $i>0$, then we go through diagonal blocks $F_j$ for
$j=1,\ldots, m_r+m_c$ and perform:
\begin{enumerate}
\item if $F_j=(f_{\bar j\bar j}) = (f)$, then we return
$x_j=\solvi(rf,d_{\bar j}, i-1)$. Then precalculate $y=d_j-x_j$, and
eliminate guys below $F_j$. This is, for each $k=\bar j+1,\ldots, m$, we
@@ -234,46 +236,45 @@
put
$$d_k = d_k-rf_{\bar jk}\left(\imF\otimes K\right)x_{\bar j}=
d_k - \frac{f_{\bar jk}}{f}y$$
\item if $F_j=\begin{pmatrix}\alpha&\beta_1\cr -\beta_2&\alpha\end{pmatrix}$,
we return $x_j=\solvii(r\alpha, r\beta_1, r\beta_2, d_j, i-1)$. Then
we precalculate
$$y=\left(\begin{pmatrix}\alpha&-\beta_1\cr \beta_2&\alpha\end{pmatrix}
\otimes I_{m^{i-1}}\otimes I_n\right)
\begin{pmatrix} d_{\bar j} - x_{\bar j}\cr
d_{\bar j+1} - x_{\bar j+1}
\end{pmatrix}$$
and eliminate guys below $F_j$. This is, for each $k=\bar j+2,\ldots, m$
we put
\begin{align*}
d_k &= d_k - r(f_{{\bar j}k}\quad f_{{\bar j}+1 k})
\otimes\left(\imF\otimes K\right)x_j\\
&= d_k - \frac{1}{\alpha^2+\beta_1\beta_2}
\left((f_{{\bar j}k}\quad f_{{\bar j}+1 k})
\otimes I_{m^{i-1}}\otimes I_n\right)y
\end{align*}
\end{enumerate}
\subsection{Function $\solvii$}

The function $x=\solvii(\alpha, \beta_1, \beta_2, d, i)$ solves
equation
$$
\left(I_2\otimes I_{m^i}\otimes I_n +
\begin{pmatrix}\alpha&\beta_1\cr-\beta_2&\alpha\end{pmatrix}
\otimes\iF\otimes K\right)x=d
$$
According to Lemma \ref{sylv:first-lemma} the function returns
$$
x=\begin{pmatrix}\solviip(\alpha,\beta_1\beta_2,\widehat{d_1},i)\cr
\solviip(\alpha,\beta_1\beta_2,\widehat{d_2},i)\end{pmatrix},
$$
where $\widehat{d_1}$, and $\widehat{d_2}$ are partitions of
$\widehat{d}$ from the lemma.

\subsection{Function $\solviip$}

The function $x=\solviip(\alpha,\beta^2,d,i)$ solves equation
$$
@@ -291,7 +292,7 @@
then it is lower triangular.
If $i>0$, then we go through diagonal blocks $F_j$ for $j=1,\ldots, m_r+m_c$
and perform:
\begin{enumerate}
\item if $F_j=(f_{\bar j\bar j})=(f)$ then $j$-th diagonal block of
$$
I_{m^i}\otimes I_n + 2\alpha\iF\otimes K +
@@ -311,16 +312,14 @@
$$y_1 = 2\alpha f(\imF\otimes K)x_j = d_j-x_j-y_2.$$
Let $g_{pq}$ denote element of $F^{2T}$ at position $(q,p)$.
The elimination is done by going through $k=\bar j+1,\ldots, m$ and
putting
\begin{align*}
d_k &= d_k - \left(2\alpha f_{\bar j k}\imF\otimes K +
(\alpha^2+\beta^2)g_{\bar j k}\imF^2\otimes K^2\right)x_j\\
&= d_k - \frac{f_{\bar j k}}{f}y_1 -
\frac{g_{\bar j k}}{f^2}y_2
\end{align*}
\item if $F_j=\begin{pmatrix}\gamma&\delta_1\cr -\delta_2&\gamma\end{pmatrix}$,
then $j$-th diagonal block of
$$
I_{m^i}\otimes I_n + 2\alpha\iF\otimes K +
@@ -329,26 +328,24 @@
$$
takes the form
$$
I_{m^{i-1}}\otimes I_n +2\alpha
\begin{pmatrix}\gamma&\delta_1\cr -\delta_2&\gamma\end{pmatrix}\imF\otimes K
+(\alpha^2+\beta^2)
\begin{pmatrix}\gamma&\delta_1\cr -\delta_2&\gamma\end{pmatrix}^2\imF^2\otimes K^2
$$
According to Lemma \ref{sylv:second-lemma}, we need to calculate
$\widehat{d}_{\bar j}$, and $\widehat{d}_{\bar j+1}$, and $a_1$,
$b_1$, $a_2$, $b_2$. Then we obtain
\begin{align*}
x_{\bar j}&=
\solviip(a_1,b_1^2,\solviip(a_2,b_2^2,\widehat{d}_{\bar j},i-1),i-1)\\
x_{\bar j+1}&=
\solviip(a_1,b_1^2,\solviip(a_2,b_2^2,\widehat{d}_{\bar j+1},i-1),i-1)
\end{align*}
Now we need to eliminate guys below $F_j$. Since $\Vert F^2_j\Vert <
\Vert F_j\Vert$, we precalculate
\begin{align*}
y_2&=(\alpha^2+\beta^2)(\gamma^2+\delta^2)
\left(I_2\otimes\imF^2\otimes K^2\right)x_j\\
y_1&=2\alpha(\gamma^2+\delta^2)\left(I_2\otimes\imF\otimes
@@ -360,37 +357,36 @@
K\right)x_j\\
\left(
F_j^2
\otimes I_{m^{i-1}n}\right)y_2\right)\\
&=\left(\begin{pmatrix}\gamma&-\delta_1\cr\delta_2&\gamma\end{pmatrix}
\otimes I_{m^{i-1}n}\right)(d_j-x_j)
-\left(\begin{pmatrix}\gamma&\delta_1\cr-\delta_2&\gamma\end{pmatrix}
\otimes I_{m^{i-1}n}\right)y_2
\end{align*}
Then we go through all $k=\bar j+2,\ldots, m$. For clearer formulas, let
$\mathbf f_k$ denote pair of $F^T$ elements in $k$-th line below $F_j$,
this is $\mathbf f_k=(f_{\bar jk}\quad f_{\bar j+1k})$. And let $\mathbf g_k$
denote the same for $F^{2T}$, this is $\mathbf g_k=(g_{\bar jk}\quad
g_{\bar j+1k})$. For each $k$ we put
\begin{align*}
d_k &= d_k - \left(2\alpha\mathbf f_k\otimes
\imF\otimes K +
(\alpha^2+\beta^2)\mathbf g_k\otimes
\imF^2\otimes K^2\right)x_j\\
&= d_k - \frac{1}{\gamma^2+\delta^2}
\left(\mathbf f_k\otimes
I_{m^{i-1}n}\right)y_1
- \frac{1}{\gamma^2+\delta^2}
\left(\mathbf g_k\otimes
I_{m^{i-1}n}\right)y_2
\end{align*}
\end{enumerate}
\section{Final Notes}
\subsection{Numerical Issues of $A^{-1}B$}
We began the solution of the Sylvester equation with multiplication by
$A^{-1}$. This can introduce numerical errors, and we need more
@@ -448,7 +444,7 @@
We try to solve $M\vec(X) = T_n(0) = 0$. Since $M$ is
skew symmetric, there is a real orthonormal matrix $Q$ such that
$M=Q\widehat{M}Q^T$, and $\widehat{M}$ is a block diagonal matrix
consisting of $2\times 2$ blocks of the form
$$\left(\begin{matrix} 0&\alpha_i\cr-\alpha_i&0\end{matrix}\right),$$
and of an additional zero if $n^2$ is odd.
Now we solve the equation $\widehat{M}y=0$, where $y=Q^T\vec(X)$. Now
@@ -462,7 +458,7 @@
structural zeros, a solution is obtained by picking arbitrary numbers
for the same positions in $y$, and putting $\vec(X)=Qy$.
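To see which entries of $y$ remain free, consider one of the
$2\times 2$ blocks above with $\alpha_i\neq 0$ acting on the corresponding
pair of entries of $y$ (a small worked step, using nothing beyond the
block-diagonal form of $\widehat{M}$):
$$\begin{pmatrix} 0&\alpha_i\cr-\alpha_i&0\end{pmatrix}
\begin{pmatrix} y_{2i-1}\cr y_{2i}\end{pmatrix}
=\begin{pmatrix} \alpha_i y_{2i}\cr -\alpha_i y_{2i-1}\end{pmatrix}=0
\quad\Longrightarrow\quad y_{2i-1}=y_{2i}=0,$$
so non-zero entries of $y$ can appear only in the positions corresponding
to zero blocks (and to the extra zero when $n^2$ is odd).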
The following questions need to be answered:
\begin{enumerate}
\item How to recognize the structural rows?
\item Is $A^{-1}$ generated by a $y$ which has non-zero elements only
on structural rows? Note that $A$ can have repeated eigenvalues. The
@@ -471,9 +467,9 @@
$y$ there is exactly one structural row.
\item And a very difficult one: how to pick $y$ so that $X$ would be
regular, or even close to orthonormal (pure orthonormality
overdetermines $y$)?
\end{enumerate}
\subsection{Making Zeros in $F$}
It is clear that the numerical complexity of the proposed algorithm
strongly depends on the number of non-zero elements in the Schur factor
@@ -482,10 +478,10 @@
substantially fewer zeros than $F$, then the computation would be
substantially faster. However, it seems that we have to pay a price for
any additional zero in terms of less numerical stability of the $PFP^{-1}$
multiplication. Consider $P$ and $F$ of the form
$$P=\begin{pmatrix} I &X\cr 0&I\end{pmatrix},
\qquad F=\begin{pmatrix} A&C\cr 0&B\end{pmatrix}$$
We then obtain
$$PFP^{-1}=\begin{pmatrix} A& C + XB - AX\cr 0&B \end{pmatrix}$$
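The displayed product can be checked directly, since the inverse of the
block upper-triangular $P$ has a simple closed form (a short verification,
using only the matrices just defined):
$$P^{-1}=\begin{pmatrix} I&-X\cr 0&I\end{pmatrix},\qquad
PF=\begin{pmatrix} A&C+XB\cr 0&B\end{pmatrix},\qquad
(PF)P^{-1}=\begin{pmatrix} A&C+XB-AX\cr 0&B\end{pmatrix}.$$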
Thus, we need to solve $C = AX - XB$. It is clear that the numerical
stability of the operator $Y\mapsto PYP^{-1}$ and its inverse $Y\mapsto
@@ -503,7 +499,7 @@
partitioning is not possible without breaking some user-given limit
for numerical errors. We have to keep in mind that the numerical
errors are accumulated in the product of all the $P$'s of every step.
\subsection{Exploiting constant rows in $F$}
If some of $F$'s rows consist of the same number, or the number of
distinct values within a row is small, then this structure can be
@@ -529,13 +525,12 @@
in order to minimize $\Vert X\Vert$ if $A$ and $B$ are given. Now, in
the next step we need to introduce zeros (or constant rows) into the matrix
$A$, so we seek a regular matrix $P$ doing the
job. If found, the product looks like:
$$\begin{pmatrix} P&0\cr 0&I\end{pmatrix}\begin{pmatrix} A&R\cr 0&B\end{pmatrix}
\begin{pmatrix} P^{-1}&0\cr 0&I\end{pmatrix}=
\begin{pmatrix} PAP^{-1}&PR\cr 0&B\end{pmatrix}$$
Now note that the matrix $PR$ also has constant rows. Thus, a
preconditioning of the matrix in the upper left corner doesn't affect the
property. However, a preconditioning of the matrix in the lower right
corner breaks the property, since we would obtain $RP^{-1}$.
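The claim that $PR$ keeps the constant-row structure can be checked
entry-wise (a small sketch, writing the $k$-th row of $R$ as
$r_k\mathbf 1^T$ for a scalar $r_k$):
$$(PR)_{k\cdot}=\sum_j p_{kj}\,r_j\mathbf 1^T
=\Bigl(\sum_j p_{kj}r_j\Bigr)\mathbf 1^T,$$
so every row of $PR$ is again constant, whereas multiplying by $P^{-1}$
from the right mixes columns and in general destroys the property.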
\end{document}
\documentclass[11pt,a4paper]{article}
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage{fullpage}
\begin{document}
\author{Ondra Kamenik}
\title{Multidimensional Tensor Library \\ for perturbation methods applied to DSGE
models}
\date{2004}
\maketitle
\section{Introduction}
The design of the library was driven by the needs of perturbation
methods for solving Stochastic Dynamic General Equilibrium models. The
aim of the library is not to provide an exhaustive interface to
multidimensional linear algebra. The tensor library's main purposes
include:
\begin{itemize}
\item Define types for tensors, for a multidimensional index of a
tensor, and types for folded and unfolded tensors. The tensors defined
here have only one multidimensional index and one reserved
one-dimensional index. The tensors should allow modelling of higher
order derivatives with respect to a few vectors with different sizes
(for example $\left[g_{y^2u^3}\right]$). The tensors should allow
folded and unfolded storage modes and conversion between them. A
folded tensor stores symmetric elements only once, while an unfolded
tensor stores the data as a whole multidimensional cube.
\item Define both sparse and dense tensors. We need only one particular
type of sparse tensor. This is in contrast to dense tensors, where we
need a much wider family of types.
\item Implement the Faa Di Bruno multidimensional formula. So, the main
purpose of the library is to implement the following step of Faa Di Bruno:
$$\left[B_{s^k}\right]_{\alpha_1\ldots\alpha_k}
=\left[h_{y^l}\right]_{\gamma_1\ldots\gamma_l}
\left(\sum_{c\in M_{l,k}}
\prod_{m=1}^l\left[g_{s^{\vert c_m\vert}}\right]^{\gamma_m}_{c_m(\alpha)}\right)$$
where $s$ can be a compound vector of variables, $h_{y^l}$ and $g_{s^i}$
are tensors, $M_{l,k}$ is the set of
all equivalences of a $k$-element set having $l$ classes, $c_m$ is the
$m$-th class of the equivalence $c$, $\vert c_m\vert$ is its
cardinality, and $c_m(\alpha)$ is the tuple of
indices picked from $\alpha$ by the class $c_m$.
Note that the sparse tensors play the role of $h$ in the Faa Di Bruno
formula, not of $B$ nor $g$. A worked second-order instance of the
formula is given right after this list.
\end{itemize}
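As an illustration of the Faa Di Bruno step above, take $k=2$ (a small
worked instance, assuming $B$ collects the contributions of all $l$): the
set $M_{1,2}$ contains the single equivalence $\{\{1,2\}\}$ and $M_{2,2}$
the single equivalence $\{\{1\},\{2\}\}$, so summing the $l=1$ and $l=2$
steps gives the familiar second-order chain rule
$$\left[B_{s^2}\right]_{\alpha_1\alpha_2}
=\left[h_{y}\right]_{\gamma}\left[g_{s^2}\right]^{\gamma}_{\alpha_1\alpha_2}
+\left[h_{y^2}\right]_{\gamma_1\gamma_2}
\left[g_{s}\right]^{\gamma_1}_{\alpha_1}
\left[g_{s}\right]^{\gamma_2}_{\alpha_2}.$$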
\section{Classes}
The following list is a road-map to various classes in the library.
\begin{description}
\item[Tensor]
Virtual base class for all dense tensors; it defines \emph{index} as the
multidimensional iterator
\item[FTensor]
Virtual base class for all folded tensors
\item[UTensor]
Virtual base class for all unfolded tensors
\item[FFSTensor]
Class representing folded full symmetry dense tensor,
for instance $\left[g_{y^3}\right]$
\item[FGSTensor]
Class representing folded general symmetry dense tensor,
for instance $\left[g_{y^2u^3}\right]$
\item[UFSTensor]
Class representing unfolded full symmetry dense tensor,
for instance $\left[g_{y^3}\right]$
\item[UGSTensor]
Class representing unfolded general symmetry dense tensor,
for instance $\left[g_{y^2u^3}\right]$
\item[URTensor]
Class representing an unfolded/folded full symmetry, row-oriented,
dense tensor. Row-oriented tensors are used in the Faa Di Bruno
above as some part (one or a few columns) of a product of $g$'s. Their
fold/unfold conversions are special in the sense that they must
yield equivalent results if multiplied with folded/unfolded
column-oriented counterparts.
\item[URSingleTensor]
Class representing an unfolded/folded full symmetry, row-oriented,
single-column, dense tensor. Besides its use in the Faa Di Bruno, the
single-column row-oriented tensor also models higher moments of the normal
distribution.
\item[UPSTensor]
Class representing unfolded, column-oriented tensor whose symmetry
is not that of $\left[B_{y^2u^3}\right]$ but rather of something
like $\left[B_{yuuyu}\right]$. This tensor evolves during the product
operation for unfolded tensors and its basic operation is to add
itself to a tensor with nicer symmetry, here $\left[B_{y^2u^3}\right]$.
\item[FPSTensor]
Class representing partially folded, column-oriented tensor whose
symmetry is not that of $\left[B_{y^3u^4}\right]$ but rather
something like $\left[B_{yu\vert y^3u\vert u^4}\right]$, where the
portions of symmetries represent folded dimensions which are combined
in an unfolded manner. This tensor evolves during the Faa Di Bruno
for folded tensors and its basic operation is to add itself to a
tensor with nicer symmetry, here folded $\left[B_{y^3u^4}\right]$.
\item[USubTensor]
Class representing an unfolded full symmetry, row-oriented tensor which
contains a few columns of the huge product
$\prod_{m=1}^l\left[g_{s^{\vert c_m\vert}}\right]^{\gamma_m}_{c_m(\alpha)}$. This is
needed during the Faa Di Bruno for folded matrices.
\item[IrregTensor]
Class representing a product of columns of derivatives
$\left[z_{y^ku^l}\right]$, where $z=[y^T,v^T,w^T]^T$. Since the first
part of $z$ is $y$, the derivatives contain many zeros, which are not
stored, hence the tensor's irregularity. The tensor is used when
calculating one step of Faa Di Bruno formula, i.e.
$\left[h_{y^l}\right]_{\gamma_1\ldots\gamma_l}
\left(\sum_{c\in M_{l,k}}
\prod_{m=1}^l\left[g_{s^{\vert c_m\vert}}\right]^{\gamma_m}_{c_m(\alpha)}\right)$.
\item[FSSparseTensor]
Class representing full symmetry, column-oriented, sparse tensor. It
is able to store elements keyed by the multidimensional index, and to
multiply itself with one column of a row-oriented tensor.
\item[FGSContainer]
Container of \texttt{FGSTensor}s. It implements the Faa Di Bruno with
unfolded or folded tensor $h$ yielding folded $B$. The methods are
\texttt{FGSContainer::multAndAdd}.
\item[UGSContainer]
Container of \texttt{UGSTensor}s. It implements the Faa Di Bruno with
unfolded tensor $h$ yielding unfolded $B$. The method is
\texttt{UGSContainer::multAndAdd}.
\item[StackContainerInterface]
Pure virtual interface describing all the logic
of stacked containers for which we will do the Faa Di Bruno operation.
\item[UnfoldedStackContainer]
Implements the Faa Di Bruno operation for stack of
containers of unfolded tensors.
\item[FoldedStackContainer]
Implements the Faa Di Bruno for stack of
containers of folded tensors.
\item[ZContainer]
The class implements the interface \texttt{StackContainerInterface} according
to $z$ appearing in the context of DSGE models. By simple inheritance,
we obtain \texttt{UnfoldedZContainer} and also \texttt{FoldedZContainer}.
\item[GContainer]
The class implements the interface \texttt{StackContainerInterface} according
to $G$ appearing in the context of DSGE models. By simple inheritance,
we obtain \texttt{UnfoldedGContainer} and also \texttt{FoldedGContainer}.
\item[Equivalence]
The class represents an equivalence on an $n$-element set. Useful in the
Faa Di Bruno.
\item[EquivalenceSet]
The class representing all equivalences on an $n$-element set. Useful in the
Faa Di Bruno. A standalone sketch of how such equivalences can be
enumerated is given after this list.
\item[Symmetry]
The class defines a symmetry of a general symmetry tensor. That is, it
defines the basic shape of the tensor. For $\left[B_{y^2u^3}\right]$,
the symmetry is $y^2u^3$.
\item[Permutation]
The class represents a permutation of $n$ indices. Useful in the
Faa Di Bruno.
\item[IntSequence]
The class represents a sequence of integers. Useful everywhere.
\item[TwoDMatrix and ConstTwoDMatrix]
These classes provide an interface to the code handling two-dimensional
matrices. The code resides in the Sylvester module, in the directory {\tt
sylv/cc}. There is
no similar interface to the \texttt{Vector} and \texttt{ConstVector} classes from the
Sylvester module, and they are used directly.
\item[KronProdAll]
The class represents a Kronecker product of a sequence of arbitrary
matrices and is able to multiply a matrix from the right without
storing the Kronecker product in memory.
\item[KronProdAllOptim]
The same as \texttt{KronProdAll}, but it optimizes the order of the matrices in
the product to minimize the memory used during the Faa Di Bruno
operation. Note that it is also close to optimal in terms of flops.
\item[FTensorPolynomial and UTensorPolynomial]
Abstractions representing a polynomial whose coefficients are
folded/unfolded tensors and whose variable is a column vector. The classes
provide methods for traditional and Horner-like polynomial
evaluation. This is useful in simulation code.
\item[FNormalMoments and UNormalMoments]
These are containers for folded/unfolded single column tensors for
higher moments of normal distribution. The code contains an algorithm
for generating the moments for arbitrary covariance matrix.
\item[TLStatic]
The class encapsulates all static information needed for the
library. It includes precalculated equivalence sets.
\item[TLException]
Simple class thrown as an exception.
\end{description}
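As referenced in the description of \texttt{EquivalenceSet}, the following
standalone sketch (not the library's own code; all names are illustrative)
enumerates the equivalences of an $n$-element set: element $i$ is put either
into one of the classes already opened for the elements before it or into a
new class of its own, so the number of generated partitions is the Bell number.
\begin{verbatim}
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

// partition[i] = index of the class containing element i
using Partition = std::vector<std::size_t>;

static void enumerate(std::size_t n, std::size_t i, std::size_t nclasses,
                      Partition &current, std::vector<Partition> &out)
{
  if (i == n)
    {
      out.push_back(current);
      return;
    }
  // element i joins an existing class c < nclasses, or opens class c == nclasses
  for (std::size_t c = 0; c <= nclasses; c++)
    {
      current[i] = c;
      enumerate(n, i + 1, std::max(nclasses, c + 1), current, out);
    }
}

int main()
{
  const std::size_t n = 3;
  Partition current(n, 0);
  std::vector<Partition> all;
  enumerate(n, 0, 0, current, all);
  std::cout << all.size() << " equivalences on a " << n
            << "-element set\n"; // prints 5, the Bell number B_3
  for (const auto &p : all)
    {
      for (std::size_t i = 0; i < n; i++)
        std::cout << p[i] << ' ';
      std::cout << '\n';
    }
  return 0;
}
\end{verbatim}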
\section{Multi-threading}
The tensor library is multi-threaded. The basic property of the
thread implementation in the library is that we do not allow running
more concurrent threads than a preset limit. This prevents threads
from competing for memory in such a way that the OS constantly switches
among threads with frequent I/O for swaps, which may occur since a single
thread might need a lot of memory of its own. The threading support allows for
detached threads, and the synchronization points during the Faa Di Bruno
operation are relatively short, so the resulting load stays close to the
preset maximum number of parallel threads. A generic sketch of such
bounded dispatching follows.
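The following is a minimal generic sketch of the idea of capping the
number of concurrently running threads; it is not the library's own
threading code (which uses its own dispatcher and detached threads), and
the names \texttt{do\_job} and \texttt{max\_parallel} are purely
illustrative. Jobs are launched in groups of at most \texttt{max\_parallel}
threads, and the dispatcher waits for each group before starting the next.
\begin{verbatim}
#include <iostream>
#include <thread>
#include <vector>

// Stand-in for one work item; output from different threads may interleave.
static void do_job(int id)
{
  std::cout << "job " << id << " done\n";
}

int main()
{
  const unsigned max_parallel = 4; // preset limit on concurrent threads
  const int njobs = 10;

  int next = 0;
  while (next < njobs)
    {
      std::vector<std::thread> group;
      for (unsigned k = 0; k < max_parallel && next < njobs; k++, next++)
        group.emplace_back(do_job, next);
      for (auto &t : group)
        t.join(); // synchronization point for the group
    }
  return 0;
}
\end{verbatim}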
\section{Tests}
A few words about the library's test suite. The suite resides in
the directory {\tt tl/testing}. There is a file {\tt tests.cc} which
contains all the tests and the {\tt main()} function. There are also files
{\tt factory.hh} and {\tt factory.cc} implementing random generation
of various objects. The important property of these random objects is
that they are the same on every invocation. This is very
important in testing and debugging. Further, one can find the files {\tt
monoms.hh} and {\tt monoms.cc}. See below for their explanation.
There are a few types of tests:
\begin{itemize}
\item We test tensor indices. We go through various tensors with
various symmetries, convert indices from folded to unfolded and
vice versa, and test whether their coordinates are as expected.
\item We test the Faa Di Bruno by comparison of the results of
\texttt{FGSContainer::multAndAdd} against the results of \texttt{UGSContainer::multAndAdd}. The two
implementations are pretty different, so this is a good test.
\item We use the code in {\tt monoms.hh} and {\tt monoms.cc} to generate a
random vector function $f(x(y,u))$ along with the derivatives
$\left[f_x\right]$, $\left[x_{y^ku^l}\right]$, and
$\left[f_{y^ku^l}\right]$. Then we calculate the resulting derivatives
$\left[f_{y^ku^l}\right]$ using the \texttt{multAndAdd} method of \texttt{UGSContainer}
or \texttt{FGSContainer} and compare them with the derivatives provided by {\tt
monoms}. The functions generated in {\tt monoms} are monomials with
integer exponents, so the implementation of {\tt monoms} is quite
easy.
\item We do a similar thing for sparse tensors. In this case the {\tt monoms}
generate a function $f(y,v(y,u),w(y,u))$, provide all the derivatives
and the result $\left[f_{y^ku^l}\right]$. Then we calculate the
derivatives with \texttt{multAndAdd} of \texttt{ZContainer} and compare.
\item We test the polynomial evaluation by evaluating a folded and an
unfolded polynomial both in the traditional and in the Horner-like fashion. This gives
four methods in total, and the four results are compared. A scalar sketch
of the two evaluation orders is given after this list.
\end{itemize}
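The following is a minimal scalar sketch of the two evaluation orders that
the last test compares; the library works with tensor coefficients and a
vector argument, so this is only an illustration of the idea and none of
the names below belong to the library.
\begin{verbatim}
#include <cassert>
#include <cmath>
#include <cstddef>
#include <vector>

// Traditional evaluation: sum c[k] * x^k term by term.
double eval_traditional(const std::vector<double> &c, double x)
{
  double sum = 0.0;
  for (std::size_t k = 0; k < c.size(); k++)
    sum += c[k] * std::pow(x, static_cast<double>(k));
  return sum;
}

// Horner-like evaluation: nest the multiplications.
double eval_horner(const std::vector<double> &c, double x)
{
  double sum = 0.0;
  for (std::size_t k = c.size(); k-- > 0;)
    sum = sum * x + c[k];
  return sum;
}

int main()
{
  std::vector<double> c{1.0, -2.0, 0.5, 3.0}; // 1 - 2x + 0.5x^2 + 3x^3
  double x = 0.7;
  assert(std::abs(eval_traditional(c, x) - eval_horner(c, x)) < 1e-12);
  return 0;
}
\end{verbatim}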
\end{document}
\ No newline at end of file
doc/userguide/Graphics/DynareFigures.key/QuickLook/Thumbnail.jpg (31.6 KiB): File deleted