Use Dynare's own (trustregion) nonlinear solver for computing the steady state.

 - Added trustregion solver (close to MINPACK; the main difference
   from the MINPACK implementation is that the Jacobian is computed
   at each iteration). A usage sketch is given below.
 - Added unit tests for the trust region solver (rewrote the MINPACK
   test suite). All the examples pass.
 - Added a unit test for the steady state numerical solver.
 - Updated the preprocessor.
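
A minimal usage sketch of the new solver (illustrative only: the two-equation
system, the starting point and the tolerances below are made up, and the snippet
assumes julia/src is on the LOAD_PATH so that DynareSolvers can be loaded):

    using DynareSolvers

    # Hypothetical system: x1^2 + x2^2 = 2 and x1 = x2 (roots at (1,1) and (-1,-1)).
    f!(fval::Vector{Float64}, x::Vector{Float64}) = (fval[1] = x[1]^2+x[2]^2-2.0; fval[2] = x[1]-x[2])
    j!(fjac::Matrix{Float64}, x::Vector{Float64}) = (fjac[1,1] = 2.0*x[1]; fjac[1,2] = 2.0*x[2]; fjac[2,1] = 1.0; fjac[2,2] = -1.0)

    # trustregion(f!, j!, x0, tolx, tolf, maxiter) returns the candidate solution and an exit code.
    x, info = trustregion(f!, j!, [2.0, 0.5], 1.0e-6, 1.0e-6, 50)
    # info == 1 signals convergence; x should then be close to one of the two roots.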
parent 4900a789
@@ -3,7 +3,7 @@ using BinaryProvider
const verbose = "--verbose" in ARGS
const prefix = Prefix(get([a for a in ARGS if a != "--verbose"], 1, joinpath(@__DIR__, "usr")))
PREPROCESSOR_VERSION = "46a2272ef929a7acbf276fea2c43089bfc7d0e34"
PREPROCESSOR_VERSION = "70ec52a6cce2ad116b0de68eec505a4092e50a82"
REMOTE_PATH = "https://dynare.adjemian.eu/preprocessor/$PREPROCESSOR_VERSION"
products = Product[
@@ -11,11 +11,11 @@ products = Product[
]
download_info = Dict(
Linux(:i686, :glibc) => ("$REMOTE_PATH/linux/32/preprocessor.tar.gz", "3e847abfd259e3d21a28a315da322f3b326910e8f1d198efdc0bd200eaea00ec"),
Linux(:x86_64, :glibc) => ("$REMOTE_PATH/linux/64/preprocessor.tar.gz", "bd31db8a0b29f43ba63d8df7a96cde5349851bd72c562200dbfa9d37f5606219"),
MacOS() => ("$REMOTE_PATH/osx/64/preprocessor.tar.gz", "001fa0ab957623b1e5b01b8b7563b92f960adda79027f054d1f72df8ad60c30b"),
Windows(:i686) => ("$REMOTE_PATH/windows/32/preprocessor.tar.gz", "1be8d8fc8cc798dec811f36525ff5b5fd6bf78e84be659068df30d4cf737eaac"),
Windows(:x86_64) => ("$REMOTE_PATH/windows/64/preprocessor.tar.gz", "32485a1dde052e97b7844d731af1f1c0c47c5154f22e29b71789ffe9a1f25e8c"),
Linux(:i686, :glibc) => ("$REMOTE_PATH/linux/32/preprocessor.tar.gz", "dcfc2a15c35a6ee916cd0b20cec89a454082ad3aa512ea2dfd62ea2a22e58561"),
Linux(:x86_64, :glibc) => ("$REMOTE_PATH/linux/64/preprocessor.tar.gz", "1f9c619c2116542c0810ae2ba8d12525993d03a46d53322e49b3fad35f6c51e7"),
MacOS() => ("$REMOTE_PATH/osx/64/preprocessor.tar.gz", "4d082dbbee46810b7350d9594f1c1d848e37c871f2fa7456be60f2847682d3bf"),
Windows(:i686) => ("$REMOTE_PATH/windows/32/preprocessor.tar.gz", "e277c9dc9248ff57476612e42d7fd691ffd72cee1de4aea4926544906cc772e3"),
Windows(:x86_64) => ("$REMOTE_PATH/windows/64/preprocessor.tar.gz", "5a8cbe1f58664446c28e074b2e5b79aa23c64ccd84c7b0425d2003a29292af64"),
)
for p in products
module DynareSolvers
##
# Copyright (C) 2018 Dynare Team
#
# This file is part of Dynare.
#
# Dynare is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dynare is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Dynare. If not, see <http://www.gnu.org/licenses/>.
##
export trustregion
"""
dogleg!(x::Vector{Float64}, r::Matrix{Float64}, b::Vector{Float64}, d::Vector{Float64}, δ::Float64)
Given an `n` by `n` matrix `r`, an `n` by `1` vector `d` with nonzero entries, an `n` by `1`
vector `b`, and a positive number `δ`, determine the convex combination `x` of the
Gauss-Newton and scaled gradient directions that minimizes `r*x - b` in the least squares
sense, subject to the restriction that the Euclidean norm of `d.*x` be at most `δ`.
"""
function dogleg!(x::Vector{Float64}, r::Matrix{Float64}, b::Vector{Float64}, d::Vector{Float64}, δ::Float64)
n = length(x)
@assert length(d)==n
@assert length(b)==n
@assert size(r,1)==n
@assert size(r,2)==n
# Compute the Gauss-Newton direction.
x .= r\b
# Compute norm of scaled x.
qnorm = zero(Float64)
@inbounds for i = 1:n
qnorm += (d[i]*x[i])^2
end
qnorm = sqrt(qnorm)
if qnorm<=δ
# Gauss-Newton direction is acceptable. There is nothing to do here.
else
# Gauss-Newton direction is not acceptable…
# Compute the scaled gradient direction.
s = (r'*b)./d
# Compute the norm of the scaled gradient direction.
gnorm = norm(s)
if gnorm>0
# Normalize and rescale the gradient direction.
@inbounds for i = 1:n
s[i] = s[i]/(gnorm*d[i])
end
temp = norm(r*s)
sgnorm = gnorm/(temp*temp)
if sgnorm<=δ
# The scaled gradient direction is not acceptable…
# Compute the point along the dogleg at which the
# quadratic is minimized.
bnorm = norm(b)
temp1 = δ/qnorm
temp2 = sgnorm/δ
temp0 = bnorm*bnorm*temp2/(gnorm*qnorm)
temp0 -= temp1*temp2*temp2-sqrt((temp0-temp1)^2+(one(Float64)-temp1*temp1)*(one(Float64)-temp2*temp2))
α = temp1*(one(Float64)-temp2*temp2)/temp0
else
# The scaled gradient direction is acceptable.
α = zero(Float64)
end
else
# The norm of the scaled gradient direction is zero: take the Gauss-Newton direction, scaled to the trust region boundary.
α = δ/qnorm
sgnorm = .0
end
# Form the appropriate convex combination of the Gauss-Newton direction and the
# scaled gradient direction.
x .= α*x + (one(Float64)-α)*min(sgnorm, δ)*s
end
end
"""
trustregion(f!::Function, j!::Function, x0::Vector{Float64}, tolx::Float64, tolf::Float64, maxiter::Int)
Solves a system of nonlinear equations in several variables using a trust region method.
"""
function trustregion(f!::Function, j!::Function, x0::Vector{Float64}, tolx::Float64, tolf::Float64, maxiter::Int)
macheps = eps(Float64)
n, iter, info = length(x0), 1, 0
p1, p5, p001, p0001 = .1, .5, .001, .0001
t1, t2, t3, t4 = .1, .5, .001, 1.0e-4
fnorm, fnorm1 = one(Float64), one(Float64)
x = copy(x0)
xx = Vector{Float64}(n)
fval0 = Vector{Float64}(n) # residuals of the nonlinear equations
fval1 = Vector{Float64}(n) # residuals of the nonlinear equations (next iterate)
fjac = Matrix{Float64}(n,n) # Jacobian matrix of the nonlinear equations
fjaccnorm = Vector{Float64}(n) # norms of the columns of the Jacobian matrix
fjaccnorm__ = Vector{Float64}(n) # copy of fjaccnorm
w = Vector{Float64}(n)
p = Vector{Float64}(n)
# Initial evaluation of the residuals (and of their norm)
try
f!(fval0, x)
fnorm = norm(fval0)
catch
error("The system of nonlinear equations cannot be evaluated on the initial guess!")
end
# Initial evaluation of the Jacobian
try
j!(fjac, x)
catch
error("The Jacobian of the system of nonlinear equations cannot be evaluated on the initial guess!")
end
# Initialize counters
ncsucc = zero(Int)
nslow1 = zero(Int)
nslow2 = zero(Int)
# Newton iterations
while iter<=maxiter && info==0
# Compute the norms of the columns of the Jacobian matrix.
@inbounds for i=1:n
fjaccnorm[i] = zero(Float64)
@inbounds for j = 1:n
fjaccnorm[i] += fjac[j,i]*fjac[j,i]
end
fjaccnorm[i] = sqrt(fjaccnorm[i])
end
if iter==1
# On the first iteration, calculate the norm of the scaled vector of unknowns x
# and initialize the step bound δ. Scaling is done according to the norms of the
# columns of the initial Jacobian.
@inbounds for i = 1:n
fjaccnorm__[i] = fjaccnorm[i]
end
fjaccnorm__[find(abs.(fjaccnorm__).<macheps)] = one(Float64)
xnorm = zero(Float64)
@inbounds for i=1:n
xnorm += (fjaccnorm__[i]*x[i])^2
end
xnorm = sqrt(xnorm)
δ = max(xnorm, one(Float64))
else
fjaccnorm__ = max.(.1*fjaccnorm__, fjaccnorm)
end
# Determine the direction p (with trust region model defined in dogleg routine).
dogleg!(p, fjac, fval0, fjaccnorm__, δ)
@inbounds for i=1:n
w[i] = fval0[i]
@inbounds for j = 1:n
w[i] -= fjac[i,j]*p[j]
end
end
# Compute the norm of p.
pnorm = zero(Float64)
@inbounds for i=1:n
pnorm += (fjaccnorm__[i]*p[i])^2
end
pnorm = sqrt(pnorm)
# On the first iteration, adjust the initial step bound.
if iter==1
δ = min(δ, pnorm)
end
# Evaluate the function at xx = x - p and calculate its norm.
@inbounds for i = 1:n
xx[i] = x[i]-p[i]
end
f!(fval1, xx)
fnorm1 = norm(fval1)
# Compute the scaled actual reduction.
if fnorm1<fnorm
actualreduction = one(Float64)-(fnorm1/fnorm)^2
else
actualreduction = -one(Float64)
end
# Compute the scaled predicted reduction and the ratio of the actual to the
# predicted reduction.
t = norm(w)
ratio = zero(Float64)
if t<fnorm
predictedreduction = one(Float64) - (t/fnorm)^2
ratio = actualreduction/predictedreduction
end
# Update the radius of the trust region if need be.
if ratio<p1
# Reduction is much smaller than predicted… Reduce the radius of the trust region.
ncsucc = 0
δ = p5*δ
else
ncsucc += 1
if ratio>=p5 || ncsucc>1
δ = max(δ,pnorm/p5)
end
if abs(ratio-one(Float64))<p1
δ = pnorm/p5
end
end
if ratio>=1.0e-4
# Successful iteration. Update x, xnorm, fval0 and fnorm.
xnorm = zero(Float64)
@inbounds for i=1:n
x[i] = xx[i]
xnorm += (fjaccnorm__[i]*x[i])^2
fval0[i] = fval1[i]
end
xnorm = sqrt(xnorm)
fnorm = fnorm1
end
# Determine the progress of the iteration.
nslow1 += 1
if actualreduction>=p001
nslow1 = 0
end
# Test for convergence.
if δ<tolx*xnorm || fnorm<tolf
info = 1
continue
end
# Tests for termination and stringent tolerances.
if p1*max(p1*δ, pnorm)<=macheps*xnorm
# tolx is too small. No further improvement in
# the approximate solution x is possible.
info = 3
continue
end
if nslow1==15
# Iteration is not making good progress, as
# measured by the improvement over the last
# fifteen iterations.
info = 4
continue
end
# Compute the Jacobian for the next iteration.
j!(fjac, x)
iter += 1
end
if info==0 && iter>maxiter
info = 2
fill!(x, Inf)
end
return x, info
end
end
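
A small sketch of the dogleg subproblem described in the docstring above (the 2x2 system,
the scaling vector and the radius δ are made-up values; `dogleg!` is not exported, hence the
qualified call, and the snippet assumes DynareSolvers can be loaded as in the unit tests):

    using DynareSolvers

    r = [4.0 1.0; 1.0 3.0]   # square matrix (plays the role of the Jacobian)
    b = [1.0, 2.0]           # right-hand side (residuals)
    d = [1.0, 1.0]           # column scalings (nonzero entries)
    δ = 0.25                 # trust region radius, smaller than the Gauss-Newton step here
    x = Vector{Float64}(2)   # output buffer, overwritten by dogleg!

    DynareSolvers.dogleg!(x, r, b, d, δ)
    # The returned step satisfies norm(d.*x) <= δ (here it lies on the boundary).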
module SteadyState
##
# Copyright (C) 2016 Dynare Team
# Copyright (C) 2016-2018 Dynare Team
#
# This file is part of Dynare.
#
......@@ -19,17 +19,19 @@ module SteadyState
# along with Dynare. If not, see <http://www.gnu.org/licenses/>.
##
using NLsolve
using DynareSolvers
using AutoAligns
import DynareModel.Model
import DynareOutput.Output
export staticmodule
export steady, steady!
export steady_state, steady_state!
function steady(model::Model, oo::Output)
if model.analytical_steady_state || model.user_written_analytical_steady_state
steadystate = zeros(length(model.endo))
steadystate = zeros(model.endo_nbr)
model.steady_state(steadystate, oo.exo_steady_state, model.params)
return steadystate
else
......@@ -40,61 +42,109 @@ end
function steady!(model::Model, oo::Output)
if model.analytical_steady_state || model.user_written_analytical_steady_state
model.steady_state(oo.steady_state, oo.exo_steady_state, model.params)
return
else
error("You have to provide a closed form solution for the steady state, or declare a guess\nfor the steady state as a third input argument.")
end
end
function steady(model::Model, oo::Output, yinit::Vector{Float64})
f!(fval::Vector{Float64}, y::Vector{Float64}) = model.static(y, oo.exo_steady_state, model.params, fval)
j!(fjac::Matrix{Float64}, y::Vector{Float64}) = model.static(y, oo.exo_steady_state, model.params, fjac)
fj!(fval::Vector{Float64}, fjac::Matrix{Float64}, y::Vector{Float64}) = model.static(y, oo.exo_steady_state, model.params, fval, fjac)
r = nlsolve(f!, j!, fj!, yinit, show_trace=false)
if converged(r)
return r.zero
T = zeros(Float64, sum(model.temporaries.static[1:2]))
f!(fval::Vector{Float64}, y::Vector{Float64}) = model.static(T, fval, y , oo.exo_steady_state, model.params)
j!(fjac::Matrix{Float64}, y::Vector{Float64}) = model.static(T, fjac, y, oo.exo_steady_state, model.params)
ys, info = trustregion(f!, j!, yinit, 1.0e-6, 1.0e-6, 100)
return ys, info
end
function steady!(model::Model, oo::Output, yinit::Vector{Float64})
T = zeros(Float64, sum(model.temporaries.static[1:2]))
f!(fval::Vector{Float64}, y::Vector{Float64}) = model.static(T, fval, y , oo.exo_steady_state, model.params)
j!(fjac::Matrix{Float64}, y::Vector{Float64}) = model.static(T, fjac, y, oo.exo_steady_state, model.params)
oo.steady_state, info = trustregion(f!, j!, yinit, 1.0e-6, 1.0e-6, 100)
end
function steady_state(model::Model, oo::Output, yinit::Vector{Float64}, display::Bool=true)
ys, info = steady(model, oo, yinit)
if info==1
if display
display_steady_state(model, oo, ys)
end
else
return fill(NaN, length(yinit))
error("Steady state not found!")
end
return ys, info
end
function steady!(model::Model, oo::Output, yinit::Vector{Float64})
f!(fval::Vector{Float64}, y::Vector{Float64}) = model.static(y, oo.exo_steady_state, model.params, fval)
j!(fjac::Matrix{Float64}, y::Vector{Float64}) = model.static(y, oo.exo_steady_state, model.params, fjac)
fj!(fval::Vector{Float64}, fjac::Matrix{Float64}, y::Vector{Float64}) = model.static(y, oo.exo_steady_state, model.params, fval, fjac)
r = nlsolve(f!, j!, fj!, yinit, show_trace=false)
if converged(r)
oo.steady_state = r.zero
function steady_state!(model::Model, oo::Output, yinit::Vector{Float64}, display::Bool=true)
steady!(model, oo, yinit)
if issteadystate(model, oo, oo.steady_state)
if display
display_steady_state(model, oo)
end
else
oo.steady_state = fill(NaN, length(yinit))
error("Steady state not found!")
end
end
function steady_state(model::Model, oo::Output)
function steady_state(model::Model, oo::Output, display::Bool=true)
ys = steady(model, oo)
display_steady_state(model, oo, ys)
if !issteadystate(model, oo, ys)
error("Steady state provided in steady state block/file is not correct!")
end
if display
display_steady_state(model, oo, ys)
end
return ys, info
end
function steady_state!(model::Model, oo::Output)
function steady_state!(model::Model, oo::Output, display::Bool=true)
steady!(model, oo)
display_steady_state(model, oo, oo.steady_state)
if !issteadystate(model, oo, oo.steady_state)
error("Steady state provided in steady state block/file is not correct!")
end
if issteadystate(oo.steady_state)
if display
display_steady_state(model, oo)
end
else
error("Steady state not found!")
end
end
function display_steady_state(model::Model, oo::Output, ys::Vector{Float64})
println("\n\nSTEADY STATE:\n")
for i = 1:length(model.endo)
println(string(model.endo[i].name, " = ", ys[i]))
function display_steady_state(model::Model, oo::Output)
aa = AutoAlign(align = Dict(1 => left, 2=> left, :default => right))
println("\n Deterministic Steady State:\n")
for i = 1:model.endo_nbr
if abs(oo.steady_state[i])<1.0e-6
tmp = zero(Float64)
else
tmp = oo.steady_state[i]
end
print(aa, " ", model.endo[i].name, "\t=\t", @sprintf("%.6f", tmp))
println(aa)
end
print(STDOUT, aa)
end
function issteadystate(model::Model, oo::Output, ys::Vector{Float64})
residuals = zeros(Float64, length(ys))
compute_static_model_residuals!(model, oo, ys, residuals)
return maximum(abs(residuals))<1e-6
function display_steady_state(model::Model, oo::Output, ys::Vector{Float64})
aa = AutoAlign(align = Dict(1 => left, 2=> left, :default => right))
println("\n Deterministic Steady State:\n")
for i = 1:model.endo_nbr
if abs(ys[i])<1.0e-6
tmp = zero(Float64)
else
tmp = ys[i]
end
print(aa, " ", model.endo[i].name, "\t=\t", @sprintf("%.6f", tmp))
println(aa)
end
print(STDOUT, aa)
end
function compute_static_model_residuals!(model::Model, oo::Output, ys::Vector{Float64}, residuals::Vector{Float64})
model.static(ys, oo.exo_steady_state, model.params, residuals)
function issteadystate(model::Model, oo::Output, ys::Vector{Float64})
r = zeros(Float64, model.endo_nbr)
t = zeros(Float64, model.temporaries.static[1])
model.static(t, r, ys, oo.exo_steady_state, model.params)
return norm(r)<1e-6
end
end
#=
** This script performs tests for nonlinear solvers.
**
** Copyright (C) 2018 Dynare Team
**
** This file is part of Dynare.
**
** Dynare is free software: you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation, either version 3 of the License, or
** (at your option) any later version.
**
** Dynare is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License
** along with Dynare. If not, see <http://www.gnu.org/licenses/>.
=#
rootdir = @__DIR__
origdir = pwd()
if isempty(findin([abspath("$(rootdir)/../../src")], LOAD_PATH))
unshift!(LOAD_PATH, abspath("$(rootdir)/../../src"))
end
using Base.Test
using TestFunctions
cd("$(rootdir)")
@testset "test unit-test-routines" begin
@test begin
try
x = rosenbrock()
f = Vector{Float64}(2)
j = Matrix{Float64}(2,2)
rosenbrock!(f, x)
rosenbrock!(j, x)
true
catch
false
end
end
@test begin
try
x = powell1()
n = length(x)
f = Vector{Float64}(n)
j = Matrix{Float64}(n,n)
powell1!(f, x)
powell1!(j, x)
true
catch
false
end
end
@test begin
try
x = powell2()
n = length(x)
f = Vector{Float64}(n)
j = Matrix{Float64}(n,n)
powell2!(f, x)
powell2!(j, x)
true
catch
false
end
end
@test begin
try
x = wood()
n = length(x)
f = Vector{Float64}(n)
j = Matrix{Float64}(n,n)
wood!(f, x)
wood!(j, x)
true
catch
false
end
end
@test begin
try
x = helicalvalley()
n = length(x)
f = Vector{Float64}(n)
j = Matrix{Float64}(n,n)
helicalvalley!(f, x)
helicalvalley!(j, x)
true
catch
false
end
end
@test begin
try
x = watson(10)
n = length(x)
f = Vector{Float64}(n)
j = Matrix{Float64}(n,n)
watson!(f, x)
watson!(j, x)
true
catch
false
end
end
@test begin
try
x = chebyquad(10)
n = length(x)
f = Vector{Float64}(n)
j = Matrix{Float64}(n,n)
chebyquad!(f, x)
chebyquad!(j, x)
true
catch
false
end
end
@test begin
try
x = brown(10)
n = length(x)
f = Vector{Float64}(n)
j = Matrix{Float64}(n,n)
brown!(f, x)
brown!(j, x)
true
catch
false
end
end
@test begin
try
x = discreteboundaryvalue(10)
n = length(x)
f = Vector{Float64}(n)
j = Matrix{Float64}(n,n)
discreteboundaryvalue!(f, x)
discreteboundaryvalue!(j, x)
true
catch
false
end
end
@test begin
try
x = discreteintegralequation(10)
n = length(x)
f = Vector{Float64}(n)
j = Matrix{Float64}(n,n)
discreteintegralequation!(f, x)
discreteintegralequation!(j, x)
true
catch
false
end
end
@test begin
try
x = trigonometric(10)
n = length(x)
f = Vector{Float64}(n)
j = Matrix{Float64}(n,n)
trigonometric!(f, x)
trigonometric!(j, x)
true
catch
false
end
end
@test begin
try
x = variablydimensioned(10)
n = length(x)
f = Vector{Float64}(n)
j = Matrix{Float64}(n,n)
variablydimensioned!(f, x)
variablydimensioned!(j, x)