### Improved convergence by adding backtracking linesearch.

```If the nonlinear equations or the jacobian matrix cannot be evaluated,
typically because the algorithm tries negative values for the unknowns
while they have to be positive, a naive backtracking linesearch is used.```
parent d1fac275
 ... ... @@ -30,7 +30,8 @@ const macheps = eps(Float64) type TrustRegionWA x::Vector{Float64} # Vector of unknowns xx::Vector{Float64} # Vector of unknowns fval0::Vector{Float64} # residuals of the non linear equations fval::Vector{Float64} # residuals of the non linear equations fval0::Vector{Float64} fval1::Vector{Float64} # residuals of the non linear equations (next) fjac::Matrix{Float64} # jacobian matrix of the non linear equations fjaccnorm::Vector{Float64} # norms of the columns of the Jacobian matrix ... ... @@ -41,7 +42,7 @@ type TrustRegionWA end function TrustRegionWA(n::Int) TrustRegionWA(Vector{Float64}(n), Vector{Float64}(n), Vector{Float64}(n), Vector{Float64}(n), Matrix{Float64}(n,n), TrustRegionWA(Vector{Float64}(n), Vector{Float64}(n), Vector{Float64}(n), Vector{Float64}(n), Vector{Float64}(n), Matrix{Float64}(n,n), Vector{Float64}(n), Vector{Float64}(n), Vector{Float64}(n), Vector{Float64}(n), Vector{Float64}(n)) end ... ... @@ -162,12 +163,13 @@ which holds various working arrays used in the function (avoiding array instanti """ function trustregion(f!::Function, j!::Function, x0::Vector{Float64}, factor::Float64, tolx::Float64, tolf::Float64, maxiter::Int, wa::TrustRegionWA) n, iter, info = length(x0), 1, 0 fnorm, fnorm1 = one(Float64), one(Float64) xnorm, xnorm0 = one(Float64), one(Float64) fnorm, fnorm1, fnorm0 = one(Float64), one(Float64), one(Float64) wa.x .= copy(x0) # Initial evaluation of the residuals (and compute the norm of the residuals) try f!(wa.fval0, wa.x) fnorm = norm(wa.fval0) f!(wa.fval, wa.x) fnorm = norm(wa.fval) catch error("The system of nonlinear equations cannot be evaluated on the initial guess!") end ... ... @@ -180,7 +182,6 @@ function trustregion(f!::Function, j!::Function, x0::Vector{Float64}, factor::Fl # Initialize counters ncsucc = zero(Int) nslow1 = zero(Int) nslow2 = zero(Int) # Newton iterations while iter<=maxiter && info==0 # Compute columns norm for the Jacobian matrix. ... ... 
@@ -212,13 +213,7 @@ function trustregion(f!::Function, j!::Function, x0::Vector{Float64}, factor::Fl wa.fjaccnorm__ .= max.(.1*wa.fjaccnorm__, wa.fjaccnorm) end # Determine the direction p (with trust region model defined in dogleg routine). dogleg!(wa.p, wa.fjac, wa.fval0, wa.fjaccnorm__, δ, wa.s) @inbounds for i=1:n wa.w[i] = wa.fval0[i] @inbounds for j = 1:n wa.w[i] -= wa.fjac[i,j]*wa.p[j] end end dogleg!(wa.p, wa.fjac, wa.fval, wa.fjaccnorm__, δ, wa.s) # Compute the norm of p. pnorm = zero(Float64) @inbounds for i=1:n ... ... @@ -229,82 +224,133 @@ function trustregion(f!::Function, j!::Function, x0::Vector{Float64}, factor::Fl if iter==1 δ = min(δ, pnorm) end # Evaluate the function at xx = x+p and calculate its norm. @inbounds for i = 1:n wa.xx[i] = wa.x[i]-wa.p[i] end f!(wa.fval1, wa.xx) fnorm1 = norm(wa.fval1) # Compute the scaled actual reduction. if fnorm1=p5 || ncsucc>1 δ = max(δ,pnorm/p5) fwrong, jwrong, scale = true, true, one(Float64) while (fwrong || jwrong) && scale>.0005 # Move along the direction p. Set a candidate value for x and predicted improvement for f. @inbounds for i=1:n wa.xx[i] = wa.x[i]-scale*wa.p[i] wa.w[i] = wa.fval[i] @inbounds for j = 1:n wa.w[i] -= scale*wa.fjac[i,j]*wa.p[j] end end if abs(ratio-one(Float64))=1.0e-4 # Succesfull iteration. Update x, xnorm, fval0, fnorm and fjac. xnorm = zero(Float64) fnorm1 = norm(wa.fval1) # Compute the scaled actual reduction. if fnorm1=p5 || ncsucc>1 δ = max(δ,pnorm/p5) end if abs(ratio-one(Float64))=1.0e-4 # Succesfull iteration. Update x, xnorm, fval, fnorm and fjac. xnorm = zero(Float64) @inbounds for i=1:n # Update of x wa.x[i] = wa.xx[i] xnorm += (wa.fjaccnorm__[i]*wa.x[i])^2 # Update fval wa.fval[i] = wa.fval1[i] end xnorm = sqrt(xnorm) fnorm = fnorm1 end # Determine the progress of the iteration. nslow0 = nslow1 nslow1 += 1 if actualreduction>=p001 nslow1 = 0 end # Test for convergence. if δ=p001 nslow1 = 0 end # Test for convergence. 
if δmaxiter info = 2 fill!(wa.x, Inf) return info end return info end ... ...
 ... ... @@ -95,7 +95,7 @@ steadystate = [1.080683, 0.803593, 11.083609, 0.000000, 0.291756, 0.000000] end @test begin try yinit = [20.0, .1, 30.0, 0.0, .3, 0.1] yinit = [20.0, .1, 3.0, 0.0, .3, 0.1] steady!(model, oo, yinit) all((oo.steady_state-steadystate).<1.0e-6) catch ... ...
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!