diff --git a/matlab/dynare_estimation.m b/matlab/dynare_estimation.m
index ba49a1c1805e1a82c0047174d132fe58bad8996b..a70a760bda4bb3278cc1c5f9a0e5ecf4cc43d914 100644
--- a/matlab/dynare_estimation.m
+++ b/matlab/dynare_estimation.m
@@ -91,7 +91,7 @@ else
     dynare_estimation_1(var_list,dname);
 end
 
-if options_.mode_compute && options_.analytic_derivation,
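+% mode_compute may now be a string holding the name of a user-supplied
+% optimizer (see the otherwise-branch of dynare_minimize_objective.m), so
+% numeric comparisons are guarded by isnumeric()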
+if isnumeric(options_.mode_compute) && options_.mode_compute && options_.analytic_derivation,
     options_.analytic_derivation=analytic_derivation0;
 end
 
diff --git a/matlab/dynare_estimation_1.m b/matlab/dynare_estimation_1.m
index 5fdd918952b8e065b84e0ff710ae598e5ba0d897..eba52ea289a107368114de7406c269713decbc4c 100644
--- a/matlab/dynare_estimation_1.m
+++ b/matlab/dynare_estimation_1.m
@@ -261,14 +261,14 @@ if ~isequal(options_.mode_compute,0) && ~options_.mh_posterior_mode_estimation
     [xparam1, fval, exitflag, hh, options_, Scale] = dynare_minimize_objective(objective_function,xparam1,options_.mode_compute,options_,[bounds.lb bounds.ub],bayestopt_.name,bayestopt_,hh,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
     fprintf('\nFinal value of minus the log posterior (or likelihood):%f \n', fval);
 
-    if options_.mode_compute==5 && options_.analytic_derivation==-1 %reset options changed by newrat
+    if isnumeric(options_.mode_compute) && options_.mode_compute==5 && options_.analytic_derivation==-1 %reset options changed by newrat
             options_.analytic_derivation = options_analytic_derivation_old; %reset      
-    elseif options_.mode_compute==6 %save scaling factor
+    elseif isnumeric(options_.mode_compute) && options_.mode_compute==6 %save scaling factor
         save([M_.fname '_optimal_mh_scale_parameter.mat'],'Scale');
         options_.mh_jscale = Scale;
         bayestopt_.jscale = ones(length(xparam1),1)*Scale;
     end       
-    if ~isequal(options_.mode_compute,6) %always already computes covariance matrix
+    if ~isnumeric(options_.mode_compute) || ~isequal(options_.mode_compute,6) %mode_compute=6 already computes the covariance matrix
         if options_.cova_compute == 1 %user did not request covariance not to be computed
             if options_.analytic_derivation && strcmp(func2str(objective_function),'dsge_likelihood'),
                 ana_deriv_old = options_.analytic_derivation;
@@ -276,12 +276,12 @@ if ~isequal(options_.mode_compute,0) && ~options_.mh_posterior_mode_estimation
                 [junk1, junk2, hh] = feval(objective_function,xparam1, ...
                     dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
                 options_.analytic_derivation = ana_deriv_old;
-            elseif ~(isequal(options_.mode_compute,5) && newratflag~=1), 
+            elseif ~isnumeric(options_.mode_compute) || ~(isequal(options_.mode_compute,5) && newratflag~=1), 
                 % with flag==0, we force to use the hessian from outer
                 % product gradient of optimizer 5
                 hh = reshape(hessian(objective_function,xparam1, ...
                     options_.gstep,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_),nx,nx);
-            elseif isequal(options_.mode_compute,5)
+            elseif isnumeric(options_.mode_compute) && isequal(options_.mode_compute,5)
                 % other numerical hessian options available with optimizer 5
                 %
                 % if newratflag == 0
diff --git a/matlab/global_initialization.m b/matlab/global_initialization.m
index 93deba482bfb863006b1beb50418e16f0c05bf85..4ec37b459cf3c9a6bbcb4ab0f66aecc9eab9c497 100644
--- a/matlab/global_initialization.m
+++ b/matlab/global_initialization.m
@@ -525,6 +525,31 @@ simpsa.MAX_FUN_EVALS = 20000;
 simpsa.DISPLAY = 'iter';
 options_.simpsa = simpsa;
 
+%solveopt optimizer
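+% (defaults read by mode_compute=101 in dynare_minimize_objective.m;
+% overridable there via the optim_opt key/value list)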
+solveopt.minimizer_indicator=-1; %use minimizer
+solveopt.TolX=1e-6; %accuracy of argument
+solveopt.TolFun=1e-6; %accuracy of function 
+solveopt.MaxIter=15000;
+solveopt.verbosity=1;
+solveopt.TolXConstraint=1.e-8;
+solveopt.SpaceDilation=2.5;
+solveopt.LBGradientStep=1.e-11;
+options_.solveopt=solveopt;
+
+%simulated annealing
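+% (defaults read by mode_compute=2 in dynare_minimize_objective.m;
+% overridable there via the optim_opt key/value list)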
+options_.saopt.neps=10;
+options_.saopt.maximizer_indicator=0;
+options_.saopt.rt=0.10;
+options_.saopt.MaxIter=100000;
+options_.saopt.verbosity=1;
+options_.saopt.TolFun=1.0e-8;
+options_.saopt.initial_temperature=15;
+options_.saopt.ns=10;
+options_.saopt.nt=10;
+options_.saopt.step_length_c=0.1;
+options_.saopt.initial_step_length=1;
+
 % prior analysis
 options_.prior_mc = 20000;
 options_.prior_analysis_endo_var_list = [];
diff --git a/matlab/optimization/apprgrdn.m b/matlab/optimization/apprgrdn.m
new file mode 100644
index 0000000000000000000000000000000000000000..9f16e44667b2c415968c541884bc1031642a04ff
--- /dev/null
+++ b/matlab/optimization/apprgrdn.m
@@ -0,0 +1,73 @@
+function g = apprgrdn(x,f,fun,deltax,obj,varargin)
+% g = apprgrdn(x,f,fun,deltax,obj,varargin)
+% Performs the finite difference approximation of the gradient <g> at a
+% point <x> used in solveopt
+% 
+% Inputs:
+% x:        point at which to evaluate the gradient
+% f:        calculated function value at the point x
+% fun:      name of the Matlab function calculating the function values
+% deltax:   vector of the relative stepsizes
+% obj:      flag indicating whether the gradient of the objective
+%           function (1) or the constraint function (0) is to be calculated.
+%
+% Modified by Giovanni Lombardo and Johannes Pfeifer to accommodate the
+% Dynare structure
+%
+% 
+% Copyright (C) 1997-2008, Alexei Kuntsevich and Franz Kappel 
+% Copyright (C) 2008-2015 Giovanni Lombardo
+% Copyright (C) 2015 Dynare Team
+%
+% This file is part of Dynare.
+%
+% Dynare is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% Dynare is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with Dynare.  If not, see <http://www.gnu.org/licenses/>.
+
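+% Implementation note: a forward difference with relative step di
+% proportional to |x(i)| and floored away from zero is used; for the
+% objective (obj=1), components whose step had to be floored (idx) get a
+% two-sided difference averaged in below to reduce the forward-step bias.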
+n=max(size(x)); ee=ones(size(x));
+di=abs(x); idx=find(di<5e-15); di(idx)=5e-15*ee(idx);
+di=deltax.*di;
+if obj
+    idx=find(abs(di)<2e-10); 
+    di(idx)=2e-10*sign(di(idx));
+else
+    idx=find(abs(di)<5e-15); 
+    di(idx)=5e-15*sign(di(idx));
+end
+y=x;
+
+g=NaN(n,1);
+for i=1:n
+    y(i)=x(i)+di(i);
+    fi=feval(fun,y,varargin{:});
+    if obj
+        if fi==f,
+            for j=1:3
+                di(i)=di(i)*10;  y(i)=x(i)+di(i);
+                fi=feval(fun,y,varargin{:});
+                if fi~=f
+                    break
+                end
+            end
+        end
+    end
+    g(i)=(fi-f)/di(i);
+    if obj
+        if ~isempty(idx) && any(idx==i)
+            y(i)=x(i)-di(i);
+            fi=feval(fun,y,varargin{:});
+            g(i)=.5*(g(i)+(f-fi)/di(i));
+        end
+    end
+    y(i)=x(i);
+end
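+
+% Usage sketch (hypothetical objective 'myfun', assumed on the path):
+%   f = myfun(x);
+%   g = apprgrdn(x, f, 'myfun', 1e-6*ones(size(x)), 1);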
diff --git a/matlab/optimization/dynare_minimize_objective.m b/matlab/optimization/dynare_minimize_objective.m
index 4113bf119997c64623493255e48f9f80557b7aa1..c0259880b3d353d790c68f356c0dc4a9eac35c62 100644
--- a/matlab/optimization/dynare_minimize_objective.m
+++ b/matlab/optimization/dynare_minimize_objective.m
@@ -46,14 +46,14 @@ function [opt_par_values,fval,exitflag,hessian_mat,options_,Scale]=dynare_minimi
 %% set bounds and parameter names if not already set
 n_params=size(start_par_value,1);
 if isempty(bounds)
-    if minimizer_algorithm==10
+    if isnumeric(minimizer_algorithm) && minimizer_algorithm==10
         error('Algorithm 10 (simpsa) requires upper and lower bounds')
     else
         bounds=[-Inf(n_params,1) Inf(n_params,1)];
     end
 end
 
-if minimizer_algorithm==10 && any(any(isinf(bounds)))
+if isnumeric(minimizer_algorithm) && minimizer_algorithm==10 && any(any(isinf(bounds)))
     error('Algorithm 10 (simpsa) requires finite upper and lower bounds')
 end
 
@@ -87,7 +87,54 @@ switch minimizer_algorithm
     [opt_par_values,fval,exitflag,output,lamdba,grad,hessian_mat] = ...
         fmincon(objective_function,start_par_value,[],[],[],[],bounds(:,1),bounds(:,2),[],optim_options,varargin{:});
   case 2
-    error('Optimization algorithm 1 option (Lester Ingber''s Adaptive Simulated Annealing) is no longer available')
+    %simulated annealing
+    sa_options = options_.saopt;
+    if ~isempty(options_.optim_opt)
+        options_list = read_key_value_string(options_.optim_opt);
+        for i=1:rows(options_list)
+            switch options_list{i,1}
+              case 'neps'
+                sa_options.neps = options_list{i,2};
+              case 'rt'
+                sa_options.rt = options_list{i,2};
+              case 'MaxIter'
+                sa_options.MaxIter = options_list{i,2};
+              case 'TolFun'
+                sa_options.TolFun = options_list{i,2};
+              case 'verbosity'
+                sa_options.verbosity = options_list{i,2};
+              case 'initial_temperature'
+                sa_options.initial_temperature = options_list{i,2};
+              case 'ns'
+                sa_options.ns = options_list{i,2};
+              case 'nt'
+                sa_options.nt = options_list{i,2};
+              case 'step_length_c'
+                sa_options.step_length_c = options_list{i,2};
+              case 'initial_step_length'
+                sa_options.initial_step_length = options_list{i,2};
+              otherwise
+                warning(['simulated_annealing: Unknown option (' options_list{i,1} ')!'])
+            end
+        end
+    end
+    npar=length(start_par_value);
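+    % simulated_annealing samples uniformly within [LB,UB], so infinite
+    % bounds are replaced by a large finite box (+/-1e6)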
+    LB=bounds(:,1);
+    LB(isinf(LB))=-1e6;
+    UB=bounds(:,2);
+    UB(isinf(UB))=1e6;
+    fprintf('\nNumber of parameters= %d, initial temperature= %4.3f \n', npar,sa_options.initial_temperature);
+    fprintf('rt=  %4.3f; TolFun=  %4.3f; ns=  %4.3f;\n',sa_options.rt,sa_options.TolFun,sa_options.ns);
+    fprintf('nt=  %4.3f; neps=  %4.3f; MaxIter=  %d\n',sa_options.nt,sa_options.neps,sa_options.MaxIter);
+    fprintf('Initial step length(vm): %4.3f; step_length_c: %4.3f\n', sa_options.initial_step_length,sa_options.step_length_c);
+    fprintf('%-20s  %-6s    %-6s    %-6s\n','Name:', 'LB;','Start;','UB;');
+    for pariter=1:npar
+        fprintf('%-20s  %6.4f;   %6.4f;  %6.4f;\n',parameter_names{pariter}, LB(pariter),start_par_value(pariter),UB(pariter));
+    end
+    sa_options.initial_step_length= sa_options.initial_step_length*ones(npar,1); %bring step length to correct vector size
+    sa_options.step_length_c= sa_options.step_length_c*ones(npar,1); %bring step_length_c to correct vector size
+    [opt_par_values, fval,exitflag, n_accepted_draws, n_total_draws, n_out_of_bounds_draws, t, vm] =...
+        simulated_annealing(objective_function,start_par_value,sa_options,LB,UB,varargin{:});
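+    % t and vm (final temperature and step length vector) are diagnostic
+    % outputs of simulated_annealing and are not stored beyond this scope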
   case 3
     if isoctave && ~user_has_octave_forge_package('optim')
         error('Optimization algorithm 3 requires the optim package')
@@ -278,45 +325,42 @@ switch minimizer_algorithm
      options_.cova_compute = 0 ;
      [opt_par_values,stdh,lb_95,ub_95,med_param] = online_auxiliary_filter(start_par_value,varargin{:}) ;
   case 101
-    myoptions=soptions;
-    myoptions(2)=1e-6; %accuracy of argument
-    myoptions(3)=1e-6; %accuracy of function (see Solvopt p.29)
-    myoptions(5)= 1.0;
-    [opt_par_values,fval]=solvopt(start_par_value,objective_function,[],myoptions,varargin{:});
+    solveoptoptions = options_.solveopt;
+    if ~isempty(options_.optim_opt)
+        options_list = read_key_value_string(options_.optim_opt);
+        for i=1:rows(options_list)
+            switch options_list{i,1}
+              case 'TolX'
+                solveoptoptions.TolX = options_list{i,2};
+              case 'TolFun'
+                solveoptoptions.TolFun = options_list{i,2};
+              case 'MaxIter'
+                solveoptoptions.MaxIter = options_list{i,2};
+              case 'verbosity'
+                solveoptoptions.verbosity = options_list{i,2};
+              case 'SpaceDilation'
+                solveoptoptions.SpaceDilation = options_list{i,2};
+              case 'LBGradientStep'
+                solveoptoptions.LBGradientStep = options_list{i,2};
+              otherwise
+                warning(['solveopt: Unknown option (' options_list{i,1} ')!'])
+            end
+        end
+    end
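+    % the empty 3rd to 5th arguments (grad, func, gradc) mean: no analytic
+    % gradient and an unconstrained problem, so solvopt falls back to
+    % finite-difference gradients via apprgrdn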
+    [opt_par_values,fval]=solvopt(start_par_value,objective_function,[],[],[],solveoptoptions,varargin{:});
   case 102
-    %simulating annealing
-    LB = start_par_value - 1;
-    UB = start_par_value + 1;
-    neps=10;
-    %  Set input parameters.
-    maxy=0;
-    epsilon=1.0e-9;
-    rt_=.10;
-    t=15.0;
-    ns=10;
-    nt=10;
-    maxevl=100000000;
-    idisp =1;
-    npar=length(start_par_value);
-
-    disp(['size of param',num2str(length(start_par_value))])
-    c=.1*ones(npar,1);
-    %*  Set input values of the input/output parameters.*
-
-    vm=1*ones(npar,1);
-    disp(['number of parameters= ' num2str(npar) 'max= '  num2str(maxy) 't=  ' num2str(t)]);
-    disp(['rt_=  '  num2str(rt_) 'eps=  '  num2str(epsilon) 'ns=  '  num2str(ns)]);
-    disp(['nt=  '  num2str(nt) 'neps= '   num2str(neps) 'maxevl=  '  num2str(maxevl)]);
-    disp '  ';
-    disp '  ';
-    disp(['starting values(x)  ' num2str(start_par_value')]);
-    disp(['initial step length(vm)  '  num2str(vm')]);
-    disp(['lower bound(lb)', 'initial conditions', 'upper bound(ub)' ]);
-    disp([LB start_par_value UB]);
-    disp(['c vector   ' num2str(c')]);
-
-    [opt_par_values, fval, nacc, nfcnev, nobds, ier, t, vm] = sa(objective_function,xparstart_par_valueam1,maxy,rt_,epsilon,ns,nt ...
-                                                          ,neps,maxevl,LB,UB,c,idisp ,t,vm,varargin{:});
+    if isoctave
+        error('Optimization algorithm 102 is not available under Octave')
+    elseif ~user_has_matlab_license('GADS_Toolbox')
+        error('Optimization algorithm 102 requires the Global Optimization Toolbox')
+    end
+    % Set default optimization options for simulannealbnd.
+    optim_options = saoptimset('display','iter','TolFun',1e-8);
+    if ~isempty(options_.optim_opt)
+        eval(['optim_options = saoptimset(optim_options,' options_.optim_opt ');']);
+    end
+    func = @(x)objective_function(x,varargin{:});
+    [opt_par_values,fval,exitflag,output] = simulannealbnd(func,start_par_value,bounds(:,1),bounds(:,2),optim_options);
   otherwise
     if ischar(minimizer_algorithm)
         if exist(options_.mode_compute)
diff --git a/matlab/optimization/gmhmaxlik.m b/matlab/optimization/gmhmaxlik.m
index 4501f845c29ef1f6492937191faa60e1f38333bf..6ac4f79c96876940982a7550b95408a14895ad1a 100644
--- a/matlab/optimization/gmhmaxlik.m
+++ b/matlab/optimization/gmhmaxlik.m
@@ -20,7 +20,7 @@ function [PostMode, HessianMatrix, Scale, ModeValue] = gmhmaxlik(fun, xinit, Hin
 % Set default options
 
 if ~isempty(Hinit);
-    gmhmaxlikOptionsptions.varinit = 'previous';
+    gmhmaxlikOptions.varinit = 'previous';
 else
     gmhmaxlikOptions.varinit = 'prior';
 end
diff --git a/matlab/optimization/simulated_annealing.m b/matlab/optimization/simulated_annealing.m
new file mode 100644
index 0000000000000000000000000000000000000000..5b505b71d6246d288da9c2e72b60e2c46dafaaf1
--- /dev/null
+++ b/matlab/optimization/simulated_annealing.m
@@ -0,0 +1,459 @@
+function [xopt, fopt,exitflag, n_accepted_draws, n_total_draws, n_out_of_bounds_draws, t, vm] = ...
+    simulated_annealing(fcn,x,optim,lb,ub,varargin)
+% function [xopt, fopt,exitflag, n_accepted_draws, n_total_draws, n_out_of_bounds_draws, t, vm] = ...
+%     simulated_annealing(fcn,x,optim,lb,ub,varargin)
+%
+% Implements the continuous simulated annealing global optimization
+% algorithm described in Corana et al. (1987)
+% 
+%  A very quick (perhaps too quick) overview of SA:
+%     SA tries to find the global optimum of an N dimensional function.
+%  It moves both up and downhill and as the optimization process
+%  proceeds, it focuses on the most promising area.
+%     To start, it randomly chooses a trial point within the step length
+%  VM (a vector of length N) of the user selected starting point. The
+%  function is evaluated at this trial point and its value is compared
+%  to its value at the initial point.
+%     In a maximization problem, all uphill moves are accepted and the
+%  algorithm continues from that trial point. Downhill moves may be
+%  accepted; the decision is made by the Metropolis criterion. It uses T
+%  (temperature) and the size of the downhill move in a probabilistic
+%  manner. The smaller T and the size of the downhill move, the more
+%  likely that move will be accepted. If the trial is accepted, the
+%  algorithm moves on from that point. If it is rejected, another point
+%  is chosen instead for a trial evaluation.
+%     Each element of VM is periodically adjusted so that half of all
+%  function evaluations in that direction are accepted.
+%     A fall in T is imposed upon the system with the RT option by
+%  T(i+1) = RT*T(i) where i is the ith iteration. Thus, as T declines,
+%  downhill moves are less likely to be accepted and the percentage of
+%  rejections rises. Given the scheme for the selection of VM, VM falls.
+%  Thus, as T declines, VM falls and SA focuses upon the most promising
+%  area for optimization.
+%
+%  The importance of the parameter T (initial_temperature):
+%     The parameter T is crucial in using SA successfully. It influences
+%  VM, the step length over which the algorithm searches for optima. For
+%  a small initial T, the step length may be too small; thus not enough
+%  of the function might be evaluated to find the global optimum. The user
+%  should carefully examine VM in the intermediate output (set verbosity =
+%  1) to make sure that VM is appropriate. The relationship between the
+%  initial temperature and the resulting step length is function
+%  dependent.
+%     To determine the starting temperature that is consistent with
+%  optimizing a function, it is worthwhile to do a trial run first. Set
+%  RT = 1.5 and T = 1.0. With RT > 1.0, the temperature increases and VM
+%  rises as well. Then select the T that produces a large enough VM.
+%
+%  Input Parameters:
+%    Note: The suggested values generally come from Corana et al. To
+%          drastically reduce runtime, see Goffe et al., pp. 90-1 for
+%          suggestions on choosing the appropriate RT and NT.
+% 
+%    fcn - function to be optimized.
+%    x - The starting values for the variables of the function to be
+%        optimized. (N)
+%    optim:     Options structure with fields
+% 
+%       optim.maximizer_indicator - Denotes whether the function should be maximized or
+%           minimized. A value =1 denotes maximization while a
+%           value =0 denotes minimization. Intermediate output (see verbosity)
+%           takes this into account.
+%       optim.rt - The temperature reduction factor
+%       optim.TolFun - Error tolerance for termination. If the final function
+%           values from the last neps temperatures differ from the
+%           corresponding value at the current temperature by less than
+%           optim.TolFun and the final function value at the current temperature
+%           differs from the current optimal function value by less than
+%           optim.TolFun, execution terminates and exitflag = 0 is returned.
+%       optim.ns - Number of cycles. After NS*N function evaluations, each
+%           element of VM is adjusted so that approximately half of
+%           all function evaluations are accepted. The suggested value
+%           is 20.
+%       optim.nt - Number of iterations before temperature reduction. After
+%           NT*NS*N function evaluations, temperature (T) is changed
+%           by the factor optim.rt. Value suggested by Corana et al. is
+%           max(100, 5*n). See Goffe et al. for further advice.
+%       optim.neps - Number of final function values used to decide upon
+%           termination. See optim.TolFun. Suggested value is 4.
+%       optim.MaxIter - Maximum number of function evaluations. If it is
+%           exceeded, exitflag = 1.
+%       optim.step_length_c - Vector that controls the step length adjustment. The suggested
+%           value for all elements is 2.0.
+%       optim.verbosity - controls printing inside SA.
+%           Values: 0 - Nothing printed.
+%                     1 - Function value for the starting value and summary results before each temperature
+%                         reduction. This includes the optimal function value found so far, the total
+%                         number of moves (broken up into uphill, downhill, accepted and rejected), the
+%                         number of out of bounds trials, the number of new optima found at this
+%                         temperature, the current optimal X and the step length VM. Note that there are
+%                         N*NS*NT function evaluations before each temperature reduction. Finally, notice
+%                         is also given upon achieving the termination criteria.
+%                     2 - Each new step length (VM), the current optimal X (XOPT) and the current trial X (X). This
+%                         gives the user some idea about how far X strays from XOPT as well as how VM is adapting
+%                         to the function.
+%                     3 - Each function evaluation, its acceptance or rejection and new optima. For many problems,
+%                         this option will likely require a small tree if hard copy is used. This option is best
+%                         used to learn about the algorithm. A small value for optim.MaxIter is thus recommended when
+%                         using optim.verbosity = 3.
+%    optim.initial_temperature  initial temperature. See Goffe et al. for advice.
+%    optim.initial_step_length  (VM) step length vector. On input it should encompass the
+%                               region of interest given the starting value X. For point
+%                               X(I), the next trial point is selected is from X(I) - VM(I)
+%                               to  X(I) + VM(I). Since VM is adjusted so that about half
+%                               of all points are accepted, the input value is not very
+%                               important (i.e. is the value is off, SA adjusts VM to the
+%                               correct value)
+% 
+%    lb - The lower bound for the allowable solution variables. 
+%    ub - The upper bound for the allowable solution variables. 
+%         If the algorithm chooses X(I) < LB(I) or X(I) > UB(I),
+%         I = 1, N, a point from inside is randomly selected.
+%         This focuses the algorithm on the region inside UB and LB.
+%         Unless the user wishes to concentrate the search on a
+%         particular region, UB and LB should be set to very large positive
+%         and negative values, respectively. Note that the starting
+%         vector X should be inside this region. Also note that LB and
+%         UB are fixed in position, while VM is centered on the last
+%         accepted trial set of variables that optimizes the function.
+% 
+%
+%  Output Parameters:
+%    xopt - The variables that optimize the function. (N)
+%    fopt - The optimal value of the function.
+%    exitflag - The error return number.
+%          Values: 0 - Normal return; termination criteria achieved.
+%                  1 - Number of function evaluations (NFCNEV) is
+%                      greater than the maximum number (optim.MaxIter).
+%                  2 - The starting value (X) is not inside the
+%                      bounds (LB and UB).
+%                  3 - The initial temperature is not positive.
+%                  99 - Should not be seen; only used internally.
+%    n_accepted_draws - The number of accepted function evaluations.
+%    n_total_draws - The total number of function evaluations. As a minor
+%             point, note that the first evaluation is not used in the
+%             core of the algorithm; it simply initializes the
+%             algorithm.
+%    n_out_of_bounds_draws - The total number of trial function evaluations that
+%            would have been out of bounds of LB and UB. Note that
+%            a trial point is randomly selected between LB and UB.
+%    t:     On output, the final temperature.
+%    vm:    Final step length vector
+%
+% Algorithm: 
+%  This routine implements the continuous simulated annealing global
+%  optimization algorithm described in Corana et al.'s article
+%  "Minimizing Multimodal Functions of Continuous Variables with the
+%  "Simulated Annealing" Algorithm" in the September 1987 (vol. 13,
+%  no. 3, pp. 262-280) issue of the ACM Transactions on Mathematical
+%  Software.
+% 
+% For modifications to the algorithm and many details on its use,
+%  (particularly for econometric applications) see Goffe, Ferrier
+%  and Rogers, "Global Optimization of Statistical Functions with
+%  Simulated Annealing," Journal of Econometrics, vol. 60, no. 1/2,
+%  Jan./Feb. 1994, pp. 65-100.
+% 
+%  Based on the Matlab code written by Thomas Werner (Bundesbank December
+%  2002), which in turn is based on the GAUSS version of Bill Goffe's simulated annealing 
+%  program for global optimization, written by E.G.Tsionas (9/4/95).
+% 
+% Copyright (C) 1995 E.G.Tsionas 
+% Copyright (C) 1995-2002 Thomas Werner 
+% Copyright (C) 2002-2015 Giovanni Lombardo
+% Copyright (C) 2015 Dynare Team
+%
+% This file is part of Dynare.
+%
+% Dynare is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% Dynare is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with Dynare.  If not, see <http://www.gnu.org/licenses/>.
+
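+% Loop structure: the outer while-loop runs until a termination criterion
+% is met; each pass makes optim.nt stages of optim.ns cycles over the n
+% parameters. vm is rescaled after every optim.ns cycles and t is reduced
+% by the factor optim.rt after every optim.nt stages.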
+c=optim.step_length_c;
+t=optim.initial_temperature;
+vm=optim.initial_step_length;
+n=size(x,1);
+xp=zeros(n,1);
+%*  Set initial values.*
+n_accepted_draws=0;
+n_out_of_bounds_draws=0;
+n_total_draws=0;
+exitflag=99;
+xopt=x;
+nacp=zeros(n,1);
+fstar=1e20*ones(optim.neps,1);
+%* If the initial temperature is not positive, notify the user and abort. *
+if(t<=0.0);
+    fprintf('\nThe initial temperature is not positive. Reset the variable t\n');
+    exitflag=3;
+    return;
+end;
+%*  If the initial value is out of bounds, notify the user and abort. *
+if(sum(x>ub)+sum(x<lb)>0);
+    fprintf('\nInitial condition out of bounds\n');
+    exitflag=2;
+    return;
+end;
+%*  Evaluate the function with input x and return value as f. *
+f=feval(fcn,x,varargin{:});
+%*
+%  If the function is to be minimized, switch the sign of the function.
+%  Note that all intermediate and final output switches the sign back
+%  to eliminate any possible confusion for the user.
+%*
+if(optim.maximizer_indicator==0);
+    f=-f;
+end;
+n_total_draws=n_total_draws+1;
+fopt=f;
+fstar(1)=f;
+if(optim.verbosity >1);
+    disp '  ';
+    disp(['initial x    ' num2str(x(:)')]);
+    if(optim.maximizer_indicator);
+        disp(['initial f    ' num2str(f)]);
+    else
+        disp(['initial f    ' num2str(-f)]);
+    end;
+end;
+%  Start the main loop. Note that it terminates if (i) the algorithm
+%  successfully optimizes the function or (ii) there are too many
+%  function evaluations (more than optim.MaxIter).
+
+while (1>0);
+    nup=0;
+    nrej=0;
+    nnew=0;
+    ndown=0;
+    lnobds=0;
+    m=1;
+    while m<=optim.nt;
+        j=1;
+        while j<=optim.ns;
+            h=1;
+            while h<=n;
+                %*  Generate xp, the trial value of x. Note use of vm to choose xp. *
+                i=1;
+                while i<=n;
+                    if(i==h);
+                        xp(i)=x(i)+(rand(1,1)*2.0-1.0)*vm(i);
+                    else
+                        xp(i)=x(i);
+                    end;
+                    %*  If xp is out of bounds, select a point in bounds for the trial. *
+                    if((xp(i)<lb(i) || xp(i)>ub(i)));
+                        xp(i)=lb(i)+(ub(i)-lb(i))*rand(1,1);
+                        lnobds=lnobds+1;
+                        n_out_of_bounds_draws=n_out_of_bounds_draws+1;
+                        if(optim.verbosity >=3);
+                            if exist('fp','var')
+                                print_current_invalid_try(optim.maximizer_indicator,xp,x,fp,f);
+                            end
+                        end;
+                    end;
+                    i=i+1;
+                end;
+                %*  Evaluate the function with the trial point xp and return as fp. *
+                fp=feval(fcn,xp,varargin{:});
+                if(optim.maximizer_indicator==0);
+                    fp=-fp;
+                end;
+                n_total_draws=n_total_draws+1;
+                if(optim.verbosity >=3);
+                    print_current_valid_try(optim.maximizer_indicator,xp,x,fp,f);
+                end;
+                %*  If too many function evaluations occur, terminate the algorithm. *
+                if(n_total_draws>=optim.MaxIter);
+                    fprintf('Too many function evaluations; consider\n');
+                    fprintf('increasing optim.MaxIter or optim.TolFun or decreasing\n');
+                    fprintf('optim.nt or optim.rt. These results are likely to be poor\n');
+                    if(optim.maximizer_indicator==0);
+                        fopt=-fopt;
+                    end;
+                    exitflag=1;
+                    return;
+                end;
+                %*  Accept the new point if the function value increases. *
+                if(fp>=f);
+                    if(optim.verbosity >=3);
+                        fprintf('point accepted\n');
+                    end;
+                    x=xp;
+                    f=fp;
+                    n_accepted_draws=n_accepted_draws+1;
+                    nacp(h)=nacp(h)+1;
+                    nup=nup+1;
+                    %*  If greater than any other point, record as new optimum. *
+                    if(fp>fopt);
+                        if(optim.verbosity >=3);
+                            fprintf('new optimum\n');
+                        end;
+                        xopt=xp;
+                        fopt=fp;
+                        nnew=nnew+1;
+                    end;
+                    %*
+                    % If the point is lower, use the Metropolis criteria to decide on
+                    % acceptance or rejection.
+                    %*
+                else
+                    p=exp((fp-f)/t);
+                    pp=rand(1,1);
+                    if(pp<p);
+                        if(optim.verbosity >=3);
+                            if(optim.maximizer_indicator);
+                             fprintf('though lower, point accepted\n');
+                            else
+                             fprintf('though higher, point accepted\n');
+                            end;
+                        end;
+                        x=xp;
+                        f=fp;
+                        n_accepted_draws=n_accepted_draws+1;
+                        nacp(h)=nacp(h)+1;
+                        ndown=ndown+1;
+                    else
+                        nrej=nrej+1;
+                        if(optim.verbosity >=3);
+                            if(optim.maximizer_indicator);
+                                fprintf('lower point rejected\n');
+                            else
+                                fprintf('higher point rejected\n');
+                            end;
+                        end;
+                    end;
+                end;
+                h=h+1;
+            end;
+            j=j+1;
+        end;
+        %*  Adjust vm so that approximately half of all evaluations are accepted. *
+        i=1;
+        while i<=n;
+            ratio=nacp(i)/optim.ns;
+            if(ratio>.6);
+                vm(i)=vm(i)*(1.+c(i)*(ratio-.6)/.4);
+            elseif(ratio<.4);
+                vm(i)=vm(i)/(1.+c(i)*((.4-ratio)/.4));
+            end;
+            if(vm(i)>(ub(i)-lb(i)));
+                vm(i)=ub(i)-lb(i);
+            end;
+            i=i+1;
+        end;
+        if(optim.verbosity >=2);
+            fprintf('intermediate results after step length adjustment\n');
+            fprintf('new step length(vm)  %4.3f\n', vm(:)');
+            fprintf('current optimal x    %4.3f\n', xopt(:)');
+            fprintf('current x            %4.3f\n', x(:)');
+        end;
+        nacp=zeros(n,1);
+        m=m+1;
+    end;
+    if(optim.verbosity >=1);
+        print_intermediate_statistics(optim.maximizer_indicator,t,xopt,vm,fopt,nup,ndown,nrej,lnobds,nnew);
+    end;
+    %*  Check termination criteria. *
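+    % quit only if the current f is within optim.TolFun of the optimum
+    % found so far (fopt) and f changed by less than optim.TolFun relative
+    % to the last optim.neps temperature stages recorded in fstar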
+    quit=0;
+    fstar(1)=f;
+    if((fopt-fstar(1))<=optim.TolFun);
+        quit=1;
+    end;
+    if(sum(abs(f-fstar)>optim.TolFun)>0);
+        quit=0;
+    end;
+    %*  Terminate SA if appropriate. *
+    if(quit);
+        exitflag=0;
+        if(optim.maximizer_indicator==0);
+            fopt=-fopt;
+        end;
+        if(optim.verbosity >=1);
+            fprintf('SA achieved termination criteria. exitflag=0\n');
+        end;
+        return;        
+    end;
+    %*  If termination criteria are not met, prepare for another loop. *
+    t=optim.rt*t;
+    i=optim.neps;
+    while i>=2;
+        fstar(i)=fstar(i-1);
+        i=i-1;
+    end;
+    f=fopt;
+    x=xopt;
+    %*  Loop again. *
+end;
+
+end
+
+function  print_current_invalid_try(max,xp,x,fp,f)
+fprintf('\n');
+disp(['Current x    ' num2str(x(:)')]);
+if(max);
+    disp(['Current f    ' num2str(f)]);
+else
+    disp(['Current f    ' num2str(-f)]);
+end;
+disp(['Trial x      ' num2str(xp(:)')]);
+disp 'Point rejected since out of bounds';
+end
+
+function print_current_valid_try(max,xp,x,fp,f)
+
+disp(['Current x    ' num2str(x(:)')]);
+if(max);
+    disp(['Current f   ' num2str(f)]);
+    disp(['Trial x     ' num2str(xp(:)')]);
+    disp(['Resulting f ' num2str(fp)]);
+else
+    disp(['Current f   ' num2str(-f)]);
+    disp(['Trial x     ' num2str(xp(:)')]);
+    disp(['Resulting f ' num2str(-fp)]);
+end;
+end
+
+
+function  print_intermediate_statistics(max,t,xopt,vm,fopt,nup,ndown,nrej,lnobds,nnew)
+
+totmov=nup+ndown+nrej;
+fprintf('\nIntermediate results before next temperature reduction\n');
+disp(['current temperature         ' num2str(t)]);
+
+if(max)
+    disp(['Max function value so far   ' num2str(fopt)]);
+    disp(['Total moves                 ' num2str(totmov)]);
+    disp(['Uphill                      ' num2str(nup)]);
+    disp(['Accepted downhill           ' num2str(ndown)]);
+    disp(['Rejected downhill           ' num2str(nrej)]);
+    disp(['Out of bounds trials        ' num2str(lnobds)]);
+    disp(['New maxima this temperature ' num2str(nnew)]);
+else
+    disp(['Min function value so far   ' num2str(-fopt)]);
+    disp(['Total moves                 ' num2str(totmov)]);
+    disp(['Downhill                    ' num2str(nup)]);
+    disp(['Accepted uphill             ' num2str(ndown)]);
+    disp(['Rejected uphill             ' num2str(nrej)]);
+    disp(['Trials out of bounds        ' num2str(lnobds)]);
+    disp(['New minima this temperature ' num2str(nnew)]);
+end
+xopt1=xopt(1:round(length(xopt)/2));
+xopt2=xopt(round(length(xopt)/2)+1:end);
+
+disp(['Current optimal x1           ' num2str(xopt1')]);
+disp(['Current optimal x2           ' num2str(xopt2')]);
+disp(['Strength(vm)                ' num2str(vm')]);
+fprintf('\n');
+end
\ No newline at end of file
diff --git a/matlab/optimization/solvopt.m b/matlab/optimization/solvopt.m
new file mode 100644
index 0000000000000000000000000000000000000000..9e5aa61392dc2d232def297a85f72638dbf4b433
--- /dev/null
+++ b/matlab/optimization/solvopt.m
@@ -0,0 +1,991 @@
+function [x,f,exitflag,n_f_evals,n_grad_evals,n_constraint_evals,n_constraint_gradient_evals]=solvopt(x,fun,grad,func,gradc,optim,varargin)
+% [x,f,options]=solvopt(x,fun,grad,func,gradc,options,varargin)
+% 
+% The function SOLVOPT, developed by Alexei Kuntsevich and Franz Kappel,
+% performs a modified version of Shor's r-algorithm in
+% order to find a local minimum resp. maximum of a nonlinear function
+% defined on the n-dimensional Euclidean space, or a solution of a nonlinear
+% constrained problem:
+% min { f(x): g(x) (<)= 0, g(x) in R(m), x in R(n) }
+%
+% Inputs:
+%   x       n-vector (row or column) of the coordinates of the starting
+%           point,
+% fun       name of an M-file (M-function) which computes the value
+%           of the objective function <fun> at a point x,
+%           synopsis: f=fun(x)
+% grad      name of an M-file (M-function) which computes the gradient
+%           vector of the function <fun> at a point x,
+%           synopsis: g=grad(x)
+% func      name of an M-file (M-function) which computes the MAXIMAL
+%           RESIDUAL(!) for a set of constraints at a point x,
+%           synopsis: fc=func(x)
+% gradc     name of an M-file (M-function) which computes the gradient
+%           vector for the maximal residual constraint at a point x,
+%           synopsis: gc=gradc(x)
+% optim     Options structure with fields:
+%    optim.minimizer_indicator= H, where sign(H)=-1 resp. sign(H)=+1 means minimize
+%        resp. maximize <fun> (valid only for unconstrained problem)
+%        and H itself is a factor for the initial trial step size
+%        (optim.minimizer_indicator=-1 by default),
+%    optim.TolX= relative error for the argument in terms of the
+%        infinity-norm (1.e-6 by default),
+%    optim.TolFun= relative error for the function value (1.e-6 by default),
+%    optim.MaxIter= limit for the number of iterations (15000 by default),
+%    optim.verbosity= control of the display of intermediate results and error
+%        resp. warning messages (default value is 0, i.e., no intermediate
+%        output but error and warning messages, see more in the manual),
+%    optim.TolXConstraint= admissible maximal residual for a set of constraints
+%        (optim.TolXConstraint=1e-8 by default, see more in the manual),
+%   *optim.SpaceDilation= the coefficient of space dilation (2.5 by default),
+%   *optim.LBGradientStep= lower bound for the stepsize used for the difference
+%        approximation of gradients (1e-11 by default, see more in the manual).
+%  (* ... changes should be done with care)
+% 
+% Outputs:
+% x                     optimal parameter vector (row resp. column),
+% f                     optimum function value
+% exitflag:             the number of iterations, if positive,
+%                       or an abnormal stop code, if negative (see more in the manual),
+% n_f_evals:            number of objective evaluations
+% n_grad_evals:         number of gradient evaluations,
+% n_constraint_evals:   number of constraint function evaluations,
+% n_constraint_gradient_evals   number of constraint gradient evaluations.
+% 
+% 
+% Algorithm: Kuntsevich, A.V., Kappel, F., The solver for local nonlinear optimization problems 
+% (version 1.1, Matlab, C, FORTRAN). University of Graz, Graz, 1997. 
+% 
+% 
+% Copyright (C) 1997-2008, Alexei Kuntsevich and Franz Kappel 
+% Copyright (C) 2008-2015 Giovanni Lombardo
+% Copyright (C) 2015 Dynare Team
+%
+% This file is part of Dynare.
+%
+% Dynare is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% Dynare is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with Dynare.  If not, see <http://www.gnu.org/licenses/>.
+
+
+
+% strings: ----{
+errmes='SolvOpt error:';
+wrnmes='SolvOpt warning:';
+error1='No function name and/or starting point passed to the function.';
+error2='Argument X has to be a row or column vector of dimension > 1.';
+error30='<fun> returns an empty string.';
+error31='Function value does not exist (NaN is returned).';
+error32='Function equals infinity at the point.';
+error40='<grad> returns an improper matrix. Check the dimension.';
+error41='Gradient does not exist (NaN is returned by <grad>).';
+error42='Gradient equals infinity at the starting point.';
+error43='Gradient equals zero at the starting point.';
+error50='<func> returns an empty string.';
+error51='<func> returns NaN at the point.';
+error52='<func> returns infinite value at the point.';
+error60='<gradc> returns an improper vector. Check the dimension';
+error61='<gradc> returns NaN at the point.';
+error62='<gradc> returns infinite vector at the point.';
+error63='<gradc> returns zero vector at an infeasible point.';
+error5='Function is unbounded.';
+error6='Choose another starting point.';
+warn1= 'Gradient is zero at the point, but stopping criteria are not fulfilled.';
+warn20='Normal re-setting of a transformation matrix.' ;
+warn21='Re-setting due to the use of a new penalty coefficient.' ;
+warn4= 'Iterations limit exceeded.';
+warn31='The function is flat in certain directions.';
+warn32='Trying to recover by shifting insensitive variables.';
+warn09='Re-run from recorded point.';
+warn08='Ravine with a flat bottom is detected.';
+termwarn0='SolvOpt: Normal termination.';
+termwarn1='SolvOpt: Termination warning:';
+appwarn='The above warning may be reasoned by inaccurate gradient approximation';
+endwarn=[...
+    'Premature stop is possible. Try to re-run the routine from the obtained point.               ';...
+    'Result may not provide the optimum. The function apparently has many extremum points.        ';...
+    'Result may be inaccurate in the coordinates. The function is flat at the optimum.            ';...
+    'Result may be inaccurate in a function value. The function is extremely steep at the optimum.'];
+% ----}
+
+% ARGUMENTS PASSED ----{
+if nargin<2           % Function and/or starting point are not specified
+    exitflag=-1;
+    disp(errmes);
+    disp(error1);
+    return
+end
+if nargin<3
+    app=1;             % No user-supplied gradients
+elseif isempty(grad)
+    app=1;
+else
+    app=0;                     % Exact gradients are supplied
+end
+
+% OPTIONS ----{
+doptions.minimizer_indicator=-1; %minimize by default, consistent with the documented default
+doptions.TolX=1e-6; %accuracy of argument
+doptions.TolFun=1e-6; %accuracy of function (see Solvopt p.29)
+doptions.MaxIter=15000;
+doptions.verbosity=1;
+doptions.TolXConstraint=1.e-8;
+doptions.SpaceDilation=2.5;
+doptions.LBGradientStep=1.e-11;
+
+if nargin<4
+    optim=doptions;
+elseif isempty(optim)
+    optim=doptions;
+end
+% Check the values:
+optim.TolX=max(optim.TolX,1.e-12);
+optim.TolFun=max(optim.TolFun,1.e-12);
+optim.TolX=max(optim.LBGradientStep*1.e2,optim.TolX);
+optim.TolX=min(optim.TolX,1);
+optim.TolFun=min(optim.TolFun,1);
+optim.TolXConstraint=max(optim.TolXConstraint,1e-12);
+optim.SpaceDilation=max([optim.SpaceDilation,1.5]);
+optim.LBGradientStep=max(optim.LBGradientStep,1e-11);
+% ----}
+
+if isempty(func)
+    constr=0;
+else
+    constr=1;                  % Constrained problem
+    if isempty(gradc),
+        appconstr=1;
+    else
+        appconstr=0;            % Exact gradients of constraints are supplied
+    end
+end
+% ----}
+
+% STARTING POINT ----{
+if max(size(x))<=1
+    disp(errmes);
+    disp(error2);
+    exitflag=-2;
+    return
+elseif size(x,2)==1
+    n=size(x,1);
+    x=x';
+    trx=1;
+elseif size(x,1)==1
+    n=size(x,2);
+    trx=0;
+else
+    disp(errmes);
+    disp(error2);
+    exitflag=-2;
+    return
+end
+% ----}
+
+% WORKING CONSTANTS AND COUNTERS ----{
+
+n_f_evals=0; n_grad_evals=0;      % function and gradient calculations
+if constr
+    n_constraint_evals=0;
+    n_constraint_gradient_evals=0;      % same for constraints
+end
+epsnorm=1.e-15;
+epsnorm2=1.e-30;    % epsilon & epsilon^2
+
+if constr, h1=-1;                  % NLP: restricted to minimization
+    cnteps=optim.TolXConstraint;                % Max. admissible residual
+else
+    h1=sign(optim.minimizer_indicator);         % Minimize resp. maximize a function
+end
+
+k=0;                               % Iteration counter
+
+wdef=1/optim.SpaceDilation-1;               % Default space transf. coeff.
+
+%Gamma control ---{
+ajb=1+.1/n^2;                    % Base I
+ajp=20;
+ajpp=ajp;                        % Start value for the power
+ajs=1.15;                        % Base II
+knorms=0; gnorms=zeros(1,10);    % Gradient norms stored
+%---}
+
+%Display control ---{
+if optim.verbosity<=0, dispdata=0;
+    if optim.verbosity==-1
+        dispwarn=0;
+    else
+        dispwarn=1;
+    end
+else
+    dispdata=round(optim.verbosity);
+    dispwarn=1;
+end
+ld=dispdata;
+%---}
+
+%Stepsize control ---{
+dq=5.1;                          % Step divider (at f_{i+1}>gamma*f_{i})
+du20=2;du10=1.5;du03=1.05;       % Step multipliers (at certain steps made)
+kstore=3;nsteps=zeros(1,kstore); % Steps made at the last 'kstore' iterations
+if app
+    des=6.3;                 % Desired number of steps per 1-D search
+else
+    des=3.3;
+end
+mxtc=3;                          % Number of trial cycles (steep wall detect)
+%---}
+termx=0; limxterm=50;              % Counter and limit for x-criterion
+
+ddx   =max(1e-11,optim.LBGradientStep);      % stepsize for gradient approximation
+
+low_bound=-1+1e-4;                 % Lower bound cosine used to detect a ravine
+
+ZeroGrad=n*1.e-16;                 % Lower bound for a gradient norm
+
+nzero=0;                           % Zero-gradient events counter
+% Lower bound for values of variables taking into account
+lowxbound=max([optim.TolX,1e-3]);
+% Lower bound for function values to be considered as making difference
+lowfbound=optim.TolFun^2;
+krerun=0;                          % Re-run events counter
+detfr=optim.TolFun*100;              % relative error for f/f_{record}
+detxr=optim.TolX*10;               % relative error for norm(x)/norm(x_{record})
+
+warnno=0;                          % the number of warn.mess. to end with
+
+kflat=0;                           % counter for points of flatness
+stepvanish=0;                      % counter for vanished steps
+stopf=0;
+% ----}  End of setting constants
+% ----}  End of the preamble
+
+% COMPUTE THE FUNCTION  ( FIRST TIME ) ----{
+if trx
+    f=feval(fun,x',varargin{:});
+else
+    f=feval(fun,x,varargin{:});
+end
+n_f_evals=n_f_evals+1;
+if isempty(f),      if dispwarn,disp(errmes);disp(error30);end
+    exitflag=-3; if trx, x=x';end, return
+elseif isnan(f),    if dispwarn,disp(errmes);disp(error31);disp(error6);end
+    exitflag=-3; if trx, x=x';end, return
+elseif abs(f)==Inf, if dispwarn,disp(errmes);disp(error32);disp(error6);end
+    exitflag=-3; if trx, x=x';end, return
+end
+xrec=x; frec=f;     % record point and function value
+% Constrained problem
+if constr,  fp=f; kless=0;
+    if trx,
+        fc=feval(func,x');
+    else
+        fc=feval(func,x);
+    end
+    if isempty(fc),
+        if dispwarn,disp(errmes);disp(error50);end
+        exitflag=-5; if trx, x=x';end, return
+    elseif isnan(fc),
+        if dispwarn,disp(errmes);disp(error51);disp(error6);end
+        exitflag=-5; if trx, x=x';end, return
+    elseif abs(fc)==Inf,
+        if dispwarn,disp(errmes);disp(error52);disp(error6);end
+        exitflag=-5; if trx, x=x';end, return
+    end
+    n_constraint_evals=n_constraint_evals+1;
+    PenCoef=1;                              % first rough approximation
+    if fc<=cnteps
+        FP=1; fc=0;             % feasible point
+    else
+        FP=0;                   % infeasible point
+    end
+    f=f+PenCoef*fc;
+end
+% ----}
+% COMPUTE THE GRADIENT ( FIRST TIME ) ----{
+if app
+    deltax=h1*ddx*ones(size(x));
+    if constr
+        if trx
+            g=apprgrdn(x',fp,fun,deltax',1,varargin{:});
+        else
+            g=apprgrdn(x ,fp,fun,deltax,1,varargin{:}); 
+        end
+    else
+        if trx
+            g=apprgrdn(x',f,fun,deltax',1,varargin{:});
+        else
+            g=apprgrdn(x ,f,fun,deltax,1,varargin{:}); 
+        end
+    end
+    n_f_evals=n_f_evals+n;
+else
+    if trx
+        g=feval(grad,x',varargin{:});
+    else
+        g=feval(grad,x,varargin{:});   
+    end
+    n_grad_evals=n_grad_evals+1;
+end
+if size(g,2)==1, g=g'; end, ng=norm(g);
+if size(g,2)~=n,    if dispwarn,disp(errmes);disp(error40);end
+    exitflag=-4; if trx, x=x';end, return
+elseif isnan(ng),   if dispwarn,disp(errmes);disp(error41);disp(error6);end
+    exitflag=-4; if trx, x=x';end, return
+elseif ng==Inf,     if dispwarn,disp(errmes);disp(error42);disp(error6);end
+    exitflag=-4; if trx, x=x';end, return
+elseif ng<ZeroGrad, if dispwarn,disp(errmes);disp(error43);disp(error6);end
+    exitflag=-4; if trx, x=x';end, return
+end
+if constr
+    if ~FP
+        if appconstr,
+            deltax=sign(x); idx=find(deltax==0);
+            deltax(idx)=ones(size(idx));  
+            deltax=ddx*deltax;
+            if trx
+                gc=apprgrdn(x',fc,func,deltax',0);
+            else
+                gc=apprgrdn(x ,fc,func,deltax ,0); 
+            end
+            n_constraint_evals=n_constraint_evals+n;
+        else
+            if trx
+                gc=feval(gradc,x');
+            else
+                gc=feval(gradc,x); 
+            end
+            n_constraint_gradient_evals=n_constraint_gradient_evals+1;
+        end
+        if size(gc,2)==1
+            gc=gc'; 
+        end
+        ngc=norm(gc);
+        if size(gc,2)~=n,
+            if dispwarn
+                disp(errmes);
+                disp(error60);
+            end
+            exitflag=-6; 
+            if trx
+                x=x';
+            end
+            return
+        elseif isnan(ngc),
+            if dispwarn
+                disp(errmes);
+                disp(error61);
+                disp(error6);
+            end
+            exitflag=-6; 
+            if trx
+                x=x';
+            end
+            return
+        elseif ngc==Inf,
+            if dispwarn
+                disp(errmes);
+                disp(error62);
+                disp(error6);
+            end
+            exitflag=-6; 
+            if trx
+                x=x';
+            end
+            return
+        elseif ngc<ZeroGrad,
+            if dispwarn
+                disp(errmes);
+                disp(error63);
+            end
+            exitflag=-6; 
+            if trx
+                x=x';
+            end
+            return
+        end
+        g=g+PenCoef*gc; ng=norm(g);
+    end, end
+grec=g; nng=ng;
+% ----}
+% INITIAL STEPSIZE
+h=h1*sqrt(optim.TolX)*max(abs(x));     % smallest possible stepsize
+if abs(optim.minimizer_indicator)~=1,
+    h=h1*max(abs([optim.minimizer_indicator,h]));     % user-supplied stepsize
+else
+    h=h1*max(1/log(ng+1.1),abs(h));    % calculated stepsize
+end
+
+% RESETTING LOOP ----{
+while 1,
+    kcheck=0;                        % Set checkpoint counter.
+    kg=0;                            % stepsizes stored
+    kj=0;                            % ravine jump counter
+    B=eye(n);                        % re-set transf. matrix to identity
+    fst=f; g1=g;  dx=0;
+    % ----}
+    
+    % MAIN ITERATIONS ----{
+    
+    while 1,
+        k=k+1;kcheck=kcheck+1;
+        laststep=dx;
+        
+        % ADJUST GAMMA --{
+        gamma=1+max([ajb^((ajp-kcheck)*n),2*optim.TolFun]);
+        gamma=min([gamma,ajs^max([1,log10(nng+1)])]);
+        % --}
+        gt=g*B;   w=wdef;
+        % JUMPING OVER A RAVINE ----{
+        if (gt/norm(gt))*(g1'/norm(g1))<low_bound
+            if kj==2, xx=x;  end,  if kj==0, kd=4;  end,
+            kj=kj+1;  w=-.9; h=h*2;             % use large coef. of space dilation
+            if kj>2*kd,     kd=kd+1;  warnno=1;
+                if any(abs(x-xx)<epsnorm*abs(x)), % flat bottom is detected
+                    if dispwarn,disp(wrnmes);disp(warn08); end
+                end
+            end
+        else
+            kj=0;
+        end
+        % ----}
+        % DILATION ----{
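+        % Shor r-algorithm update: dilate the space along z, the normalized
+        % difference of successive transformed gradients, applying the
+        % coefficient w (wdef=1/optim.SpaceDilation-1, unless jumping a
+        % ravine) to both g1 and the transformation matrix B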
+        z=gt-g1;
+        nrmz=norm(z);
+        if(nrmz>epsnorm*norm(gt))
+            z=z/nrmz;
+            g1=gt+w*(z*gt')*z;  B=B+w*(B*z')*z;
+        else
+            z=zeros(1,n); 
+            nrmz=0; 
+            g1=gt;
+        end
+        d1=norm(g1);  g0=(g1/d1)*B';
+        % ----}
+        % RESETTING ----{
+        if kcheck>1
+            idx=find(abs(g)>ZeroGrad); numelem=size(idx,2);
+            if numelem>0, grbnd=epsnorm*numelem^2;
+                if all(abs(g1(idx))<=abs(g(idx))*grbnd) | nrmz==0
+                    if dispwarn,  disp(wrnmes);  disp(warn20); end
+                    if abs(fst-f)<abs(f)*.01
+                        ajp=ajp-10*n;
+                    else
+                        ajp=ajpp; 
+                    end
+                    h=h1*dx/3; 
+                    k=k-1;
+                    break
+                end
+            end
+        end
+        % ----}
+        % STORE THE CURRENT VALUES AND SET THE COUNTERS FOR 1-D SEARCH
+        xopt=x;fopt=f;   k1=0;k2=0;ksm=0;kc=0;knan=0;  hp=h;
+        if constr, Reset=0; end
+        % 1-D SEARCH ----{
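+        % repeatedly step along the normalized direction g0 with trial step
+        % hp; hp is divided by dq after an overshoot and multiplied by
+        % du03/du10/du20 after runs of successful steps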
+        while 1,
+            x1=x;f1=f;
+            if constr, FP1=FP; fp1=fp; end
+            x=x+hp*g0;
+            % FUNCTION VALUE
+            if trx
+                f=feval(fun,x',varargin{:});
+            else
+                f=feval(fun,x,varargin{:});  
+            end
+            n_f_evals=n_f_evals+1;
+            if h1*f==Inf
+                if dispwarn
+                    disp(errmes); 
+                    disp(error5); 
+                end
+                exitflag=-7; 
+                if trx
+                    x=x';
+                end
+                return
+            end
+            if constr, fp=f;
+                if trx
+                    fc=feval(func,x');
+                else
+                    fc=feval(func,x);
+                end
+                n_constraint_evals=n_constraint_evals+1;
+                if  isnan(fc),
+                    if dispwarn,disp(errmes);disp(error51);disp(error6);end
+                    exitflag=-5; if trx, x=x';end, return
+                elseif abs(fc)==Inf,
+                    if dispwarn,disp(errmes);disp(error52);disp(error6);end
+                    exitflag=-5; if trx, x=x';end, return
+                end
+                if fc<=cnteps
+                    FP=1; 
+                    fc=0;
+                else
+                    FP=0;
+                    fp_rate=(fp-fp1);
+                    if fp_rate<-epsnorm
+                        if ~FP1
+                            PenCoefNew=-15*fp_rate/norm(x-x1);
+                            if PenCoefNew>1.2*PenCoef,
+                                PenCoef=PenCoefNew; Reset=1; kless=0; f=f+PenCoef*fc; break
+                            end
+                        end
+                    end
+                end
+                f=f+PenCoef*fc;
+            end
+            if abs(f)==Inf || isnan(f)
+                if dispwarn, disp(wrnmes);
+                    if isnan(f)
+                        disp(error31); 
+                    else
+                        disp(error32); 
+                    end
+                end
+                if ksm || kc>=mxtc
+                    exitflag=-3; 
+                    if trx
+                        x=x';
+                    end
+                    return
+                else
+                    k2=k2+1;
+                    k1=0; 
+                    hp=hp/dq; 
+                    x=x1;
+                    f=f1; 
+                    knan=1;
+                    if constr
+                        FP=FP1; 
+                        fp=fp1; 
+                    end
+                end
+                % STEP SIZE IS ZERO TO THE EXTENT OF EPSNORM
+            elseif all(abs(x-x1)<abs(x)*epsnorm),
+                stepvanish=stepvanish+1;
+                if stepvanish>=5,
+                    exitflag=-14;
+                    if dispwarn, disp(termwarn1);
+                        disp(endwarn(4,:)); end
+                    if trx,x=x';end,  return
+                else
+                    x=x1; 
+                    f=f1; 
+                    hp=hp*10; 
+                    ksm=1;
+                    if constr
+                        FP=FP1; 
+                        fp=fp1; 
+                    end
+                end
+                % USE SMALLER STEP
+            elseif h1*f<h1*gamma^sign(f1)*f1
+                if ksm, break, end
+                k2=k2+1; k1=0; hp=hp/dq; x=x1; f=f1;
+                if constr, FP=FP1; fp=fp1; end
+                if kc>=mxtc, break, end
+            else
+                % 1-D OPTIMIZER IS LEFT BEHIND
+                if h1*f<=h1*f1, break, end
+                % USE LARGER STEP
+                k1=k1+1;
+                if k2>0, kc=kc+1; end
+                k2=0;
+                if k1>=20,      hp=du20*hp;
+                elseif k1>=10,  hp=du10*hp;
+                elseif k1>=3,   hp=du03*hp;
+                end
+            end
+        end
+        % ----}  End of 1-D search
+        % ADJUST THE TRIAL STEP SIZE ----{
+        dx=norm(xopt-x);
+        if kg<kstore,  kg=kg+1;  end
+        if kg>=2,  nsteps(2:kg)=nsteps(1:kg-1); end
+        nsteps(1)=dx/(abs(h)*norm(g0));
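+        % kk: linearly weighted average of the last kg step-length ratios (most recent step weighted most), used to steer the trial step size h towards the target value des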
+        kk=sum(nsteps(1:kg).*[kg:-1:1])/sum([kg:-1:1]);
+        if     kk>des
+            if kg==1
+                h=h*(kk-des+1);
+            else
+                h=h*sqrt(kk-des+1); 
+            end
+        elseif kk<des
+            h=h*sqrt(kk/des);
+        end
+        
+        stepvanish=stepvanish+ksm;
+        % ----}
+        % COMPUTE THE GRADIENT ----{
+        if app,
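+            % finite-difference increments: sign pattern of the search direction (unit entries where a component is zero), scaled by h1*ddx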
+            deltax=sign(g0); idx=find(deltax==0);
+            deltax(idx)=ones(size(idx));  deltax=h1*ddx*deltax;
+            if constr
+                if trx
+                    g=apprgrdn(x',fp,fun,deltax',1,varargin{:});
+                else
+                    g=apprgrdn(x ,fp,fun,deltax,1,varargin{:});    
+                end
+            else
+                if trx
+                    g=apprgrdn(x',f,fun,deltax',1,varargin{:});
+                else
+                    g=apprgrdn(x ,f,fun,deltax ,1,varargin{:});    
+                end
+            end
+            n_f_evals=n_f_evals+n;
+        else
+            if trx
+                g=feval(grad,x',varargin{:});
+            else
+                g=feval(grad,x,varargin{:}); 
+            end
+            n_grad_evals=n_grad_evals+1;
+        end
+        if size(g,2)==1, g=g'; end
+        ng=norm(g);
+        if isnan(ng),
+            if dispwarn, disp(errmes); disp(error41); end
+            exitflag=-4; if trx, x=x'; end, return
+        elseif ng==Inf,
+            if dispwarn,disp(errmes);disp(error42);end
+            exitflag=-4; if trx, x=x';end, return
+        elseif ng<ZeroGrad,
+            if dispwarn,disp(wrnmes);disp(warn1);end
+            ng=ZeroGrad;
+        end
+        % Constraints:
+        if constr
+            if ~FP
+                if ng<.01*PenCoef
+                    kless=kless+1;
+                    if kless>=20, PenCoef=PenCoef/10; Reset=1; kless=0; end
+                else
+                    kless=0;
+                end
+                if appconstr,
+                    deltax=sign(x); idx=find(deltax==0);
+                    deltax(idx)=ones(size(idx));  deltax=ddx*deltax;
+                    if trx
+                        gc=apprgrdn(x',fc,func,deltax',0);
+                    else
+                        gc=apprgrdn(x ,fc,func,deltax ,0); 
+                    end
+                    n_constraint_evals=n_constraint_evals+n;
+                else
+                    if trx
+                        gc=feval(gradc,x');
+                    else
+                        gc=feval(gradc,x ); 
+                    end
+                    n_constraint_gradient_evals=n_constraint_gradient_evals+1;
+                end
+                if size(gc,2)==1, gc=gc'; end
+                ngc=norm(gc);
+                if     isnan(ngc),
+                    if dispwarn,disp(errmes);disp(error61);end
+                    exitflag=-6; if trx, x=x';end, return
+                elseif ngc==Inf,
+                    if dispwarn,disp(errmes);disp(error62);end
+                    exitflag=-6; if trx, x=x';end, return
+                elseif ngc<ZeroGrad && ~appconstr,
+                    if dispwarn,disp(errmes);disp(error63);end
+                    exitflag=-6; if trx, x=x';end, return
+                end
+                g=g+PenCoef*gc; ng=norm(g);
+                if Reset
+                    if dispwarn, disp(wrnmes); disp(warn21); end
+                    h=h1*dx/3; k=k-1; nng=ng; break
+                end
+            end
+        end
+        if h1*f>h1*frec, frec=f; xrec=x; grec=g; end
+        % ----}
+        if ng>ZeroGrad,
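+            % smooth the gradient norm: nng is the geometric mean of the last knorms gradient norms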
+            if knorms<10,  knorms=knorms+1;  end
+            if knorms>=2,  gnorms(2:knorms)=gnorms(1:knorms-1); end
+            gnorms(1)=ng;
+            nng=(prod(gnorms(1:knorms)))^(1/knorms);
+        end
+        
+        % DISPLAY THE CURRENT VALUES ----{
+        if k==ld
+            disp('Iter.# ..... Function ... Step Value ... Gradient Norm ');
+            disp(sprintf('%5i   %13.5e   %13.5e     %13.5e',k,f,dx,ng));
+            ld=k+dispdata;
+        end
+        %----}
+        % CHECK THE STOPPING CRITERIA ----{
+        termflag=1;
+        if constr && ~FP, termflag=0; end
+        if kcheck<=5, termflag=0; end
+        if knan, termflag=0; end
+        if kc>=mxtc, termflag=0; end
+        % ARGUMENT
+        if termflag
+            idx=find(abs(x)>=lowxbound);
+            if isempty(idx) || all(abs(xopt(idx)-x(idx))<=optim.TolX*abs(x(idx)))
+                termx=termx+1;
+                % FUNCTION
+                if abs(f-frec)> detfr * abs(f)    && ...
+                        abs(f-fopt)<=optim.TolFun*abs(f) && ...
+                        krerun<=3                      && ...
+                        ~constr
+                    if any(abs(xrec(idx)-x(idx))> detxr * abs(x(idx)))
+                        if dispwarn
+                            disp(wrnmes);
+                            disp(warn09);
+                        end
+                        x=xrec; 
+                        f=frec; 
+                        g=grec; 
+                        ng=norm(g); 
+                        krerun=krerun+1;
+                        h=h1*max([dx,detxr*norm(x)])/krerun;
+                        warnno=2; 
+                        break
+                    else
+                        h=h*10;
+                    end
+                elseif  abs(f-frec)> optim.TolFun*abs(f)    && ...
+                        norm(x-xrec)<optim.TolX*norm(x) && constr
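+                    % constrained case: x is within TolX of the record point while f still differs noticeably; take no action and keep iterating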
+                    
+                elseif  abs(f-fopt)<=optim.TolFun*abs(f)  || ...
+                        abs(f)<=lowfbound               || ...
+                        (abs(f-fopt)<=optim.TolFun && termx>=limxterm )
+                    if stopf
+                        if dx<=laststep
+                            if warnno==1 && ng<sqrt(optim.TolFun), warnno=0; end
+                            if ~app, if any(abs(g)<=epsnorm2), warnno=3; end, end
+                            if warnno~=0
+                                exitflag=-warnno-10;
+                                if dispwarn, disp(termwarn1);
+                                    disp(endwarn(warnno,:));
+                                    if app, disp(appwarn); end
+                                end
+                            else
+                                exitflag=k;
+                                if dispwarn
+                                    disp(termwarn0); 
+                                end
+                            end
+                            if trx
+                                x=x';
+                            end
+                            return
+                        end
+                    else
+                        stopf=1;
+                    end
+                elseif dx<1.e-12*max(norm(x),1) && termx>=limxterm
+                    exitflag=-14;
+                    if dispwarn, disp(termwarn1); disp(endwarn(4,:));
+                        if app, disp(appwarn); end
+                    end
+                    x=xrec; f=frec;
+                    if trx,x=x';end,  return
+                else
+                    stopf=0;
+                end
+            end
+        end
+        % ITERATIONS LIMIT
+        if(k==optim.MaxIter)
+            exitflag=-9; 
+            if trx
+                x=x'; 
+            end,
+            if dispwarn, disp(wrnmes);  disp(warn4); end
+            return
+        end
+        % ----}
+        % ZERO GRADIENT ----{
+        if constr
+            if ng<=ZeroGrad,
+                if dispwarn,  disp(termwarn1);  disp(warn1); end
+                exitflag=-8; 
+                if trx
+                    x=x';
+                end
+                return
+            end
+        else
+            if ng<=ZeroGrad
+                nzero=nzero+1;
+                if dispwarn, disp(wrnmes);  disp(warn1);  end
+                if nzero>=3
+                    exitflag=-8; 
+                    if trx
+                        x=x';
+                    end
+                    return
+                end
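+                % gradient vanished: reverse and halve the last step direction, then take up to 10 trial steps until a nonzero gradient is found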
+                g0=-h*g0/2;
+                for i=1:10,
+                    x=x+g0;
+                    if trx
+                        f=feval(fun,x',varargin{:});
+                    else
+                        f=feval(fun,x,varargin{:}); 
+                    end
+                    n_f_evals=n_f_evals+1;
+                    if abs(f)==Inf
+                        if dispwarn
+                            disp(errmes);  
+                            disp(error32);  
+                        end
+                        exitflag=-3;
+                        if trx
+                            x=x';
+                        end
+                        return
+                    elseif isnan(f)
+                        if dispwarn
+                            disp(errmes);
+                            disp(error31);
+                        end
+                        end
+                        exitflag=-3;
+                        if trx
+                            x=x';
+                        end
+                        return
+                    end
+                    if app,
+                        deltax=sign(g0); 
+                        idx=find(deltax==0);
+                        deltax(idx)=ones(size(idx));  
+                        deltax=h1*ddx*deltax;
+                        if trx  
+                            g=apprgrdn(x',f,fun,deltax',1,varargin{:});
+                        else
+                            g=apprgrdn(x,f,fun,deltax,1,varargin{:});
+                        end
+                        n_f_evals=n_f_evals+n;
+                    else
+                        if trx  
+                            g=feval(grad,x',varargin{:});
+                        else
+                            g=feval(grad,x,varargin{:});   
+                        end
+                        n_grad_evals=n_grad_evals+1;
+                    end
+                    if size(g,2)==1
+                        g=g'; 
+                    end
+                    ng=norm(g);
+                    if ng==Inf
+                        if dispwarn
+                            disp(errmes);  
+                            disp(error42); 
+                        end
+                        exitflag=-4; 
+                        if trx
+                            x=x'; 
+                        end
+                        return
+                    elseif isnan(ng)
+                        if dispwarn
+                            disp(errmes);  
+                            disp(error41); 
+                        end
+                        exitflag=-4; 
+                        if trx
+                            x=x'; 
+                        end
+                        return
+                    end
+                    if ng>ZeroGrad
+                        break
+                    end
+                end
+                if ng<=ZeroGrad,
+                    if dispwarn
+                        disp(termwarn1);  
+                        disp(warn1); 
+                    end
+                    exitflag=-8; 
+                    if trx
+                        x=x';
+                    end
+                    return
+                end
+                h=h1*dx; 
+                break
+            end
+        end
+        % ----}
+        % FUNCTION IS FLAT AT THE POINT ----{
+        if ~constr && abs(f-fopt)<abs(fopt)*optim.TolFun && kcheck>5 && ng<1
+            idx=find(abs(g)<=epsnorm2); 
+            ni=size(idx,2);
+            if ni>=1 && ni<=n/2 && kflat<=3
+                kflat=kflat+1;
+                if dispwarn, disp(wrnmes); disp(warn31); end
+                warnno=1;
+                x1=x; fm=f;
+                for j=idx
+                    y=x(j); f2=fm;
+                    if y==0, x1(j)=1; elseif abs(y)<1, x1(j)=sign(y); else, x1(j)=y; end
+                    for i=1:20
+                        x1(j)=x1(j)/1.15;
+                        if trx
+                            f1=feval(fun,x1',varargin{:});
+                        else
+                            f1=feval(fun,x1,varargin{:}); 
+                        end
+                        n_f_evals=n_f_evals+1;
+                        if abs(f1)~=Inf && ~isnan(f1),
+                            if h1*f1>h1*fm
+                                y=x1(j); 
+                                fm=f1;
+                            elseif h1*f2>h1*f1
+                                break
+                            elseif f2==f1
+                                x1(j)=x1(j)/1.5;
+                            end
+                            f2=f1;
+                        end
+                    end
+                    x1(j)=y;
+                end
+                if h1*fm>h1*f
+                    if app,
+                        deltax=h1*ddx*ones(size(deltax));
+                        if trx
+                            gt=apprgrdn(x1',fm,fun,deltax',1,varargin{:});
+                        else
+                            gt=apprgrdn(x1 ,fm,fun,deltax ,1,varargin{:});    
+                        end
+                        n_f_evals=n_f_evals+n;
+                    else
+                        if trx
+                            gt=feval(grad,x1',varargin{:});
+                        else
+                            gt=feval(grad,x1,varargin{:}); 
+                        end
+                        n_grad_evals=n_grad_evals+1;
+                    end
+                    if size(gt,2)==1
+                        gt=gt'; 
+                    end
+                    ngt=norm(gt);
+                    if ~isnan(ngt) && ngt>epsnorm2,
+                        if dispwarn
+                            disp(warn32); 
+                        end
+                        optim.TolFun=optim.TolFun/5;
+                        x=x1; 
+                        g=gt; 
+                        ng=ngt; 
+                        f=fm; 
+                        h=h1*dx/3; 
+                        break
+                    end
+                end
+            end
+        end
+        % ----}
+    end % iterations
+end % restart
+% end of the function
+%
+end
\ No newline at end of file
diff --git a/tests/Makefile.am b/tests/Makefile.am
index 8120d7784b4e6a974b252a0c272247e3a4ae1199..af4d71ab3bad5e03c49d3c5817253b06c3f3e47a 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -207,6 +207,18 @@ MODFILES = \
 	smoother2histval/fs2000_simul.mod \
 	smoother2histval/fs2000_smooth.mod \
 	smoother2histval/fs2000_smooth_stoch_simul.mod \
+	optimizers/fs2000_2.mod \
+	optimizers/fs2000_3.mod \
+	optimizers/fs2000_4.mod \
+	optimizers/fs2000_5.mod \
+	optimizers/fs2000_6.mod \
+	optimizers/fs2000_7.mod \
+	optimizers/fs2000_8.mod \
+	optimizers/fs2000_9.mod \
+	optimizers/fs2000_10.mod \
+	optimizers/fs2000_101.mod \
+	optimizers/fs2000_102.mod \
+	optimizers/fs2000_w.mod \
 	reporting/example1.mod
 
 XFAIL_MODFILES = ramst_xfail.mod \
@@ -396,7 +408,9 @@ EXTRA_DIST = \
 	loglinear/results_exp.mat \
 	smoother2histval/fsdat_simul.m \
 	optimal_policy/Ramsey/find_c.m \
-	optimal_policy/Ramsey/oo_ramsey_policy_initval.mat 
+	optimal_policy/Ramsey/oo_ramsey_policy_initval.mat \
+	optimizers/optimizer_function_wrapper.m \
+	optimizers/fs2000.common.inc
 
 TARGETS =
 
diff --git a/tests/optimizers/fs2000.common.inc b/tests/optimizers/fs2000.common.inc
new file mode 100644
index 0000000000000000000000000000000000000000..b0762753f7631ef7beeebff7167a36b26a0547cd
--- /dev/null
+++ b/tests/optimizers/fs2000.common.inc
@@ -0,0 +1,133 @@
+/*
+ * This file replicates the estimation of the cash in advance model described
+ * Frank Schorfheide (2000): "Loss function-based evaluation of DSGE models",
+ * Journal of Applied Econometrics, 15(6), 645-670.
+ *
+ * The data are in file "fs2000/fsdat_simul.m", and have been artificially generated.
+ * They are therefore different from the original dataset used by Schorfheide.
+ *
+ * The equations are taken from J. Nason and T. Cogley (1994): "Testing the
+ * implications of long-run neutrality for monetary business cycle models",
+ * Journal of Applied Econometrics, 9, S37-S70.
+ * Note that there is an initial minus sign missing in equation (A1), p. S63.
+ *
+ * This implementation was written by Michel Juillard. Please note that the
+ * following copyright notice only applies to this Dynare implementation of the
+ * model.
+ */
+
+/*
+ * Copyright (C) 2004-2010 Dynare Team
+ *
+ * This file is part of Dynare.
+ *
+ * Dynare is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Dynare is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Dynare.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+var m P c e W R k d n l gy_obs gp_obs y dA;
+varexo e_a e_m;
+
+parameters alp bet gam mst rho psi del;
+
+alp = 0.33;
+bet = 0.99;
+gam = 0.003;
+mst = 1.011;
+rho = 0.7;
+psi = 0.787;
+del = 0.02;
+
+model;
+dA = exp(gam+e_a);
+log(m) = (1-rho)*log(mst) + rho*log(m(-1))+e_m;
+-P/(c(+1)*P(+1)*m)+bet*P(+1)*(alp*exp(-alp*(gam+log(e(+1))))*k^(alp-1)*n(+1)^(1-alp)+(1-del)*exp(-(gam+log(e(+1)))))/(c(+2)*P(+2)*m(+1))=0;
+W = l/n;
+-(psi/(1-psi))*(c*P/(1-n))+l/n = 0;
+R = P*(1-alp)*exp(-alp*(gam+e_a))*k(-1)^alp*n^(-alp)/W;
+1/(c*P)-bet*P*(1-alp)*exp(-alp*(gam+e_a))*k(-1)^alp*n^(1-alp)/(m*l*c(+1)*P(+1)) = 0;
+c+k = exp(-alp*(gam+e_a))*k(-1)^alp*n^(1-alp)+(1-del)*exp(-(gam+e_a))*k(-1);
+P*c = m;
+m-1+d = l;
+e = exp(e_a);
+y = k(-1)^alp*n^(1-alp)*exp(-alp*(gam+e_a));
+gy_obs = dA*y/y(-1);
+gp_obs = (P/P(-1))*m(-1)/dA;
+end;
+
+initval;
+k = 6;
+m = mst;
+P = 2.25;
+c = 0.45;
+e = 1;
+W = 4;
+R = 1.02;
+d = 0.85;
+n = 0.19;
+l = 0.86;
+y = 0.6;
+gy_obs = exp(gam);
+gp_obs = exp(-gam);
+dA = exp(gam);
+end;
+
+shocks;
+var e_a; stderr 0.014;
+var e_m; stderr 0.005;
+end;
+
+steady_state_model;
+  dA = exp(gam);
+  gst = 1/dA;
+  m = mst;
+  khst = ( (1-gst*bet*(1-del)) / (alp*gst^alp*bet) )^(1/(alp-1));
+  xist = ( ((khst*gst)^alp - (1-gst*(1-del))*khst)/mst )^(-1);
+  nust = psi*mst^2/( (1-alp)*(1-psi)*bet*gst^alp*khst^alp );
+  n  = xist/(nust+xist);
+  P  = xist + nust;
+  k  = khst*n;
+
+  l  = psi*mst*n/( (1-psi)*(1-n) );
+  c  = mst/P;
+  d  = l - mst + 1;
+  y  = k^alp*n^(1-alp)*gst^alp;
+  R  = mst/bet;
+  W  = l/n;
+  ist  = y-c;
+  q  = 1 - d;
+
+  e = 1;
+  
+  gp_obs = m/dA;
+  gy_obs = dA;
+end;
+
+steady;
+
+check;
+
+estimated_params;
+alp, beta_pdf, 0.356, 0.02;
+bet, beta_pdf, 0.993, 0.002;
+gam, normal_pdf, 0.0085, 0.003;
+mst, normal_pdf, 1.0002, 0.007;
+rho, beta_pdf, 0.129, 0.223;
+psi, beta_pdf, 0.65, 0.05;
+del, beta_pdf, 0.01, 0.005;
+stderr e_a, inv_gamma_pdf, 0.035449, inf;
+stderr e_m, inv_gamma_pdf, 0.008862, inf;
+end;
+
+varobs gp_obs gy_obs;
+options_.plot_priors=0;
diff --git a/tests/optimizers/fs2000_1.mod b/tests/optimizers/fs2000_1.mod
new file mode 100644
index 0000000000000000000000000000000000000000..fe37048359d3731ca32d9bb4e68e33f18f6b89c5
--- /dev/null
+++ b/tests/optimizers/fs2000_1.mod
@@ -0,0 +1,5 @@
+@#include "fs2000.common.inc"
+
+if ~isoctave() && exist('fmincon','file')
+  estimation(mode_compute=1,order=1, datafile='../fs2000/fsdat_simul', nobs=192, mh_replic=0);
+end
diff --git a/tests/optimizers/fs2000_10.mod b/tests/optimizers/fs2000_10.mod
new file mode 100644
index 0000000000000000000000000000000000000000..d46ef9d85f9631fa94fd03a51252849a75a112f4
--- /dev/null
+++ b/tests/optimizers/fs2000_10.mod
@@ -0,0 +1,3 @@
+@#include "fs2000.common.inc"
+
+estimation(mode_compute=10,order=1, datafile='../fs2000/fsdat_simul', nobs=192, mh_replic=0);
diff --git a/tests/optimizers/fs2000_101.mod b/tests/optimizers/fs2000_101.mod
new file mode 100644
index 0000000000000000000000000000000000000000..d301ecd186165a81dba729760d0e664e2ab7ca04
--- /dev/null
+++ b/tests/optimizers/fs2000_101.mod
@@ -0,0 +1,3 @@
+@#include "fs2000.common.inc"
+
+estimation(mode_compute=101,order=1, datafile='../fs2000/fsdat_simul', nobs=192, mh_replic=0);
diff --git a/tests/optimizers/fs2000_102.mod b/tests/optimizers/fs2000_102.mod
new file mode 100644
index 0000000000000000000000000000000000000000..e43dcb254b8ba82cc7348d72b8c87a9c1e85d0dc
--- /dev/null
+++ b/tests/optimizers/fs2000_102.mod
@@ -0,0 +1,5 @@
+@#include "fs2000.common.inc"
+
+if ~isoctave() && exist('simulannealbnd','file')
+   estimation(mode_compute=102,mode_file=fs2000_mode,order=1, datafile='../fs2000/fsdat_simul', nobs=192, mh_replic=0, mh_nblocks=2, mh_jscale=0.8);
+end
diff --git a/tests/optimizers/fs2000_2.mod b/tests/optimizers/fs2000_2.mod
new file mode 100644
index 0000000000000000000000000000000000000000..bfd38c7c491cff9548284dbdf054aa13838c0a3f
--- /dev/null
+++ b/tests/optimizers/fs2000_2.mod
@@ -0,0 +1,3 @@
+@#include "fs2000.common.inc"
+
+estimation(mode_compute=2,order=1, datafile='../fs2000/fsdat_simul', nobs=192, mh_replic=0);
diff --git a/tests/optimizers/fs2000_3.mod b/tests/optimizers/fs2000_3.mod
new file mode 100644
index 0000000000000000000000000000000000000000..5efc98d2a4e87b089c3a0ab7b46a1256733d7111
--- /dev/null
+++ b/tests/optimizers/fs2000_3.mod
@@ -0,0 +1,5 @@
+@#include "fs2000.common.inc"
+
+if exist('fminunc','file')
+  estimation(mode_compute=3,order=1, datafile='../fs2000/fsdat_simul', nobs=192, mh_replic=0);
+end
diff --git a/tests/optimizers/fs2000_4.mod b/tests/optimizers/fs2000_4.mod
new file mode 100644
index 0000000000000000000000000000000000000000..c500afa7fbc7e003c0574900f6fc5abe8628856d
--- /dev/null
+++ b/tests/optimizers/fs2000_4.mod
@@ -0,0 +1,3 @@
+@#include "fs2000.common.inc"
+
+estimation(mode_compute=4,order=1, datafile='../fs2000/fsdat_simul', nobs=192, mh_replic=0);
diff --git a/tests/optimizers/fs2000_5.mod b/tests/optimizers/fs2000_5.mod
new file mode 100644
index 0000000000000000000000000000000000000000..16fdd957e23612ccfc5ba6b0670ee438085e12de
--- /dev/null
+++ b/tests/optimizers/fs2000_5.mod
@@ -0,0 +1,3 @@
+@#include "fs2000.common.inc"
+
+estimation(mode_compute=5,order=1, datafile='../fs2000/fsdat_simul', nobs=192, mh_replic=0);
diff --git a/tests/optimizers/fs2000_6.mod b/tests/optimizers/fs2000_6.mod
new file mode 100644
index 0000000000000000000000000000000000000000..7bd0673a71079e33f2e3d2e461cf0d7084036255
--- /dev/null
+++ b/tests/optimizers/fs2000_6.mod
@@ -0,0 +1,3 @@
+@#include "fs2000.common.inc"
+
+estimation(mode_compute=6,order=1, datafile='../fs2000/fsdat_simul', nobs=192, mh_replic=0);
diff --git a/tests/optimizers/fs2000_7.mod b/tests/optimizers/fs2000_7.mod
new file mode 100644
index 0000000000000000000000000000000000000000..3a58b5fa2f210f7055dbe774d514810c2f55c9d4
--- /dev/null
+++ b/tests/optimizers/fs2000_7.mod
@@ -0,0 +1,5 @@
+@#include "fs2000.common.inc"
+
+if exist('fminsearch','file')
+  estimation(mode_compute=7,order=1, datafile='../fs2000/fsdat_simul', nobs=192, mh_replic=0);
+end
diff --git a/tests/optimizers/fs2000_8.mod b/tests/optimizers/fs2000_8.mod
new file mode 100644
index 0000000000000000000000000000000000000000..f7415df12cb43d33f7b21165464d8883d1b2611f
--- /dev/null
+++ b/tests/optimizers/fs2000_8.mod
@@ -0,0 +1,3 @@
+@#include "fs2000.common.inc"
+
+estimation(mode_compute=8,order=1, datafile='../fs2000/fsdat_simul', nobs=192, mh_replic=0);
diff --git a/tests/optimizers/fs2000_9.mod b/tests/optimizers/fs2000_9.mod
new file mode 100644
index 0000000000000000000000000000000000000000..96d75bf96f54df7b099d617e24e74ea3af30fae4
--- /dev/null
+++ b/tests/optimizers/fs2000_9.mod
@@ -0,0 +1,3 @@
+@#include "fs2000.common.inc"
+
+estimation(mode_compute=9,order=1, datafile='../fs2000/fsdat_simul', nobs=192, mh_replic=0);
diff --git a/tests/optimizers/fs2000_w.mod b/tests/optimizers/fs2000_w.mod
new file mode 100644
index 0000000000000000000000000000000000000000..73f6bdc62a2dcfb33e744cb97035071b6310cc7d
--- /dev/null
+++ b/tests/optimizers/fs2000_w.mod
@@ -0,0 +1,3 @@
+@#include "fs2000.common.inc"
+
+estimation(mode_compute=optimizer_function_wrapper,order=1, datafile='../fs2000/fsdat_simul', nobs=192, mh_replic=0);
diff --git a/tests/optimizers/optimizer_function_wrapper.m b/tests/optimizers/optimizer_function_wrapper.m
new file mode 100644
index 0000000000000000000000000000000000000000..a7992bab3e980c18478d6554229aa30d2f11198e
--- /dev/null
+++ b/tests/optimizers/optimizer_function_wrapper.m
@@ -0,0 +1,16 @@
+function [opt_par_values,fval,exitflag]=optimizer_function_wrapper(objective_function_handle,start_par_value,varargin)
+% function [opt_par_values,fval,exitflag]=optimizer_function_wrapper(objective_function_handle,start_par_value,varargin)
+% Demonstrates how to invoke an external optimizer for mode_compute
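+%
+% Dynare calls a user-supplied optimizer with the arguments
+%   (objective_function_handle, start_par_value, varargin{:})
+% and expects it to return [opt_par_values, fval, exitflag]: the mode, the
+% value of the objective at the mode, and an exit flag.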
+
+% set the options of the optimizer (csminwel1)
+H0 = 1e-4*eye(length(start_par_value)); % initial inverse Hessian; must be positive definite
+nit = 1000;         % maximum number of iterations
+crit = 1e-7;        % convergence criterion on the improvement of the objective
+numgrad = 2;        % method used to compute the numerical gradient
+epsilon = 1e-6;     % step size for the numerical gradient
+analytic_grad = []; % no analytical gradient is supplied
+
+% call the optimizer (csminwel1, Chris Sims' quasi-Newton routine shipped with Dynare)
+[fval,opt_par_values,grad,hessian_mat,itct,fcount,exitflag] = ...
+    csminwel1(objective_function_handle, start_par_value, H0, analytic_grad, crit, nit, numgrad, epsilon, varargin{:});
+end
\ No newline at end of file