diff --git a/matlab/bfgsi1.m b/matlab/bfgsi1.m
index 48d58562daf11bbad5d65cf6fa3a424df879f789..c3b87ca49f8d550649da7047c39be84e972c65a3 100644
--- a/matlab/bfgsi1.m
+++ b/matlab/bfgsi1.m
@@ -1,13 +1,20 @@
 function H = bfgsi1(H0,dg,dx)
 % H = bfgsi1(H0,dg,dx)
-% dg is previous change in gradient; dx is previous change in x;
+% BFGS update of the inverse Hessian matrix
+% 
+% Inputs:
+%   H0  [npar by npar]  initial inverse Hessian matrix
+%   dg  [npar by 1]     previous change in gradient
+%   dx  [npar by 1]     previous change in x
+%
+% Outputs:
+%   H   [npar by npar]  updated inverse Hessian matrix
+% 
 % 6/8/93 version that updates inverse hessian instead of hessian
 % itself.
-
+% 
 % Original file downloaded from:
 % http://sims.princeton.edu/yftp/optimize/mfiles/bfgsi.m
-
+% 
 % Copyright (C) 1993-2009 Christopher Sims
+% Copyright (C) 2009-2015 Dynare Team
 %
 % This file is part of Dynare.
 %
@@ -41,4 +48,4 @@ else
     disp(['|H*dg| = ' num2str(Hdg'*Hdg)])
     H=H0;
 end
-save H.dat H
+save('H.dat','H')
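
For readers of the patch, the update documented above is the standard BFGS formula for the inverse Hessian. The following MATLAB sketch is a reading aid only, not a verbatim copy of bfgsi1; the variable names follow the header and the curvature guard of 1e-12 is an illustrative choice:

% Minimal sketch of a BFGS inverse-Hessian update, assuming H0, dg and dx
% as documented in the header above (illustrative, not the exact bfgsi1 code)
Hdg  = H0*dg;             % inverse Hessian applied to the gradient change
dgdx = dg'*dx;            % curvature along the last step
if abs(dgdx) > 1e-12      % illustrative guard against a near-zero denominator
    H = H0 + (1 + (dg'*Hdg)/dgdx)*(dx*dx')/dgdx - (dx*Hdg' + Hdg*dx')/dgdx;
else
    H = H0;               % keep the previous inverse Hessian if the update is ill-conditioned
end
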
diff --git a/matlab/optimization/csminit1.m b/matlab/optimization/csminit1.m
index b6eb95da04aaf22c4c7752076be034ee6a5d0479..b6574b005ad8653d03cf5379ed709e1d58e11a02 100644
--- a/matlab/optimization/csminit1.m
+++ b/matlab/optimization/csminit1.m
@@ -1,10 +1,24 @@
 function [fhat,xhat,fcount,retcode] = csminit1(fcn,x0,f0,g0,badg,H0,varargin)
-% [fhat,xhat,fcount,retcode] = csminit1(fcn,x0,f0,g0,badg,H0,...
-%                                       P1,P2,P3,P4,P5,P6,P7,P8)
-% retcodes: 0, normal step.  5, largest step still improves too fast.
-% 4,2 back and forth adjustment of stepsize didn't finish.  3, smallest
-% stepsize still improves too slow.  6, no improvement found.  1, zero
-% gradient.
+% [fhat,xhat,fcount,retcode] = csminit1(fcn,x0,f0,g0,badg,H0,varargin)
+% 
+% Inputs:
+%   fcn:    [string]        name of the objective function to be minimized
+%   x0:     [npar by 1]     initial value of the parameter vector
+%   f0:     [scalar]        initial value of the objective function
+%   g0:     [npar by 1]     initial value of the gradient vector
+%   badg:   [scalar]        indicator (0 or 1) for whether the gradient g0 is unreliable
+%   H0:     [npar by npar]  initial value for the inverse Hessian.  Must be positive definite.
+%   varargin:               optional additional inputs that get handed off to fcn each
+%                           time it is called
+%
+% Outputs:
+%   fhat:   [scalar]        function value at minimum
+%   xhat:   [npar by 1]     parameter vector at minimum
+%   fcount: [scalar]        number of function evaluations upon termination
+%   retcode: [scalar]       0: normal step
+%                           1: zero gradient
+%                           2,4: back-and-forth adjustment of stepsize didn't finish
+%                           3: smallest stepsize still improves too slowly
+%                           5: largest step still improves too fast
+%                           6: no improvement found
 %---------------------
 % Modified 7/22/96 to omit variable-length P list, for efficiency and compilation.
 % Places where the number of P's need to be altered or the code could be returned to
@@ -15,12 +29,12 @@ function [fhat,xhat,fcount,retcode] = csminit1(fcn,x0,f0,g0,badg,H0,varargin)
 %
 % Fixed 7/19/93 to flip eigenvalues of H to get better performance when
 % it's not psd.
-
+% 
 % Original file downloaded from:
 % http://sims.princeton.edu/yftp/optimize/mfiles/csminit.m
-
+% 
 % Copyright (C) 1993-2007 Christopher Sims
-% Copyright (C) 2008-2011 Dynare Team
+% Copyright (C) 2008-2015 Dynare Team
 %
 % This file is part of Dynare.
 %
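
As a reading aid for the interface documented in the header above, here is a hypothetical single line-search call on a convex quadratic. The deal-based anonymous objective (value first, then dummy flags to satisfy the cost_flag convention these routines check), the matrix A and the starting point are illustrative assumptions, not part of the patch; feval accepts the function handle even though the header documents fcn as a string:

% Hypothetical single line search with csminit1 on a quadratic (illustrative only)
A    = [3 1; 1 2];                        % positive definite, so the minimum is at the origin
obj  = @(x) deal(0.5*(x'*A*x), 1, 1, 1);  % value first, then dummy success flags
x0   = [1; 1];
f0   = obj(x0);                           % initial function value
g0   = A*x0;                              % analytic gradient of the quadratic at x0
badg = 0;                                 % the gradient above is reliable
H0   = eye(2);                            % any positive definite guess for the inverse Hessian
[fhat, xhat, fcount, retcode] = csminit1(obj, x0, f0, g0, badg, H0);
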
diff --git a/matlab/optimization/csminwel1.m b/matlab/optimization/csminwel1.m
index b6e38fedc0f6f0b9d61474e6a0a18b25cfd108d6..dcc579f50d4076b3a563217c56483b65ab90b552 100644
--- a/matlab/optimization/csminwel1.m
+++ b/matlab/optimization/csminwel1.m
@@ -1,29 +1,48 @@
 function [fh,xh,gh,H,itct,fcount,retcodeh] = csminwel1(fcn,x0,H0,grad,crit,nit,method,epsilon,varargin)
-%[fhat,xhat,ghat,Hhat,itct,fcount,retcodehat] = csminwel1(fcn,x0,H0,grad,crit,nit,method,epsilon,varargin)
-% fcn:   string naming the objective function to be minimized
-% x0:    initial value of the parameter vector
-% H0:    initial value for the inverse Hessian.  Must be positive definite.
-% grad:  Either a string naming a function that calculates the gradient, or the null matrix.
-%        If it's null, the program calculates a numerical gradient.  In this case fcn must
-%        be written so that it can take a matrix argument and produce a row vector of values.
-% crit:  Convergence criterion.  Iteration will cease when it proves impossible to improve the
-%        function value by more than crit.
-% nit:   Maximum number of iterations.
-% method: integer scalar, 2, 3 or 5 points formula.
-% epsilon: scalar double, numerical differentiation increment
-% varargin: A list of optional length of additional parameters that get handed off to fcn each
-%        time it is called.
+% [fh,xh,gh,H,itct,fcount,retcodeh] = csminwel1(fcn,x0,H0,grad,crit,nit,method,epsilon,varargin)
+%
+% Inputs:
+%   fcn:    [string]        name of the objective function to be minimized
+%   x0:     [npar by 1]     initial value of the parameter vector
+%   H0:     [npar by npar]  initial value for the inverse Hessian.  Must be positive definite.
+%   grad:   [string or empty matrix]  either the name of a function that calculates the gradient,
+%                           or the empty matrix.  If empty, the program calculates a numerical
+%                           gradient.  In this case fcn must be written so that it can take a
+%                           matrix argument and produce a row vector of values.
+%   crit:   [scalar]        convergence criterion.  Iteration will cease when it proves impossible
+%                           to improve the function value by more than crit.
+%   nit:    [scalar]        maximum number of iterations
+%   method: [scalar]        integer selecting the numerical gradient routine: 2-, 3- or 5-point
+%                           formula (13 and 15 call the numgrad3_ and numgrad5_ variants)
+%   epsilon: [scalar]       numerical differentiation increment
+%   varargin:               optional additional inputs that get handed off to fcn each
+%                           time it is called
+% 
 %        Note that if the program ends abnormally, it is possible to retrieve the current x,
 %        f, and H from the files g1.mat and H.mat that are written at each iteration and at each
 %        hessian update, respectively.  (When the routine hits certain kinds of difficulty, it
-%        write g2.mat and g3.mat as well.  If all were written at about the same time, any of them
-%        may be a decent starting point.  One can also start from the one with best function value.)
-
+%        writes g2.mat and g3.mat as well.  If all were written at about the same time, any of them
+%        may be a decent starting point.  One can also start from the one with the best function value.)
+% 
+% Outputs:
+%   fh:     [scalar]        function value at minimum
+%   xh:     [npar by 1]     parameter vector at minimum
+%   gh:     [npar by 1]     gradient vector at the minimum
+%   H:      [npar by npar]  approximate inverse Hessian at the minimum
+%   itct:   [scalar]        iteration count upon termination
+%   fcount: [scalar]        number of function evaluations upon termination
+%   retcodeh: [scalar]      return code:
+%                               0: normal step
+%                               1: zero gradient
+%                               2: back and forth on step length never finished
+%                               3: smallest step still improving too slow
+%                               4: back and forth on step length never finished
+%                               5: largest step still improving too fast
+%                               6: smallest step still improving too slow, reversed gradient
+%                               7: warning: possible inaccuracy in H matrix
+% 
 % Original file downloaded from:
 % http://sims.princeton.edu/yftp/optimize/mfiles/csminwel.m
-
+% 
 % Copyright (C) 1993-2007 Christopher Sims
-% Copyright (C) 2006-2012 Dynare Team
+% Copyright (C) 2006-2015 Dynare Team
 %
 % This file is part of Dynare.
 %
@@ -51,7 +70,6 @@ NumGrad= isempty(grad);
 done=0;
 itct=0;
 fcount=0;
-snit=100;
 gh = [];
 H = [];
 retcodeh = [];
@@ -74,20 +92,7 @@ if ~cost_flag
 end
 
 if NumGrad
-    switch method
-      case 2
-        [g,badg] = numgrad2(fcn, f0, x0, epsilon, varargin{:});
-      case 3
-        [g,badg] = numgrad3(fcn, f0, x0, epsilon, varargin{:});
-      case 5
-        [g,badg] = numgrad5(fcn, f0, x0, epsilon, varargin{:});
-      case 13
-        [g,badg] = numgrad3_(fcn, f0, x0, epsilon, varargin{:});
-      case 15
-        [g,badg] = numgrad5_(fcn, f0, x0, epsilon, varargin{:});
-      otherwise
-        error('csminwel1: Unknown method for gradient evaluation!')
-    end
+    [g, badg] = get_num_grad(method,fcn,f0,x0,epsilon,varargin{:});
 elseif ischar(grad)
     [g,badg] = feval(grad,x0,varargin{:});
 else
@@ -105,19 +110,15 @@ while ~done
 
     g1=[]; g2=[]; g3=[];
     %addition fj. 7/6/94 for control
-    disp('-----------------')
-    disp('-----------------')
-    %disp('f and x at the beginning of new iteration')
-    disp(sprintf('f at the beginning of new iteration, %20.10f',f))
+    if Verbose
+        disp('-----------------')
+        disp(sprintf('f at the beginning of new iteration, %20.10f',f))
+    end
     %-----------Comment out this line if the x vector is long----------------
     %   disp([sprintf('x = ') sprintf('%15.8g %15.8g %15.8g %15.8g\n',x)]);
     %-------------------------
     itct=itct+1;
-    [f1 x1 fc retcode1] = csminit1(fcn,x,f,g,badg,H,varargin{:});
-    %ARGLIST
-    %[f1 x1 fc retcode1] = csminit(fcn,x,f,g,badg,H,P1,P2,P3,P4,P5,P6,P7,...
-    %           P8,P9,P10,P11,P12,P13);
-    % itct=itct+1;
+    [f1, x1, fc, retcode1] = csminit1(fcn,x,f,g,badg,H,varargin{:});
     fcount = fcount+fc;
     % erased on 8/4/94
     % if (retcode == 1) || (abs(f1-f) < crit)
@@ -132,22 +133,9 @@ while ~done
             wall1=1; badg1=1;
         else
             if NumGrad
-                switch method
-                  case 2
-                    [g1 badg1] = numgrad2(fcn, f1, x1, epsilon, varargin{:});
-                  case 3
-                    [g1 badg1] = numgrad3(fcn, f1, x1, epsilon, varargin{:});
-                  case 5
-                    [g1,badg1] = numgrad5(fcn, f1, x1, epsilon, varargin{:});
-                  case 13
-                    [g1,badg1] = numgrad3_(fcn, f1, x1, epsilon, varargin{:});
-                  case 15
-                    [g1,badg1] = numgrad5_(fcn, f1, x1, epsilon, varargin{:});
-                  otherwise
-                    error('csminwel1: Unknown method for gradient evaluation!')
-                end
+                [g1, badg1] = get_num_grad(method,fcn,f1,x1,epsilon,varargin{:});
             elseif ischar(grad),
-                [g1 badg1] = feval(grad,x1,varargin{:});
+                [g1, badg1] = feval(grad,x1,varargin{:});
             else
                 [junk1,g1,junk2, cost_flag] = feval(fcn,x1,varargin{:});
                 badg1 = ~cost_flag;
@@ -155,8 +143,6 @@ while ~done
             wall1=badg1;
             % g1
             save g1.mat g1 x1 f1 varargin;
-            %ARGLIST
-            %save g1 g1 x1 f1 P1 P2 P3 P4 P5 P6 P7 P8 P9 P10 P11 P12 P13;
         end
         if wall1 % && (~done) by Jinill
                  % Bad gradient or back and forth on step length.  Possibly at
@@ -164,33 +150,19 @@ while ~done
                  %
                  %fcliff=fh;xcliff=xh;
             Hcliff=H+diag(diag(H).*rand(nx,1));
-            disp('Cliff.  Perturbing search direction.')
-            [f2 x2 fc retcode2] = csminit1(fcn,x,f,g,badg,Hcliff,varargin{:});
-            %ARGLIST
-            %[f2 x2 fc retcode2] = csminit(fcn,x,f,g,badg,Hcliff,P1,P2,P3,P4,...
-            %     P5,P6,P7,P8,P9,P10,P11,P12,P13);
+            if Verbose
+                disp('Cliff.  Perturbing search direction.')
+            end
+            [f2, x2, fc, retcode2] = csminit1(fcn,x,f,g,badg,Hcliff,varargin{:});
             fcount = fcount+fc; % put by Jinill
             if  f2 < f
                 if retcode2==2 || retcode2==4
                     wall2=1; badg2=1;
                 else
                     if NumGrad
-                        switch method
-                          case 2
-                            [g2 badg2] = numgrad2(fcn, f2, x2, epsilon, varargin{:});
-                          case 3
-                            [g2 badg2] = numgrad3(fcn, f2, x2, epsilon, varargin{:});
-                          case 5
-                            [g2,badg2] = numgrad5(fcn, f2, x2, epsilon, varargin{:});
-                          case 13
-                            [g2,badg2] = numgrad3_(fcn, f2, x2, epsilon, varargin{:});
-                          case 15
-                            [g2,badg2] = numgrad5_(fcn, f2, x2, epsilon, varargin{:});
-                          otherwise
-                            error('csminwel1: Unknown method for gradient evaluation!')
-                        end
+                        [g2, badg2] = get_num_grad(method,fcn,f2,x2,epsilon,varargin{:});
                     elseif ischar(grad),
-                        [g2 badg2] = feval(grad,x2,varargin{:});
+                        [g2, badg2] = feval(grad,x2,varargin{:});
                     else
                         [junk1,g2,junk2, cost_flag] = feval(fcn,x1,varargin{:});
                         badg2 = ~cost_flag;
@@ -199,8 +171,6 @@ while ~done
                     % g2
                     badg2
                     save g2.mat g2 x2 f2 varargin
-                    %ARGLIST
-                    %save g2 g2 x2 f2 P1 P2 P3 P4 P5 P6 P7 P8 P9 P10 P11 P12 P13;
                 end
                 if wall2
                     disp('Cliff again.  Try traversing')
@@ -208,33 +178,19 @@ while ~done
                         f3=f; x3=x; badg3=1;retcode3=101;
                     else
                         gcliff=((f2-f1)/((norm(x2-x1))^2))*(x2-x1);
-                        if(size(x0,2)>1), gcliff=gcliff', end
-                        [f3 x3 fc retcode3] = csminit1(fcn,x,f,gcliff,0,eye(nx),varargin{:});
-                        %ARGLIST
-                        %[f3 x3 fc retcode3] = csminit(fcn,x,f,gcliff,0,eye(nx),P1,P2,P3,...
-                        %         P4,P5,P6,P7,P8,...
-                        %      P9,P10,P11,P12,P13);
+                        if(size(x0,2)>1)
+                            gcliff=gcliff';
+                        end
+                        [f3, x3, fc, retcode3] = csminit1(fcn,x,f,gcliff,0,eye(nx),varargin{:});
                         fcount = fcount+fc; % put by Jinill
                         if retcode3==2 || retcode3==4
-                            wall3=1; badg3=1;
+                            wall3=1; 
+                            badg3=1;
                         else
                             if NumGrad
-                                switch method
-                                  case 2
-                                    [g3 badg3] = numgrad2(fcn, f3, x3, epsilon, varargin{:});
-                                  case 3
-                                    [g3 badg3] = numgrad3(fcn, f3, x3, epsilon, varargin{:});
-                                  case 5
-                                    [g3,badg3] = numgrad5(fcn, f3, x3, epsilon, varargin{:});
-                                  case 13
-                                    [g3,badg3] = numgrad3_(fcn, f3, x3, epsilon, varargin{:});
-                                  case 15
-                                    [g3,badg3] = numgrad5_(fcn, f3, x3, epsilon, varargin{:});
-                                  otherwise
-                                    error('csminwel1: Unknown method for gradient evaluation!')
-                                end
+                                [g3, badg3] = get_num_grad(method,fcn,f3,x3,epsilon,varargin{:});
                             elseif ischar(grad),
-                                [g3 badg3] = feval(grad,x3,varargin{:});
+                                [g3, badg3] = feval(grad,x3,varargin{:});
                             else
                                 [junk1,g3,junk2, cost_flag] = feval(fcn,x1,varargin{:});
                                 badg3 = ~cost_flag;
@@ -242,8 +198,6 @@ while ~done
                             wall3=badg3;
                             % g3
                             save g3.mat g3 x3 f3 varargin;
-                            %ARGLIST
-                            %save g3 g3 x3 f3 P1 P2 P3 P4 P5 P6 P7 P8 P9 P10 P11 P12 P13;
                         end
                     end
                 else
@@ -292,22 +246,9 @@ while ~done
         end
         if nogh
             if NumGrad
-                switch method
-                  case 2
-                    [gh,badgh] = numgrad2(fcn, fh, xh, epsilon, varargin{:});
-                  case 3
-                    [gh,badgh] = numgrad3(fcn, fh, xh, epsilon, varargin{:});
-                  case 5
-                    [gh,badgh] = numgrad5(fcn, fh, xh, epsilon, varargin{:});
-                  case 13
-                    [gh,badgh] = numgrad3_(fcn, fh, xh, epsilon, varargin{:});
-                  case 15
-                    [gh,badgh] = numgrad5_(fcn, fh, xh, epsilon, varargin{:});
-                  otherwise
-                    error('csminwel1: Unknown method for gradient evaluation!')
-                end
+                [gh, badgh] = get_num_grad(method,fcn,fh,xh,epsilon,varargin{:});
             elseif ischar(grad),
-                [gh badgh] = feval(grad, xh,varargin{:});
+                [gh, badgh] = feval(grad, xh,varargin{:});
             else
                 [junk1,gh,junk2, cost_flag] = feval(fcn,x1,varargin{:});
                 badgh = ~cost_flag;
@@ -316,11 +257,6 @@ while ~done
         badgh=1;
     end
     %end of picking
-    %ih
-    %fh
-    %xh
-    %gh
-    %badgh
     stuck = (abs(fh-f) < crit);
     if (~badg) && (~badgh) && (~stuck)
         H = bfgsi1(H,gh-g,xh-x);
@@ -338,24 +274,47 @@ while ~done
         done = 1;
     end
     rc=retcodeh;
-    if rc == 1
-        disp('zero gradient')
-    elseif rc == 6
-        disp('smallest step still improving too slow, reversed gradient')
-    elseif rc == 5
-        disp('largest step still improving too fast')
-    elseif (rc == 4) || (rc==2)
-        disp('back and forth on step length never finished')
-    elseif rc == 3
-        disp('smallest step still improving too slow')
-    elseif rc == 7
-        disp('warning: possible inaccuracy in H matrix')
+    if Verbose || done
+        if rc == 0
+            % do nothing, just a normal step
+        elseif rc == 1
+            disp('zero gradient')
+        elseif rc == 6
+            disp('smallest step still improving too slow, reversed gradient')
+        elseif rc == 5
+            disp('largest step still improving too fast')
+        elseif (rc == 4) || (rc==2)
+            disp('back and forth on step length never finished')
+        elseif rc == 3
+            disp('smallest step still improving too slow')
+        elseif rc == 7
+            disp('warning: possible inaccuracy in H matrix')
+        else
+            error('csminwel1: Unaccounted return code, please contact the developers')
+        end
     end
-    % end
+
     f=fh;
     x=xh;
     g=gh;
     badg=badgh;
 end
-% what about making an m-file of 10 lines including numgrad.m
-% since it appears three times in csminwel.m
\ No newline at end of file
+
+end
+
+function [g, badg]=get_num_grad(method,fcn,f0,x0,epsilon,varargin)
+    switch method
+      case 2
+        [g,badg] = numgrad2(fcn, f0, x0, epsilon, varargin{:});
+      case 3
+        [g,badg] = numgrad3(fcn, f0, x0, epsilon, varargin{:});
+      case 5
+        [g,badg] = numgrad5(fcn, f0, x0, epsilon, varargin{:});
+      case 13
+        [g,badg] = numgrad3_(fcn, f0, x0, epsilon, varargin{:});
+      case 15
+        [g,badg] = numgrad5_(fcn, f0, x0, epsilon, varargin{:});
+      otherwise
+        error('csminwel1: Unknown method for gradient evaluation!')
+    end
+end
\ No newline at end of file
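
To make the documented interface and the new get_num_grad dispatch concrete, here is a hypothetical driver call on the Rosenbrock function with a numerical gradient (grad = []). The deal-based objective, the tolerances and the method choice are illustrative assumptions rather than part of the patch; feval accepts the anonymous handle even though the header documents fcn as a string:

% Hypothetical csminwel1 call on the Rosenbrock function (illustrative only)
rosenbrock = @(x) deal(100*(x(2)-x(1)^2)^2 + (1-x(1))^2, 1, 1, 1);  % value first, then dummy success flags
x0      = [-1.2; 1];              % classic Rosenbrock starting point
H0      = 1e-4*eye(2);            % small positive definite initial inverse Hessian
crit    = 1e-7;                   % stop once the function cannot be improved by more than this
nit     = 1000;                   % maximum number of iterations
method  = 3;                      % 3-point numerical gradient, dispatched through get_num_grad
epsilon = 1e-6;                   % numerical differentiation increment
[fh, xh, gh, H, itct, fcount, retcodeh] = ...
    csminwel1(rosenbrock, x0, H0, [], crit, nit, method, epsilon);
% xh should end up near [1; 1] and fh near 0; H approximates the inverse Hessian at xh
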
diff --git a/matlab/optimization/dynare_minimize_objective.m b/matlab/optimization/dynare_minimize_objective.m
index c0259880b3d353d790c68f356c0dc4a9eac35c62..9e8761bd2b61d73f7310c08e5b409dfce750c92c 100644
--- a/matlab/optimization/dynare_minimize_objective.m
+++ b/matlab/optimization/dynare_minimize_objective.m
@@ -190,8 +190,9 @@ switch minimizer_algorithm
         analytic_grad=[];
     end
     % Call csminwell.
-    [fval,opt_par_values,grad,hessian_mat,itct,fcount,exitflag] = ...
+    [fval,opt_par_values,grad,inverse_hessian_mat,itct,fcount,exitflag] = ...
         csminwel1(objective_function, start_par_value, H0, analytic_grad, crit, nit, numgrad, epsilon, varargin{:});
+    hessian_mat=inv(inverse_hessian_mat);
   case 5
     if options_.analytic_derivation==-1 %set outside as code for use of analytic derivation
         analytic_grad=1;