changeset 2571:3f662c38802c octave-forge

remove use of celleval; call feval directly instead, which is more efficient
author mcreel
date Tue, 03 Oct 2006 09:02:42 +0000
parents ea67ff1eca1d
children 94f6074cb425
files main/optim/src/Makefile main/optim/src/__bfgsmin.cc main/optim/src/numgradient.cc main/optim/src/numhessian.cc main/optim/src/samin.cc
diffstat 5 files changed, 53 insertions(+), 66 deletions(-)
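Before the file-by-file diff, a minimal sketch of the calling pattern this changeset moves to, assuming the Octave C++ API of the period (feval from parse.h, Cell, octave_value_list). The helper call_objective and its signature are purely illustrative and not part of the patch; the patched sources keep their own routines such as __bfgsmin_obj.

// Illustrative sketch only (not part of the changeset): invoking a named user
// function through feval() once the celleval wrapper is dropped.
// call_objective is a hypothetical name used here for demonstration.
#include <octave/oct.h>
#include <octave/parse.h>   // feval
#include <octave/Cell.h>

static double call_objective (const std::string &fname, const Cell &f_args_cell)
{
	// copy the cell contents into an octave_value_list once, up front
	int n = f_args_cell.length ();
	octave_value_list f_args (n, octave_value ());
	for (int i = 0; i < n; i++)
		f_args(i) = f_args_cell(i);

	// old pattern (removed below): pack {f, f_args} and go through celleval
	//   octave_value_list c_args (2, octave_value ());
	//   c_args(0) = fname;
	//   c_args(1) = f_args_cell;
	//   f_return = feval ("celleval", c_args, 1);

	// new pattern: dispatch to the user function directly
	octave_value_list f_return = feval (fname, f_args, 1);
	return f_return(0).double_value ();
}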
--- a/main/optim/src/Makefile	Tue Oct 03 09:02:33 2006 +0000
+++ b/main/optim/src/Makefile	Tue Oct 03 09:02:42 2006 +0000
@@ -1,4 +1,4 @@
-all: leval.oct __bfgsmin.oct celleval.oct numgradient.oct numhessian.oct samin.oct
+all: leval.oct __bfgsmin.oct numgradient.oct numhessian.oct samin.oct
 
 %.oct: %.cc
 	mkoctfile -s $<
--- a/main/optim/src/__bfgsmin.cc	Tue Oct 03 09:02:33 2006 +0000
+++ b/main/optim/src/__bfgsmin.cc	Tue Oct 03 09:02:42 2006 +0000
@@ -30,16 +30,13 @@
 #include "error.h"
 
 
-int __bfgsmin_obj(double &obj, std::string f, Cell f_args, ColumnVector theta, int minarg)
+int __bfgsmin_obj(double &obj, const std::string f, octave_value_list f_args, const ColumnVector theta, const int minarg)
 {
 	octave_value_list f_return;
-	octave_value_list c_args(2,1); // for cellevall {f, f_args}
 	int success = 1;
 
-	c_args(0) = f;
 	f_args(minarg - 1) = theta;
-	c_args(1) = f_args;
-	f_return = feval("celleval", c_args);
+	f_return = feval(f, f_args);
 	obj = f_return(0).double_value();
 	// bullet-proof the objective function
 	if (error_state)
@@ -55,7 +52,7 @@
 // __numgradient: numeric central difference gradient for bfgs.
 // This is the same as numgradient, except the derivative is known to be a vector, it's defined as a column,
 // and the finite difference delta is incorporated directly rather than called from a function
-int __numgradient(ColumnVector &derivative, std::string f, Cell f_args, int minarg)
+int __numgradient(ColumnVector &derivative, const std::string f, const octave_value_list f_args, const int minarg)
 {
 	double SQRT_EPS, diff, delta, obj_left, obj_right, p;
 	int j, test, success;
@@ -89,27 +86,22 @@
 }
 
 
-int __bfgsmin_gradient(ColumnVector &derivative, std::string f, Cell f_args, ColumnVector theta, int minarg, int try_analytic_gradient, int &have_analytic_gradient) {
+int __bfgsmin_gradient(ColumnVector &derivative, const std::string f, octave_value_list f_args, const ColumnVector theta, const int minarg, int try_analytic_gradient, int &have_analytic_gradient) {
 	octave_value_list f_return;
-	octave_value_list c_args(2,1); // for cellevall {f, f_args}
 	int k = theta.rows();
 	int success;
 	ColumnVector g(k);
 	Matrix check_gradient(k,1);
 
 	if (have_analytic_gradient) {
-		c_args(0) = f;
 		f_args(minarg - 1) = theta;
-		c_args(1) = f_args;
-		f_return = feval("celleval", c_args);
+		f_return = feval(f, f_args);
 		g = f_return(1).column_vector_value();
 	}
 
 	else if (try_analytic_gradient) {
-		c_args(0) = f;
 		f_args(minarg - 1) = theta;
-		c_args(1) = f_args;
-		f_return = feval("celleval", c_args);
+		f_return = feval(f, f_args);
 		if (f_return.length() > 1) {
 			if (f_return(1).is_real_matrix()) {
         			if ((f_return(1).rows() == k) & (f_return(1).columns() == 1)) {
@@ -139,7 +131,7 @@
 
 
 // this is the lbfgs direction, used if control has 5 elements
-ColumnVector lbfgs_recursion(int memory, Matrix sigmas, Matrix gammas, ColumnVector d)
+ColumnVector lbfgs_recursion(const int memory, const Matrix sigmas, const Matrix gammas, ColumnVector d)
 {
 	if (memory == 0)
   	{
@@ -174,7 +166,7 @@
 }
 
 // __bisectionstep: fallback stepsize method if __newtonstep fails
-int __bisectionstep(double &step, double &obj, std::string f, Cell f_args, ColumnVector dx, int minarg, int verbose)
+int __bisectionstep(double &step, double &obj, const std::string f, const octave_value_list f_args, const ColumnVector dx, const int minarg, const int verbose)
 {
 	double obj_0, a;
 	int found_improvement;
@@ -226,7 +218,7 @@
 }
 
 // __newtonstep: default stepsize algorithm
-int __newtonstep(double &a, double &obj, std::string f, Cell f_args, ColumnVector dx, int minarg, int verbose)
+int __newtonstep(double &a, double &obj, const std::string f, const octave_value_list f_args, const ColumnVector dx, const int minarg, const int verbose)
 {
 	double obj_0, obj_left, obj_right, delta, inv_delta_sq, gradient, hessian;
 	int found_improvement = 0;
@@ -280,8 +272,8 @@
 DEFUN_DLD(__bfgsmin, args, ,"__bfgsmin: backend for bfgs minimization\n\
 Users should not use this directly. Use bfgsmin.m instead") {
 	std::string f (args(0).string_value());
-  	Cell f_args (args(1).cell_value());
-  	octave_value_list f_return; // holder for return items
+  	Cell f_args_cell (args(1).cell_value());
+	octave_value_list f_args, f_return; // holder for return items
 
 	int max_iters, verbosity, criterion, minarg, convergence, iter, memory, \
 		gradient_ok, i, j, k, conv_fun, conv_param, conv_grad, have_gradient, \
@@ -306,6 +298,11 @@
 	warnings = 0;
 	if (verbosity == 3) warnings = 1;
 
+	// copy cell contents over to octave_value_list to use feval()
+	k = f_args_cell.length();
+	f_args(k); // resize only once
+	for (i = 0; i<k; i++) f_args(i) = f_args_cell(i);
+
 	// get the minimization argument
 	ColumnVector theta  = f_args(minarg - 1).column_vector_value();
 	k = theta.rows();
--- a/main/optim/src/numgradient.cc	Tue Oct 03 09:02:33 2006 +0000
+++ b/main/optim/src/numgradient.cc	Tue Oct 03 09:02:42 2006 +0000
@@ -86,8 +86,7 @@
 ")
 {
 	int nargin = args.length();
-	if (!((nargin == 2)|| (nargin == 3)))
-	{
+	if (!((nargin == 2)|| (nargin == 3))) {
 		error("numgradient: you must supply 2 or 3 arguments");
 		return octave_value_list();
 	}
@@ -96,33 +95,34 @@
 	if (any_bad_argument(args)) return octave_value_list();
 
 	std::string f (args(0).string_value());
-	Cell f_args (args(1).cell_value());
-	octave_value_list c_args(2,1); // for cellevall {f, f_args}
-	octave_value_list f_return;
-	c_args(0) = f;
-	c_args(1) = f_args;
+	Cell f_args_cell (args(1).cell_value());
+	octave_value_list f_args, f_return;
 	Matrix obj_value, obj_left, obj_right;
 	double SQRT_EPS, p, delta, diff;
-	int i, j, minarg, test;
+	int i, j, k, n, minarg, test;
 
 	// Default values for controls
 	minarg = 1; // by default, first arg is one over which we minimize
 
-	// possibly minimization not over 1st arg
+	// copy cell contents over to octave_value_list to use feval()
+	k = f_args_cell.length();
+	f_args(k); // resize only once
+	for (i = 0; i<k; i++) f_args(i) = f_args_cell(i);
+
+	// check which argument we need to differentiate with respect to
 	if (args.length() == 3) minarg = args(2).int_value();
 	Matrix parameter = f_args(minarg - 1).matrix_value();
 
 	// initial function value
-	f_return = feval("celleval", c_args);
+	f_return = feval(f, f_args);
 	obj_value = f_return(0).matrix_value();
 
-	const int n = obj_value.rows(); // find out dimension
-	const int k = parameter.rows();
+	n = obj_value.rows(); // find out dimension
+	k = parameter.rows();
 	Matrix derivative(n, k);
 	Matrix columnj;
 
-	for (j=0; j<k; j++) // get 1st derivative by central difference
-	{
+	for (j=0; j<k; j++) { // get 1st derivative by central difference
 		p = parameter(j);
 
 		// determine delta for finite differencing
@@ -135,15 +135,13 @@
 		// right side
 		parameter(j) = p + delta;
 		f_args(minarg - 1) = parameter;
-		c_args(1) = f_args;
-		f_return = feval("celleval", c_args);
+		f_return = feval(f, f_args);
 		obj_right = f_return(0).matrix_value();
 
 		// left size
 		parameter(j) = p - delta;
 		f_args(minarg - 1) = parameter;
-		c_args(1) = f_args;
-		f_return = feval("celleval", c_args);
+		f_return = feval(f, f_args);
 		obj_left = f_return(0).matrix_value();
 
 		parameter(j) = p;  // restore original parameter
--- a/main/optim/src/numhessian.cc	Tue Oct 03 09:02:33 2006 +0000
+++ b/main/optim/src/numhessian.cc	Tue Oct 03 09:02:42 2006 +0000
@@ -100,42 +100,41 @@
 	if (any_bad_argument(args)) return octave_value_list();
 
 	std::string f (args(0).string_value());
-	Cell f_args (args(1).cell_value());
-	octave_value_list c_args(2,1); // for cellevall {f, f_args}
-	octave_value_list f_return;
-	c_args(0) = f;
-	c_args(1) = f_args;
-	int i, j, minarg;
+	Cell f_args_cell (args(1).cell_value());
+	octave_value_list f_args, f_return;
+	int i, j, k, minarg;
 	bool test;
 	double di, hi, pi, dj, hj, pj, hia, hja, fpp, fmm, fmp, fpm, obj_value, SQRT_EPS, diff;
 
 	// Default values for controls
 	minarg = 1; // by default, first arg is one over which we minimize
 
-	// possibly minimization not over 1st arg
-	if (args.length() == 3) minarg = args(2).int_value();
+	// copy cell contents over to octave_value_list to use feval()
+	k = f_args_cell.length();
+	f_args(k); // resize only once
+	for (i = 0; i<k; i++) f_args(i) = f_args_cell(i);
 
+	// check which argument we need to differentiate with respect to
+	if (args.length() == 3) minarg = args(2).int_value();
 	Matrix parameter = f_args(minarg - 1).matrix_value();
-	const int k = parameter.rows();
+	k = parameter.rows();
 	Matrix derivative(k, k);
 
-	f_return = feval("celleval", c_args);
+	f_return = feval(f, f_args);
 	obj_value = f_return(0).double_value();
 
 	diff = exp(log(DBL_EPSILON)/4);
 	SQRT_EPS = sqrt(DBL_EPSILON);
 
 
-	for (i = 0; i<k;i++)	// approximate 2nd deriv. by central difference
-	{
+	for (i = 0; i<k;i++) {	// approximate 2nd deriv. by central difference
 		pi = parameter(i);
 		test = (fabs(pi) + SQRT_EPS) * SQRT_EPS > diff;
 		if (test) hi = (fabs(pi) + SQRT_EPS) * SQRT_EPS;
 		else hi = diff;
 
 
-		for (j = 0; j < i; j++) // off-diagonal elements
-		{
+		for (j = 0; j < i; j++) { // off-diagonal elements
 			pj = parameter(j);
 			test = (fabs(pj) + SQRT_EPS) * SQRT_EPS > diff;
 			if (test) hj = (fabs(pj) + SQRT_EPS) * SQRT_EPS;
@@ -147,8 +146,7 @@
 			hia = di - pi;
 			hja = dj - pj;
 			f_args(minarg - 1) = parameter;
-			c_args(1) = f_args;
-			f_return = feval("celleval", c_args);
+			f_return = feval(f, f_args);
 			fpp = f_return(0).double_value();
 
 			// -1 -1
@@ -157,24 +155,21 @@
 			hia = hia + pi - di;
 			hja = hja + pj - dj;
 			f_args(minarg - 1) = parameter;
-			c_args(1) = f_args;
-			f_return = feval("celleval", c_args);
+			f_return = feval(f, f_args);
 			fmm = f_return(0).double_value();
 
 			// +1 -1
 			parameter(i) = pi + hi;
 			parameter(j) = pj - hj;
 			f_args(minarg - 1) = parameter;
-			c_args(1) = f_args;
-			f_return = feval("celleval", c_args);
+			f_return = feval(f, f_args);
 			fpm = f_return(0).double_value();
 
 			// -1 +1
 			parameter(i) = pi - hi;
 			parameter(j) = pj + hj;
 			f_args(minarg - 1) = parameter;
-			c_args(1) = f_args;
-			f_return = feval("celleval", c_args);
+			f_return = feval(f, f_args);
 			fmp = f_return(0).double_value();
 
 			derivative(j,i) = ((fpp - fpm) + (fmm - fmp)) / (hia * hja);
@@ -187,16 +182,14 @@
 		// +1 +1
 		parameter(i) = di = pi + 2 * hi;
 		f_args(minarg - 1) = parameter;
-		c_args(1) = f_args;
-		f_return = feval("celleval", c_args);
+		f_return = feval(f, f_args);
 		fpp = f_return(0).double_value();
 		hia = (di - pi) / 2;
 
 		// -1 -1
 		parameter(i) = di = pi - 2 * hi;
 		f_args(minarg - 1) = parameter;
-		c_args(1) = f_args;
-		f_return = feval("celleval", c_args);
+		f_return = feval(f, f_args);
 		fmm = f_return(0).double_value();
 		hia = hia + (pi - di) / 2;
 
--- a/main/optim/src/samin.cc	Tue Oct 03 09:02:33 2006 +0000
+++ b/main/optim/src/samin.cc	Tue Oct 03 09:02:42 2006 +0000
@@ -167,8 +167,7 @@
 }
 
 //-------------- The annealing algorithm --------------
-DEFUN_DLD(samin, args, ,
-	  "samin: simulated annealing minimization of a function. See samin_example.m\n\
+DEFUN_DLD(samin, args, , "samin: simulated annealing minimization of a function. See samin_example.m\n\
 \n\
 [x, obj, convergence, details] = samin(\"f\", {args}, {control})\n\
 \n\