# HG changeset patch
# User Jaroslav Hajek
# Date 1252500186 -7200
# Node ID 00958d0c4e3cf232305d184c5d519906633a1447
# Parent d52e405df4f757a1e56d6fbaf52a4f13b2f46b5c
split __dogleg__ > __doglegm__

diff -r d52e405df4f7 -r 00958d0c4e3c scripts/ChangeLog
--- a/scripts/ChangeLog Tue Sep 08 23:37:00 2009 -0400
+++ b/scripts/ChangeLog Wed Sep 09 14:43:06 2009 +0200
@@ -1,3 +1,9 @@
+2009-09-06  Jaroslav Hajek
+
+        * optimization/__dogleg__.m: Revert to revision 22c8272af34b.
+        * optimization/__doglegm__.m: New source.
+        * optimization/fminunc.m: Use it.
+
 2009-09-08  John W. Eaton
 
         * io/dlmwrite.m: Fix typo.
diff -r d52e405df4f7 -r 00958d0c4e3c scripts/optimization/__dogleg__.m
--- a/scripts/optimization/__dogleg__.m Tue Sep 08 23:37:00 2009 -0400
+++ b/scripts/optimization/__dogleg__.m Wed Sep 09 14:43:06 2009 +0200
@@ -17,42 +17,24 @@
 ## <http://www.gnu.org/licenses/>.
 
 ## -*- texinfo -*-
-## @deftypefn{Function File} {@var{x}} = __dogleg__ (@var{r}, @var{b}, @var{x}, @var{d}, @var{delta}, @var{ismin})
-## Solve the double dogleg trust-region problem:
-## Minimize
-## @example
-## norm(@var{r}*@var{x}-@var{b})
-## @end example
-## subject to the constraint
-## @example
-## norm(@var{d}.*@var{x}) <= @var{delta} ,
-## @end example
-## x being a convex combination of the gauss-newton and scaled gradient.
-## If @var{ismin} is true (default false), minimizes instead
-## @example
-## norm(@var{r}*@var{x})^2-2*@var{b}'*@var{x}
-## @end example
+## @deftypefn{Function File} {@var{x}} = __dogleg__ (@var{r}, @var{b}, @var{x}, @var{d}, @var{delta})
+## Undocumented internal function.
 ## @end deftypefn
+
+## Solve the double dogleg trust-region least-squares problem:
+## Minimize norm(r*x-b) subject to the constraint norm(d.*x) <= delta,
+## x being a convex combination of the gauss-newton and scaled gradient.
 
 ## TODO: error checks
 ## TODO: handle singularity, or leave it up to mldivide?
 
-function x = __dogleg__ (r, b, d, delta, ismin = false)
+function x = __dogleg__ (r, b, d, delta)
   ## Get Gauss-Newton direction.
-  if (ismin)
-    g = b;
-    b = r' \ g;
-  endif
   x = r \ b;
   xn = norm (d .* x);
   if (xn > delta)
     ## GN is too big, get scaled gradient.
-    if (ismin)
-      s = g ./ d;
-    else
-      s = (r' * b) ./ d;
-    endif
+    s = (r' * b) ./ d;
     sn = norm (s);
     if (sn > 0)
       ## Normalize and rescale.
diff -r d52e405df4f7 -r 00958d0c4e3c scripts/optimization/__doglegm__.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/optimization/__doglegm__.m Wed Sep 09 14:43:06 2009 +0200
@@ -0,0 +1,63 @@
+## Copyright (C) 2008, 2009 Jaroslav Hajek
+##
+## This file is part of Octave.
+##
+## Octave is free software; you can redistribute it and/or modify it
+## under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 3 of the License, or (at
+## your option) any later version.
+##
+## Octave is distributed in the hope that it will be useful, but
+## WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+## General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with Octave; see the file COPYING.  If not, see
+## <http://www.gnu.org/licenses/>.
+
+## -*- texinfo -*-
+## @deftypefn{Function File} {@var{x}} = __doglegm__ (@var{r}, @var{b}, @var{x}, @var{d}, @var{delta})
+## @end deftypefn
+
+## Solve the double dogleg trust-region minimization problem:
+## Minimize 1/2*norm(r*x)^2 subject to the constraint norm(d.*x) <= delta,
+## x being a convex combination of the gauss-newton and scaled gradient.
+
+## TODO: error checks
+## TODO: handle singularity, or leave it up to mldivide?
+
+function x = __doglegm__ (r, g, d, delta)
+  ## Get Gauss-Newton direction.
+  b = r' \ g;
+  x = r \ b;
+  xn = norm (d .* x);
+  if (xn > delta)
+    ## GN is too big, get scaled gradient.
+    s = g ./ d;
+    sn = norm (s);
+    if (sn > 0)
+      ## Normalize and rescale.
+      s = (s / sn) ./ d;
+      ## Get the line minimizer in s direction.
+      tn = norm (r*s);
+      snm = (sn / tn) / tn;
+      if (snm < delta)
+        ## Get the dogleg path minimizer.
+        bn = norm (b);
+        dxn = delta/xn; snmd = snm/delta;
+        t = (bn/sn) * (bn/xn) * snmd;
+        t -= dxn * snmd^2 - sqrt ((t-dxn)^2 + (1-dxn^2)*(1-snmd^2));
+        alpha = dxn*(1-snmd^2) / t;
+      else
+        alpha = 0;
+      endif
+    else
+      alpha = delta / xn;
+      snm = 0;
+    endif
+    ## Form the appropriate convex combination.
+    x = alpha * x + ((1-alpha) * min (snm, delta)) * s;
+  endif
+endfunction
+
diff -r d52e405df4f7 -r 00958d0c4e3c scripts/optimization/fminunc.m
--- a/scripts/optimization/fminunc.m Tue Sep 08 23:37:00 2009 -0400
+++ b/scripts/optimization/fminunc.m Wed Sep 09 14:43:06 2009 +0200
@@ -209,7 +209,7 @@
 
     ## Inner loop.
     while (! suc && niter <= maxiter && nfev < maxfev && ! info)
 
-      s = - __dogleg__ (hesr, grad, dg, delta, true);
+      s = - __doglegm__ (hesr, grad, dg, delta);
 
       sn = norm (dg .* s);
       if (niter == 1)
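
The split leaves __dogleg__ solving only the least-squares model (minimize norm(r*x-b) within the scaled trust region), while the new __doglegm__ handles the minimization model that fminunc needs, taking the gradient g and a triangular factor r of the model Hessian in place of the old ismin flag. A minimal usage sketch in Octave follows; the values of H, g, d and delta are made up for illustration, __doglegm__ is an internal helper rather than a user-facing function, and treating hesr as a Cholesky factor of the model Hessian is an assumption about how fminunc supplies it, not something spelled out in the patch itself:

    ## Take one dogleg trust-region step on the quadratic model
    ## q(s) = g'*s + 1/2*s'*H*s, scaled by d and limited to radius delta.
    H = [4, 1; 1, 3];      # model Hessian (positive definite), illustrative
    g = [1; 2];            # gradient at the current iterate, illustrative
    d = [1; 1];            # trust-region scaling vector
    delta = 0.5;           # trust-region radius
    hesr = chol (H);       # upper triangular factor, H = hesr' * hesr
    s = - __doglegm__ (hesr, g, d, delta);   # same call shape as in fminunc
    ## The returned step stays inside the scaled region: norm (d .* s) <= delta.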