changeset 0:8f23314345f4 draft

Create local repository for matrix toolboxes. Step #0 done.
author Antonio Pino Robles <data.script93@gmail.com>
date Wed, 06 May 2015 14:56:53 +0200
parents
children e471a92d17be
files funm_files/blocking.m funm_files/fun_cos.m funm_files/fun_exp.m funm_files/fun_log.m funm_files/fun_sin.m funm_files/funm_atom.m funm_files/logm_isst.m funm_files/myfunm.m funm_files/swap.c funm_files/swap.mexglx funm_files/swapping.m funm_files/sylv_tri.m matrixcomp/Contents.m matrixcomp/adsmax.m matrixcomp/augment.m matrixcomp/cholp.m matrixcomp/chop.m matrixcomp/cod.m matrixcomp/cpltaxes.m matrixcomp/dual.m matrixcomp/fv.m matrixcomp/gep.m matrixcomp/gersh.m matrixcomp/gfpp.m matrixcomp/gj.m matrixcomp/gqr.m matrixcomp/gs_c.m matrixcomp/gs_m.m matrixcomp/ldlt_skew.m matrixcomp/ldlt_symm.m matrixcomp/ldlt_sytr.m matrixcomp/lse.m matrixcomp/makejcf.m matrixcomp/matrix.m matrixcomp/matsignt.m matrixcomp/mctdemo.m matrixcomp/mctoolbox.pdf matrixcomp/mdsmax.m matrixcomp/nmsmax.m matrixcomp/pnorm.m matrixcomp/poldec.m matrixcomp/ps.m matrixcomp/pscont.m matrixcomp/readme.m matrixcomp/rootm.m matrixcomp/rschur.m matrixcomp/see.m matrixcomp/seqcheb.m matrixcomp/seqm.m matrixcomp/show.m matrixcomp/signm.m matrixcomp/skewpart.m matrixcomp/sparsify.m matrixcomp/strassen.m matrixcomp/strassenw.m matrixcomp/sub.m matrixcomp/symmpart.m matrixcomp/trap2tri.m matrixcomp/treshape.m matrixcomp/vand.m matrixcomp/vecperm.m mftoolbox/Contents.m mftoolbox/arnoldi.m mftoolbox/ascent_seq.m mftoolbox/cosm.m mftoolbox/cosm_pade.m mftoolbox/cosmsinm.m mftoolbox/cosmsinm_pade.m mftoolbox/expm_cond.m mftoolbox/expm_frechet_pade.m mftoolbox/expm_frechet_quad.m mftoolbox/fab_arnoldi.m mftoolbox/funm_condest1.m mftoolbox/funm_condest_fro.m mftoolbox/funm_ev.m mftoolbox/funm_simple.m mftoolbox/log_pade_err_opt.mat mftoolbox/logm_cond.m mftoolbox/logm_frechet_pade.m mftoolbox/logm_iss.m mftoolbox/logm_pade_pf.m mftoolbox/mft_test.m mftoolbox/mft_tolerance.m mftoolbox/polar_newton.m mftoolbox/polar_svd.m mftoolbox/polyvalm_ps.m mftoolbox/power_binary.m mftoolbox/quasitriang_struct.m mftoolbox/readme.m mftoolbox/riccati_xaxb.m mftoolbox/rootpm_newton.m mftoolbox/rootpm_real.m mftoolbox/rootpm_schur_newton.m mftoolbox/rootpm_sign.m mftoolbox/signm.m mftoolbox/signm_newton.m mftoolbox/sqrtm_db.m mftoolbox/sqrtm_dbp.m mftoolbox/sqrtm_newton.m mftoolbox/sqrtm_newton_full.m mftoolbox/sqrtm_pd.m mftoolbox/sqrtm_pulay.m mftoolbox/sqrtm_real.m mftoolbox/sqrtm_triang_min_norm.m mftoolbox/sylvsol.m mftoolbox/tau_r8_zeros.mat toolbox/augment.m toolbox/bandred.m toolbox/cauchy.m toolbox/chebspec.m toolbox/chebvand.m toolbox/cholp.m toolbox/chop.m toolbox/chow.m toolbox/circul.m toolbox/clement.m toolbox/cod.m toolbox/comp.m toolbox/compan.m toolbox/cond.m toolbox/condex.m toolbox/contents.m toolbox/cpltaxes.m toolbox/cycol.m toolbox/diagpiv.m toolbox/dingdong.m toolbox/dorr.m toolbox/dramadah.m toolbox/dual.m toolbox/eigsens.m toolbox/fdemo.m toolbox/fiedler.m toolbox/forsythe.m toolbox/frank.m toolbox/fv.m toolbox/gallery.m toolbox/gearm.m toolbox/gersh.m toolbox/gfpp.m toolbox/gj.m toolbox/grcar.m toolbox/hadamard.m toolbox/hanowa.m toolbox/hilb.m toolbox/house.m toolbox/invhess.m toolbox/invol.m toolbox/ipjfact.m toolbox/jordbloc.m toolbox/kahan.m toolbox/kms.m toolbox/krylov.m toolbox/lauchli.m toolbox/lehmer.m toolbox/lesp.m toolbox/lotkin.m toolbox/makejcf.m toolbox/matrix.m toolbox/matsignt.m toolbox/minij.m toolbox/moler.m toolbox/neumann.m toolbox/ohess.m toolbox/orthog.m toolbox/parter.m toolbox/pascal.m toolbox/pdtoep.m toolbox/pei.m toolbox/pentoep.m toolbox/pnorm.m toolbox/poisson.m toolbox/poldec.m toolbox/prolate.m toolbox/ps.m toolbox/pscont.m toolbox/qmult.m toolbox/rando.m toolbox/randsvd.m toolbox/redheff.m 
toolbox/riemann.m toolbox/rq.m toolbox/rschur.m toolbox/see.m toolbox/seqa.m toolbox/seqcheb.m toolbox/seqm.m toolbox/show.m toolbox/signm.m toolbox/skewpart.m toolbox/smoke.m toolbox/sparsify.m toolbox/sub.m toolbox/symmpart.m toolbox/tmtdemo.m toolbox/trap2tri.m toolbox/tridiag.m toolbox/triw.m toolbox/vand.m toolbox/wathen.m toolbox/wilk.m
diffstat 200 files changed, 9298 insertions(+), 0 deletions(-)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/funm_files/blocking.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,73 @@
+function m = blocking(A,delta,showplot)
+%BLOCKING  Produce blocking pattern for block Parlett recurrence.
+%          M = BLOCKING(A, DELTA, SHOWPLOT) accepts an upper triangular matrix
+%          A and produces a blocking pattern, specified by the vector M,
+%          for the block Parlett recurrence.
+%          M(i) is the index of the block into which A(i,i) should be placed.
+%          DELTA is a gap parameter (default 0.1) used to determine the
+%          blocking.
+%          Setting SHOWPLOT nonzero produces a plot of the eigenvalues
+%          that indicates the blocking:
+%            - Black circles show a set of 1 eigenvalue.
+%            - Blue circles show a set of >1 eigenvalues.
+%              The lines connect eigenvalues in the same set.
+%              Red squares show the mean of each set.
+
+%          For A coming from a real matrix it should be possible to take
+%          advantage of the symmetry about the real axis.  This code does not.
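+%
+%          Example (illustrative sketch; the test matrix is an arbitrary
+%          choice):
+%            T = schur(randn(8),'complex');   % upper triangular Schur factor
+%            m = blocking(T, 0.2, 1)          % blocking pattern, with plot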
+
+a = diag(A); n = length(a);
+m = zeros(1,n); maxM = 0;
+
+if nargin < 2 | isempty(delta), delta = 0.1; end
+if nargin < 3, showplot = 0;  end
+
+if showplot, clf, hold on, end
+
+for i = 1:n
+
+    if m(i) == 0
+        m(i) = maxM + 1; % If a(i) hasn't been assigned to a set
+        maxM = maxM + 1; % then make a new set and assign a(i) to it.
+    end
+
+    for j = i+1:n
+        if m(i) ~= m(j)    % If a(i) and a(j) are not in same set.
+            if abs(a(i)-a(j)) <= delta
+                if showplot
+                    plot(real([a(i) a(j)]),imag([a(i) a(j)]),'o-')
+                end
+
+                if m(j) == 0
+                    m(j) = m(i); % If a(j) hasn't been assigned to a
+                                 % set, assign it to the same set as a(i).
+                else
+                    p = max(m(i),m(j)); q = min(m(i),m(j));
+                    m(m==p) = q; % If a(j) has been assigned to a set
+                                 % place all the elements in the set
+                                 % containing a(j) into the set
+                                 % containing a(i) (or vice versa).
+                    m(m>p) = m(m>p) -1;
+                    maxM = maxM - 1;
+                                 % Tidying up. As we have deleted set
+                                 % p we reduce the index of the sets
+                                 % > p by 1.
+                end
+            end
+        end
+    end
+end
+
+if showplot
+    for i = 1:max(m)
+        a_ind = a(m==i);
+        if length(a_ind) == 1
+            plot(real(a_ind),imag(a_ind),'ok' )
+        else
+%            plot(real(mean(a_ind)),imag(mean(a_ind)),'sr' )
+        end
+    end
+    grid
+    hold off
+    box on
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/funm_files/fun_cos.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,14 @@
+function f = fun_cos(x,k)
+%FUN_COS  FUN_COS(X,K) is the K'th derivative of COS evaluated at X (default K = 0).
+
+if nargin < 2 | k == 0
+   f = cos(x);
+else
+   g = mod(ceil(k/2),2);
+   h = mod(k,2);
+   if h == 1
+      f = sin(x)*(-1)^g; 
+   else
+      f = cos(x)*(-1)^g; 
+   end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/funm_files/fun_exp.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,4 @@
+function f = fun_exp(x,k)
+%FUN_EXP  FUN_EXP(X,K) is the K'th derivative of EXP evaluated at X, namely EXP(X).
+
+f = exp(x);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/funm_files/fun_log.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,5 @@
+function f = fun_log(x)
+%FUN_LOG
+%         Only to be called for plain log evaluation.
+
+f = log(x);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/funm_files/fun_sin.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,15 @@
+function f = fun_sin(x,k)
+%FUN_SIN  FUN_SIN(X,K) is the K'th derivative of SIN evaluated at X (default K = 0).
+
+if nargin < 2 | k == 0
+   f = sin(x);
+else
+   k = k - 1;
+   g = mod(ceil(k/2),2);
+   h = mod(k,2);
+   if h == 1
+      f = sin(x)*(-1)^g; 
+   else
+      f = cos(x)*(-1)^g; 
+   end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/funm_files/funm_atom.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,78 @@
+function [F,n_terms] = funm_atom(T,fun,tol,prnt)
+%FUNM_ATOM  Function of triangular matrix with nearly constant diagonal.
+%           [F, N_TERMS] = FUNM_ATOM(T, FUN, TOL, PRNT) evaluates function
+%           FUN at the upper triangular matrix T, where T has nearly constant
+%           diagonal.  A Taylor series is used.
+%           FUN(X,K) must return the K'th derivative of
+%           the function represented by FUN evaluated at the vector X.
+%           TOL is a convergence tolerance for the Taylor series,
+%           defaulting to EPS.
+%           If PRNT ~= 0 trace information is printed.
+%           N_TERMS is the number of terms taken in the Taylor series.
+%           N_TERMS  = -1 signals lack of convergence.
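+%
+%           Example (illustrative sketch; FUN_EXP is the derivative-aware
+%           wrapper for EXP in this directory, and the test matrix is an
+%           arbitrary choice with nearly constant diagonal):
+%             T = triu(0.5*eye(4) + 0.01*randn(4));
+%             [F, n_terms] = funm_atom(T, @fun_exp);
+%             norm(F - expm(T))               % should be of order EPS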
+
+if nargin < 3 | isempty(tol), tol = eps; end
+if nargin < 4, prnt = 0; end
+
+if isequal(fun,@fun_log)   % LOG is special case.
+   [F,n_terms]  = logm_isst(T,prnt);
+   return
+end
+
+itmax = 500;
+
+n = length(T);
+if n == 1, F = feval(fun,T,0); n_terms = 1; return, end
+
+lambda = sum(diag(T))/n;
+F = eye(n)*feval(fun,lambda,0);
+f_deriv_max = zeros(itmax+n-1,1);
+N = T - lambda*eye(n);
+mu = norm( (eye(n)-abs(triu(T,1)))\ones(n,1),inf );
+
+P = N;
+max_d = 1;
+
+for k = 1:itmax
+
+    f = feval(fun,lambda,k);
+    F_old = F;
+    F = F + P*f;
+    rel_diff = norm(F - F_old,inf)/(tol+norm(F_old,inf));
+    if prnt
+        fprintf('%3.0f: coef = %5.0e', k, abs(f)/factorial(k));
+        fprintf('  N^k/k! = %7.1e', norm(P,inf));
+        fprintf('  rel_d = %5.0e',rel_diff);
+        fprintf('  abs_d = %5.0e',norm(F - F_old,inf));
+    end
+    P = P*N/(k+1);
+
+    if rel_diff <= tol
+
+      % Approximate the maximum of derivatives in convex set containing
+      % eigenvalues by maximum of derivatives at eigenvalues.
+      for j = max_d:k+n-1
+          f_deriv_max(j) = norm(feval(fun,diag(T),j),inf);
+      end
+      max_d = k+n;
+      omega = 0;
+      for j = 0:n-1
+          omega = max(omega,f_deriv_max(k+j)/factorial(j));
+      end
+
+      trunc = norm(P,inf)*mu*omega;  % norm(F) moved to RHS to avoid / 0.
+      if prnt
+          fprintf('  [trunc,test] = [%5.0e %5.0e]', ...
+                   trunc, tol*norm(F,inf))
+      end
+      if prnt == 5, trunc = 0; end % Force simple stopping test.
+      if trunc <= tol*norm(F,inf)
+         n_terms = k;
+         if prnt, fprintf('\n'), end, return
+      end
+    end
+
+    if prnt, fprintf('\n'), end
+
+end
+n_terms = -1;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/funm_files/logm_isst.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,80 @@
+function [X, iter] = logm_isst(T, prnt)
+%LOGM_ISST   Log of triangular matrix by Schur-Pade method with scaling.
+%         X = LOGM_ISST(A) computes the logarithm of an upper triangular
+%         matrix A, for a matrix with no nonpositive real eigenvalues,
+%         using the inverse scaling and squaring method with Pade
+%         approximation.
+%         [X, ITER] = LOGM_ISST(A, PRNT) returns the number ITER of square
+%         roots computed and prints this information if PRNT is nonzero.
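+%
+%         Example (illustrative sketch; the test matrix is an arbitrary
+%         triangular matrix with positive diagonal):
+%           T = triu(rand(5)) + eye(5);
+%           [X, iter] = logm_isst(T, 1);
+%           norm(expm(X) - T)                 % should be of order EPS*NORM(T)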
+
+% References:
+% S. H. Cheng, N. J. Higham, C. S. Kenney, and A. J. Laub, Approximating the
+%    logarithm of a matrix to specified accuracy, SIAM J. Matrix Anal. Appl.,
+%    22(4):1112-1125, 2001.
+% N. J. Higham, Evaluating Pade approximants of the matrix logarithm,
+%    SIAM J. Matrix Anal. Appl., 22(4):1126-1135, 2001.
+
+if nargin < 2, prnt = 0; end
+n = length(T);
+
+if any( imag(diag(T)) == 0 & real(diag(T)) <= 0 )
+   error('A must not have nonpositive real eigenvalues!')
+end
+
+if n == 1, X = log(T); iter = 0; return, end
+
+R = T;
+maxlogiter = 50;
+
+for iter = 0:maxlogiter
+
+    phi  = norm(T-eye(n),'fro');
+
+    if phi <= 0.25
+       if prnt, fprintf('LOGM_ISST computed %g square roots. \n', iter), end
+       break
+    end
+    if iter == maxlogiter, error('Too many square roots in LOGM_ISST.\n'), end
+
+    % Compute upper triangular square root R of T, a column at a time.
+    for j=1:n
+        R(j,j) = sqrt(T(j,j));
+        for i=j-1:-1:1
+            R(i,j) = (T(i,j) - R(i,i+1:j-1)*R(i+1:j-1,j))/(R(i,i) + R(j,j));
+        end
+    end
+    T = R;
+end
+
+X = 2^(iter)*logm_pf(T-eye(n),8);
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+function S = logm_pf(A,m)
+%LOGM_PF   Pade approximation to matrix log by partial fraction expansion.
+%          Y = LOGM_PF(A,m) approximates LOG(I+A).
+
+[nodes,wts] = gauss_legendre(m);
+% Convert from [-1,1] to [0,1].
+nodes = (nodes + 1)/2;
+wts = wts/2;
+
+n = length(A);
+S = zeros(n);
+
+for j=1:m
+    S = S + wts(j)*(A/(eye(n) + nodes(j)*A));
+end
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+function [x,w] = gauss_legendre(n)
+%GAUSS_LEGENDRE  Nodes and weights for Gauss-Legendre quadrature.
+
+% Reference:
+% G. H. Golub and J. H. Welsch, Calculation of Gauss quadrature
+% rules, Math. Comp., 23(106):221-230, 1969.
+
+i = 1:n-1;
+v = i./sqrt((2*i).^2-1);
+[V,D] = eig( diag(v,-1)+diag(v,1) );
+x = diag(D);
+w = 2*(V(1,:)'.^2);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/funm_files/myfunm.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,96 @@
+function [F,n_swaps,n_calls,terms,ind,T] = myfunm(A,fun,delta,tol,prnt,m)
+%MYFUNM  Evaluate general matrix function.
+%        F = FUNM(A,FUN), for a square matrix argument A, evaluates the
+%        function FUN at the square matrix A.
+%        FUN(X,K) must return the K'th derivative of
+%        the function represented by FUN evaluated at the vector X.
+%        The MATLAB functions COS, SIN, EXP, LOG can be passed as FUN,
+%        i.e. FUNM(A,@COS), FUNM(A,@SIN), FUNM(A,@EXP), FUNM(A,@LOG).
+%        For matrix square roots use SQRTM(A) instead.
+%        For matrix exponentials, either EXPM(A) or FUNM(A,@EXP)
+%        may be the faster or the more accurate, depending on A.
+%
+%        F = FUNM(A,FUN,DELTA,TOL,PRNT,M) specifies a tolerance
+%        DELTA used in determining the blocking (default 0.1),
+%        and a tolerance TOL used in a convergence test for evaluating the
+%        Taylor series (default EPS).
+%        If PRNT is nonzero then information describing the
+%        behaviour of the algorithm is printed.
+%        M, if supplied, defines a blocking.
+%
+%        [F,N_SWAPS,N_CALLS,TERMS,IND,T] = FUNM(A,FUN,...) also returns
+%        N_SWAPS:  the total number of swaps in the Schur re-ordering.
+%        N_CALLS:  the total number of calls to ZTREXC for the re-ordering.
+%        TERMS(I): the number of Taylor series terms used when evaluating
+%                  the I'th atomic triangular block.
+%        IND:      a cell array specifying the blocking: the (I,J) block of
+%                  the re-ordered Schur factor T is T(IND{I},IND{J}),
+%        T:        the re-ordered Schur form.
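+%
+%        Example (illustrative sketch; the eigenvalues 1:8 are well separated,
+%        so no Schur re-ordering, and hence no SWAP MEX call, is needed):
+%          A = triu(randn(8),1) + diag(1:8);  % upper triangular test matrix
+%          F = myfunm(A, @exp);
+%          norm(F - expm(A))                  % should be small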
+
+if isequal(fun,@cos) || isequal(fun,'cos'), fun = @fun_cos; end
+if isequal(fun,@sin) || isequal(fun,'sin'), fun = @fun_sin; end
+if isequal(fun,@exp) || isequal(fun,'exp'), fun = @fun_exp; end
+
+if nargin < 3 || isempty(delta), delta = 0.1; end
+if nargin < 4 || isempty(tol), tol = eps; end
+if nargin < 5 || isempty(prnt), prnt = 0;  end
+if nargin < 6, m = []; end
+
+n = length(A);
+
+% First form complex Schur form (if A not already upper triangular).
+if isequal(A,triu(A))
+   T = A; U = eye(n);
+else
+   [U,T] = schur(A,'complex');
+end
+
+if isequal(T,tril(T)) % Handle special case of diagonal T.
+   F = U*diag(feval(fun,diag(T)))*U';
+   n_swaps = 0; n_calls = 0; terms = 0; ind = {1:n};
+   return
+end
+
+% Determine reordering of Schur form into block form.
+if isempty(m), m = blocking(T,delta,abs(prnt)>=3); end
+
+if prnt, fprintf('delta (blocking) = %9.2e, tol (TS) = %9.2e\n', delta, tol),
+end
+
+[M,ind,n_swaps] = swapping(m);
+n_calls = size(M,1);
+if n_calls > 0            % If there are swaps to do...
+    [U,T] = swap(U,T,M);  % MEX file
+end
+
+m = length(ind);
+
+% Calculate F(T)
+F = zeros(n);
+
+for col=1:m
+   j = ind{col};
+   [F(j,j), n_terms] = funm_atom(T(j,j),fun,tol,abs(prnt)*(prnt ~= 1));
+   terms(col) = n_terms;
+
+   for row=col-1:-1:1
+      i = ind{row};
+      if length(i) == 1 && length(j) == 1
+         % Scalar case.
+         k = i+1:j-1;
+         temp = T(i,j)*(F(i,i) - F(j,j)) + F(i,k)*T(k,j) - T(i,k)*F(k,j);
+         F(i,j) = temp/(T(i,i)-T(j,j));
+      else
+         k = cat(2,ind{row+1:col-1});
+         rhs = F(i,i)*T(i,j) - T(i,j)*F(j,j) + F(i,k)*T(k,j) - T(i,k)*F(k,j);
+         F(i,j) = sylv_tri(T(i,i),-T(j,j),rhs);
+      end
+   end
+end
+
+F = U*F*U';
+
+% As in FUNM:
+if isreal(A) && norm(imag(F),1) <= 10*n*eps*norm(F,1)
+   F = real(F);
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/funm_files/swap.c	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,101 @@
+/*
+ *  C mex file for MATLAB that implements the LAPACK routines ztrexc_/dtrexc_ for
+ *  reordering the complex Schur decomposition A = UTU^* such that
+ *  T is in block form. This program combines Cswap and Crealswap
+ *  which deal with the complex and the real case respectively.
+ *
+ *  Input matrices 'U' and 'T' are those from the Schur decomposition
+ *
+ *  Called by [U,T] = swap(U,T,M)
+ *
+ *  The matrix M is produced using the m-files
+ *  >> m = blocking(T,delta);        where delta is a blocking tolerance
+ *                                   (default: delta = 0.1);
+ *  >> [M,ind,n_swaps] = swapping(m);
+ *
+ *  Output matrices 'U' and 'T' are the updated Schur decomposition
+ *
+ */
+
+#include "mex.h"
+#include "matrix.h"
+#include "fort.h"      /* defines mat2fort and fort2mat */
+
+void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
+{
+
+  /* compq=V then the matrix of Schur vectors is updated */
+
+  char *compq = "V", msg[80];
+  int n, ldt, ldq, i, info, ifst, ilst, lengthm;
+  double *t, *q, *m, *work;
+  mxArray  *mm;
+
+  /* expect 3 inputs and 2 outputs */
+  if ((nrhs != 3) || (nlhs != 2)){
+     mexErrMsgTxt("Expected 3 inputs and 2 outputs");
+  }
+
+  /* Sizes of stuff */
+
+  n = mxGetN( prhs[0] );
+  ldt = n;
+  ldq = n;
+  lengthm = mxGetM( prhs[2] );
+
+  /* Copy input and output matrices to stuff */
+
+  mm = mxDuplicateArray( prhs[2] );
+  m = mxGetPr(mm);
+
+  if (mxIsComplex( prhs[1] )) {
+
+      /* Convert complex input to complex FORTRAN format */
+
+      q = mat2fort( prhs[0], ldq, n );
+      t = mat2fort( prhs[1], ldt, n ); }
+
+  else {
+
+      plhs[0] = mxCreateDoubleMatrix(n,n,mxREAL);
+      plhs[1] = mxCreateDoubleMatrix(n,n,mxREAL);
+
+      plhs[1] = mxDuplicateArray( prhs[1] );
+      t = mxGetPr(plhs[1]);
+      plhs[0] = mxDuplicateArray( prhs[0] );
+      q = mxGetPr(plhs[0]);
+  }
+
+  /* Allocate workspace */
+
+  work = (double *)mxCalloc(n,sizeof(double));
+
+  /* Do the loop */
+
+  for ( i = 0; i < lengthm; i++) {
+    info = 0;
+    ifst = m[lengthm + i];
+    ilst = m[i];
+    if (mxIsComplex( prhs[1] )) {
+        ztrexc(compq,&n,t,&ldt,q,&ldq,&ifst,&ilst,&info); }
+    else {
+        dtrexc(compq,&n,t,&ldt,q,&ldq,&ifst,&ilst,work,&info);
+    }
+    if (info < 0){
+      sprintf(msg, "The LAPACK routine ZTREXC/DTREXC has detected an error");
+      mexErrMsgTxt(msg);
+    }
+  }
+
+  /* Convert output to MATLAB format */
+
+  if (mxIsComplex( prhs[1] )) {
+      plhs[0] = fort2mat( q, ldq, ldq, n );
+      plhs[1] = fort2mat( t, ldt, ldt, n );
+  }
+
+  /* Free up memory */
+
+  mxFree(work);
+}
+
Binary file funm_files/swap.mexglx has changed
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/funm_files/swapping.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,47 @@
+function [M,ind,n_swaps] = swapping(m)
+%SWAPPING  Confluent permutation by swapping adjacent elements.
+%          [ISWAP,IND,N_SWAPS] = SWAPPING(M) takes a vector M containing
+%          the integers 1:k (some repeated if K < LENGTH(M))
+%          and constructs a swapping scheme that produces
+%          a confluent permutation, with elements ordered by ascending
+%          average position. The confluent permutation is obtained by using
+%          the LAPACK routine ZTREXC to move m(ISWAP(i,2)) to m(ISWAP(i,1))
+%          by swapping adjacent elements, for i = 1:SIZE(M,1).
+%          The cell array vector IND defines the resulting block form:
+%          IND{i} contains the indices of the i'th block in the permuted form.
+%          N_SWAPS is the total number of swaps required.
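+%
+%          Example (illustrative sketch; the input vector is an arbitrary
+%          choice):
+%            m = [1 2 1 3 2 1];
+%            [M, ind, n_swaps] = swapping(m)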
+
+mmax = max(m); M = []; ind = {}; h = zeros(1,mmax);
+g = zeros(1,mmax);
+
+for i = 1:mmax
+    p = find(m==i);
+    h(i) = length(p);
+    g(i) = sum(p)/h(i);
+end
+
+[x,y] = sort(g);
+mdone = 1;
+
+for i = y
+    if any(m(mdone:mdone+h(i)-1) ~= i)
+        f = find(m==i); g = mdone:mdone+h(i)-1;
+        ff = f(f~=g); gg = g(f~=g);
+
+      % Create vector v = mdone:f(end) with all elements of f deleted.
+        v = mdone-1 + find(m(mdone:f(end)) ~= i);
+
+      %  v = zeros(1,f(end)-g(1)+1);
+      %  v(f-g(1)+1) = 1; v = g(1)-1 + find(v==0);
+
+        M(end+1:end+length(gg),:) = [gg' ff'];
+
+        m(g(end)+1:f(end)) = m(v);
+        m(g) = i*ones(1,h(i));
+        ind = cat(2,ind,{mdone:mdone+h(i)-1}); mdone = mdone + h(i);
+    else
+        ind = cat(2,ind,{mdone:mdone+h(i)-1}); mdone = mdone + h(i);
+    end
+end
+
+n_swaps = sum(abs(diff(M')));
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/funm_files/sylv_tri.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,13 @@
+function X = sylv_tri(T,U,B)
+%SYLV_TRI    Solves triangular Sylvester equation.
+%            X = SYLV_TRI(T,U,B) solves the Sylvester equation
+%            T*X + X*U = B, where T and U are square upper triangular matrices.
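+%
+%            Example (illustrative sketch; the shifted diagonal keeps the
+%            arbitrary test problem well conditioned):
+%              T = triu(randn(4)) + 4*eye(4);  U = triu(randn(3));
+%              B = randn(4,3);
+%              X = sylv_tri(T, U, B);
+%              norm(T*X + X*U - B)             % should be of order EPS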
+
+m = length(T);
+n = length(U);
+X = zeros(m,n);
+
+% Forward substitution.
+for i = 1:n
+    X(:,i) = (T + U(i,i)*eye(m)) \ (B(:,i) - X(:,1:i-1)*U(1:i-1,i));
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/Contents.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,67 @@
+% Matrix Computation Toolbox.
+% Version 1.2             5-Sep-2002
+% Copyright (c) 2002 by N. J. Higham
+%
+% Demonstration
+%   mctdemo - Demonstration of Matrix Computation Toolbox.
+%
+% Test Matrices
+%   augment   - Augmented system matrix.
+%   gfpp      - Matrix giving maximal growth factor for Gaussian elimination
+%               with partial pivoting.
+%   makejcf   - A matrix with specified Jordan canonical form.
+%   rschur    - An upper quasi-triangular matrix.
+%   vand      - Vandermonde matrix.
+%   vecperm   - Vec-permutation matrix.
+%
+% Visualization
+%   fv        - Field of values (or numerical range).
+%   gersh     - Gershgorin disks.
+%   ps        - Dot plot of a pseudospectrum.
+%   pscont    - Contours and colour pictures of pseudospectra.
+%   see       - Pictures of a matrix.
+%
+% Factorizations and Decompositions
+%   cholp     - Cholesky factorization with pivoting of a positive semidefinite
+%               matrix.
+%   cod       - Complete orthogonal decomposition.
+%   gep       - Gaussian elimination with pivoting: none, complete, partial or
+%               rook.
+%   gj        - Gauss-Jordan elimination with partial pivoting to solve Ax = b.
+%   gqr       - Generalized QR factorization.
+%   gs_c      - Classical Gram-Schmidt QR factorization.
+%   gs_m      - Modified Gram-Schmidt QR factorization.
+%   ldlt_skew - Block LDL^T factorization for a skew-symmetric matrix.
+%   ldlt_symm - Block LDL^T factorization for a symmetric indefinite matrix.
+%   ldlt_sytr - Block LDL^T factorization for a symmetric tridiagonal
+%               matrix.
+%   matsignt  - Matrix sign function of a triangular matrix.
+%   poldec    - Polar decomposition.
+%   signm     - Matrix sign decomposition.
+%   trap2tri  - Unitary reduction of trapezoidal matrix to triangular form.
+%
+% Direct Search Optimization
+%   adsmax    - Alternating directions method.
+%   mdsmax    - Multidirectional search method.
+%   nmsmax    - Nelder-Mead simplex method.
+%
+% Miscellaneous
+%   chop      - Round matrix elements.
+%   cpltaxes  - Determine suitable AXIS for plot of complex vector.
+%   dual      - Dual vector with respect to Holder p-norm.
+%   lse       - Solve the equality constrained least squares problem.
+%   matrix    - Matrix Computation Toolbox information and matrix access by
+%               number.
+%   pnorm     - Estimate of matrix p-norm (1 <= p <= inf).
+%   rootm     - P'th root of a matrix.
+%   seqcheb   - Sequence of points related to Chebyshev polynomials.
+%   seqm      - Multiplicative sequence.
+%   show      - Display signs of matrix elements.
+%   skewpart  - Skew-symmetric (skew-Hermitian) part.
+%   sparsify  - Randomly set matrix elements to zero.
+%   strassen  - Strassen's fast matrix multiplication algorithm.
+%   strassenw - Strassen's fast matrix multiplication algorithm (Winograd
+%               variant).
+%   sub       - Principal submatrix.
+%   symmpart  - Symmetric (Hermitian) part.
+%   treshape  - Reshape vector to or from (unit) triangular matrix.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/adsmax.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,141 @@
+function [x, fmax, nf] = adsmax(f, x, stopit, savit, P, varargin)
+%ADSMAX  Alternating directions method for direct search optimization.
+%        [x, fmax, nf] = ADSMAX(FUN, x0, STOPIT, SAVIT, P) attempts to
+%        maximize the function FUN, using the starting vector x0.
+%        The alternating directions direct search method is used.
+%        Output arguments:
+%               x    = vector yielding largest function value found,
+%               fmax = function value at x,
+%               nf   = number of function evaluations.
+%        The iteration is terminated when either
+%               - the relative increase in function value between successive
+%                 iterations is <= STOPIT(1) (default 1e-3),
+%               - STOPIT(2) function evaluations have been performed
+%                 (default inf, i.e., no limit), or
+%               - a function value equals or exceeds STOPIT(3)
+%                 (default inf, i.e., no test on function values).
+%        Progress of the iteration is not shown if STOPIT(5) = 0 (default 1).
+%        If a non-empty fourth parameter string SAVIT is present, then
+%        `SAVE SAVIT x fmax nf' is executed after each inner iteration.
+%        By default, the search directions are the co-ordinate directions.
+%        The columns of a fifth parameter matrix P specify alternative search
+%        directions (P = EYE is the default).
+%        NB: x0 can be a matrix.  In the output argument, in SAVIT saves,
+%            and in function calls, x has the same shape as x0.
+%        ADSMAX(fun, x0, STOPIT, SAVIT, P, P1, P2,...) allows additional
+%        arguments to be passed to fun, via feval(fun,x,P1,P2,...).
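+%
+%        Example (illustrative sketch; the objective function and starting
+%        point are arbitrary choices; the maximum value 0 is attained at
+%        x = [1;2;3]):
+%          f = @(x) -norm(x - [1;2;3]);
+%          [x, fmax, nf] = adsmax(f, zeros(3,1));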
+
+%     Reference:
+%     N. J. Higham, Optimization by direct search in matrix computations,
+%        SIAM J. Matrix Anal. Appl., 14(2):317-333, 1993.
+%     N. J. Higham, Accuracy and Stability of Numerical Algorithms,
+%        Second edition, Society for Industrial and Applied Mathematics,
+%        Philadelphia, PA, 2002; sec. 20.5.
+
+x0 = x(:);  % Work with column vector internally.
+n = length(x0);
+
+mu = 1e-4;  % Initial percentage change in components.
+nstep = 25; % Max number of times to double or decrease h.
+
+% Set up convergence parameters.
+if nargin < 3 | isempty(stopit), stopit(1) = 1e-3; end
+tol = stopit(1); % Required rel. increase in function value over one iteration.
+if length(stopit) == 1, stopit(2) = inf; end  % Max no. of f-evaluations.
+if length(stopit) == 2, stopit(3) = inf; end  % Default target for f-values.
+if length(stopit) <  5, stopit(5) = 1; end    % Default: show progress.
+trace  = stopit(5);
+if nargin < 4, savit = []; end                   % File name for snapshots.
+
+if nargin < 5 | isempty(P)
+   P = eye(n);             % Matrix of search directions.
+else
+   if ~isequal(size(P),[n n])  % Check for common error.
+      error('P must be of dimension the number of elements in x0.')
+   end
+end
+
+fmax = feval(f,x,varargin{:}); nf = 1;
+if trace, fprintf('f(x0) = %9.4e\n', fmax), end
+
+steps = zeros(n,1);
+it = 0; y = x0;
+
+while 1    % Outer loop.
+it = it+1;
+if trace, fprintf('Iter %2.0f  (nf = %2.0f)\n', it, nf), end
+fmax_old = fmax;
+
+for i=1:n  % Loop over search directions.
+
+    pi = P(:,i);
+    flast = fmax;
+    yi = y;
+    h = sign(pi'*yi)*norm(pi.*yi)*mu;   % Initial step size.
+    if h == 0, h = max(norm(yi,inf),1)*mu; end
+    y = yi + h*pi;
+    x(:) = y; fnew = feval(f,x,varargin{:}); nf = nf + 1;
+    if fnew > fmax
+       fmax = fnew;
+       if fmax >= stopit(3)
+           if trace
+              fprintf('Comp. = %2.0f,  steps = %2.0f,  f = %9.4e*\n', i,0,fmax)
+              fprintf('Exceeded target...quitting\n')
+           end
+           x(:) = y; return
+       end
+       h = 2*h; lim = nstep; k = 1;
+    else
+       h = -h; lim = nstep+1; k = 0;
+    end
+
+    for j=1:lim
+        y = yi + h*pi;
+        x(:) = y; fnew = feval(f,x,varargin{:}); nf = nf + 1;
+        if fnew <= fmax, break, end
+        fmax = fnew; k = k + 1;
+        if fmax >= stopit(3)
+           if trace
+              fprintf('Comp. = %2.0f,  steps = %2.0f,  f = %9.4e*\n', i,j,fmax)
+              fprintf('Exceeded target...quitting\n')
+           end
+           x(:) = y; return
+        end
+        h = 2*h;
+   end
+
+   steps(i) = k;
+   y = yi + 0.5*h*pi;
+   if k == 0, y = yi; end
+
+   if trace
+      fprintf('Comp. = %2.0f,  steps = %2.0f,  f = %9.4e', i, k, fmax)
+      fprintf('  (%2.1f%%)\n', 100*(fmax-flast)/(abs(flast)+eps))
+   end
+
+
+   if nf >= stopit(2)
+      if trace
+         fprintf('Max no. of function evaluations exceeded...quitting\n')
+      end
+      x(:) = y; return
+   end
+
+   if fmax > flast & ~isempty(savit)
+      x(:) = y;
+      eval(['save ' savit ' x fmax nf'])
+   end
+
+end  % Loop over search directions.
+
+if isequal(steps,zeros(n,1))
+   if trace, fprintf('Stagnated...quitting\n'), end
+   x(:) = y; return
+end
+
+if fmax-fmax_old <= tol*abs(fmax_old)
+   if trace, fprintf('Function values ''converged''...quitting\n'), end
+   x(:) = y; return
+end
+
+end %%%%%% Of outer loop.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/augment.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,40 @@
+function C = augment(A, alpha)
+%AUGMENT  Augmented system matrix.
+%         AUGMENT(A, ALPHA) is the square matrix
+%         [ALPHA*EYE(m) A; A' ZEROS(n)] of dimension m+n, where A is m-by-n.
+%         It is the symmetric and indefinite coefficient matrix of the
+%         augmented system associated with a least squares problem
+%         minimize NORM(A*x-b).  ALPHA defaults to 1.
+%         Special case: if A is a scalar, n say, then AUGMENT(A) is the
+%                       same as AUGMENT(RANDN(p,q)) where n = p+q and
+%                       p = ROUND(n/2), that is, a random augmented matrix
+%                       of dimension n is produced.
+%         The eigenvalues of AUGMENT(A,ALPHA) are given in terms of the
+%         singular values s(i) of A (where m>n) by
+%           ALPHA/2 +/- SQRT( s(i)^2 + ALPHA^2/4 ),  i=1:n  (2n eigenvalues),
+%           ALPHA,  (m-n eigenvalues).
+%         If m < n then the first expression provides 2m eigenvalues and the
+%         remaining n-m eigenvalues are zero.
+%
+%         See also SPAUGMENT.
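+%
+%         Example (illustrative sketch; the test matrix is an arbitrary
+%         choice):
+%           A = randn(5,3);
+%           C = augment(A, 2);    % 8-by-8 symmetric indefinite matrix
+%           eig(C)                % includes ALPHA = 2 with multiplicity m-n = 2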
+
+%         References:
+%         G. H. Golub and C. F. Van Loan, Matrix Computations, third
+%            Edition, Johns Hopkins University Press, Baltimore, Maryland,
+%            1996; sec. 5.6.4.
+%         N. J. Higham, Accuracy and Stability of Numerical Algorithms,
+%            Second edition, Society for Industrial and Applied Mathematics,
+%            Philadelphia, PA, 2002; sec. 20.5.
+
+[m, n] = size(A);
+if nargin < 2, alpha = 1; end
+
+if max(m,n) == 1
+   n = A;
+   p = round(n/2);
+   q = n - p;
+   A = randn(p,q);
+   m = p; n = q;
+end
+
+C = [alpha*eye(m) A; A' zeros(n)];
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/cholp.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,78 @@
+function [R, P, I] = cholp(A, piv)
+%CHOLP  Cholesky factorization with pivoting of a positive semidefinite matrix.
+%       [R, P] = CHOLP(A) returns an upper triangular matrix R and a
+%       permutation matrix P such that R'*R = P'*A*P.  Only the upper
+%       triangular part of A is used. If A is not positive semidefinite,
+%       an error message is printed.
+%
+%       [R, P, I] = CHOLP(A) never produces an error message.
+%       If A is positive semidefinite then I = 0 and R is the Cholesky factor.
+%       If A is not positive semidefinite then I is positive and
+%       R is (I-1)-by-N with P'*A*P - R'*R zero in columns 1:I-1 and
+%       rows 1:I-1.
+%       [R, I] = CHOLP(A, 0) forces P = EYE(SIZE(A)), and therefore behaves
+%       like [R, I] = CHOL(A).
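+%
+%       Example (illustrative sketch; HILB(5) is just a convenient symmetric
+%       positive definite test matrix):
+%         A = hilb(5);
+%         [R, P] = cholp(A);
+%         norm(P'*A*P - R'*R)     % should be of order EPS*NORM(A)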
+
+%       This routine is based on the LINPACK routine CCHDC.  It works
+%       for both real and complex matrices.
+%
+%       Reference:
+%       N. J. Higham, Accuracy and Stability of Numerical Algorithms,
+%       Second edition, Society for Industrial and Applied Mathematics,
+%       Philadelphia, PA, 2002; sec. 10.3.
+
+if nargin == 1, piv = 1; end
+
+n = length(A);
+pp = 1:n;
+I = 0;
+
+for k = 1:n
+
+    if piv
+       d = diag(A);
+       [big, m] = max( d(k:n) );
+       m = m+k-1;
+    else
+       big = A(k,k);  m = k;
+    end
+    if big < 0, I = k; break, end
+
+%   Symmetric row/column permutations.
+    if m ~= k
+       A(:, [k m]) = A(:, [m k]);
+       A([k m], :) = A([m k], :);
+       pp( [k m] ) = pp( [m k] );
+    end
+
+    if big == 0
+      if norm(A(k+1:n,k)) ~= 0
+         I = k; break
+      else
+         continue
+      end
+    end
+
+    A(k,k) = sqrt( A(k,k) );
+    if k == n, break, end
+    A(k, k+1:n) = A(k, k+1:n) / A(k,k);
+
+%   For simplicity update the whole of the remaining submatrix (rather
+%   than just the upper triangle).
+
+    j = k+1:n;
+    A(j,j) = A(j,j) - A(k,j)'*A(k,j);
+
+end
+
+R = triu(A);
+if I > 0
+    if nargout < 3, error('Matrix must be positive semidefinite.'), end
+    R = R(1:I-1,:);
+end
+
+if piv == 0
+   P = I;
+else
+   P = eye(n); P = P(:,pp);
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/chop.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,18 @@
+function c = chop(x, t)
+%CHOP    Round matrix elements.
+%        CHOP(X, t) is the matrix obtained by rounding the elements of X
+%        to t significant binary places.
+%        Default is t = 24, corresponding to IEEE single precision.
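+%
+%        Example (illustrative sketch):
+%          c = chop(pi, 10)       % PI rounded to 10 significant bits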
+
+if nargin < 2, t = 24; end
+[m, n] = size(x);
+
+%  Use the representation:
+%  x(i,j) = 2^e(i,j) * .d(1)d(2)...d(s) * sign(x(i,j))
+
+%  On the next line `+(x==0)' avoids passing a zero argument to LOG, which
+%  would cause a warning message to be generated.
+
+y = abs(x) + (x==0);
+e = floor(log2(y) + 1);
+c = pow2(round( pow2(x, t-e) ), e-t);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/cod.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,38 @@
+function [U, R, V] = cod(A, tol)
+%COD    Complete orthogonal decomposition.
+%       [U, R, V] = COD(A, TOL) computes a decomposition A = U*T*V,
+%       where U and V are unitary, T = [R 0; 0 0] has the same dimensions as
+%       A, and R is upper triangular and nonsingular of dimension rank(A).
+%       Rank decisions are made using TOL, which defaults to approximately
+%       LENGTH(A)*NORM(A)*EPS.
+%       By itself, COD(A, TOL) returns R.
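+%
+%       Example (illustrative sketch; the product of random factors gives an
+%       arbitrary 6-by-5 matrix of rank 3):
+%         A = randn(6,3)*randn(3,5);
+%         [U, R, V] = cod(A);
+%         [r, c] = size(R);
+%         T = zeros(size(A));  T(1:r,1:c) = R;
+%         norm(A - U*T*V)         % should be of order EPS*NORM(A)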
+
+%       Reference:
+%       G. H. Golub and C. F. Van Loan, Matrix Computations, third
+%       edition, Johns Hopkins University Press, Baltimore, Maryland,
+%       1996; sec. 5.4.2.
+
+[m, n] = size(A);
+
+% QR decomposition.
+[U, R, P] = qr(A);    % AP = UR
+V = P';               % A = URV;
+if nargin == 1, tol = max(m,n)*eps*abs(R(1,1)); end  % |R(1,1)| approx NORM(A).
+
+% Determine r = effective rank.
+r = sum(abs(diag(R)) > tol);
+r = r(1);             % Fix for case where R is vector.
+R = R(1:r,:);         % Throw away negligible rows (incl. all zero rows, m>n).
+
+if r ~= n
+
+   % Reduce nxr R' =  r  [L]  to lower triangular form: QR' = [Lbar].
+   %                 n-r [M]                                  [0]
+
+   [Q, R] = trap2tri(R');
+   V = Q*V;
+   R = R';
+
+end
+
+if nargout <= 1, U = R; end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/cpltaxes.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,37 @@
+function x = cpltaxes(z)
+%CPLTAXES   Determine suitable AXIS for plot of complex vector.
+%           X = CPLTAXES(Z), where Z is a complex vector,
+%           determines a 4-vector X such that AXIS(X) sets axes for a plot
+%           of Z that has axes of equal length and leaves a reasonable amount
+%           of space around the edge of the plot.
+
+%           Called by FV, GERSH, PS and PSCONT.
+
+% Set x and y axis ranges so both have the same length.
+
+xmin = min(real(z)); xmax = max(real(z));
+ymin = min(imag(z)); ymax = max(imag(z));
+
+% Fix for rare case of `trivial data'.
+if xmin == xmax, xmin = xmin - 1/2; xmax = xmax + 1/2; end
+if ymin == ymax, ymin = ymin - 1/2; ymax = ymax + 1/2; end
+
+if xmax-xmin >= ymax-ymin
+   ymid = (ymin + ymax)/2;
+   ymin =  ymid - (xmax-xmin)/2; ymax = ymid + (xmax-xmin)/2;
+else
+   xmid = (xmin + xmax)/2;
+   xmin = xmid - (ymax-ymin)/2; xmax = xmid + (ymax-ymin)/2;
+end
+axis('square')
+
+% Scale ranges by 1+2*alpha to give extra space around edges of plot.
+
+alpha = 0.1;
+x(1) = xmin - alpha*(xmax-xmin);
+x(2) = xmax + alpha*(xmax-xmin);
+x(3) = ymin - alpha*(ymax-ymin);
+x(4) = ymax + alpha*(ymax-ymin);
+
+if x(1) == x(2), x(2) = x(2) + 0.1; end
+if x(3) == x(4), x(4) = x(3) + 0.1; end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/dual.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,46 @@
+function y = dual(x, p)
+%DUAL    Dual vector with respect to Holder p-norm.
+%        Y = DUAL(X, p), where 1 <= p <= inf, is a vector of unit q-norm
+%        that is dual to X with respect to the p-norm, that is,
+%        norm(Y, q) = 1 where 1/p + 1/q = 1 and there is
+%        equality in the Holder inequality: X'*Y = norm(X, p)*norm(Y, q).
+%        Special case: DUAL(X), where X >= 1 is a scalar, returns Y such
+%                      that 1/X + 1/Y = 1.
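+%
+%        Example (illustrative sketch; for p = 2 the dual vector is X/NORM(X)):
+%          x = [3; -4];
+%          y = dual(x, 2);
+%          [norm(y,2),  x'*y - norm(x,2)]     % expect [1 0]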
+
+%        Called by PNORM.
+
+warns = warning;
+warning('off')
+
+if nargin == 1
+   if length(x) == 1
+       y = 1/(1-1/x);
+       return
+   else
+       error('Second argument missing.')
+   end
+end
+
+q = 1/(1-1/p);
+
+if norm(x,inf) == 0, y = x; return, end
+
+if p == 1
+
+   y = sign(x) + (x == 0);   % y(i) = +1 or -1 (if x(i) real).
+
+elseif p == inf
+
+   [xmax, k] = max(abs(x));
+   f = find(abs(x)==xmax); k = f(1);
+   y = zeros(size(x));
+   y(k) = sign(x(k));        % y is a multiple of unit vector e_k.
+
+else  % 1 < p < inf.  Dual is unique in this case.
+
+  x = x/norm(x,inf);         % This scaling helps to avoid under/over-flow.
+  y = abs(x).^(p-1) .* ( sign(x) + (x==0) );
+  y = y / norm(y,q);         % Normalize to unit q-norm.
+
+end
+warning(warns)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/fv.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,101 @@
+function [f, e] = fv(B, nk, thmax, noplot)
+%FV     Field of values (or numerical range).
+%       FV(A, NK, THMAX) evaluates and plots the field of values of the
+%       NK largest leading principal submatrices of A, using THMAX
+%       equally spaced angles in the complex plane.
+%       The defaults are NK = 1 and THMAX = 16.
+%       (For a `publication quality' picture, set THMAX higher, say 32.)
+%       The eigenvalues of A are displayed as `x'.
+%       Alternative usage: [F, E] = FV(A, NK, THMAX, 1) suppresses the
+%       plot and returns the field of values plot data in F, with A's
+%       eigenvalues in E.   Note that NORM(F,INF) approximates the
+%       numerical radius,
+%                 max {abs(z): z is in the field of values of A}.
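+%
+%       Example (illustrative sketch; GALLERY('GRCAR',10) is just a convenient
+%       non-normal test matrix):
+%         fv(gallery('grcar',10))             % plot the field of values
+%         [F, E] = fv(gallery('grcar',10), 1, 32, 1);
+%         num_rad = norm(F, inf)              % approximate numerical radius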
+
+%       Theory:
+%       Field of values FV(A) = set of all Rayleigh quotients. FV(A) is a
+%       convex set containing the eigenvalues of A.  When A is normal FV(A) is
+%       the convex hull of the eigenvalues of A (but not vice versa).
+%               z = x'Ax/(x'x),  z' = x'A'x/(x'x)
+%               => REAL(z) = x'Hx/(x'x),   H = (A+A')/2
+%       so      MIN(EIG(H)) <= REAL(z) <= MAX(EIG(H)),
+%       with equality for x = corresponding eigenvectors of H.  For these x,
+%       RQ(A,x) is on the boundary of FV(A).
+%
+%       Based on an original routine by A. Ruhe.
+%
+%       References:
+%       R. A. Horn and C. R. Johnson, Topics in Matrix Analysis, Cambridge
+%            University Press, 1991; sec. 1.5.
+%       A. S. Householder, The Theory of Matrices in Numerical Analysis,
+%            Blaisdell, New York, 1964; sec. 3.3.
+%       C. R. Johnson, Numerical determination of the field of values of a
+%            general complex matrix, SIAM J. Numer. Anal., 15 (1978),
+%            pp. 595-602.
+
+if nargin < 2 | isempty(nk), nk = 1; end
+if nargin < 3 | isempty(thmax), thmax = 16; end
+thmax = thmax - 1;  % Because code below uses thmax + 1 angles.
+
+iu = sqrt(-1);
+[n, p] = size(B);
+if n ~= p, error('Matrix must be square.'), end
+f = [];
+z = zeros(2*thmax+1,1);
+e = eig(B);
+
+% Filter out cases where B is Hermitian or skew-Hermitian, for efficiency.
+if isequal(B,B')
+
+   f = [min(e) max(e)];
+
+elseif isequal(B,-B')
+
+   e = imag(e);
+   f = [min(e) max(e)];
+   e = iu*e; f = iu*f;
+
+else
+
+for m = 1:nk
+
+   ns = n+1-m;
+   A = B(1:ns, 1:ns);
+
+   for i = 0:thmax
+      th = i/thmax*pi;
+      Ath = exp(iu*th)*A;               % Rotate A through angle th.
+      H = 0.5*(Ath + Ath');             % Hermitian part of rotated A.
+      [X, D] = eig(H);
+      [lmbh, k] = sort(real(diag(D)));
+      z(1+i) = rq(A,X(:,k(1)));         % RQ's of A corr. to eigenvalues of H
+      z(1+i+thmax) = rq(A,X(:,k(ns)));  % with smallest/largest real part.
+   end
+
+   f = [f; z];
+
+end
+% Next line ensures boundary is `joined up' (needed for orthogonal matrices).
+f = [f; f(1,:)];
+
+end
+if thmax == 0, f = e; end
+
+if nargin < 4
+
+   ax = cpltaxes(f);
+   plot(real(f), imag(f))      % Plot the field of values
+   axis(ax);
+   axis('square');
+
+   hold on
+   plot(real(e), imag(e), 'x')    % Plot the eigenvalues too.
+   hold off
+
+end
+
+function z = rq(A,x)
+%RQ      Rayleigh quotient.
+%        RQ(A,x) is the Rayleigh quotient of A and x, x'*A*x/(x'*x).
+
+z = x'*A*x/(x'*x);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/gep.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,119 @@
+function [L, U, P, Q, rho, ncomp] = gep(A, piv)
+%GEP    Gaussian elimination with pivoting: none, complete, partial or rook.
+%       [L, U, P, Q, RHO] = GEP(A, piv) computes the factorization P*A*Q = L*U
+%       of the m-by-n matrix A (m >= n),
+%       where L is m-by-n unit lower triangular, U is n-by-n upper triangular,
+%       and P and Q are permutation matrices.  RHO is the growth factor.
+%       PIV controls the pivoting strategy:
+%          PIV = 'c': complete pivoting,
+%          PIV = 'p': partial pivoting,
+%          PIV = 'r': rook pivoting.
+%       The default is no pivoting (PIV = '').
+%       For PIV = 'r' only, NCOMP is the total number of comparisons.
+%
+%       By itself, GEP(A) returns the final reduced matrix from the
+%       elimination containing both L and U.
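+%
+%       Example (illustrative sketch; the test matrix is an arbitrary choice):
+%         A = gallery('randsvd', 6, 1e6);
+%         [L, U, P, Q, rho] = gep(A, 'p');    % partial pivoting
+%         norm(P*A*Q - L*U, 1)                % should be of order EPS*NORM(A)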
+
+%       Reference:
+%       N. J. Higham, Accuracy and Stability of Numerical Algorithms,
+%       Second edition, Society for Industrial and Applied Mathematics,
+%       Philadelphia, PA, 2002; chap. 9.
+
+[m, n] = size(A);
+if m < n, error('Matrix must be m-by-n with m >= n.'), end
+if nargin < 2, piv = ''; end
+pp = 1:m; qq = 1:n;
+if nargout >= 5
+   maxA = norm(A(:), inf);
+   rho = maxA;
+end
+ncomp = 0;
+
+for k = 1:min(m-1,n)
+
+    if findstr(piv, 'cpr')
+       if strcmp(piv, 'c')
+
+          % Find largest element in remaining square submatrix.
+          % Note: when tie for max, no guarantee which element is chosen.
+          [colmaxima, rowindices] = max( abs(A(k:m, k:n)) );
+          [biggest, colindex] = max(colmaxima);
+          row = rowindices(colindex)+k-1; col = colindex+k-1;
+
+       elseif strcmp(piv, 'p')
+
+          % Find largest element in k'th column.
+          [colmaxima, rowindices] = max( abs(A(k:m, k)) );
+          row = rowindices(1)+k-1; col = k;
+
+       elseif strcmp(piv, 'r')
+
+          % Find element that is largest in its row and its column.
+          col_last = k;
+          it = 0;
+          while 1   % Iterate until an entry maximal in its row and column is found.
+            it = it + 1;
+            [colmaxima, rowindices] = max( abs(A(k:m, col_last)) );
+            ncomp = ncomp + m-k;
+            row = rowindices(1)+k-1;
+            new_abs = abs(A(row,col_last));
+            if it > 1
+               if new_abs == last_abs
+                  row = row_last;
+                  break
+               end
+            end
+            last_abs = new_abs;
+            row_last = row;
+            [rowmaxima, colindices] = max( abs(A(row, k:n)) );
+            ncomp = ncomp + n-k;
+            col = colindices(1)+k-1;
+            new_abs = abs(A(row,col));
+            if new_abs == last_abs
+               col = col_last;
+               break
+            end
+            last_abs = new_abs;
+            col_last = col;
+          end
+
+       end
+
+       % Permute largest element into pivot position.
+       A( [k, row], : ) = A( [row, k], : );
+       A( :, [k, col] ) = A( :, [col, k] );
+       pp( [k, row] ) = pp( [row, k] ); qq( [k, col] ) = qq( [col, k] );
+    end
+
+    if A(k,k) == 0
+      if findstr(piv, 'c')
+         break
+      elseif strcmp(piv, '') % Zero pivot is problem only for no pivoting.
+         error('Elimination breaks down with zero pivot.  Quitting...')
+      end
+    end
+
+    i = k+1:m;
+    if A(k,k) ~= 0  % Simplest way to handle zero pivot for partial and rook.
+       A(i,k) = A(i,k)/A(k,k);          % Multipliers.
+    end
+
+    if k+1 <= n
+       % Elimination
+       j = k+1:n;
+       A(i,j) = A(i,j) - A(i,k) * A(k,j);
+       if nargout >= 5, rho = max( rho, max(max(abs(A(i,j)))) ); end
+    end
+
+end
+
+if nargout <= 1
+   L = A;
+   return
+end
+
+L = tril(A,-1) + eye(m,n);
+U = triu(A);
+U = U(1:n,:);
+
+if nargout >= 3, P = eye(m); P = P(pp,:); end
+if nargout >= 4, Q = eye(n); Q = Q(:,qq); end
+if nargout >= 5, rho = rho/maxA; end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/gersh.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,38 @@
+function  [G, e] = gersh(A, noplot)
+%GERSH    Gershgorin disks.
+%         GERSH(A) draws the Gershgorin disks for the square matrix A.
+%         The eigenvalues are plotted as crosses `x'.
+%         Alternative usage: [G, E] = GERSH(A, 1) suppresses the plot
+%         and returns the data in G, with A's eigenvalues in E.
+%
+%         Try GERSH(GALLERY('LESP',N)) and GERSH(GALLERY('SMOKE',N)).
+
+if diff(size(A)), error('Matrix must be square.'), end
+
+n = length(A);
+m = 40;
+G = zeros(m,n);
+
+d = diag(A);
+r = sum( abs( A-diag(d) )' )';
+e = eig(A);
+
+radvec = exp(i * linspace(0,2*pi,m)');
+
+for j=1:n
+    G(:,j) = d(j)*ones(m,1) + r(j)*radvec;
+end
+
+if nargin < 2
+
+   ax = cpltaxes(G(:));
+   for j=1:n
+       plot(real(G(:,j)), imag(G(:,j)),'-')      % Plot the disks.
+       hold on
+   end
+   plot(real(e), imag(e), 'x')    % Plot the eigenvalues too.
+   axis(ax)
+   axis('square')
+   hold off
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/gfpp.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,45 @@
+function A = gfpp(T, c)
+%GFPP   Matrix giving maximal growth factor for Gaussian elim. with pivoting.
+%       GFPP(T) is a matrix of order N for which Gaussian elimination
+%       with partial pivoting yields a growth factor 2^(N-1).
+%       T is an arbitrary nonsingular upper triangular matrix of order N-1.
+%       GFPP(T, C) sets all the multipliers to C  (0 <= C <= 1)
+%       and gives growth factor (1+C)^(N-1) - but note that for T ~= EYE
+%       it is advisable to set C < 1, else rounding errors may cause
+%       computed growth factors smaller than expected.
+%       GFPP(N, C) (a special case) is the same as GFPP(EYE(N-1), C) and
+%       generates the well-known example of Wilkinson.
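+%
+%       Example (illustrative sketch):
+%         A = gfpp(8);                    % Wilkinson-type matrix of order 8
+%         [L, U, P] = lu(A);              % LU with partial pivoting
+%         max(abs(U(:)))/max(abs(A(:)))   % growth factor; should be 2^7 = 128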
+
+%       Reference:
+%       N. J. Higham and D. J. Higham, Large growth factors in
+%          Gaussian elimination with pivoting, SIAM J. Matrix Analysis and
+%          Appl., 10 (1989), pp. 155-164.
+%       N. J. Higham, Accuracy and Stability of Numerical Algorithms,
+%         Second edition, Society for Industrial and Applied Mathematics,
+%         Philadelphia, PA, 2002; sec. 9.4.
+
+if ~isequal(T,triu(T)) | any(~diag(T))
+   error('First argument must be a nonsingular upper triangular matrix.')
+end
+
+if nargin == 1, c = 1; end
+
+if c < 0 | c > 1
+   error('Second parameter must be a scalar between 0 and 1 inclusive.')
+end
+
+m = length(T);
+if m == 1    % Handle the special case T = scalar
+   n = T;
+   m = n-1;
+   T = eye(n-1);
+else
+   n = m+1;
+end
+
+A = zeros(n);
+L = eye(n) - c*tril(ones(n), -1);
+A(:,1:n-1) = L*[T; zeros(1,n-1)];
+theta = max(abs(A(:)));
+A(:,n) = theta * ones(n,1);
+A = A/theta;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/gj.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,36 @@
+function x = gj(A, b, piv)
+%GJ        Gauss-Jordan elimination to solve Ax = b.
+%          x = GJ(A, b, PIV) solves Ax = b by Gauss-Jordan elimination,
+%          where A is a square, nonsingular matrix.
+%          PIV determines the form of pivoting:
+%              PIV = 0:           no pivoting,
+%              PIV = 1 (default): partial pivoting.
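+%
+%          Example (illustrative sketch; the system is an arbitrary choice):
+%            A = rand(6);  b = rand(6,1);
+%            x = gj(A, b);
+%            norm(A*x - b)        % should be of order EPS*NORM(A)*NORM(x)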
+
+%          Reference:
+%          N. J. Higham, Accuracy and Stability of Numerical Algorithms,
+%          Second edition, Society for Industrial and Applied Mathematics,
+%          Philadelphia, PA, 2002; sec. 14.4.
+
+n = length(A);
+if nargin < 3, piv = 1; end
+
+for k=1:n
+    if piv
+       % Partial pivoting (below the diagonal).
+       [colmax, i] = max( abs(A(k:n, k)) );
+       i = k+i-1;
+       if i ~= k
+          A( [k, i], : ) = A( [i, k], : );
+          b( [k, i] ) = b( [i, k] );
+       end
+    end
+
+    irange = [1:k-1 k+1:n];
+    jrange = k:n;
+    mult = A(irange,k)/A(k,k); % Multipliers.
+    A(irange, jrange) =  A(irange, jrange) - mult*A(k, jrange);
+    b(irange) =  b(irange) - mult*b(k);
+
+end
+
+x = diag(diag(A))\b;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/gqr.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,55 @@
+function [U, Q, L, S] = gqr(A, B, partial)
+%GQR     Generalized QR factorization.
+%        [U, Q, L, S] = GQR(A, B, partial) factorizes
+%        the m-by-n A and p-by-n B, where m >= n >= p, as
+%        A = U*L*Q^T, B = S*Q^T, with U and Q orthogonal
+%        and L = [0; L1], S = [S1 0], with L1 and S1 lower triangular.
+%        If a nonzero third argument is present then only a partial reduction
+%        of A is performed: the first p columns of A are not reduced to
+%        triangular form (which is sufficient for solving the LSE problem).
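+%
+%        Example (illustrative sketch; the test matrices are arbitrary choices
+%        with m >= n >= p):
+%          A = randn(7,5);  B = randn(3,5);
+%          [U, Q, L, S] = gqr(A, B);
+%          norm(A - U*L*Q'),  norm(B - S*Q')   % both should be of order EPS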
+
+%        Reference:
+%        N. J. Higham, Accuracy and Stability of Numerical Algorithms,
+%        Second edition, Society for Industrial and Applied Mathematics,
+%        Philadelphia, PA, 2002; sec. 20.9.
+
+[m n]  = size(A);
+[p n1] = size(B);
+if nargin < 3, partial = 0; end
+
+if n ~= n1, error('A and B must have same number of columns!'), end
+
+if partial
+   limit = p+1;
+else
+   limit = 1;
+end
+
+[Q, S] = qr(B');
+S = S';
+
+U = eye(m);
+A = A*Q;
+
+% QL factorization of AQ.
+
+for i = n:-1:limit
+
+    % Vector-reversal so that Householder eliminates leading
+    % rather than trailing elements.
+    temp = A(1:m-n+i,i); temp = temp(end:-1:1);
+    [v, beta] = gallery('house',temp);
+    v = v(end:-1:1);
+
+    temp = A(1:m-n+i,1:i);
+    A(1:m-n+i,1:i) = temp - beta*v*(v'*temp);
+
+    % Put zeros where they're supposed to be!
+    A(1:m-n+i-1,i) = zeros(m-n+i-1,1);
+
+    temp = U(:,1:m-n+i);
+    U(:,1:m-n+i) = temp - beta*temp*v*v';
+
+end
+
+L = A;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/gs_c.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,21 @@
+function [Q, R] = gs_c(A)
+%GS_C    Classical Gram-Schmidt QR factorization.
+%        [Q, R] = GS_C(A) uses the classical Gram-Schmidt method to compute the
+%        factorization A = Q*R for m-by-n A of full rank,
+%        where Q is m-by-n with orthonormal columns and R is n-by-n.
+
+%        Reference:
+%        N. J. Higham, Accuracy and Stability of Numerical Algorithms,
+%        Second edition, Society for Industrial and Applied Mathematics,
+%        Philadelphia, PA, 2002; sec 19.8.
+
+[m, n] = size(A);
+Q = zeros(m,n);
+R = zeros(n);
+
+for j=1:n
+    R(1:j-1,j) = Q(:,1:j-1)'*A(:,j);
+    temp = A(:,j) - Q(:,1:j-1)*R(1:j-1,j);
+    R(j,j) = norm(temp);
+    Q(:,j) = temp/R(j,j);
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/gs_m.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,21 @@
+function [Q, R] = gs_m(A)
+%GS_M    Modified Gram-Schmidt QR factorization.
+%        [Q, R] = GS_M(A) uses the modified Gram-Schmidt method to compute the
+%        factorization A = Q*R for m-by-n A of full rank,
+%        where Q is m-by-n with orthonormal columns and R is n-by-n.
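+%
+%        Example (illustrative sketch; the test matrix is an arbitrary choice):
+%          A = randn(6,4);
+%          [Q, R] = gs_m(A);
+%          norm(A - Q*R),  norm(Q'*Q - eye(4))   % both should be small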
+
+%        Reference:
+%        N. J. Higham, Accuracy and Stability of Numerical Algorithms,
+%        Second edition, Society for Industrial and Applied Mathematics,
+%        Philadelphia, PA, 2002; sec 19.8.
+
+[m, n] = size(A);
+Q = zeros(m,n);
+R = zeros(n);
+
+for k=1:n
+    R(k,k) = norm(A(:,k));
+    Q(:,k) = A(:,k)/R(k,k);
+    R(k,k+1:n) = Q(:,k)'*A(:,k+1:n);
+    A(:,k+1:n) = A(:,k+1:n) - Q(:,k)*R(k,k+1:n);
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/ldlt_skew.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,83 @@
+function [L, D, P, rho] = ldlt_skew(A)
+%LDLT_SKEW  Block LDL^T factorization for a skew-symmetric matrix.
+%           Given a real, skew-symmetric A,
+%           [L, D, P, RHO] = LDLT_SKEW(A) computes a permutation P,
+%           a unit lower triangular L, and a block diagonal D
+%           with 1x1 and 2x2 diagonal blocks, such that P*A*P' = L*D*L'.
+%           A partial pivoting strategy of Bunch is used.
+%           RHO is the growth factor.
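+%
+%           Example (illustrative sketch; the test matrix is an arbitrary
+%           real skew-symmetric matrix):
+%             B = randn(6);  A = B - B';
+%             [L, D, P, rho] = ldlt_skew(A);
+%             norm(P*A*P' - L*D*L', 1)    % should be of order EPS*NORM(A)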
+
+%           Reference:
+%           J. R. Bunch, A note on the stable decomposition of skew-symmetric
+%              matrices. Math. Comp., 38(158):475-479, 1982.
+%           N. J. Higham, Accuracy and Stability of Numerical Algorithms,
+%              Second edition, Society for Industrial and Applied Mathematics,
+%              Philadelphia, PA, 2002; chap. 11.
+
+%           This routine does not exploit skew-symmetry and is not designed to
+%           be efficient.
+
+if ~isreal(A) | ~isequal(triu(A,1)',-tril(A,-1))
+    error('Must supply real, skew-symmetric matrix.')
+end
+
+n = length(A);
+k = 1;
+D = zeros(n);
+L = eye(n);
+pp = 1:n;
+if nargout >= 4
+   maxA = norm(A(:), inf);
+   rho = maxA;
+end
+
+while k < n
+
+      if max( abs(A(k+1:n,k)) ) == 0
+
+         s = 1;
+         % Nothing to do.
+
+      else
+
+         s = 2;
+
+         if k < n-1
+            [colmaxima, rowindices] = max( abs(A(k+1:n, k:k+1)) );
+            [biggest, colindex] = max(colmaxima);
+            row = rowindices(colindex)+k; col = colindex+k-1;
+
+            % Permute largest element into (k+1,k) position.
+            % NB: k<->col permutation must be done before k+1<->row one.
+            A( [k, col], : ) = A( [col, k], : );
+            A( :, [k, col] ) = A( :, [col, k] );
+            A( [k+1, row], : ) = A( [row, k+1], : );
+            A( :, [k+1, row] ) = A( :, [row, k+1] );
+            L( [k, col], : ) = L( [col, k], : );
+            L( :, [k, col] ) = L( :, [col, k] );
+            L( [k+1, row], : ) = L( [row, k+1], : );
+            L( :, [k+1, row] ) = L( :, [row, k+1] );
+            pp( [k, col] ) = pp( [col, k] );
+            pp( [k+1, row] ) = pp( [row, k+1] );
+         end
+
+         E = A(k:k+1,k:k+1);
+         D(k:k+1,k:k+1) = E;
+         C = A(k+2:n,k:k+1);
+         temp = C/E;
+         L(k+2:n,k:k+1) = temp;
+         A(k+2:n,k+2:n) = A(k+2:n,k+2:n) + temp*C';  % Note the plus sign.
+         % Restore skew-symmetry.
+         A(k+2:n,k+2:n) = 0.5 * (A(k+2:n,k+2:n) - A(k+2:n,k+2:n)');
+
+         if nargout >= 4, rho = max(rho, max(max(abs(A(k+2:n,k+2:n)))) ); end
+
+     end
+
+     k = k + s;
+     if k >= n-2, D(k:n,k:n) = A(k:n,k:n); break, end;
+
+end
+
+if nargout >= 3, P = eye(n); P = P(pp,:); end
+if nargout >= 4, rho = rho/maxA; end
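+
+% Example (illustrative):
+%     B = randn(6); A = B - B';            % real skew-symmetric matrix
+%     [L, D, P, rho] = ldlt_skew(A);
+%     norm(P*A*P' - L*D*L')                % small residual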
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/ldlt_symm.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,161 @@
+function [L, D, P, rho, ncomp] = ldlt_symm(A, piv)
+%LDLT_SYMM  Block LDL^T factorization for a symmetric indefinite matrix.
+%           Given a Hermitian matrix A,
+%           [L, D, P, RHO, NCOMP] = LDLT_SYMM(A, PIV) computes a permutation P,
+%           a unit lower triangular L, and a real block diagonal D
+%           with 1x1 and 2x2 diagonal blocks, such that  P*A*P' = L*D*L'.
+%           PIV controls the pivoting strategy:
+%             PIV = 'p': partial pivoting (Bunch and Kaufman),
+%             PIV = 'r': rook pivoting (Ashcraft, Grimes and Lewis).
+%           The default is partial pivoting.
+%           RHO is the growth factor.
+%           For PIV = 'r' only, NCOMP is the total number of comparisons.
+
+%           References:
+%           J. R. Bunch and L. Kaufman, Some stable methods for calculating
+%              inertia and solving symmetric linear systems, Math. Comp.,
+%              31(137):163-179, 1977.
+%           C. Ashcraft, R. G. Grimes and J. G. Lewis, Accurate symmetric
+%              indefinite linear equation solvers. SIAM J. Matrix Anal. Appl.,
+%              20(2):513-561, 1998.
+%           N. J. Higham, Accuracy and Stability of Numerical Algorithms,
+%              Second edition, Society for Industrial and Applied Mathematics,
+%              Philadelphia, PA, 2002; chap. 11.
+
+%           This routine does not exploit symmetry and is not designed to be
+%           efficient.
+
+if ~isequal(triu(A,1)',tril(A,-1)), error('Must supply Hermitian matrix.'), end
+if nargin < 2, piv = 'p'; end
+
+n = length(A);
+k = 1;
+D = eye(n); L = eye(n);  if n == 1, D = A; end
+pp = 1:n;
+if nargout >= 4
+   maxA = norm(A(:), inf);
+   rho = maxA;
+end
+ncomp = 0;
+
+alpha = (1 + sqrt(17))/8;
+
+while k < n
+      [lambda, r] = max( abs(A(k+1:n,k)) );
+      r = r(1) + k;
+
+      if lambda > 0
+          swap = 0;
+          if abs(A(k,k)) >= alpha*lambda
+             s = 1;
+          else
+             if piv == 'p'
+                temp = A(k:n,r); temp(r-k+1) = 0;
+                sigma = norm(temp, inf);
+                if alpha*lambda^2 <= abs(A(k,k))*sigma
+                   s = 1;
+                elseif abs(A(r,r)) >= alpha*sigma
+                   swap = 1;
+                   m1 = k; m2 = r;
+                   s = 1;
+                else
+                   swap = 1;
+                   m1 = k+1; m2 = r;
+                   s = 2;
+                end
+                if swap
+                   A( [m1, m2], : ) = A( [m2, m1], : );
+                   L( [m1, m2], : ) = L( [m2, m1], : );
+                   A( :, [m1, m2] ) = A( :, [m2, m1] );
+                   L( :, [m1, m2] ) = L( :, [m2, m1] );
+                   pp( [m1, m2] ) = pp( [m2, m1] );
+                end
+             elseif piv == 'r'
+                j = k;
+                pivot = 0;
+                lambda_j = lambda;
+                while ~pivot
+                      [temp, r] = max( abs(A(k:n,j)) );
+                      ncomp = ncomp + n-k;
+                      r = r(1) + k - 1;
+                      temp = A(k:n,r); temp(r-k+1) = 0;
+                      lambda_r = max( abs(temp) );
+                      ncomp = ncomp + n-k;
+                      if alpha*lambda_r <= abs(A(r,r))
+                         pivot = 1;
+                         s = 1;
+                         A( [k, r], : ) = A( [r, k], : );
+                         L( [k, r], : ) = L( [r, k], : );
+                         A( :, [k, r] ) = A( :, [r, k] );
+                         L( :, [k, r] ) = L( :, [r, k] );
+                         pp( [k, r] ) = pp( [r, k] );
+                      elseif lambda_j == lambda_r
+                         pivot = 1;
+                         s = 2;
+                         A( [k, j], : ) = A( [j, k], : );
+                         L( [k, j], : ) = L( [j, k], : );
+                         A( :, [k, j] ) = A( :, [j, k] );
+                         L( :, [k, j] ) = L( :, [j, k] );
+                         pp( [k, j] ) = pp( [j, k] );
+                         k1 = k+1;
+                         A( [k1, r], : ) = A( [r, k1], : );
+                         L( [k1, r], : ) = L( [r, k1], : );
+                         A( :, [k1, r] ) = A( :, [r, k1] );
+                         L( :, [k1, r] ) = L( :, [r, k1] );
+                         pp( [k1, r] ) = pp( [r, k1] );
+                      else
+                         j = r;
+                         lambda_j = lambda_r;
+                      end
+                end
+             end
+          end
+
+          if s == 1
+
+             D(k,k) = A(k,k);
+             A(k+1:n,k) = A(k+1:n,k)/A(k,k);
+             L(k+1:n,k) = A(k+1:n,k);
+             i = k+1:n;
+             A(i,i) = A(i,i) - A(i,k) * A(k,i);
+             A(i,i) = 0.5 * (A(i,i) + A(i,i)');
+
+          elseif s == 2
+
+             E = A(k:k+1,k:k+1);
+             D(k:k+1,k:k+1) = E;
+             C = A(k+2:n,k:k+1);
+             temp = C/E;
+             L(k+2:n,k:k+1) = temp;
+             A(k+2:n,k+2:n) = A(k+2:n,k+2:n) - temp*C';
+             A(k+2:n,k+2:n) = 0.5 * (A(k+2:n,k+2:n) + A(k+2:n,k+2:n)');
+
+         end
+
+         % Ensure diagonal real (see LINPACK User's Guide, p. 5.17).
+         for i=k+s:n
+             A(i,i) = real(A(i,i));
+         end
+
+         if nargout >= 4 & k+s <= n
+            rho = max(rho, max(max(abs(A(k+s:n,k+s:n)))) );
+         end
+
+      else  % Nothing to do.
+
+         s = 1;
+         D(k,k) = A(k,k);
+
+      end
+
+      k = k + s;
+
+      if k == n
+         D(n,n) = A(n,n);
+         break
+      end
+
+end
+
+if nargout >= 3, P = eye(n); P = P(pp,:); end
+if nargout >= 4, rho = rho/maxA; end
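+
+% Example (illustrative):
+%     B = randn(6); A = B + B';                    % symmetric indefinite matrix
+%     [L, D, P, rho, ncomp] = ldlt_symm(A, 'r');   % rook pivoting
+%     norm(P*A*P' - L*D*L')                        % small residual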
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/ldlt_sytr.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,52 @@
+function [L, D] = ldlt_sytr(A)
+%LDLT_SYTR  Block LDL^T factorization for a symmetric tridiagonal matrix.
+%           [L, D] = LDLT_SYTR(A) factorizes A = L*D*L', where A is
+%           Hermitian tridiagonal, L is unit lower triangular, and D is
+%           block diagonal with 1x1 and 2x2 diagonal blocks.  It uses
+%           Bunch's strategy for choosing the pivots.
+
+%           References:
+%           J. R. Bunch, Partial pivoting strategies for symmetric
+%              matrices.  SIAM J. Numer. Anal., 11(3):521-528, 1974.
+%           N. J. Higham, Accuracy and Stability of Numerical Algorithms,
+%              Second edition, Society for Industrial and Applied Mathematics,
+%              Philadelphia, PA, 2002; sec. 11.1.4.
+
+n = length(A);
+if norm( tril(A,-2), 1) | norm( triu(A,2), 1) | ~isequal(A,A')
+   error('Matrix must be Hermitian tridiagonal.')
+end
+
+s = norm(A(:), inf);
+a = (sqrt(5)-1)/2;
+L = eye(n);
+D = zeros(n);
+k = 1;
+
+while k < n
+
+      if s*abs(A(k,k)) >= a*abs(A(k+1,k))^2
+
+         % 1-by-1 pivot.
+         D(k,k) = A(k,k);
+         L(k+1,k) = A(k+1,k)/A(k,k);
+         A(k+1,k+1) = A(k+1,k+1) - abs(A(k+1,k))^2/A(k,k);
+         k = k+1;
+
+      else
+
+         % 2-by-2 pivot.
+         E = A(k:k+1,k:k+1);
+         D(k:k+1,k:k+1) = E;
+         if k+2 <= n
+            L(k+2:n,k:k+1) = A(k+2:n,k:k+1)/E;
+            A(k+2,k+2) = A(k+2,k+2) - abs(A(k+2,k+1))^2*A(k,k)/det(E);
+         end
+         k = k+2;
+
+      end
+
+end
+if k == n
+   D(k,k) = A(k,k);
+end
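+
+% Example (illustrative):
+%     n = 8; d = randn(n,1); e = randn(n-1,1);
+%     A = full(gallery('tridiag', e, d, e));  % symmetric tridiagonal
+%     [L, D] = ldlt_sytr(A);
+%     norm(A - L*D*L')                        % small residual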
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/lse.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,61 @@
+function x = lse(A, b, B, d, method)
+%LSE     Solve the equality constrained least squares problem.
+%        X = LSE(A, b, B, d, METHOD) finds the least squares
+%        solution of the system Ax = b subject to the constraint Bx = d.
+%        METHOD = 1 (default), 2 or 3 specifies the variant of the null-space
+%        method to be used.  METHOD 3 is the most efficient.
+
+%        Note: Should really apply Householder transformations in factored form.
+%        Not done here.
+%
+%        Reference:
+%        A. J. Cox and N. J. Higham. Accuracy and stability of the null space
+%           method for solving the equality constrained least squares problem.
+%           BIT, 39(1):34-50, 1999.
+%        N. J. Higham, Accuracy and Stability of Numerical Algorithms,
+%           Second edition, Society for Industrial and Applied Mathematics,
+%           Philadelphia, PA, 2002; sec. 20.9.
+
+b = b(:); d = d(:);
+[m n] = size(A);
+m2 = length(b);
+[p n2] = size(B);
+p2 = length(d);
+
+if m ~= m2 | p ~= p2 | n ~= n2
+ error('Dimensions do not match!')
+end
+
+if nargin < 5, method = 1; end
+
+partial = (method == 2 | method == 3);
+[U, Q, L, S] = gqr(A, B, partial);
+y1 = S(:,1:p)\d;
+
+L21 = L(m-n+p+1:m,1:p);
+L22 = L(m-n+p+1:m,p+1:n);
+
+if method == 1
+
+   c = U'*b;
+   c3 = c(m-n+p+1:m);
+   y2 = L22 \ (c3 - L21*y1);
+
+elseif method == 2
+
+   W1 = A*Q(:,1:p);
+   g = U'*(b - W1*y1);
+   g2 = g(m-n+p+1:m);
+   y2 = L22\g2;
+
+elseif method == 3
+
+   W1y1 = A*(Q(:,1:p)*y1);
+   g = U'*(b - W1y1);
+   g2 = g(m-n+p+1:m);
+   y2 = L22\g2;
+
+end
+
+y = [y1; y2];
+x = Q*y;
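+
+% Example (illustrative):
+%     A = randn(8,5); b = randn(8,1);
+%     B = randn(2,5); d = randn(2,1);
+%     x = lse(A, b, B, d);                 % default null-space variant
+%     norm(B*x - d)                        % constraint satisfied to roundoff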
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/makejcf.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,39 @@
+function A = makejcf(n, e, m, X)
+%MAKEJCF   A matrix with specified Jordan canonical form.
+%          MAKEJCF(N, E, M) is a matrix having the Jordan canonical form
+%          whose i'th Jordan block is of dimension M(i) with eigenvalue E(i),
+%          and where N = SUM(M).
+%          Defaults: E = 1:N, M = ONES(SIZE(E)) with M(1) so that SUM(M) = N.
+%          The matrix is constructed by applying a random similarity
+%          transformation to the Jordan form.
+%          Alternatively, the matrix used in the similarity transformation
+%          can be specified as a fourth parameter.
+%          In particular, MAKEJCF(N, E, M, EYE(N)) returns the Jordan form
+%          itself.
+%          NB: The JCF is very sensitive to rounding errors.
+
+if nargin < 2, e = 1:n; end
+if nargin < 3, m = ones(size(e)); m(1) = m(1) + n - sum(m); end
+
+if length(e) ~= length(m)
+   error('Parameters E and M must be of same dimension.')
+end
+
+if sum(m) ~= n, error('Block dimensions must add up to N.'), end
+
+A = zeros(n);
+j = 1;
+for i=1:max(size(m))
+    if m(i) > 1
+        Jb = gallery('jordbloc',m(i),e(i));
+    else
+        Jb = e(i);  % JORDBLOC fails in n = 1 case.
+    end
+    A(j:j+m(i)-1,j:j+m(i)-1) = Jb;
+    j = j + m(i);
+end
+
+if nargin < 4
+   X = randn(n);
+end
+A = X\A*X;
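+
+% Example (illustrative):
+%     A = makejcf(5, [2 3], [3 2]);        % Jordan blocks J_3(2) and J_2(3)
+%     sort(eig(A))                         % approximately 2, 2, 2, 3, 3
+%     J = makejcf(5, [2 3], [3 2], eye(5)) % the Jordan form itself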
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/matrix.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,85 @@
+function A = matrix(k, n)
+%MATRIX  Test matrices accessed by number.
+%        MATRIX(K, N) is the N-by-N instance of matrix number K in
+%        a set of test matrices comprising those in MATLAB plus those
+%        in the Matrix Computation Toolbox,
+%        with all other parameters set to their default.
+%        N.B. - Only those matrices which are full and take an arbitrary
+%               dimension N are included.
+%             - Some of these matrices are random.
+%        MATRIX(K) is a string containing the name of the K'th matrix.
+%        MATRIX(0) is the number of matrices, i.e. the upper limit for K.
+%        Thus to set A to each N-by-N test matrix in turn use a loop like
+%             for k=1:matrix(0)
+%                 A = matrix(k, N);
+%                 Aname = matrix(k);   % The name of the matrix
+%             end
+%        MATRIX(-1) returns the version number and date of the
+%        Matrix Computation Toolbox.
+%        MATRIX with no arguments lists the names and numbers of the M-files in the
+%        collection.
+
+%         References:
+%         N. J. Higham, Accuracy and Stability of Numerical Algorithms,
+%            Second edition, Society for Industrial and Applied Mathematics,
+%            Philadelphia, PA, 2002; sec. 20.5.
+
+% Matrices from gallery:
+matrices = [%
+'cauchy  '; 'chebspec'; 'chebvand'; 'chow    ';
+'circul  '; 'clement '; 'condex  ';
+'cycol   '; 'dramadah'; 'fiedler ';
+'forsythe'; 'frank   '; 'gearmat '; 'grcar   ';
+'invhess '; 'invol   '; 'ipjfact '; 'jordbloc';
+'kahan   '; 'kms     '; 'krylov  '; 'lehmer  ';
+'lesp    '; 'lotkin  '; 'minij   '; 'moler   ';
+'orthog  '; 'parter  '; 'pei     '; 'prolate ';
+'randcolu'; 'randcorr'; 'rando   '; 'randsvd ';
+'redheff '; 'riemann '; 'ris     '; 'smoke   ';
+'toeppd  '; 'triw    ';];
+n_gall = length(matrices);
+
+% Other MATLAB matrices:
+matrices = [matrices;
+'hilb    '; 'invhilb '; 'magic   '; 'pascal  ';
+'rand    '; 'randn   ';];
+n_MATLAB = length(matrices);
+
+% Matrices from Matrix Computation Toolbox:
+matrices = [matrices;
+'augment '; 'gfpp    '; 'magic   '; 'makejcf ';
+'rschur  '; 'vand    '];
+n_mats = length(matrices);
+
+if nargin == 0
+
+   rows = ceil(n_mats/5);
+   temp = zeros(rows,5);
+   temp(1:n_mats) = 1:n_mats;
+
+   for i = 1:rows
+      for j = 1:5
+        if temp(i,j) == 0, continue, end
+        fprintf(['%2.0f: ' sprintf('%s',matrices(temp(i,j),:)) '  '], ...
+                temp(i,j))
+      end
+      fprintf('\n')
+   end
+   fprintf('Matrices 1 to %1.0f are from MATLAB.\n', n_MATLAB)
+
+elseif nargin == 1
+   if k == 0
+      A = length(matrices);
+   elseif k > 0
+      A = deblank(matrices(k,:));
+   else
+      % Version number and date of collection.
+      A = 'Version 1.2, September 5 2002';
+   end
+else
+   if k <= n_gall
+      A = eval( ['gallery(''' deblank(matrices(k,:)) ''',n)'] );
+   else
+      A = eval( [deblank(matrices(k,:)) '(n)'] );
+   end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/matsignt.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,38 @@
+function S = matsignt(T)
+%MATSIGNT    Matrix sign function of a triangular matrix.
+%            S = MATSIGNT(T) computes the matrix sign function S of the
+%            upper triangular matrix T using a recurrence.
+
+%            Called by SIGNM.
+
+if ~isequal(T,triu(T)), error('Matrix must be upper triangular.'), end
+
+n = length(T);
+
+S = diag( sign( diag(real(T)) ) );
+for p = 1:n-1
+   for i = 1:n-p
+
+      j = i+p;
+      d = T(j,j) - T(i,i);
+
+      if S(i,i) ~= -S(j,j)  % Solve via S^2 = I if we can.
+
+         % Get S(i,j) from S^2 = I.
+         k = i+1:j-1;
+         S(i,j) = -S(i,k)*S(k,j) / (S(i,i)+S(j,j));
+
+      else
+
+         % Get S(i,j) from S*T = T*S.
+         s = T(i,j)*(S(j,j)-S(i,i));
+         if p > 1
+            k = i+1:j-1;
+            s = s + T(i,k)*S(k,j) - S(i,k)*T(k,j);
+         end
+         S(i,j) = s/d;
+
+      end
+
+   end
+end
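+
+% Example (illustrative):
+%     T = triu(randn(5),1) + diag([1 -2 3 -4 5]);  % no eigenvalue on imaginary axis
+%     S = matsignt(T);
+%     norm(S^2 - eye(5))                           % the matrix sign is involutory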
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/mctdemo.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,233 @@
+clc
+format compact
+echo on
+%MCTDEMO       Demonstration of Matrix Computation Toolbox.
+%              N. J. Higham.
+
+% The Matrix Computation Toolbox contains test matrices, matrix
+% factorizations, visualization functions, direct search optimization
+% functions, and other miscellaneous functions.
+
+% The version of the toolbox is
+
+matrix(-1)
+echo on
+
+% For this demonstration you will need to view both the command window
+% and one figure window.
+% This demonstration emphasises graphics and shows only
+% some of the features of the toolbox.
+
+pause  % Press any key to continue after pauses.
+
+% A list of test matrices available in MATLAB and in the Toolbox (all full,
+% square, and of arbitrary dimension) is obtained by typing `matrix':
+
+matrix
+
+pause
+
+% The FV command plots the boundary of the field of values of a matrix
+% (the set of all Rayleigh quotients) and plots the eigenvalues as
+% crosses (`x').  Here are some examples:
+
+% Here is the field of values of the 10-by-10 Grcar matrix:
+
+clf
+fv(gallery('grcar',10));
+title('fv(gallery(''grcar'',10))')
+
+pause
+
+% Next, we form a random orthogonal matrix and look at its field of values.
+% The boundary is the convex hull of the eigenvalues since A is normal.
+
+A = gallery('randsvd',10, 1);
+fv(A);
+title('fv(gallery(''randsvd'',10, 1))')
+pause
+
+% The PS command plots an approximation to a pseudospectrum of A,
+% which is the set of complex numbers that are eigenvalues of some
+% perturbed matrix A + E, with the norm of E at most epsilon
+% (default: epsilon = 1E-3).
+% The eigenvalues of A are plotted as crosses (`x').
+% Here are some interesting PS plots.
+
+% First, we use the Kahan matrix, a triangular matrix made up of sines and
+% cosines.  Here is an approximate pseudospectrum of the 10-by-10 matrix:
+
+ps(gallery('kahan',10),25);
+title('ps(gallery(''kahan'',10),25)')
+pause
+
+% Next, a different way of looking at pseudospectra, via norms of
+% the resolvent.  (The resolvent of A is INV(z*I-A), where z is a complex
+% variable).  PSCONT gives a color map with a superimposed contour
+% plot.  Here we specify a region of the complex plane in
+% which the 8-by-8 Kahan matrix is interesting to look at.
+
+pscont(gallery('kahan',8), 0, 20, [0.2 1.2 -0.5 0.5]);
+title('pscont(gallery(''kahan'',8))')
+pause
+
+% The triw matrix is upper triangular, made up of 1s and -1s:
+
+gallery('triw',4)
+
+% Here is a combined surface and contour plot of the resolvent for N = 11.
+% Notice how the repeated eigenvalue 1 `sucks in' the resolvent.
+
+pscont(gallery('triw',11), 2, 15, [-2 2 -2 2]);
+title('pscont(gallery(''triw'',11))')
+pause
+
+% The next PSCONT plot is for the companion matrix of the characteristic
+% polynomial of the CHEBSPEC matrix:
+
+A = gallery('chebspec',8); C = compan(poly(A));
+
+% The SHOW command shows the +/- pattern of the elements of a matrix, with
+% blanks for zero elements:
+
+show(C)
+
+pscont(C, 2, 20, [-.1 .1 -.1 .1]);
+title('pscont(gallery(''chebspec'',8))')
+pause
+
+% The following matrix has a pseudospectrum in the form of a limacon.
+
+n = 25; A = gallery('triw',n,1,2) - eye(n);
+sub(A, 6)               % Leading principal 6-by-6 submatrix of A.
+ps(A);
+pause
+
+% We can get a visual representation of a matrix using the SEE
+% command, which produces subplots with the following layout:
+%     /---------------------------------\
+%     | MESH(A)        SEMILOGY(SVD(A)) |
+%     | PS(A)               FV(A)       |
+%     \---------------------------------/
+% where PS is the 1e-3 pseudospectrum and FV is the field of values.
+% RSCHUR is an upper quasi-triangular matrix:
+
+see(rschur(16,18));
+
+pause
+
+% Matlab's MAGIC function produces magic squares:
+
+A = magic(5)
+
+% Using the toolbox routine PNORM we can estimate the matrix p-norm
+% for any value of p.
+
+[pnorm(A,1) pnorm(A,1.5) pnorm(A,2) pnorm(A,pi) pnorm(A,inf)]
+
+% As this example suggests, the p-norm of a magic square is
+% constant for all p!
+
+pause
+
+% GERSH plots Gershgorin disks.  Here are some interesting examples.
+clf
+gersh(gallery('lesp',12));
+title('gersh(gallery(''lesp'',12))')
+pause
+
+gersh(gallery('hanowa',10));
+title('gersh(gallery(''hanowa'',10))')
+pause
+
+gersh(gallery('ipjfact',6,1));
+title('gersh(gallery(''ipjfact'',6,1))')
+pause
+
+gersh(gallery('smoke',16,1));
+title('gersh(gallery(''smoke'',16,1))')
+pause
+
+% GFPP generates matrices for which Gaussian elimination with partial
+% pivoting produces a large growth factor.
+
+gfpp(6)
+pause
+
+% Let's find the growth factor RHO for partial pivoting and complete pivoting
+% for a bigger matrix:
+
+A = gfpp(20);
+
+[L, U, P, Q, rho] = gep(A,'p'); % Partial pivoting using Toolbox function GEP.
+[rho, 2^19]
+
+[L, U, P, Q, rho] = gep(A,'c'); % Complete pivoting using Toolbox function GEP.
+rho
+% As expected, complete pivoting does not produce large growth here.
+pause
+
+% Function MATRIX allows test matrices in the Toolbox and MATLAB to be
+% accessed by number.  The following piece of code steps through all the
+% non-Hermitian matrices of arbitrary dimension, setting A to each
+% 10-by-10 matrix in turn.  It evaluates the 2-norm condition number and the
+% ratio of the largest to smallest eigenvalue (in absolute values).
+
+% c = []; e = []; j = 1;
+% for i=1:matrix(0)
+%     % Double on next line avoids bug in MATLAB 6.5 re. i = 35.
+%     A = double(matrix(i, 10));
+%     if ~isequal(A,A')  % If not Hermitian...
+%        c1 = cond(A);
+%        eg = eig(A);
+%        e1 = max(abs(eg)) / min(abs(eg));
+%        % Filter out extremely ill-conditioned matrices.
+%        if c1 <= 1e10, c(j) = c1; e(j) = e1; j = j + 1; end
+%     end
+% end
+echo off
+
+c = []; e = []; j = 1;
+for i=1:matrix(0)
+    % Double on next line avoids bug in MATLAB 6.5 re. i = 35.
+    A = double(matrix(i, 10));
+    if ~isequal(A,A')  % If not Hermitian...
+       c1 = cond(A);
+       eg = eig(A);
+       e1 = max(abs(eg)) / min(abs(eg));
+       % Filter out extremely ill-conditioned matrices.
+       if c1 <= 1e10, c(j) = c1; e(j) = e1; j = j + 1; end
+    end
+end
+echo on
+
+% The following plots confirm that the condition number can be much
+% larger than the extremal eigenvalue ratio.
+echo off
+j = max(size(c));
+subplot(2,1,1)
+semilogy(1:j, c, 'x', 1:j, e, 'o'), hold on
+semilogy(1:j, c, '-', 1:j, e, '--'), hold off
+title('cond: x, eig\_ratio: o')
+subplot(2,1,2)
+semilogy(1:j, c./e)
+title('cond/eig\_ratio')
+echo on
+pause
+
+% Finally, here are three interesting pseudospectra based on pentadiagonal
+% Toeplitz matrices:
+
+clf
+ps(full(gallery('toeppen',32,0,1/2,0,0,1)));            % Propeller
+pause
+
+ps(inv(full(gallery('toeppen',32,0,1,1,0,.25))));       % Man in the moon
+pause
+
+ps(full(gallery('toeppen',32,0,1/2,1,1,1)));            % Fish
+pause
+
+echo off
+clear A L U P Q V D
+format
Binary file matrixcomp/mctoolbox.pdf has changed
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/mdsmax.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,170 @@
+function [x, fmax, nf] = mdsmax(fun, x, stopit, savit, varargin)
+%MDSMAX  Multidirectional search method for direct search optimization.
+%        [x, fmax, nf] = MDSMAX(FUN, x0, STOPIT, SAVIT) attempts to
+%        maximize the function FUN, using the starting vector x0.
+%        The method of multidirectional search is used.
+%        Output arguments:
+%               x    = vector yielding largest function value found,
+%               fmax = function value at x,
+%               nf   = number of function evaluations.
+%        The iteration is terminated when either
+%               - the relative size of the simplex is <= STOPIT(1)
+%                 (default 1e-3),
+%               - STOPIT(2) function evaluations have been performed
+%                 (default inf, i.e., no limit), or
+%               - a function value equals or exceeds STOPIT(3)
+%                 (default inf, i.e., no test on function values).
+%        The form of the initial simplex is determined by STOPIT(4):
+%          STOPIT(4) = 0: regular simplex (sides of equal length, the default),
+%          STOPIT(4) = 1: right-angled simplex.
+%        Progress of the iteration is not shown if STOPIT(5) = 0 (default 1).
+%        If a non-empty fourth parameter string SAVIT is present, then
+%        `SAVE SAVIT x fmax nf' is executed after each inner iteration.
+%        NB: x0 can be a matrix.  In the output argument, in SAVIT saves,
+%            and in function calls, x has the same shape as x0.
+%        MDSMAX(fun, x0, STOPIT, SAVIT, P1, P2,...) allows additional
+%        arguments to be passed to fun, via feval(fun,x,P1,P2,...).
+
+% This implementation uses 2n^2 elements of storage (two simplices), where x0
+% is an n-vector.  It is based on the algorithm statement in [2, sec.3],
+% modified so as to halve the storage (with a slight loss in readability).
+
+% References:
+% [1] V. J. Torczon, Multi-directional search: A direct search algorithm for
+%     parallel machines, Ph.D. Thesis, Rice University, Houston, Texas, 1989.
+% [2] V. J. Torczon, On the convergence of the multidirectional search
+%     algorithm, SIAM J. Optimization, 1 (1991), pp. 123-145.
+% [3] N. J. Higham, Optimization by direct search in matrix computations,
+%     SIAM J. Matrix Anal. Appl, 14(2): 317-333, 1993.
+% [4] N. J. Higham, Accuracy and Stability of Numerical Algorithms,
+%        Second edition, Society for Industrial and Applied Mathematics,
+%        Philadelphia, PA, 2002; sec. 20.5.
+
+x0 = x(:);  % Work with column vector internally.
+n = length(x0);
+
+mu = 2;      % Expansion factor.
+theta = 0.5; % Contraction factor.
+
+% Set up convergence parameters etc.
+if nargin < 3 | isempty(stopit), stopit(1) = 1e-3; end
+tol = stopit(1);  % Tolerance for cgce test based on relative size of simplex.
+if length(stopit) == 1, stopit(2) = inf; end  % Max no. of f-evaluations.
+if length(stopit) == 2, stopit(3) = inf; end  % Default target for f-values.
+if length(stopit) == 3, stopit(4) = 0; end    % Default initial simplex.
+if length(stopit) == 4, stopit(5) = 1; end    % Default: show progress.
+trace  = stopit(5);
+if nargin < 4, savit = []; end                   % File name for snapshots.
+
+V = [zeros(n,1) eye(n)]; T = V;
+f = zeros(n+1,1); ft = f;
+V(:,1) = x0; f(1) = feval(fun,x,varargin{:});
+fmax_old = f(1);
+
+if trace, fprintf('f(x0) = %9.4e\n', f(1)), end
+
+k = 0; m = 0;
+
+% Set up initial simplex.
+scale = max(norm(x0,inf),1);
+if stopit(4) == 0
+   % Regular simplex - all edges have same length.
+   % Generated from construction given in reference [18, pp. 80-81] of [1].
+   alpha = scale / (n*sqrt(2)) * [ sqrt(n+1)-1+n  sqrt(n+1)-1 ];
+   V(:,2:n+1) = (x0 + alpha(2)*ones(n,1)) * ones(1,n);
+   for j=2:n+1
+       V(j-1,j) = x0(j-1) + alpha(1);
+       x(:) = V(:,j); f(j) = feval(fun,x,varargin{:});
+   end
+else
+   % Right-angled simplex based on co-ordinate axes.
+   alpha = scale*ones(n+1,1);
+   for j=2:n+1
+       V(:,j) = x0 + alpha(j)*V(:,j);
+       x(:) = V(:,j); f(j) = feval(fun,x,varargin{:});
+   end
+end
+nf = n+1;
+size = 0;         % Integer that keeps track of expansions/contractions.
+flag_break = 0;   % Flag which becomes true when ready to quit outer loop.
+
+while 1    %%%%%% Outer loop.
+k = k+1;
+
+% Find a new best vertex  x  and function value  fmax = f(x).
+[fmax,j] = max(f);
+V(:,[1 j]) = V(:,[j 1]); v1 = V(:,1);
+if ~isempty(savit), x(:) = v1; eval(['save ' savit ' x fmax nf']), end
+f([1 j]) = f([j 1]);
+if trace
+   fprintf('Iter. %2.0f,  inner = %2.0f,  size = %2.0f,  ', k, m, size)
+   fprintf('nf = %3.0f,  f = %9.4e  (%2.1f%%)\n', nf, fmax, ...
+           100*(fmax-fmax_old)/(abs(fmax_old)+eps))
+end
+fmax_old = fmax;
+
+% Stopping Test 1 - f reached target value?
+if fmax >= stopit(3)
+   msg = ['Exceeded target...quitting\n'];
+   break  % Quit.
+end
+
+m = 0;
+while 1   %%% Inner repeat loop.
+    m = m+1;
+
+    % Stopping Test 2 - too many f-evals?
+    if nf >= stopit(2)
+       msg = ['Max no. of function evaluations exceeded...quitting\n'];
+       flag_break = 1; break  % Quit.
+    end
+
+    % Stopping Test 3 - converged?   This is test (4.3) in [1].
+    size_simplex = norm(V(:,2:n+1)- v1(:,ones(1,n)),1) / max(1, norm(v1,1));
+    if size_simplex <= tol
+       msg = sprintf('Simplex size %9.4e <= %9.4e...quitting\n', ...
+                      size_simplex, tol);
+       flag_break = 1; break  % Quit.
+    end
+
+    for j=2:n+1      % ---Rotation (reflection) step.
+        T(:,j) = 2*v1 - V(:,j);
+        x(:) = T(:,j); ft(j) = feval(fun,x,varargin{:});
+    end
+    nf = nf + n;
+
+    replaced = ( max(ft(2:n+1)) > fmax );
+
+    if replaced
+       for j=2:n+1   % ---Expansion step.
+           V(:,j) = (1-mu)*v1 + mu*T(:,j);
+           x(:) = V(:,j); f(j) = feval(fun,x,varargin{:});
+       end
+       nf = nf + n;
+       % Accept expansion or rotation?
+       if max(ft(2:n+1)) > max(f(2:n+1))
+          V(:,2:n+1) = T(:,2:n+1);  f(2:n+1) = ft(2:n+1);  % Accept rotation.
+       else
+          size = size + 1;  % Accept expansion (f and V already set).
+       end
+    else
+       for j=2:n+1   % ---Contraction step.
+           V(:,j) = (1+theta)*v1 - theta*T(:,j);
+           x(:) = V(:,j); f(j) = feval(fun,x,varargin{:});
+       end
+       nf = nf + n;
+       replaced = ( max(f(2:n+1)) > fmax );
+       % Accept contraction (f and V already set).
+       size = size - 1;
+    end
+
+    if replaced, break, end
+    if trace & rem(m,10) == 0, fprintf('        ...inner = %2.0f...\n',m), end
+    end %%% Of inner repeat loop.
+
+if flag_break, break, end
+end %%%%%% Of outer loop.
+
+% Finished.
+if trace, fprintf(msg), end
+x(:) = v1;
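+
+% Example (illustrative; assumes a MATLAB version with anonymous functions,
+% otherwise pass the name of an M-file function instead):
+%     f = @(x) -norm(x - [1;2])^2;      % maximum value 0, attained at [1;2]
+%     [x, fmax, nf] = mdsmax(f, [0;0]);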
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/nmsmax.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,173 @@
+function [x, fmax, nf] = nmsmax(fun, x, stopit, savit, varargin)
+%NMSMAX  Nelder-Mead simplex method for direct search optimization.
+%        [x, fmax, nf] = NMSMAX(FUN, x0, STOPIT, SAVIT) attempts to
+%        maximize the function FUN, using the starting vector x0.
+%        The Nelder-Mead direct search method is used.
+%        Output arguments:
+%               x    = vector yielding largest function value found,
+%               fmax = function value at x,
+%               nf   = number of function evaluations.
+%        The iteration is terminated when either
+%               - the relative size of the simplex is <= STOPIT(1)
+%                 (default 1e-3),
+%               - STOPIT(2) function evaluations have been performed
+%                 (default inf, i.e., no limit), or
+%               - a function value equals or exceeds STOPIT(3)
+%                 (default inf, i.e., no test on function values).
+%        The form of the initial simplex is determined by STOPIT(4):
+%           STOPIT(4) = 0: regular simplex (sides of equal length, the default)
+%           STOPIT(4) = 1: right-angled simplex.
+%        Progress of the iteration is not shown if STOPIT(5) = 0 (default 1).
+%        If a non-empty fourth parameter string SAVIT is present, then
+%        `SAVE SAVIT x fmax nf' is executed after each inner iteration.
+%        NB: x0 can be a matrix.  In the output argument, in SAVIT saves,
+%            and in function calls, x has the same shape as x0.
+%        NMSMAX(fun, x0, STOPIT, SAVIT, P1, P2,...) allows additional
+%        arguments to be passed to fun, via feval(fun,x,P1,P2,...).
+
+% References:
+% N. J. Higham, Optimization by direct search in matrix computations,
+%    SIAM J. Matrix Anal. Appl, 14(2): 317-333, 1993.
+% C. T. Kelley, Iterative Methods for Optimization, Society for Industrial
+%    and Applied Mathematics, Philadelphia, PA, 1999.
+
+x0 = x(:);  % Work with column vector internally.
+n = length(x0);
+
+% Set up convergence parameters etc.
+if nargin < 3 | isempty(stopit), stopit(1) = 1e-3; end
+tol = stopit(1);  % Tolerance for cgce test based on relative size of simplex.
+if length(stopit) == 1, stopit(2) = inf; end  % Max no. of f-evaluations.
+if length(stopit) == 2, stopit(3) = inf; end  % Default target for f-values.
+if length(stopit) == 3, stopit(4) = 0; end    % Default initial simplex.
+if length(stopit) == 4, stopit(5) = 1; end    % Default: show progress.
+trace  = stopit(5);
+if nargin < 4, savit = []; end                   % File name for snapshots.
+
+V = [zeros(n,1) eye(n)];
+f = zeros(n+1,1);
+V(:,1) = x0; f(1) = feval(fun,x,varargin{:});
+fmax_old = f(1);
+
+if trace, fprintf('f(x0) = %9.4e\n', f(1)), end
+
+k = 0; m = 0;
+
+% Set up initial simplex.
+scale = max(norm(x0,inf),1);
+if stopit(4) == 0
+   % Regular simplex - all edges have same length.
+   % Generated from construction given in reference [18, pp. 80-81] of [1].
+   alpha = scale / (n*sqrt(2)) * [ sqrt(n+1)-1+n  sqrt(n+1)-1 ];
+   V(:,2:n+1) = (x0 + alpha(2)*ones(n,1)) * ones(1,n);
+   for j=2:n+1
+       V(j-1,j) = x0(j-1) + alpha(1);
+       x(:) = V(:,j); f(j) = feval(fun,x,varargin{:});
+   end
+else
+   % Right-angled simplex based on co-ordinate axes.
+   alpha = scale*ones(n+1,1);
+   for j=2:n+1
+       V(:,j) = x0 + alpha(j)*V(:,j);
+       x(:) = V(:,j); f(j) = feval(fun,x,varargin{:});
+   end
+end
+nf = n+1;
+how = 'initial  ';
+
+[temp,j] = sort(f);
+j = j(n+1:-1:1);
+f = f(j); V = V(:,j);
+
+alpha = 1;  beta = 1/2;  gamma = 2;
+
+while 1    %%%%%% Outer (and only) loop.
+k = k+1;
+
+    fmax = f(1);
+    if fmax > fmax_old
+       if ~isempty(savit)
+          x(:) = V(:,1); eval(['save ' savit ' x fmax nf'])
+       end
+       if trace
+          fprintf('Iter. %2.0f,', k)
+          fprintf(['  how = ' how '  ']);
+          fprintf('nf = %3.0f,  f = %9.4e  (%2.1f%%)\n', nf, fmax, ...
+                  100*(fmax-fmax_old)/(abs(fmax_old)+eps))
+       end
+    end
+    fmax_old = fmax;
+
+    %%% Three stopping tests from MDSMAX.M
+
+    % Stopping Test 1 - f reached target value?
+    if fmax >= stopit(3)
+       msg = ['Exceeded target...quitting\n'];
+       break  % Quit.
+    end
+
+    % Stopping Test 2 - too many f-evals?
+    if nf >= stopit(2)
+       msg = ['Max no. of function evaluations exceeded...quitting\n'];
+       break  % Quit.
+    end
+
+    % Stopping Test 3 - converged?   This is test (4.3) in [1].
+    v1 = V(:,1);
+    size_simplex = norm(V(:,2:n+1)-v1(:,ones(1,n)),1) / max(1, norm(v1,1));
+    if size_simplex <= tol
+       msg = sprintf('Simplex size %9.4e <= %9.4e...quitting\n', ...
+                      size_simplex, tol);
+       break  % Quit.
+    end
+
+    %  One step of the Nelder-Mead simplex algorithm
+    %  NJH: Altered function calls and changed CNT to NF.
+    %       Changed each `fr < f(1)' type test to `>' for maximization
+    %       and re-ordered function values after sort.
+
+    vbar = (sum(V(:,1:n)')/n)';  % Mean value
+    vr = (1 + alpha)*vbar - alpha*V(:,n+1); x(:) = vr; fr = feval(fun,x,varargin{:});
+    nf = nf + 1;
+    vk = vr;  fk = fr; how = 'reflect, ';
+    if fr > f(n)
+            if fr > f(1)
+               ve = gamma*vr + (1-gamma)*vbar; x(:) = ve; fe = feval(fun,x,varargin{:});
+               nf = nf + 1;
+               if fe > f(1)
+                  vk = ve; fk = fe;
+                  how = 'expand,  ';
+               end
+            end
+    else
+            vt = V(:,n+1); ft = f(n+1);
+            if fr > ft
+               vt = vr;  ft = fr;
+            end
+            vc = beta*vt + (1-beta)*vbar; x(:) = vc; fc = feval(fun,x,varargin{:});
+            nf = nf + 1;
+            if fc > f(n)
+               vk = vc; fk = fc;
+               how = 'contract,';
+            else
+               for j = 2:n
+                   V(:,j) = (V(:,1) + V(:,j))/2;
+                   x(:) = V(:,j); f(j) = feval(fun,x,varargin{:});
+               end
+               nf = nf + n-1;
+               vk = (V(:,1) + V(:,n+1))/2; x(:) = vk; fk = feval(fun,x,varargin{:});
+               nf = nf + 1;
+               how = 'shrink,  ';
+            end
+    end
+    V(:,n+1) = vk;
+    f(n+1) = fk;
+    [temp,j] = sort(f);
+    j = j(n+1:-1:1);
+    f = f(j); V = V(:,j);
+
+end   %%%%%% End of outer (and only) loop.
+
+% Finished.
+if trace, fprintf(msg), end
+x(:) = V(:,1);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/pnorm.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,105 @@
+function [est, x, k] = pnorm(A, p, tol, prnt)
+%PNORM   Estimate of matrix p-norm (1 <= p <= inf).
+%        [EST, x, k] = PNORM(A, p, TOL) estimates the Holder p-norm of a
+%        matrix A, using the p-norm power method with a specially
+%        chosen starting vector.
+%        TOL is a relative convergence tolerance (default 1E-4).
+%        Returned are the norm estimate EST (which is a lower bound for the
+%        exact p-norm), the corresponding approximate maximizing vector x,
+%        and the number of power method iterations k.
+%        A nonzero fourth input argument causes trace output to the screen.
+%        If A is a vector, this routine simply returns NORM(A, p).
+%
+%        See also NORM, NORMEST, NORMEST1.
+
+%        Note: The estimate is exact for p = 1, but is not always exact for
+%        p = 2 or p = inf.  Code could be added to treat p = 2 and p = inf
+%        separately.
+%
+%        Calls DUAL.
+%
+%        Reference:
+%        N. J. Higham, Estimating the matrix p-norm, Numer. Math.,
+%             62 (1992), pp. 539-555.
+%        N. J. Higham, Accuracy and Stability of Numerical Algorithms,
+%           Second edition, Society for Industrial and Applied Mathematics,
+%           Philadelphia, PA, 2002; sec. 15.2.
+
+if nargin < 2, error('Must specify norm via second parameter.'), end
+[m,n] = size(A);
+if min(m,n) == 1, est = norm(A,p); return, end
+
+if nargin < 4, prnt = 0; end
+if nargin < 3 | isempty(tol), tol = 1e-4; end
+
+% Stage I.  Use Algorithm OSE to get starting vector x for power method.
+% Form y = B*x, at each stage choosing x(k) = c and scaling previous
+% x(k+1:n) by s, where norm([c s],p)=1.
+
+sm = 9;  % Number of samples.
+y = zeros(m,1); x = zeros(n,1);
+
+for k=1:n
+
+    if k == 1
+       c = 1; s = 0;
+    else
+       W = [A(:,k) y];
+
+       if p == 2   % Special case.  Solve exactly for 2-norm.
+          [U,S,V] = svd(full(W));
+          c = V(1,1); s = V(2,1);
+
+       else
+
+          fopt = 0;
+          for th=linspace(0,pi,sm)
+              c1 = cos(th); s1 = sin(th);
+              nrm = norm([c1 s1],p);
+              c1 = c1/nrm; s1 = s1/nrm;   % [c1 s1] has unit p-norm.
+              f = norm( W*[c1 s1]', p );
+              if f > fopt
+                 fopt = f;
+                 c = c1; s = s1;
+              end
+          end
+
+       end
+    end
+
+    x(k) = c;
+    y = x(k)*A(:,k) + s*y;
+    if k > 1, x(1:k-1) = s*x(1:k-1); end
+
+end
+
+est = norm(y,p);
+if prnt, fprintf('Alg OSE: %9.4e\n', est), end
+
+% Stage II.  Apply Algorithm PM (the power method).
+
+q = dual(p);
+k = 1;
+
+while 1
+
+    y = A*x;
+    est_old = est;
+    est = norm(y,p);
+
+    z = A' * dual(y,p);
+
+    if prnt
+        fprintf('%2.0f: norm(y) = %9.4e,  norm(z) = %9.4e', ...
+                 k, norm(y,p), norm(z,q))
+        fprintf('  rel_incr(est) = %9.4e\n', (est-est_old)/est)
+    end
+
+    if ( norm(z,q) <= z'*x | abs(est-est_old)/est <= tol ) & k > 1
+       return
+    end
+
+    x = dual(z,q);
+    k = k + 1;
+
+end
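+
+% Example (illustrative):
+%     A = magic(5);
+%     est = pnorm(A, 3)          % lower bound estimate of the 3-norm
+%     [pnorm(A,1) norm(A,1)]     % the estimate is exact for p = 1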
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/poldec.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,28 @@
+function [U, H] = poldec(A)
+%POLDEC   Polar decomposition.
+%         [U, H] = POLDEC(A) computes a matrix U of the same dimension
+%         (m-by-n) as A, and a Hermitian positive semi-definite matrix H,
+%         such that A = U*H.
+%         U has orthonormal columns if m >= n, and orthonormal rows if m <= n.
+%         U and H are computed via an SVD of A.
+%         U is a nearest unitary matrix to A in both the 2-norm and the
+%         Frobenius norm.
+
+%         Reference:
+%         N. J. Higham, Computing the polar decomposition---with applications,
+%         SIAM J. Sci. Stat. Comput., 7(4):1160--1174, 1986.
+%
+%         (The name `polar' is reserved for a graphics routine.)
+
+[m, n] = size(A);
+
+[P, S, Q] = svd(A, 0);  % Economy size.
+if m < n                % Ditto for the m<n case.
+   S = S(:, 1:m);
+   Q = Q(:, 1:m);
+end
+U = P*Q';
+if nargout == 2
+   H = Q*S*Q';
+   H = (H + H')/2;      % Force Hermitian by taking nearest Hermitian matrix.
+end
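+
+% Example (illustrative):
+%     A = randn(5,3);
+%     [U, H] = poldec(A);
+%     norm(A - U*H)              % small residual
+%     norm(U'*U - eye(3))        % U has orthonormal columns (m >= n case)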
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/ps.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,90 @@
+function y = ps(A, m, tol, rl, marksize)
+%PS     Dot plot of a pseudospectrum.
+%       PS(A, M, TOL, RL) plots an approximation to a pseudospectrum
+%       of the square matrix A, using M random perturbations of size TOL.
+%       M defaults to a SIZE(A)-dependent value and TOL to 1E-3.
+%       RL defines the type of perturbation:
+%         RL =  0 (default): absolute complex perturbations of 2-norm TOL.
+%         RL =  1:           absolute real perturbations of 2-norm TOL.
+%         RL = -1:           componentwise real perturbations of size TOL.
+%       The eigenvalues of A are plotted as crosses `x'.
+%       PS(A, M, TOL, RL, MARKSIZE) uses the specified marker size instead
+%       of a size that depends on the figure size, the matrix order, and M.
+%       If MARKSIZE < 0, the plot is suppressed and the plot data is returned
+%       as an output argument.
+%       PS(A, 0) plots just the eigenvalues of A.
+
+%       For a given TOL, the pseudospectrum of A is the set of
+%       pseudo-eigenvalues of A, that is, the set
+%       { e : e is an eigenvalue of A+E, for some E with NORM(E) <= TOL }.
+%
+%       References:
+%       L. N. Trefethen, Computation of pseudospectra, Acta Numerica,
+%          8:247-295, 1999.
+%       L. N. Trefethen, Spectra and pseudospectra, in The Graduate
+%          Student's Guide to Numerical Analysis '98, M. Ainsworth,
+%          J. Levesley, and M. Marletta, eds., Springer-Verlag, Berlin,
+%          1999, pp. 217-250.
+
+if diff(size(A)), error('Matrix must be square.'), end
+n = length(A);
+
+if nargin < 5, marksize = 0; end
+if nargin < 4, rl = 0; end
+if nargin < 3, tol = 1e-3; end
+if nargin < 2 | isempty(m), m = 5*max(1, round( 25*exp(-0.047*n) )); end
+
+if m == 0
+   e = eig(A);
+   ax = cpltaxes(e);
+   plot(real(e), imag(e), 'x')
+   axis(ax), axis('square')
+   return
+end
+
+x = zeros(m*n,1);
+i = sqrt(-1);
+
+for j = 1:m
+   if rl == -1     % Componentwise.
+      dA = -ones(n) + 2*rand(n);   % Uniform random numbers on [-1,1].
+      dA = tol * A .* dA;
+   else
+      if rl == 0   % Complex absolute.
+         dA = randn(n) + i*randn(n);
+      else         % Real absolute.
+         dA = randn(n);
+      end
+      dA = tol/norm(dA)*dA;
+   end
+   x((j-1)*n+1:j*n) = eig(A + dA);
+end
+
+if marksize >= 0
+
+   ax = cpltaxes(x);
+   h = plot(real(x),imag(x),'.');
+   axis(ax), axis('square')
+
+   % Next block adapted from SPY.M.
+   if marksize == 0
+      units = get(gca,'units');
+      set(gca,'units','points');
+      pos = get(gca,'position');
+      nps = 2.4*sqrt(n*m);  % Factor based on number of pseudo-ei'vals plotted.
+      myguess = round(3*min(pos(3:4))/nps);
+      marksize = max(1,myguess);
+      set(gca,'units',units);
+   end
+
+   hold on
+   e = eig(A);
+   plot(real(e),imag(e),'x');
+   set(h,'markersize',marksize);
+   hold off
+
+else
+
+  y = x;
+
+end
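+
+% Example (illustrative):
+%     ps(gallery('grcar',20));                       % dot plot, default settings
+%     z = ps(gallery('grcar',20), 10, 1e-3, 0, -1);  % return data, suppress plot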
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/pscont.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,104 @@
+function [x, y, z, m] = pscont(A, k, npts, ax, levels)
+%PSCONT   Contours and colour pictures of pseudospectra.
+%         PSCONT(A, K, NPTS, AX, LEVELS) plots LOG10(1/NORM(R(z))),
+%         where R(z) = INV(z*I-A) is the resolvent of the square matrix A,
+%         over an NPTS-by-NPTS grid.
+%         NPTS defaults to a SIZE(A)-dependent value.
+%         The limits are AX(1) and AX(2) on the x-axis and
+%                        AX(3) and AX(4) on the y-axis.
+%         If AX is omitted, suitable limits are guessed based on the
+%         eigenvalues of A.
+%         The eigenvalues of A are plotted as crosses `x'.
+%         K determines the type of plot:
+%             K = 0 (default) PCOLOR and CONTOUR
+%             K = 1           PCOLOR only
+%             K = 2           SURFC (SURF and CONTOUR)
+%             K = 3           SURF only
+%             K = 4           CONTOUR only
+%         The contours levels are specified by the vector LEVELS, which
+%         defaults to -10:-1 (recall we are plotting log10 of the data).
+%         Thus, by default, the contour lines trace out the boundaries of
+%         the epsilon pseudospectra for epsilon = 1e-10, ..., 1e-1.
+%         [X, Y, Z, NPTS] = PSCONT(A, ...) returns the plot data X, Y, Z
+%         and the value of NPTS used.
+%
+%         After calling this function you may want to change the
+%         color map (e.g., type COLORMAP HOT - see HELP COLOR) and the
+%         shading (e.g., type SHADING INTERP - see HELP INTERP).
+%         For an explanation of the term `pseudospectra', and references,
+%         see PS.M.
+%         When A is real and the grid is symmetric about the x-axis, this
+%         routine exploits symmetry to halve the computational work.
+
+%         Colour pseudospectral pictures of this type are referred to as
+%         `spectral portraits' by Godunov, Kostin, and colleagues.
+%         References: see PS.
+
+if diff(size(A)), error('Matrix must be square.'), end
+n = length(A);
+Areal = ~any(imag(A));
+
+if nargin < 5, levels = -10:-1; end
+e = eig(A);
+if nargin < 4 | isempty(ax)
+   ax = cpltaxes(e);
+   if Areal, ax(3) = -ax(4); end  % Make sure region symmetric about x-axis.
+end
+if nargin < 3 | isempty(npts)
+   npts = 3*round( min(max(5, sqrt(20^2*10^3/n^3) ), 30));
+end
+if nargin < 2 | isempty(k), k = 0; end
+
+nptsx = npts; nptsy = npts;
+Ysymmetry = (Areal & ax(3) == -ax(4));
+
+x = linspace(ax(1), ax(2), npts);
+y = linspace(ax(3), ax(4), npts);
+if Ysymmetry                    % Exploit symmetry about x-axis.
+   nptsy = ceil(npts/2);
+   y1 = y;
+   y = y(1:nptsy);
+end
+
+[xx, yy] = meshgrid(x,y);
+z = xx + sqrt(-1)*yy;
+I = eye(n);
+Smin = zeros(nptsy, nptsx);
+
+for j=1:nptsx
+    for i=1:nptsy
+        Smin(i,j) = min( svd( z(i,j)*I-A ) );
+    end
+end
+
+z = log10( Smin + eps );
+if Ysymmetry
+   z = [z; z(nptsy-rem(npts,2):-1:1,:)];
+   y = y1;
+end
+
+if k == 0 | k == 1
+   pcolor(x, y, z); hold on
+elseif k == 2
+   surfc(x, y, z); hold on
+elseif k == 3
+   surf(x, y, z); hold on
+end
+
+if k == 0
+   contour(x, y, z, levels,'-k'); hold on
+elseif k == 4
+   contour(x, y, z, levels); hold on
+end
+
+if k ~= 2 & k ~= 3
+   if k == 0 | k == 1
+      s = 'w';   % White.
+   else
+      s = 'k';   % Black.
+   end
+   plot(real(e),imag(e),['x' s]);
+end
+
+axis('square')
+hold off
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/readme.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,16 @@
+echo on
+% Welcome to the Matrix Computation Toolbox.
+% The primary source for this toolbox is
+%
+%     http://www.ma.man.ac.uk/~higham/mctoolbox
+%
+% The toolbox comprises the M-files in this directory and the accompanying
+% document mctoolbox.pdf:
+%
+%     The Matrix Computation Toolbox for MATLAB (version 1.0).
+%     Numerical Analysis Report No. 410, Manchester Centre for
+%     Computational Mathematics, Manchester, England, August 2002.
+%
+% For a descriptive list of M-files in the toolbox type
+%     help contents
+echo off
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/rootm.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,93 @@
+function [X, arg2] = rootm(A,p)
+%ROOTM   Pth root of a matrix.
+%        X = ROOTM(A,P) computes a Pth root X of a square matrix A.
+%        This function computes the Schur decomposition A = Q*T*Q' and then
+%        finds a Pth root U of T by a recursive  formula, giving X = Q*U*Q'.
+%
+%        X is the principal pth root: every eigenvalue of X has argument in
+%        the interval (-pi/p, pi/p].  If A has any real negative eigenvalues then a
+%        complex result is produced.  If A is singular then A may not have a
+%        pth root.  A warning is printed if exact singularity is detected.
+%
+%        With two output arguments, [X, RESNORM] = ROOTM(A,P) does not print any
+%        warning, and returns the residual, norm(A-X^P,'fro')/norm(A,'fro').
+
+%        Reference:
+%        M. I. Smith,  A Schur Algorithm for Computing Matrix pth Roots,
+%        Numerical Analysis Report No. 392, Manchester Centre for
+%        Computational Mathematics, Manchester, UK, 2001; to appear in
+%        SIAM J. Matrix Anal. Appl.
+
+%        Original function by Matthew Smith.
+
+n = length(A);
+
+[Q,T] = schur(A,'complex');
+
+U = zeros(n);
+R = zeros(n,(p-2)*n);
+
+for i = 1:n
+    U(i,i) = T(i,i)^(1/p);
+        for a = 1:p-2
+            R(i,(a-1)*n+i) = U(i,i)^(a+1);
+        end
+end
+
+warns = warning;
+warning('off');
+
+for c = 1:n-1
+    for i = 1:n-c
+        sum1 = 0;
+        for d = 1:p-2
+	    sum2 = 0;
+	    for k = i+1:i+c-1
+                sum2 = sum2 + U(i,k)*R(k,(d-1)*n + i+c);
+            end
+            sum1 = sum1 + U(i,i)^(p-2-d)*sum2;
+        end 	
+	sum3 = 0;
+        for k = i+1:i+c-1
+            sum3 = sum3 + U(i,k)*U(k,i+c);
+        end
+        sum1 = sum1 + U(i,i)^(p-2)*sum3;
+        sum4 = 0;
+        for j = 0:p-1
+            sum4 = sum4 + U(i,i)^j*U(i+c,i+c)^(p-1-j);
+        end	
+
+    U(i,i+c) = (T(i,i+c) - sum1)/(sum4);
+
+   for q = 1:p-2
+       sum5 = 0;
+       for g = 0:q
+           sum5 = sum5 + U(i,i )^g*U(i+c,i+c)^(q-g);
+       end
+       sum6 = 0;
+       for h = 1:q-1
+           sum7 = 0;
+	   for w=i+1:i+c-1
+	       sum7 = sum7 + U(i,w)*R(w,(h-1)*n +i+c);
+           end
+           sum6 = sum6 + U(i,i)^(q-1-h)*sum7;
+       end
+       sum = sum6 + U(i,i)^(q-1)*sum3;
+
+       R(i,(q-1)*n +i+c) = U(i,i+c)*sum5 + sum;
+   end
+  end
+end
+
+X = Q*U*Q';
+warning(warns);
+
+nzeig = any(diag(T)==0);
+
+if nzeig & (nargout ~= 2)
+    warning('Matrix is singular and may not have a pth root.')
+end
+
+if nargout == 2
+    arg2 = norm(X^p-A,'fro')/norm(A,'fro');
+end
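+
+% Example (illustrative):
+%     A = gallery('pei', 4);          % symmetric positive definite
+%     [X, res] = rootm(A, 3);         % principal cube root
+%     norm(X^3 - A)                   % small residual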
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/rschur.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,33 @@
+function A = rschur(n, mu, x, y)
+%RSCHUR   An upper quasi-triangular matrix.
+%         A = RSCHUR(N, MU, X, Y) is an N-by-N matrix in real Schur form.
+%         All the diagonal blocks are 2-by-2 (except for the last one, if N
+%         is odd) and the k'th has the form [x(k) y(k); -y(k) x(k)].
+%         Thus the eigenvalues of A are x(k) +/- i*y(k).
+%         MU (default 1) controls the departure from normality.
+%         Defaults: X(k) = -k^2/10, Y(k) = -k, i.e., the eigenvalues
+%                   lie on the parabola x = -y^2/10.
+
+%         References:
+%         F. Chatelin, Eigenvalues of Matrices, John Wiley, Chichester, 1993;
+%            Section 4.2.7.
+%         F. Chatelin and V. Fraysse, Qualitative computing: Elements
+%            of a theory for finite precision computation, Lecture notes,
+%            CERFACS, Toulouse, France and THOMSON-CSF, Orsay, France,
+%            June 1993.
+
+m = floor(n/2)+1;
+alpha = 10; beta = 1;
+
+if nargin < 4, y = -(1:m)/beta; end
+if nargin < 3, x = -(1:m).^2/alpha; end
+if nargin < 2, mu = 1; end
+
+A = diag( mu*ones(n-1,1), 1 );
+for i=1:2:2*(m-1)
+    j = (i+1)/2;
+    A(i:i+1,i:i+1) = [x(j) y(j); -y(j) x(j)];
+end
+if rem(n,2)            % n odd: final 1-by-1 diagonal block.
+   A(n,n) = x(m);
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/see.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,55 @@
+function see(A, k)
+%SEE    Pictures of a matrix.
+%       SEE(A) displays MESH(A), SEMILOGY(SVD(A),'o'),
+%       and (if A is square) PS(A) and FV(A) in four subplot windows.
+%       SEE(A, 1) plots MESH(PINV(A)) in the
+%       third window instead of the 1e-3-pseudospectrum.
+%       SEE(A, -1) plots only the eigenvalues in the third window,
+%       which is much quicker than PS.
+%       If A is complex, only real parts are used for the mesh plots.
+%       If A is sparse, just SPY(A) is shown.
+
+if nargin < 2, k = 0; end
+[m, n] = size(A);
+square = (m == n);
+clf
+
+if issparse(A)
+
+   spy(A);
+
+else
+
+   B = pinv(A);
+   s = svd(A);
+   zs = (s == zeros(size(s)));
+   if any( zs )
+      s( zs ) = [];  % Remove zero singular values for semilogy plot.
+   end
+
+   subplot(2,2,1)
+   mesh(real(A)), axis('ij'),  drawnow
+   subplot(2,2,2)
+   semilogy(s, 'o')
+   hold on, semilogy(s, '-'), hold off, drawnow
+   if any(zs), title('Zero(s) omitted'), end
+
+   subplot(2,2,3)
+   if ~square
+      axis off
+      text(0,0,'Matrix not square.')
+      return
+   end
+
+   if k == -1
+      ps(A, 0);
+   elseif k == 1
+      mesh(real(B)), axis('ij'), drawnow
+   else
+      ps(A);
+   end
+
+   subplot(2,2,4)
+   fv(A);
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/seqcheb.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,17 @@
+function x = seqcheb(n, k)
+%SEQCHEB   Sequence of points related to Chebyshev polynomials.
+%          X = SEQCHEB(N, K) produces a row vector of length N.
+%          There are two choices:
+%              K = 1:  zeros of T_N,         (the default)
+%              K = 2:  extrema of T_{N-1},
+%          where T_k is the Chebyshev polynomial of degree k.
+
+if nargin == 1, k = 1; end
+
+if k == 1                     %  Zeros of T_n
+   i = 1:n; j = .5*ones(1,n);
+   x = cos( (i-j) * (pi/n) );
+elseif k == 2                 %  Extrema of T_(n-1)
+   i = 0:n-1;
+   x = cos( i * (pi/(n-1)) );
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/seqm.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,17 @@
+function y = seqm(a, b, n)
+%SEQM   Multiplicative sequence.
+%       Y = SEQM(A, B, N) produces a row vector comprising N
+%       logarithmically equally spaced numbers, starting at A ~= 0
+%       and finishing at B ~= 0.
+%       If A*B < 0 and N > 2 then complex results are produced.
+%       If N is omitted then 10 points are generated.
+
+if nargin == 2, n = 10; end
+
+if n <= 1
+   y = a;
+   return
+end
+p = [0:n-2]/(n-1);
+r = (b/a).^p;
+y = [a*r, b];
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/show.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,9 @@
+function show(x)
+%SHOW   Display signs of matrix elements.
+%       SHOW(X) displays X in `FORMAT +' form, that is,
+%       with `+', `-' and  blank representing positive, negative
+%       and zero elements respectively.
+
+format +
+disp(x)
+format
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/signm.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,20 @@
+function [S, N] = signm(A)
+%SIGNM   Matrix sign decomposition.
+%        [S, N] = SIGNM(A) is the matrix sign decomposition A = S*N,
+%        computed via the Schur decomposition.
+%        S is the matrix sign function, sign(A).
+
+%        Reference:
+%        N. J. Higham, The matrix sign decomposition and its relation to the
+%        polar decomposition, Linear Algebra and Appl., 212/213:3-20, 1994.
+
+[Q, T] = schur(A,'complex');
+S = Q * matsignt(T) * Q';
+
+% Only problem with Schur method is possible nonzero imaginary part when
+% A is real.  Next line takes care of that.
+if ~any(imag(A)), S = real(S); end
+
+if nargout == 2
+   N = S*A;
+end
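+
+% Example (illustrative):
+%     A = randn(8);              % almost surely no eigenvalue on the imaginary axis
+%     [S, N] = signm(A);
+%     norm(S^2 - eye(8))         % S is involutory
+%     norm(A - S*N)              % A = S*N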
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/skewpart.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,8 @@
+function S = skewpart(A)
+%SKEWPART  Skew-symmetric (skew-Hermitian) part.
+%          SKEWPART(A) is the skew-symmetric (skew-Hermitian) part of A,
+%          (A - A')/2.
+%          It is the nearest skew-symmetric (skew-Hermitian) matrix to A in
+%          both the 2- and the Frobenius norms.
+
+S = (A - A')./2;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/sparsify.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,20 @@
+function A = sparsify(A, p)
+%SPARSIFY  Randomly set matrix elements to zero.
+%          S = SPARSIFY(A, P) is A with elements randomly set to zero
+%          (S = S' if A is square and A = A', i.e. symmetry is preserved).
+%          Each element has probability P of being zeroed.
+%          Thus on average 100*P percent of the elements of A will be zeroed.
+%          Default: P = 0.25.
+
+if nargin < 2, p = 0.25; end
+if p<0 | p>1, error('Second parameter must be between 0 and 1 inclusive.'), end
+
+[m,n] = size(A);
+
+if ~isequal(A,A')
+   A = A .* (rand(m,n) > p);           % Unsymmetric case
+else
+   d = diag(A) .* (rand(n,1) > p);     % Sparsify the diagonal separately.
+   A = triu(A,1) .* (rand(m,n) > p);   % Preserve symmetry in the off-diagonal.
+   A = A + A' + diag(d);
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/strassen.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,33 @@
+function C = strassen(A, B, nmin)
+%STRASSEN  Strassen's fast matrix multiplication algorithm.
+%          C = STRASSEN(A, B, NMIN), where A and B are matrices of dimension
+%          a power of 2, computes the product C = A*B.
+%          Strassen's algorithm is used recursively until dimension <= NMIN
+%          is reached, at which point standard multiplication is used.
+%          The default is NMIN = 8 (which minimizes the total number of
+%          operations).
+
+%          Reference:
+%          V. Strassen, Gaussian elimination is not optimal,
+%          Numer. Math., 13 (1969), pp. 354-356.
+
+if nargin < 3, nmin = 8; end
+
+n = length(A);
+if n ~= 2^( log2(n) )
+   error('The matrix dimension must be a power of 2.')
+end
+
+if n <= nmin
+   C = A*B;
+else
+   m = n/2; i = 1:m; j = m+1:n;
+   P1 = strassen( A(i,i)+A(j,j), B(i,i)+B(j,j), nmin);
+   P2 = strassen( A(j,i)+A(j,j), B(i,i), nmin);
+   P3 = strassen( A(i,i), B(i,j)-B(j,j), nmin);
+   P4 = strassen( A(j,j), B(j,i)-B(i,i), nmin);
+   P5 = strassen( A(i,i)+A(i,j), B(j,j), nmin);
+   P6 = strassen( A(j,i)-A(i,i), B(i,i)+B(i,j), nmin);
+   P7 = strassen( A(i,j)-A(j,j), B(j,i)+B(j,j), nmin);
+   C = [ P1+P4-P5+P7  P3+P5;  P2+P4  P1+P3-P2+P6 ];
+end
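
Usage sketch (illustrative note, not part of the committed file): for power-of-2
dimensions the recursion agrees with conventional multiplication to roundoff.

  A = rand(16); B = rand(16);
  norm(strassen(A, B, 4) - A*B, 1) / norm(A*B, 1)   % of order eps
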
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/strassenw.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,50 @@
+function C = strassenw(A, B, nmin)
+%STRASSENW Strassen's fast matrix multiplication algorithm (Winograd variant).
+%          C = STRASSENW(A, B, NMIN), where A and B are matrices of dimension
+%          a power of 2, computes the product C = A*B.
+%          Winograd's variant of Strassen's algorithm is
+%          used recursively until dimension <= NMIN is reached,
+%          at which point standard multiplication is used.
+%          The default is NMIN = 8 (which minimizes the total number of
+%          operations).
+
+%          Reference:
+%          N. J. Higham, Accuracy and Stability of Numerical Algorithms,
+%          Second edition, Society for Industrial and Applied Mathematics,
+%          Philadelphia, PA, 2002; chap. 23.
+
+if nargin < 3, nmin = 8; end
+
+n = length(A);
+if n ~= 2^( log2(n) )
+   error('The matrix dimension must be a power of 2.')
+end
+
+if n <= nmin
+   C = A*B;
+else
+   m = n/2; i = 1:m; j = m+1:n;
+
+   S1 = A(j,i) + A(j,j);
+   S2 = S1 - A(i,i);
+   S3 = A(i,i) - A(j,i);
+   S4 = A(i,j) - S2;
+   S5 = B(i,j) - B(i,i);
+   S6 = B(j,j) - S5;
+   S7 = B(j,j) - B(i,j);
+   S8 = S6 - B(j,i);
+
+   M1 = strassenw( S2, S6, nmin);
+   M2 = strassenw( A(i,i), B(i,i), nmin);
+   M3 = strassenw( A(i,j), B(j,i), nmin);
+   M4 = strassenw( S3, S7, nmin);
+   M5 = strassenw( S1, S5, nmin);
+   M6 = strassenw( S4, B(j,j), nmin);
+   M7 = strassenw( A(j,j), S8, nmin);
+
+   T1 = M1 + M2;
+   T2 = T1 + M4;
+
+   C = [ M2+M3 T1+M5+M6; T2-M7  T2+M5 ];
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/sub.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,17 @@
+function S = sub(A, i, j)
+%SUB     Principal submatrix.
+%        SUB(A,i,j) is A(i:j,i:j).
+%        SUB(A,i)  is the leading principal submatrix of order i,
+%        A(1:i,1:i), if i>0, and the trailing principal submatrix
+%        of order ABS(i) if i<0.
+
+if nargin == 2
+   if i >= 0
+      S = A(1:i, 1:i);
+   else
+      n = min(size(A));
+      S = A(n+i+1:n, n+i+1:n);
+   end
+else
+   S = A(i:j, i:j);
+end
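
Usage sketch (illustrative note, not part of the committed file):

  A = magic(5);
  isequal(sub(A,2),   A(1:2,1:2))   % leading principal submatrix
  isequal(sub(A,-2),  A(4:5,4:5))   % trailing principal submatrix
  isequal(sub(A,2,4), A(2:4,2:4))
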
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/symmpart.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,7 @@
+function S = symmpart(A)
+%SYMMPART  Symmetric (Hermitian) part.
+%          SYMMPART(A) is the symmetric (Hermitian) part of A, (A + A')/2.
+%          It is the nearest symmetric (Hermitian) matrix to A in both the
+%          2- and the Frobenius norms.
+
+S = (A + A')./2;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/trap2tri.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,54 @@
+function [Q, T] = trap2tri(L)
+%TRAP2TRI  Unitary reduction of trapezoidal matrix to triangular form.
+%          [Q, T] = TRAP2TRI(L), where L is an m-by-n lower trapezoidal
+%          matrix with m >= n, produces a unitary Q such that Q*L = [T; 0],
+%          where T is n-by-n and lower triangular.
+%          Q is a product of Householder transformations.
+
+%          Called by COD.
+%
+%          Reference:
+%          G. H. Golub and C. F. Van Loan, Matrix Computations, third
+%          edition, Johns Hopkins University Press, Baltimore, Maryland,
+%          1996; P5.2.5.
+
+[n, r] = size(L);
+
+if r > n  | ~isequal(L,tril(L))
+   error('Matrix must be lower trapezoidal and m-by-n with m >= n.')
+end
+
+Q = eye(n);  % To hold product of Householder transformations.
+
+if r ~= n
+
+   % Reduce nxr L =   r  [L1]  to lower triangular form: QL = [T].
+   %                 n-r [L2]                                 [0]
+
+   for j=r:-1:1
+       % x is the vector to be reduced, which we overwrite with the H.T. vector.
+       x = L(j:n,j);
+       x(2:r-j+1) = zeros(r-j,1);  % These elts of column left unchanged.
+       [v,beta,s] = gallery('house',x);
+
+       % Nothing to do if x is zero (or x=a*e_1, but we don't check for that).
+       if s ~= 0
+
+          %  Implicitly apply H.T. to pivot column.
+          % L(r+1:n,j) = zeros(n-r,1); % We throw these elts away at the end.
+          L(j,j) = s;
+
+          % Apply H.T. to rest of matrix.
+          if j > 1
+             y = v'*L(j:n, 1:j-1);
+             L(j:n, 1:j-1) = L(j:n, 1:j-1) - beta*v*y;
+          end
+
+          % Update H.T. product.
+          y = v'*Q(j:n,:);
+          Q(j:n,:) = Q(j:n,:) - beta*v*y;
+       end
+   end
+end
+
+T = L(1:r,:);   % Rows r+1:n have been zeroed out.
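
Usage sketch (illustrative note, not part of the committed file): reduce a 6-by-4
lower trapezoidal matrix and verify that Q is unitary and Q*L = [T; 0].

  L = tril(randn(6,4));
  [Q, T] = trap2tri(L);
  norm(Q*L - [T; zeros(2,4)], 1)    % of order eps
  norm(Q'*Q - eye(6), 1)            % of order eps: Q is unitary
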
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/treshape.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,51 @@
+function T = treshape(x,unit)
+%TRESHAPE  Reshape vector to or from (unit) triangular matrix.
+%          TRESHAPE(X) returns a square upper triangular matrix whose
+%          elements are taken columnwise from the matrix X.
+%          TRESHAPE(X,1) returns a UNIT upper triangular matrix, and
+%          the 1s should not be specified in X.
+%          An error results if X does not have a number of elements of the form
+%          N*(N+1)/2 (or N less than this in the unit triangular case).
+%          X = TRESHAPE(R,2) is the inverse operation to R = TRESHAPE(X).
+%          X = TRESHAPE(R,3) is the inverse operation to R = TRESHAPE(X,1).
+
+if nargin == 1, unit = 0; end
+
+[p,q] = size(x);
+
+if unit < 2   % Convert vector x to upper triangular R.
+
+    m = p*q;
+    n = round( (-1 + sqrt(1+8*m))/2 );
+    if n*(n+1)/2 ~= m
+          error('Matrix must have a ''triangular'' number of elements.')
+    end
+
+    if unit == 1
+       n = n+1;
+    end
+
+    x = x(:);
+    T = unit*eye(n);
+
+    i = 1;
+    for j = 1+unit:n
+        T(1:j-unit,j) = x(i:i+j-1-unit);
+        i = i+j-unit;
+    end
+
+elseif unit >= 2   % Convert upper triangular R to vector x.
+
+   T = x;
+   if p ~= q, error('Must pass square matrix'), end
+   unit = unit - 2;
+   n = p*(p+1)/2 - unit*p;
+   x = zeros(n,1);
+   i = 1;
+   for j = 1+unit:p
+       x(i:i+j-1-unit) = T(1:j-unit,j);
+       i = i+j-unit;
+   end
+   T = x;
+
+end
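
Usage sketch (illustrative note, not part of the committed file): a round trip
through the triangular reshape and back.

  R = treshape(1:6);                % 3-by-3 upper triangular
  isequal(treshape(R,2), (1:6)')    % true: inverse operation recovers the vector
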
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/vand.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,29 @@
+function V = vand(m, p)
+%VAND   Vandermonde matrix.
+%       V = VAND(P), where P is a vector, produces the (primal)
+%       Vandermonde matrix based on the points P, i.e. V(i,j) = P(j)^(i-1).
+%       VAND(M,P) is a rectangular version of VAND(P) with M rows.
+%       Special case: If P is a scalar then P equally spaced points on [0,1]
+%                     are used.
+
+%       Reference:
+%       N. J. Higham, Accuracy and Stability of Numerical Algorithms,
+%       Second edition, Society for Industrial and Applied Mathematics,
+%       Philadelphia, PA, 2002; chap. 22.
+
+if nargin == 1, p = m; end
+n = length(p);
+
+%  Handle scalar p.
+if n == 1
+   n = p;
+   p = linspace(0,1,n);
+end
+
+if nargin == 1, m = n; end
+
+p = p(:).';                    % Ensure p is a row vector.
+V = ones(m,n);
+for i=2:m
+    V(i,:) = p.*V(i-1,:);
+end
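
Usage sketch (illustrative note, not part of the committed file): V(i,j) = P(j)^(i-1).

  vand([1 2 3])       % returns [1 1 1; 1 2 3; 1 4 9]
  vand(4, [1 2 3])    % same points, rectangular version with 4 rows
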
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matrixcomp/vecperm.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,27 @@
+function P = vecperm(m, n)
+%VECPERM    Vec-permutation matrix.
+%           VECPERM(M, N) is the vec-permutation matrix, an MN-by-MN
+%           permutation matrix P with the property that if A is M-by-N then
+%           vec(A) = P*vec(A').
+%           If N is omitted, it defaults to M.
+
+%   P is formed by taking every n'th row from EYE(M*N), starting with
+%   the first and working down - see p. 277 of the reference.
+
+%   Reference:
+%   H. V. Henderson and S. R. Searle, The vec-permutation matrix,
+%   the vec operator and Kronecker products: A review, Linear and
+%   Multilinear Algebra, 9 (1981), pp. 271-288.
+
+if nargin == 1, n = m; end
+
+P = zeros(m*n);
+I = eye(m*n);
+
+k = 1;
+for i=1:n
+    for j=i:n:m*n
+        P(k,:) = I(j,:);
+        k = k+1;
+    end
+end
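
Usage sketch (illustrative note, not part of the committed file): verify the
defining property vec(A) = P*vec(A') for a 3-by-2 matrix.

  A = reshape(1:6, 3, 2);
  P = vecperm(3, 2);
  isequal(A(:), P*reshape(A.', [], 1))   % true
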
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/Contents.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,46 @@
+% Matrix Function Toolbox (MFT).
+% Version 1.0             6-Mar-2008
+% Copyright (c) 2008 by N. J. Higham
+%
+%   arnoldi               - Arnoldi iteration
+%   ascent_seq            - Ascent sequence for square (singular) matrix.
+%   cosm                  - Matrix cosine by double angle algorithm.
+%   cosm_pade             - Evaluate Pade approximation to the matrix cosine.
+%   cosmsinm              - Matrix cosine and sine by double angle algorithm.
+%   cosmsinm_pade         - Evaluate Pade approximations to matrix cosine and sine.
+%   expm_cond             - Relative condition number of matrix exponential.
+%   expm_frechet_pade     - Frechet derivative of matrix exponential via Pade approx.
+%   expm_frechet_quad     - Frechet derivative of matrix exponential via quadrature.
+%   fab_arnoldi           - f(A)*b approximated by Arnoldi method.
+%   funm_condest1         - Estimate of 1-norm condition number of matrix function.
+%   funm_condest_fro      - Estimate of Frobenius norm condition number of matrix function.
+%   funm_ev               - Evaluate general matrix function via eigensystem.
+%   funm_simple           - Simplified Schur-Parlett method for function of a matrix.
+%   logm_cond             - Relative condition number of matrix logarithm.
+%   logm_frechet_pade     - Frechet derivative of matrix logarithm via Pade approx.
+%   logm_iss              - Matrix logarithm by inverse scaling and squaring method.
+%   logm_pade_pf          - Evaluate Pade approximant to matrix log by partial fractions.
+%   mft_test              - Test the Matrix Function Toolbox.
+%   mft_tolerance         - Convergence tolerance for matrix iterations.
+%   polar_newton          - Polar decomposition by scaled Newton iteration.
+%   polar_svd             - Canonical polar decomposition via singular value decomposition.
+%   polyvalm_ps           - Evaluate polynomial at matrix argument by Paterson-Stockmeyer alg.
+%   power_binary          - Power of matrix by binary powering (repeated squaring).
+%   quasitriang_struct    - Block structure of upper quasitriangular matrix.
+%   readme                - Welcome to the Matrix Function Toolbox.
+%   riccati_xaxb          - Solve Riccati equation XAX = B in positive definite matrices.
+%   rootpm_newton         - Coupled Newton iteration for matrix pth root.
+%   rootpm_real           - Pth root of real matrix via real Schur form.
+%   rootpm_schur_newton   - Matrix pth root by Schur-Newton method.
+%   rootpm_sign           - Matrix Pth root via matrix sign function.
+%   signm                 - Matrix sign decomposition.
+%   signm_newton          - Matrix sign function by Newton iteration.
+%   sqrtm_db              - Matrix square root by Denman-Beavers iteration.
+%   sqrtm_dbp             - Matrix square root by product form of Denman-Beavers iteration.
+%   sqrtm_newton          - Matrix square root by Newton iteration (unstable).
+%   sqrtm_newton_full     - Matrix square root by full Newton method.
+%   sqrtm_pd              - Square root of positive definite matrix via polar decomposition.
+%   sqrtm_pulay           - Matrix square root by Pulay iteration.
+%   sqrtm_real            - Square root of real matrix by real Schur method.
+%   sqrtm_triang_min_norm - Estimated min norm square root of triangular matrix.
+%   sylvsol               - Solve Sylvester equation.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/arnoldi.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,28 @@
+function [Q,H] = arnoldi(A,q1,m)
+%ARNOLDI    Arnoldi iteration
+%   [Q,H] = ARNOLDI(A,q1,M) carries out M iterations of the
+%   Arnoldi iteration with N-by-N matrix A and starting vector q1
+%   (which need not have unit 2-norm).  For M < N it produces
+%   an N-by-(M+1) matrix Q with orthonormal columns and an
+%   (M+1)-by-M upper Hessenberg matrix H such that
+%   A*Q(:,1:M) = Q(:,1:M)*H(1:M,1:M) + H(M+1,M)*Q(:,M+1)*E_M',
+%   where E_M is the M'th column of the M-by-M identity matrix.
+
+n = length(A);
+if nargin < 3, m = n; end
+q1 = q1/norm(q1);
+Q = zeros(n,m); Q(:,1) = q1;
+H = zeros(min(m+1,n),m);  % H is (M+1)-by-M for M < N, as documented.
+
+for k=1:m
+    z = A*Q(:,k);
+    for i=1:k
+        H(i,k) = Q(:,i)'*z;
+        z = z - H(i,k)*Q(:,i);
+    end
+    if k < n
+       H(k+1,k) = norm(z);
+       if H(k+1,k) == 0, return, end
+       Q(:,k+1) = z/H(k+1,k);
+   end
+end
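
Usage sketch (illustrative note, not part of the committed file): check the Arnoldi
relation for M < N, assuming H is returned (M+1)-by-M as documented.

  A = randn(20); m = 8;
  [Q, H] = arnoldi(A, randn(20,1), m);
  norm(A*Q(:,1:m) - Q(:,1:m+1)*H(1:m+1,1:m), 1) / norm(A, 1)   % of order eps
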
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/ascent_seq.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,25 @@
+function [d,a] = ascent_seq(A)
+%ASCENT_SEQ   Ascent sequence for square (singular) matrix.
+%   [d,a] = ASCENT_SEQ(A) returns the symbolically computed dimensions
+%   a(i) = dim(null(A^(i-1))) and the ascent sequence d = DIFF(a).
+%   A has a square root if in the ascent sequence no two terms are
+%   the same odd integer.
+%   This function is intended for singular matrices of small
+%   dimension with exactly known entries.
+%   It requires the Symbolic Math Toolbox.
+
+if isempty(ver('symbolic'))
+   error('The Symbolic Math Toolbox is required.')
+end
+
+n = length(A);
+a = zeros(n,1);
+A = sym(A);
+X = sym(eye(n));
+for i = 2:n+1
+    X = X*A;
+    N = null(X);
+    if isempty(N), break, end
+    a(i) = rank(N);
+end
+d = diff(a);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/cosm.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,46 @@
+function [C,cost,s] = cosm(A)
+%COSM    Matrix cosine by double angle algorithm.
+%   [C,COST,s] = COSM(A) computes the cosine of the matrix A using the
+%   double angle algorithm with Pade approximation.  The total number of
+%   matrix multiplications and linear systems solved is returned as
+%   COST and s specifies the amount of scaling (A is scaled to 2^(-s)*A).
+
+n = length(A);
+% theta_m for m=2:2:20.
+theta = [0.00613443965526 0.11110098037055  0.43324697422211 0.98367255274344 ...
+           1.72463663220280 2.61357494421368 3.61521023400301 4.70271938553349 ...
+           5.85623410320942 7.06109053959248];
+s = 0;
+
+B = A^2;
+normA2 = sqrt(norm(B,inf));
+d = [2 4 6 8 12 16 20];
+for i = d(1:6)
+    if normA2 < theta(i/2)
+        m = i;
+        cost = (m<=8)*(m/2+1) + (m==12)*6;
+        C = cosm_pade(B,m);
+        s = m;
+        return
+    end
+end
+
+if normA2 > theta(20/2)
+    s = ceil( log2( normA2/theta(20/2) ) );
+    B = B/(4^s);
+    normA2 = normA2/(2^s);
+end
+
+if normA2 > 2*theta(12/2)
+    m = 20; cost = 8;
+elseif normA2 > theta(16/2)
+    B = B/4; m = 12; cost = 6; s = s+1;
+else
+    m = 16; cost = 7;
+end
+
+C = cosm_pade(B,m);
+for i = 1:s
+    C = 2*(C^2) - eye(n);
+end
+cost = cost+s;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/cosm_pade.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,122 @@
+function C = cosm_pade(A,m,sq)
+%COSM_PADE  Evaluate Pade approximation to the matrix cosine.
+%   C = COSM_PADE(A,M,SQ) approximates the matrix cosine using the
+%   Mth order diagonal Pade approximation.
+%   If SQ = 1 (default) then C is an approximation to cos(sqrt(A));
+%   otherwise C is an approximation to cos(A).
+
+if nargin < 3
+    sq = 1;
+end
+if sq == 1
+    A2 = A;
+else
+    A2 = A^2;
+end
+
+n = length(A2);
+I = eye(n);
+if m == 2
+    X2 = A2;
+    P = I - (5/12)*X2;
+    Q = I + (1/12)*X2;
+elseif m == 4
+    X2 = A2; X4 = X2^2;
+    P = I - (115/252)*X2 + (313/15120)*X4;
+    Q = I + (11/252)*X2 + (13/15120)*X4;
+elseif m == 6
+    X2 = A2; X4 = X2^2; X6 = X4*X2;
+    P = I - (3665/7788)*X2 + (711/25960)*X4 - (2923/7850304)*X6;
+    Q = I + (229/7788)*X2 + (1/2360)*X4 + (127/39251520)*X6;
+elseif m == 8
+    X2 = A2; X4 = X2^2; X6 = X4*X2; X8 = X6*X2;
+    P = I - (260735/545628)*X2 + (4375409/141863280)*X4  ...
+          -  (7696415/13108167072)*X6 + (80737373/23594700729600)*X8 ;
+    Q = I + (12079/545628)*X2 + (34709/141863280)*X4 ...
+          + (109247/65540835360)*X6 + (11321/1814976979200)*X8 ;
+elseif m == 12
+    X1 = A2; X2 = X1*X1; X3 = X2*X1;
+    p = [1,-220574348151635/454605030049116,20837207639809/606140040065488,...
+            -199961484798769/241849875986129712,38062401688454831/...
+            4440363723105341512320,-116112688080827/2894459315802000393216,...
+            151259208063389819/2133505961677654489839513600];
+    q = [1,6728166872923/454605030049116,66817219029/606140040065488,...
+            650617920073/1209249379930648560,8225608067111/4440363723105341512320,...
+            2848116281867/651253346055450088473600,12170851069679/...
+            2133505961677654489839513600];
+    P = X3*((p(7)*eye(n))*X3+(p(4)*eye(n)+p(5)*X1+p(6)*X2)*eye(n))+...
+            (p(1)*eye(n)+p(2)*X1+p(3)*X2);
+    Q = X3*((q(7)*eye(n))*X3+(q(4)*eye(n)+q(5)*X1+q(6)*X2)*eye(n))+...
+            (q(1)*eye(n)+q(2)*X1+q(3)*X2);
+elseif m == 16
+    X1 = A2; X2 = X1*X1; X3 = X2*X1; X4 = X2*X2;
+    p = [1,-1126682407530029115789472765/2304577612359442026681336372,...
+            145053661043845297596963732421/4009965045505429126425525287280,...
+            -1534672316720770887322573595/1603986018202171650570210114912,...
+            718202654899849477670594159641/60630671488042088391553942343673600,...
+            -128936233968950140829066659951/1673406533069961639606888808685391360,...
+            6524116556754642812271854422129/23929713422900451446378509964201096448000,...
+            -382586638331055978467487427009/...
+            763836452458982410168402038057298998620160,...
+            88555612088268453352055067469523/...
+            233733954452448617511531023645533493577768960000];
+    q = [1,25606398649691897551195421/2304577612359442026681336372,...
+            22668270274336502918805611/364542276864129920584138662480,...
+            1853378279158412863783499/8019930091010858252851050574560,...
+            38226389122327179481602241/60630671488042088391553942343673600,...
+            995615371594253927197913/760639333213618927094040367584268800,...
+            225870994754204367988837/110275177064057379937228156517055744000,...
+            42889724495628101076622829/19095911311474560254210050951432474965504000,...
+            2603898999593850290644763/1931685573987178657120091104508541269237760000];
+    P = X4*((p(9)*eye(n))*X4+(p(5)*eye(n)+p(6)*X1+p(7)*X2+p(8)*X3)*eye(n))+...
+        (p(1)*eye(n)+p(2)*X1+p(3)*X2+p(4)*X3);
+    Q = X4*((q(9)*eye(n))*X4+(q(5)*eye(n)+q(6)*X1+q(7)*X2+q(8)*X3)*eye(n))+...
+        (q(1)*eye(n)+q(2)*X1+q(3)*X2+q(4)*X3);
+elseif m == 20
+    X1 = A2; X2 = X1*X1; X3 = X2*X1; X4 = X2*X2; X5 = X4*X1;
+    p = [1,-18866133841442352341137832915472113127673/...
+            38415527280635118612047973206722428679860,...
+            917980006162069077942240197016800349995791/...
+            24637158162647322736526766816577984260016880,...
+            -4028339250935885155796261896908967142863591/...
+            3880352410616953331002965773611032520952658600,...
+            3925400573997340625949450726927185904756763/...
+            279385373564420639832213535699994341508591419200,...
+            -804035081520215224783821741744290679884325097/...
+            7621632990837395054622785253895845636354373915776000,...
+            19795406323827219175300218252334434555489703/...
+            42100448901768467920773480450091337800814636868096000,...
+            -118523567829079039162888326742509818128969627/...
+            92831489828399471765305524392451399850796274294151680000,...
+            6151694105279089780298999575203793198700323571/...
+            2954269332298984789459083008265373348851740633137083064320000,...
+            -438673281197605688527510681818034658057709668453/...
+            232382825678638143538851469430154267620677918202562953839411200000,...
+            31699084606166905465868332652040368902350407479/...
+            42944346185412328925979751550692508656301279283833633869523189760000];
+    q = [1,341629798875206964886153687889101212257/...
+            38415527280635118612047973206722428679860,...
+            981038224413663993784862242489461225499/...
+            24637158162647322736526766816577984260016880,...
+            461441299765418864926911910257258436499/...
+            3880352410616953331002965773611032520952658600,...
+            73764947345500690357380325430051300659/...
+            279385373564420639832213535699994341508591419200,...
+            3496016725011957790816142668159659762953/...
+            7621632990837395054622785253895845636354373915776000,...
+            562876526229442596170390468670872658343/...
+            884109426937137826336243089451918093817107374230016000,...
+            65309174262483666596220950666851746623/...
+            92831489828399471765305524392451399850796274294151680000,...
+            1768262649350763278383509302712194678051/...
+            2954269332298984789459083008265373348851740633137083064320000,...
+            83263779334467686055536878437959026858717/...
+            232382825678638143538851469430154267620677918202562953839411200000,...
+            24953265550459114615706087077367245444511/...
+            214721730927061644629898757753462543281506396419168169347615948800000];
+    P = X5*((p(11)*eye(n))*X5+(p(6)*eye(n)+p(7)*X1+p(8)*X2+p(9)*X3+p(10)*X4)*eye(n))...
+        +p(1)*eye(n)+p(2)*X1+p(3)*X2+p(4)*X3+p(5)*X4;
+    Q = X5*((q(11)*eye(n))*X5+(q(6)*eye(n)+q(7)*X1+q(8)*X2+q(9)*X3+q(10)*X4)*eye(n))...
+        +(q(1)*eye(n)+q(2)*X1+q(3)*X2+q(4)*X3+q(5)*X4);
+end
+C = Q\P;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/cosmsinm.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,46 @@
+function [C,S,cost,s] = cosmsinm(A)
+%COSMSINM  Matrix cosine and sine by double angle algorithm.
+%   [C,S,COST,s] = COSMSINM(A) computes the cosine C and the sine S
+%   of the matrix A using the double angle algorithm with Pade
+%   approximation.  The total number of matrix multiplications and
+%   linear systems solved is returned as COST and s specifies the
+%   amount of scaling (A is scaled to 2^(-s)*A).
+
+n = length(A);
+theta = [0.00613443965526 0.11110098037055  0.43324697422211 0.98367255274344 ...
+         1.72463663220280 2.61357494421368 3.61521023400301 4.70271938553349 ...
+         5.85623410320942 7.06109053959248 0 9.58399601173102];
+
+normA = norm(A,inf);
+d = [2 4 6 8 10 12 14 16 20];
+for i = d(1:8)
+    if normA <= theta(i/2)
+        m = i;
+        cost = m/2+3;
+        [C,S] = cosmsinm_pade(A,m);
+        s = m;
+        return
+    end
+end
+
+s = 0;
+if normA > theta(20/2)
+    s = max( ceil( log2( normA/theta(20/2) ) ), 1 );
+    A = A/(2^s);
+    normA = normA/(2^s);
+end
+
+if normA > 2*theta(12/2)
+    m = 20; cost = 12;
+elseif normA > theta(16/2)
+    A = A/2; m = 12; cost = 9; s = s+1;
+else
+    m = 16; cost = 11;
+end
+
+[C,S] = cosmsinm_pade(A,m);
+for i = 1:s
+    S = 2*S*C;
+    C = 2*(C^2) - eye(n);
+end
+cost = cost + 2*s;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/cosmsinm_pade.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,231 @@
+function [C,S] = cosmsinm_pade(A,m)
+%COSMSINM_PADE Evaluate Pade approximations to matrix cosine and sine.
+%   [C,S] = COSMSINM_PADE(A,M) approximates the matrix cosine
+%   C = cos(A) and the matrix sine S = sin(A) using the [M/M] and
+%   [M+1/M+1] Pade approximants, respectively, where M is even.
+
+n = length(A);
+I = eye(n);
+if m == 2
+    X2 = A^2;
+    p = I - (5/12)*X2;
+    q = I + (1/12)*X2;
+    p2 = A*(-7/60*X2+I);
+    q2 = I+1/20*X2;
+elseif m == 4
+    X2 = A^2; X4 = X2^2;
+    p = I - (115/252)*X2 + (313/15120)*X4;
+    q = I + (11/252)*X2 + (13/15120)*X4;
+    p2 = A*(551/166320*X4-53/396*X2+I);
+    q2 = I+13/396*X2+5/11088*X4;
+elseif m == 6
+    X2 = A^2; X4 = X2^2; X6 = X4*X2;
+    p = I - (3665/7788)*X2 + (711/25960)*X4 - (2923/7850304)*X6;
+    q = I + (229/7788)*X2 + (1/2360)*X4 + (127/39251520)*X6;
+    p2 = A*(-479249/11511339840*X6+34911/7613320*X4-29593/207636*X2+I);
+    q2 = I+1671/69212*X2+97/351384*X4+2623/1644477120*X6;
+elseif m == 8
+    X2 = A^2; X4 = X2^2; X6 = X4*X2; X8 = X6*X2;
+    p = I - (260735/545628)*X2 + (4375409/141863280)*X4  ...
+          -  (7696415/13108167072)*X6 + (80737373/23594700729600)*X8 ;
+    q = I + (12079/545628)*X2 + (34709/141863280)*X4 ...
+          + (109247/65540835360)*X6 + (11321/1814976979200)*X8 ;
+    p2 = A*(4585922449/15605159573203200*X8-269197963/3940696861920*X6...
+          + 38518909/7217393520*X4-53272705/360869676*X2+I);
+    q2 = I+2290747/120289892*X2+1281433/7217393520*X4+560401/...
+            562956694560*X6+1029037/346781323848960*X8;
+elseif m == 10
+    X2 = A^2; X4 = X2^2; X6 = X4*X2; X8 = X6*X2; X10 = X8*X2;
+    p = I-213692663231/11226787947026555904*X10+2677576097699/...
+        425257119205551360*X8-18894084119/25961973089472*X6+...
+        53471234645/1622623318092*X4-5114526085/10605381164*X2;
+    q = I+2045322787/280669698675663897600*X10+1469628299/425257119205551360*...
+        X8+117715523/129809865447360*X6+256513745/1622623318092*X4+...
+        188164497/10605381164*X2;
+    p2 = A*(-481959816488503/363275871831577908403200*X10+23704595077729/...
+        42339845201815607040*X8-2933434889971/33603051747472704*X6+...
+        3605886663403/617703157122660*X4-109061004303/722459832892*X2+I);
+    q2 = I+34046903537/2167379498676*X2+1679739379/13726736824948*X4+...
+        101555058991/168015258737363520*X6+3924840709/2016183104848362240*X8+...
+        37291724011/11008359752472057830400*X10;
+elseif m == 12
+    X2 = A^2; X4 = X2^2; X6 = X4*X2; X8 = X6*X2; X10 = X8*X2; X12 = X10*X2;
+    p = I-220574348151635/454605030049116*X2+20837207639809/606140040065488*X4-...
+        199961484798769/241849875986129712*X6+38062401688454831/...
+        4440363723105341512320*X8-116112688080827/2894459315802000393216*X10+...
+        151259208063389819/2133505961677654489839513600*X12;
+    q = I+6728166872923/454605030049116*X2+66817219029/606140040065488*X4+...
+        650617920073/1209249379930648560*X6+8225608067111/...
+        4440363723105341512320*X8+2848116281867/651253346055450088473600*X10+...
+        12170851069679/2133505961677654489839513600*X12;
+    p2 = A*(29255060349997508141/7036046883826051377386580480000*X12-...
+        2614538885979566893/906830082707196962219136000*X10+366447091815846989/...
+        467616734780258173795200*X8-404844663233524697/...
+        3987959482775106258000*X6+235300071854130877/37980566502620059600*X4-...
+        189866343920582047/1238496733781088900*X2+I);
+    q2 = I+5516592792088701/412832244593696300*X2+10149321432971387/...
+        113941699507860178800*X4+307158833383841/797591896555021251600*X6+...
+        234442582333727/202056613793938717072000*X8+5257566411989069/...
+        2225855657554028907265152000*X10+547584669013343/...
+        209866390569379868399285760000*X12;
+elseif m == 14
+    X2 = A^2; X4 = X2^2; X6 = X4*X2; X8 = X6*X2; X10 = X8*X2; X12 = X10*X2; ...
+        X14 = X12*X2;
+    p = I-9494011832127130075/19482625349840916996*X2+310367939726544641761/...
+        8767181407428412648200*X4-290442017949426019417/...
+    322632275793365585453760*X6+422064995289725174621/...
+        40651666749964063767173760*X8-10143257867238927098753/...
+        169923967014849786546786316800*X10+569672335133865317479319/...
+        3379787703925362254415579841152000*X12-42948386132766526786783/...
+        227121733703784343496726965325414400*X14;
+    q = I+247300842793328423/19482625349840916996*X2+711404045526343261/...
+        8767181407428412648200*X4+184363345338626039/...
+        537720459655609309089600*X6+42648708634036181/...
+        40651666749964063767173760*X8+2008837845325770881/...
+        849619835074248932733931584000*X10+1790291961275627717/...
+        482826814846480322059368548736000*X12+1651330565298296101/...
+        516185758417691689765288557557760000*X14;
+    p2 = A*(-91320110661332736729305509/...
+        9540763134798175069489060319738357760000*X14+...
+        261319443906679357131557951/25722645706563707295191093999294592000*X12...
+        -25455044236652128985986613/5785570334359808208545005397952000*X10+...
+        145834078116038838318251/150274554139215797624545594752*X8-...
+        264802008995134681176249/2352661207225153389183707200*X6+...
+        1466350032750183270853069/226863759268139791099857480*X4-...
+        390816793511920445322905/2520708436312664345553972*X2+I);
+    q2 = I+9767093068952315200919/840236145437554781851324*X2+...
+        3067578723707839145789/45372751853627958219971496*X4+...
+        7023288908523954335543/27223651112176774931982897600*X6+...
+        2065373697229749176197/2922005219373640509366164342400*X8+...
+        106107024079408237517/75137277069607898812272797376000*X10+...
+        1279669772129105715749/659555018117018135774130615366528000*X12+...
+        70807382924337520289663/48975917425297298690043842974656903168000*X14;
+elseif m == 16
+    X1 = A^2; X2 = X1*X1; X3 = X2*X1; X4 = X2*X2;
+    p = [1,-1126682407530029115789472765/2304577612359442026681336372,...
+            145053661043845297596963732421/4009965045505429126425525287280,...
+            -1534672316720770887322573595/1603986018202171650570210114912,...
+            718202654899849477670594159641/60630671488042088391553942343673600,...
+            -128936233968950140829066659951/1673406533069961639606888808685391360,...
+            6524116556754642812271854422129/23929713422900451446378509964201096448000,...
+            -382586638331055978467487427009/...
+            763836452458982410168402038057298998620160,...
+            88555612088268453352055067469523/...
+            233733954452448617511531023645533493577768960000];
+    q = [1,25606398649691897551195421/2304577612359442026681336372,...
+            22668270274336502918805611/364542276864129920584138662480,...
+            1853378279158412863783499/8019930091010858252851050574560,...
+            38226389122327179481602241/60630671488042088391553942343673600,...
+            995615371594253927197913/760639333213618927094040367584268800,...
+            225870994754204367988837/110275177064057379937228156517055744000,...
+            42889724495628101076622829/19095911311474560254210050951432474965504000,...
+            2603898999593850290644763/1931685573987178657120091104508541269237760000];
+    p2 = fliplr([8061385294694990486176108590199657/...
+            477924965253252594838839715519805972908219146240000,...
+            -4615365296919423054990558658047229/...
+            177615978658790247602767966416950841324802560000,...
+            99552990732049352416683777921586343/...
+            5920532621959674920092265547231694710826752000,...
+            -2609410230536249996829038272149121/...
+            450025282909674287024343687080548396992000,...
+            307444630821656723644151024596157/...
+            272742595702832901226874961866999028480,...
+            -1685658558102333722460472837754189/...
+            13889669225607231080998261946930506080,...
+            228262816046878642628376219569959/34211007944845396751227246174705680,...
+            -2876333034231579924039546769067/18393015024110428360874863534788,1]);
+    q2 = fliplr([1604612285445093/2658455991569831745807614120560689152,...
+            2976480701836195/2596148429267413814265248164610048,...
+            5974590438482987/5070602400912917605986812821504,520154651818787/...
+        618970019642690137449562112,4332878172069797/9671406556917033397649408,...
+            3408424760479185/18889465931478580854784,7823297585492293/...
+            147573952589676412928,1482203634412391/144115188075855872,1]);
+    p = X4*((p(9)*eye(n))*X4+(p(5)*eye(n)+p(6)*X1+p(7)*X2+p(8)*X3)*eye(n))+...
+        (p(1)*eye(n)+p(2)*X1+p(3)*X2+p(4)*X3);
+    q = X4*((q(9)*eye(n))*X4+(q(5)*eye(n)+q(6)*X1+q(7)*X2+q(8)*X3)*eye(n))+...
+        (q(1)*eye(n)+q(2)*X1+q(3)*X2+q(4)*X3);
+    p2 = A*(X4*((p2(9)*eye(n))*X4+(p2(5)*eye(n)+p2(6)*X1+p2(7)*X2+p2(8)*X3)*eye(n))+...
+        (p2(1)*eye(n)+p2(2)*X1+p2(3)*X2+p2(4)*X3));
+    q2 = X4*((q2(9)*eye(n))*X4+(q2(5)*eye(n)+q2(6)*X1+q2(7)*X2+q2(8)*X3)*eye(n))+...
+        (q2(1)*eye(n)+q2(2)*X1+q2(3)*X2+q2(4)*X3);
+elseif m == 20
+    X1 = A^2; X2 = X1*X1; X3 = X2*X1; X4 = X2*X2; X5 = X4*X1;
+    p = [1,-18866133841442352341137832915472113127673/...
+            38415527280635118612047973206722428679860,...
+            917980006162069077942240197016800349995791/...
+            24637158162647322736526766816577984260016880,...
+            -4028339250935885155796261896908967142863591/...
+            3880352410616953331002965773611032520952658600,...
+            3925400573997340625949450726927185904756763/...
+            279385373564420639832213535699994341508591419200,...
+            -804035081520215224783821741744290679884325097/...
+            7621632990837395054622785253895845636354373915776000,...
+            19795406323827219175300218252334434555489703/...
+            42100448901768467920773480450091337800814636868096000,...
+            -118523567829079039162888326742509818128969627/...
+            92831489828399471765305524392451399850796274294151680000,...
+            6151694105279089780298999575203793198700323571/...
+            2954269332298984789459083008265373348851740633137083064320000,...
+            -438673281197605688527510681818034658057709668453/...
+            232382825678638143538851469430154267620677918202562953839411200000,...
+            31699084606166905465868332652040368902350407479/...
+            42944346185412328925979751550692508656301279283833633869523189760000];
+    q = [1,341629798875206964886153687889101212257/...
+            38415527280635118612047973206722428679860,...
+            981038224413663993784862242489461225499/...
+            24637158162647322736526766816577984260016880,...
+            461441299765418864926911910257258436499/...
+            3880352410616953331002965773611032520952658600,...
+            73764947345500690357380325430051300659/...
+            279385373564420639832213535699994341508591419200,...
+            3496016725011957790816142668159659762953/...
+            7621632990837395054622785253895845636354373915776000,...
+            562876526229442596170390468670872658343/...
+            884109426937137826336243089451918093817107374230016000,...
+            65309174262483666596220950666851746623/...
+            92831489828399471765305524392451399850796274294151680000,...
+            1768262649350763278383509302712194678051/...
+            2954269332298984789459083008265373348851740633137083064320000,...
+            83263779334467686055536878437959026858717/...
+            232382825678638143538851469430154267620677918202562953839411200000,...
+            24953265550459114615706087077367245444511/...
+            214721730927061644629898757753462543281506396419168169347615948800000];
+    p2 = fliplr([504755453995739302254967861091843658659225385703072439/...
+            19060545079818588288159374768330241280586535602550332513982470267073331200000,...
+            -8739283052502581091809652173945956169815253539783369/...
+            114148670977473878836743171447659847170838038103667100934138640957440000,...
+            7098206776968855944478646605493639889495273479386941/...
+            73428969634632319719542390989722708706387042054990532764650587750400,...
+            -113535124075801033808719602712883055821671599003497/...
+            1648104073175508145683904128728603495708248986725888205771264000,...
+            159444168407032138165237007441577372335666527110947/...
+            5304242994128072193005668460275965273543789842336191926620160,...
+            -250413931005302347677105318749990589129438025771967/...
+            30554395127465853646346016476244039594146254852167004185600,...
+            89477294557053179299513803506788857306761134861/...
+            65142408168740093907440765129293961270139550682600640,...
+            -12973367660334266409984992764341733133099094183655/...
+            96446954316495750146294243927538003769401056982850392,...
+            2886168459322903018906421202468694698344454148801/...
+            413757847775614543742146048595186631357361891818320,...
+            -755789275315695418647501273540203715920168810921/...
+            4774129012795552427793992868405999592584944905596,1]);
+    q2 = fliplr([2290207460112795/44601490397061246283071436545296723011960832,...
+            3841593531710827/21778071482940061661655974875633165533184,...
+            3463624789758205/10633823966279326983230456482242756608,...
+            8728650941242757/20769187434139310514121985316880384,4208410993374357/...
+            10141204801825835211973625643008,1606756025554677/...
+            4951760157141521099596496896,7804153743481773/38685626227668133590597632,...
+            1845833986288687/18889465931478580854784,1293253388138245/...
+            36893488147419103232,2408831652010703/288230376151711744,1]);
+    p = X5*((p(11)*eye(n))*X5+(p(6)*eye(n)+p(7)*X1+p(8)*X2+p(9)*X3+p(10)*X4)*eye(n))...
+        +p(1)*eye(n)+p(2)*X1+p(3)*X2+p(4)*X3+p(5)*X4;
+    q = X5*((q(11)*eye(n))*X5+(q(6)*eye(n)+q(7)*X1+q(8)*X2+q(9)*X3+q(10)*X4)*eye(n))...
+        +(q(1)*eye(n)+q(2)*X1+q(3)*X2+q(4)*X3+q(5)*X4);
+    p2 = A*(X5*((p2(11)*eye(n))*X5+(p2(6)*eye(n)+p2(7)*X1+p2(8)*X2+p2(9)*X3+p2(10)*X4)*eye(n))...
+        +p2(1)*eye(n)+p2(2)*X1+p2(3)*X2+p2(4)*X3+p2(5)*X4);
+    q2 = X5*((q2(11)*eye(n))*X5+(q2(6)*eye(n)+q2(7)*X1+q2(8)*X2+q2(9)*X3+q2(10)*X4)*eye(n))...
+        +(q2(1)*eye(n)+q2(2)*X1+q2(3)*X2+q2(4)*X3+q2(5)*X4);
+end
+C = q\p;
+S = q2\p2;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/expm_cond.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,25 @@
+function [c,K] = expm_cond(A)
+%EXPM_COND  Relative condition number of matrix exponential.
+%   EXPM_COND(A) is the relative condition number in the Frobenius
+%   norm of the matrix exponential at the matrix A.
+%   [C,K] = EXPM_COND(A) returns the condition number C and the Kronecker
+%   matrix form K of the Frechet derivative.
+
+n = length(A);
+N = n^2;
+K = zeros(N);
+E = zeros(n);
+
+if nargout < 2 && ~isequal(A,triu(A))
+   % Use the Schur form only when K is not requested: K must refer to the original basis.
+   A = schur(A,'complex');
+end
+
+for j = 1:N
+    e = zeros(N,1); e(j) = 1;
+    E(:) = e;
+    X = expm_frechet_pade(A,E);
+    K(:,j) = X(:);
+end
+
+c = norm(K) * norm(A,'fro') / norm(expm(A),'fro');
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/expm_frechet_pade.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,59 @@
+function L = expm_frechet_pade(A,E,k)
+%EXPM_FRECHET_PADE Frechet derivative of matrix exponential via Pade approx.
+%   L = EXPM_FRECHET_PADE(A,E) evaluates the Frechet derivative of
+%   the matrix exponential at A in the direction E via scaling and
+%   squaring and a Pade approximant of the function tanh(x)/x.
+%   L = EXPM_FRECHET_PADE(A,E,k) uses either matrix exponentials
+%   (k = 0, the default) or repeated squaring (k = 1) in the final
+%   phase of the algorithm.
+
+if nargin < 3, k = 0; end
+
+real_data = isreal(A) && isreal(E);
+% Form complex Schur form if A not already upper triangular.
+use_Schur = false;
+if ~isequal(A,triu(A))
+   use_Schur = true;
+   [Q,T] = schur(A,'complex'); A = T; E = Q'*E*Q;
+end
+
+Abound = 1;
+if norm(A,1) <= Abound
+   s = 0;
+else
+   s = ceil( log2(norm(A,1)/Abound) );
+end
+
+As = A/2^s;
+
+I = eye(size(A));
+
+m = 8;
+% Positive zeros of p8 and q8 in r8 = p8/q8 Pade approximant.
+load tau_r8_zeros
+% Zeros come in \pm pairs.
+a = complex(0, [pzero; -pzero]);
+b = complex(0, [qzero; -qzero]);
+
+G = 2^(-s)*E;
+for i=1:m
+    rhs = (I + As/a(i)) * G + G * (I - As/a(i));
+    AA = I + As/b(i); BB = I - As/b(i);
+    G = sylvsol(AA, BB, rhs);
+end
+
+X = expm(As);
+L = (G*X + X*G)/2;
+for i=s:-1:1
+    if i < s
+        if k == 0
+           X = expm(2^(-i)*A);
+        else
+           X = X^2;
+        end
+    end
+    L = X*L + L*X;
+end
+
+if use_Schur, L = Q*L*Q'; end
+if real_data, L = real(L); end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/expm_frechet_quad.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,51 @@
+function R = expm_frechet_quad(A,E,theta,rule,k)
+%EXPM_FRECHET_QUAD Frechet derivative of matrix exponential via quadrature.
+%   L = EXPM_FRECHET_QUAD(A,E,THETA,RULE) is an approximation to the
+%   Frechet derivative of the matrix exponential at A in the direction E
+%   intended to have norm of the correct order of magnitude.
+%   It is obtained from the repeated trapezium rule (RULE = 'T'),
+%   the repeated Simpson rule (RULE = 'S', default),
+%   or the repeated midpoint rule (RULE = 'M').
+%   L = EXPM_FRECHET_QUAD(A,E,THETA,RULE,k) uses either matrix
+%   exponentials (k = 0, the default) or repeated squaring (k = 1)
+%   in the final phase of the algorithm.
+
+%   A is scaled so that norm(A/2^s) <= THETA.  Default: THETA = 1/2.
+
+if nargin < 3 || isempty(theta), theta = 1/2; end
+if nargin < 4 || isempty(rule), rule = 'S'; end
+if nargin < 5, k = 0; end
+
+s = ceil( log2(norm(A,1)/theta) );
+As = A/2^s;
+
+X = expm(As);
+
+switch upper(rule)
+
+    case 'T'
+       R = 2^(-s) * (X*E + E*X)/2 ;
+
+    case 'S'
+       Xmid = expm(As/2);
+       R = 2^(-s) * (X*E + 4*Xmid*E*Xmid + E*X)/6;
+
+    case 'M'
+       Xmid = expm(As/2);
+       R = 2^(-s) * Xmid*E*Xmid;
+
+    otherwise
+        error('Illegal value of RULE.')
+
+end
+
+for i = s:-1:1
+    if i < s
+        if k == 0
+           X = expm(2^(-i)*A);
+        else
+           X = X^2;
+        end
+    end
+    R = X*R + R*X;
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/fab_arnoldi.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,14 @@
+function c = fab_arnoldi(A,b,fun,m)
+%FAB_ARNOLDI  f(A)*b approximated by Arnoldi method.
+%   C = FAB_ARNOLDI(A,B,FUN,M) approximates FUNM(A,FUN)*B
+%   for a square matrix A using M steps of the Arnoldi process
+%   with starting vector B/norm(B).
+%   FUN must be a function handle for which FUNM(A,FUN) is defined.
+%   For large matrices M is intended to be much less than LENGTH(A).
+
+q1 = b/norm(b);
+[Q,H] = arnoldi(A,q1,m);
+H = H(1:m,1:m);
+Q = Q(:,1:m);
+e = zeros(m,1); e(1) = 1;
+c = norm(b)*Q*funm(H,fun)*e;
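
Usage sketch (illustrative note, not part of the committed file): approximate
expm(A)*b from a 20-dimensional Krylov subspace of a modestly scaled matrix.

  A = randn(50)/10; b = randn(50,1);
  y = fab_arnoldi(A, b, @exp, 20);
  norm(y - expm(A)*b) / norm(expm(A)*b)    % small for this well-scaled A
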
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/funm_condest1.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,91 @@
+function [c,est] = funm_condest1(A,fun,fun_frechet,flag1,varargin)
+%FUNM_CONDEST1  Estimate of 1-norm condition number of matrix function.
+%    C = FUNM_CONDEST1(A,FUN,FUN_FRECHET,FLAG) produces an estimate of
+%    the 1-norm relative condition number of function FUN at the matrix A.
+%    FUN and FUN_FRECHET are function handles:
+%      - FUN(A) evaluates the function at the matrix A.
+%      - If FLAG == 0 (default)
+%           FUN_FRECHET(B,E) evaluates the Frechet derivative at B
+%              in the direction E;
+%        if FLAG == 1
+%           - FUN_FRECHET('notransp',E) evaluates the
+%                    Frechet derivative at A in the direction E.
+%           - FUN_FRECHET('transp',E) evaluates the
+%                    Frechet derivative at A' in the direction E.
+%    If FUN_FRECHET is empty then the Frechet derivative is approximated
+%    by finite differences.  More reliable results are obtained when
+%    FUN_FRECHET is supplied.
+%    MATLAB'S NORMEST1 (block 1-norm power method) is used, with a random
+%    starting matrix, so the approximation can be different each time.
+%    C = FUNM_CONDEST1(A,FUN,FUN_FRECHET,FLAG,P1,P2,...) passes extra inputs
+%    P1,P2,... to FUN and FUN_FRECHET.
+%    [C,EST] = FUNM_CONDEST1(A,...) also returns an estimate EST of the
+%    1-norm of the Frechet derivative.
+%    Note: this function makes an assumption on the adjoint of the
+%    Frechet derivative that, for f having a power series expansion,
+%    is equivalent to the series having real coefficients.
+
+if nargin < 3 || isempty(fun_frechet), fte_diff = 1; else fte_diff = 0; end
+if nargin < 4 || isempty(flag1), flag1 = 0; end
+
+n = length(A);
+funA = feval(fun,A,varargin{:});
+if fte_diff, d = sqrt( eps*norm(funA,1) ); end
+
+factor = norm(A,1)/norm(funA,1);
+
+[est,v,w,iter] = normest1(@afun);
+c = est*factor;
+
+       %%%%%%%%%%%%%%%%%%%%%%%%% Nested function.
+       function Z = afun(flag,X)
+       %AFUN  Function to evaluate matrix products needed by NORMEST1.
+
+       if isequal(flag,'dim')
+          Z = n^2;
+       elseif isequal(flag,'real')
+          Z = isreal(A);
+       else
+
+          [p,q] = size(X);
+          if p ~= n^2, error('Dimension mismatch'), end
+          E = zeros(n);
+          Z = zeros(n^2,q);
+          for j=1:q
+
+              E(:) = X(:,j);
+
+              if isequal(flag,'notransp')
+
+                 if fte_diff
+                    Y = (feval(fun,A+d*E/norm(E,1),varargin{:}) - funA)/d;
+                 else
+                    if flag1
+                       Y = feval(fun_frechet,'notransp',E,varargin{:});
+                    else
+                       Y = feval(fun_frechet,A,E,varargin{:});
+                    end
+                 end
+
+              elseif isequal(flag,'transp')
+
+                 if fte_diff
+                    Y = (feval(fun,A'+d*E/norm(E,1),varargin{:}) - funA')/d;
+                 else
+                    if flag1
+                       Y = feval(fun_frechet,'transp',E,varargin{:});
+                    else
+                       Y = feval(fun_frechet,A',E,varargin{:});
+                    end
+                 end
+
+              end
+
+              Z(:,j) = Y(:);
+          end
+
+       end
+
+       end
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/funm_condest_fro.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,66 @@
+function c = funm_condest_fro(A,fun,fun_frechet,its,flag,varargin)
+%FUNM_CONDEST_FRO  Estimate of Frobenius norm condition number of matrix function.
+%    C = FUNM_CONDEST_FRO(A,FUN,FUN_FRECHET,ITS,FLAG) produces an estimate of
+%    the Frobenius norm relative condition number of function FUN at
+%    the matrix A.    FUN and FUN_FRECHET are function handles:
+%      - FUN(A) evaluates the function at the matrix A.
+%      - If FLAG == 0 (default)
+%           FUN_FRECHET(B,E) evaluates the Frechet derivative at B
+%              in the direction E;
+%        if FLAG == 1
+%           - FUN_FRECHET('notransp',E) evaluates the
+%                    Frechet derivative at A in the direction E.
+%           - FUN_FRECHET('transp',E) evaluates the
+%                    Frechet derivative at A' in the direction E.
+%    If FUN_FRECHET is empty then the Frechet derivative is approximated
+%    by finite differences.  More reliable results are obtained when
+%    FUN_FRECHET is supplied.
+%    The power method is used, with a random starting matrix,
+%    so the approximation can be different each time.
+%    ITS iterations (default 6) are used.
+%    C = FUNM_CONDEST_FRO(A,FUN,FUN_FRECHET,ITS,FLAG,P1,P2,...)
+%    passes extra inputs P1,P2,... to FUN and FUN_FRECHET.
+%    Note: this function makes an assumption on the adjoint of the
+%    Frechet derivative that, for f having a power series expansion,
+%    is equivalent to the series having real coefficients.
+
+if nargin < 5 || isempty(flag), flag = 0; end
+if nargin < 4 || isempty(its), its = 6; end
+if nargin < 3 || isempty(fun_frechet), fte_diff = 1; else fte_diff = 0; end
+
+funA = feval(fun,A,varargin{:});
+d = sqrt( eps*norm(funA,'fro') );
+Z = randn(size(A));
+Znorm = 1;
+
+factor = norm(A,'fro')/norm(funA,'fro');
+
+for i=1:its
+
+   Z = Z/norm(Z,'fro');
+   if fte_diff
+      W = (feval(fun,A+d*Z,varargin{:}) - funA)/d;
+   else
+      if flag
+         W = feval(fun_frechet,'notransp',Z,varargin{:});
+      else
+         W = feval(fun_frechet,A,Z,varargin{:});
+      end
+   end
+
+   W = W/norm(W,'fro');
+   if fte_diff
+      Z = (feval(fun,A'+d*W,varargin{:}) - funA')/d;
+   else
+      if flag
+         Z = feval(fun_frechet,'transp',W,varargin{:});
+      else
+         Z = feval(fun_frechet,A',W,varargin{:});
+      end
+   end
+
+   Znorm = norm(Z,'fro');
+
+end
+
+c = Znorm*factor;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/funm_ev.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,9 @@
+function F = funm_ev(A,fun)
+%FUNM_EV   Evaluate general matrix function via eigensystem.
+%   F = FUNM_EV(A,FUN) evaluates the function FUN at the
+%   square matrix A using the eigensystem of A.
+%   This function is intended for diagonalizable matrices only
+%   and can be numerically unstable.
+
+[V,D] = eig(A);
+F = V * diag(feval(fun,diag(D))) / V;
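
Usage sketch (illustrative note, not part of the committed file): for a symmetric
matrix the eigenvector method agrees with expm to roundoff.

  A = gallery('lehmer', 4);
  norm(funm_ev(A, @exp) - expm(A), 1) / norm(expm(A), 1)   % of order eps
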
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/funm_simple.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,30 @@
+function F = funm_simple(A,fun)
+%FUNM_SIMPLE Simplified Schur-Parlett method for function of a matrix.
+%   F = FUNM_SIMPLE(A,FUN) evaluates the function FUN at the
+%   square matrix A by the Schur-Parlett method using the scalar
+%   Parlett recurrence (and hence without blocking or reordering).
+%   This function is intended for matrices with distinct eigenvalues
+%   only and can be numerically unstable.
+%   FUNM should in general be used in preference.
+
+n = length(A);
+
+[Q,T] = schur(A,'complex');   % Complex Schur form.
+F = diag(feval(fun,diag(T))); % Diagonal of F.
+
+% Compute off-diagonal of F by scalar Parlett recurrence.
+for j=2:n
+   for i = j-1:-1:1
+      s = T(i,j)*(F(i,i)-F(j,j));
+      if j-i >= 2
+         k = i+1:j-1;
+         s = s + F(i,k)*T(k,j) - T(i,k)*F(k,j);
+      end
+      d = T(i,i) - T(j,j);
+      if d ~= 0
+         F(i,j) = s/d;
+      end
+   end
+end
+
+F = Q*F*Q';
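
Usage sketch (illustrative note, not part of the committed file): a random matrix
has distinct eigenvalues with probability one, so the scalar Parlett recurrence
applies.

  A = randn(6);
  norm(funm_simple(A, @exp) - expm(A), 1) / norm(expm(A), 1)   % small
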
Binary file mftoolbox/log_pade_err_opt.mat has changed
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/logm_cond.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,22 @@
+function c = logm_cond(A)
+%LOGM_COND  Relative condition number of matrix logarithm.
+%   LOGM_COND(A) is the relative condition number in the Frobenius
+%   norm of the matrix logarithm at the matrix A.
+
+n = length(A);
+N = n^2;
+K = zeros(N);
+E = zeros(n);
+
+if ~isequal(A,triu(A))
+   A = schur(A,'complex');
+end
+
+for j = 1:N
+    e = zeros(N,1); e(j) = 1;
+    E(:) = e;
+    X = logm_frechet_pade(A,E);
+    K(:,j) = X(:);
+end
+
+c = norm(K) * norm(A,'fro') / norm(logm(A),'fro');
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/logm_frechet_pade.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,47 @@
+function L = logm_frechet_pade(A,E)
+%LOGM_FRECHET_PADE Frechet derivative of matrix logarithm via Pade approx.
+%   L = LOGM_FRECHET_PADE(A,E) evaluates the Frechet
+%   derivative of the matrix logarithm at A in the direction E via the
+%   inverse scaling and squaring and a Pade approximant of the function
+%   tanh(x)/x.  A must have no eigenvalues on the negative real axis.
+
+real_data = isreal(A) && isreal(E);
+% Form complex Schur form if A not already upper triangular.
+use_Schur = false;
+if ~isequal(A,triu(A))
+   use_Schur = true;
+   [Q,T] = schur(A,'complex'); A = T; E = Q'*E*Q;
+end
+
+I = eye(size(A));
+B = A; s = 0;
+% Take repeated square roots until norm(B-I,1) <= 1 - 1/e; s counts them.
+while norm(B-I,1) > 1-1/exp(1)
+    B = sqrtm(B);
+    s = s+1; Aroot{s} = B;
+end
+
+% Positive zeros of p8 and q8 in r8 = p8/q8 Pade approximant.
+load tau_r8_zeros
+% Zeros come in \pm pairs.
+a = complex(0, [pzero; -pzero]);
+b = complex(0, [qzero; -qzero]);
+
+E = 2^s*E;
+for i = 1:s
+    E = sylvsol(Aroot{i},Aroot{i},E);
+end
+
+G = sylvsol(B,B,E);
+
+X = logm(B);
+
+for i=8:-1:1
+    rhs = (I + X/b(i)) * G + G * (I - X/b(i));
+    AA = I + X/a(i); BB = I - X/a(i);
+    G = sylvsol(AA, BB, rhs);
+end
+
+L = 2*G;
+if use_Schur, L = Q*L*Q'; end
+if real_data, L = real(L); end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/logm_iss.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,44 @@
+function [X,k,m] = logm_iss(A)
+%LOGM_ISS Matrix logarithm by inverse scaling and squaring method.
+%   X = LOGM_ISS(A) computes the logarithm of A, for a matrix with no
+%   nonpositive real eigenvalues, using the inverse scaling and squaring
+%   method with Pade approximation.
+%   Matrix square roots are computed by the product form of the
+%   Denman-Beavers iteration.
+%   [X,K,M] = LOGM_ISS(A) returns the number K of square roots
+%   computed and the degree M of the Pade approximant.
+
+e = eig(A);
+if any( imag(e) == 0 & real(e) <= 0 )
+   error('A must not have any nonpositive real eigenvalues!')
+end
+
+n = length(A);
+
+load log_pade_err_opt  % mmax-by-3 matrix DATA.
+% mvals = data(:,1);
+xvals = data(:,2);
+
+X = A;
+k = 0; p = 0; itk = 5;
+
+while 1
+
+    normdiff = norm(X-eye(n),1);
+    if normdiff <= xvals(16)
+
+       p = p+1;
+       j1 = find(normdiff <= xvals(3:16));
+       j1 = j1(1) + 2;
+       j2 = find(normdiff/2 <= xvals(3:16));
+       j2 = j2(1) + 2;
+       if 2*(j1-j2)/3 < itk || p == 2, m = j1; break, end
+
+    end
+
+    [X,M,itk] = sqrtm_dbp(X,1); k = k+1;
+
+end
+
+X = 2^k*logm_pade_pf(X-eye(n),m);
+if isreal(A), X = real(X); end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/logm_pade_pf.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,32 @@
+function S = logm_pade_pf(A,m)
+%LOGM_PADE_PF   Evaluate Pade approximant to matrix log by partial fractions.
+%   S = LOGM_PADE_PF(A,M) evaluates the [M/M] Pade approximation to
+%   LOG(EYE(SIZE(A))+A) using a partial fraction expansion.
+
+[nodes,wts] = gauss_legendre(m);
+% Convert from [-1,1] to [0,1].
+nodes = (nodes + 1)/2;
+wts = wts/2;
+
+n = length(A);
+S = zeros(n);
+
+for j=1:m
+    S = S + wts(j)*(A/(eye(n) + nodes(j)*A));
+end
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+function [x,w] = gauss_legendre(n)
+%GAUSS_LEGENDRE  Nodes and weights for Gauss-Legendre quadrature.
+%   [X,W] = GAUSS_LEGENDRE(N) computes the nodes X and weights W
+%   for N-point Gauss-Legendre quadrature.
+
+% Reference:
+% G. H. Golub and J. H. Welsch, Calculation of Gauss quadrature
+% rules, Math. Comp., 23(106):221-230, 1969.
+
+i = 1:n-1;
+v = i./sqrt((2*i).^2-1);
+[V,D] = eig( diag(v,-1)+diag(v,1) );
+x = diag(D);
+w = 2*(V(1,:)'.^2);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/mft_test.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,223 @@
+function mft_test(n)
+%MFT_TEST  Test the Matrix Function Toolbox.
+%   MFT_TEST(N) tests most of the functions in the
+%   Matrix Function Toolbox on (mainly) random matrices of order N.
+%   The default is N = 8.
+%   For a run time of a few seconds choose a small
+%   value of N (such as the default).
+%   Each invocation uses different random matrices.
+%   This is not a thorough suite of tests.
+
+%   For N much larger than 10 it may be necessary to adjust the fudge factors
+%   in this function and in MFT_TOLERANCE in order to achieve successful
+%   completion.
+
+if nargin < 1, n = 8; end
+
+fudge_factor = 200;
+A = randn(n);
+E = randn(n);
+B = A^2;  % No eigenvalues on nonpositive real axis.
+m = 2*n;
+c = randn(m,1);
+Ap = A'*A; Bp = A'*A; % Positive definite.
+
+C = randn(n);
+X = sylvsol(A,E,C);
+assert_small( (A*X+X*E-C)/((norm(A,1)+norm(E,1))*norm(X,1) + norm(C,1)))
+assert_eq(real(X),X)
+
+T = triu(randn(n)); U = triu(randn(n));
+X = sylvsol(T,U,C);
+assert_eq(real(X),X)
+assert_small( (T*X+X*U-C)/((norm(T,1)+norm(U,1))*norm(X,1) + norm(C,1)))
+
+if license('test','Symbolic_Toolbox') && ~isempty(ver('symbolic'))
+   % If Symbolic Math Toolbox licensed and installed...
+    for p = 2:6
+      J = gallery('jordbloc',p,0);
+      [d,a] = ascent_seq(J);
+      assert_eq(d,ones(p,1))
+      assert_eq(a,(0:p)')
+    end
+end
+
+A1 = 2.5*A/sqrt(norm(A^2,inf));
+for k = 1:3  % Try different norms.
+  C1 = cosm(A1);
+  [C,S] = cosmsinm(A1);
+  C0 = funm(A1,@cos);
+  S0 = funm(A1,@sin);
+  assert_sim(C,C1)
+  assert_sim(C,C0)
+  assert_sim(S,S0)
+  assert_small( (C^2+S^2-eye(n)) / (norm(C,1)^2+norm(S,1)^2) )
+  A1 = 2*A1;
+end
+
+L = expm_frechet_pade(A,E);
+R = expm_frechet_quad(A,E);
+assert_sim(L,R,1e-2)
+
+% Check identity $L_{\exp}\bigl(\log(A), L_{\log}(A,E)\bigr) = E$.
+L_log = logm_frechet_pade(B,E);
+L_exp = expm_frechet_pade(logm(B),L_log);
+assert_sim(L_exp,E,sqrt(eps)*norm(E,1))
+
+m = n-1;
+E = eye(m);
+E = E(:,m);
+[Q,H] = arnoldi(A,randn(n,1),m);
+res = A*Q(:,1:m) - Q(:,1:m)*H(1:m,1:m) - H(m+1,m)*Q(:,m+1)*E';
+assert_small(res/(norm(H,1)*norm(Q,1)))
+
+b = randn(n,1); y = fab_arnoldi(A,b,@exp,n);
+assert_sim(y,expm(A)*b)
+
+X = logm(B);
+X1 = logm_iss(B);
+tol = funm_condest1(B,@logm,@logm_frechet_pade)*eps*n;
+assert_sim(X,X1,tol)
+
+c1 = expm_cond(A);
+[c1a,K] = expm_cond(A);
+assert_sim(c1,c1a)
+st = randn('state');
+c2 = funm_condest_fro(A,@expm,@expm_frechet_pade);
+randn('state',st);
+c3 = funm_condest_fro(A,@expm,@fun_frechet_exp,[],1);
+assert_sim(c2,c3,0.5)
+fudge_factor1 = 2;
+if c2 < c1/10 || c2 > c1*fudge_factor1, [c2 c1 c2/c1], error('Failure'), end
+if c3 < c1/10 || c3 > c1*fudge_factor1, [c3 c1 c3/c1], error('Failure'), end
+
+c1 = funm_condest1(A,@expm);
+st = rand('twister');
+c2 = funm_condest1(A,@expm,@expm_frechet_pade);
+assert_sim(c1,c2,1)
+rand('twister',st);
+c3 = funm_condest1(A,@expm,@fun_frechet_exp,1);
+assert_sim(c2,c3,0.5)
+
+c1 = funm_condest_fro(B,@logm);
+c2 = funm_condest_fro(B,@logm,@logm_frechet_pade);
+assert_sim(c1,c2,1)
+
+[U1,H1] = polar_newton(A);
+assert_small((A-U1*H1)/norm(A,1))
+[U2,H2] = polar_svd(A);
+assert_small((A-U2*H2)/norm(A,1))
+assert_sim(U1,U2)
+assert_small((H1-H2)/norm(H1,1))
+assert_small(U1'*U1-eye(n))
+assert_small(U2'*U2-eye(n))
+assert_eq(H1,H1')
+assert_eq(H2,H2')
+
+A2 = randn(n+2,n);
+for i = 1:4
+   [U,H] = polar_svd(A2);
+   assert_small((A2-U*H)/norm(A2,1))
+   assert_small(U1'*U1-eye(n))
+   assert_eq(H,H')
+   A2 = A2';
+   if i == 2, A2(:,round(n/2)) = 0; end
+end
+
+P1 = polyvalm_ps(c,A);
+P2 = polyvalm(c,A);
+assert_small((P1-P2)/norm(P1,1))
+
+assert_sim(power_binary(A,m),A^m)
+
+X = riccati_xaxb(Ap,Bp);
+assert_small( (X*Ap*X-Bp) / (norm(X,1)^2*norm(Ap,1)+norm(Bp,1)) )
+assert_eq(X,X')
+
+for p = [2 5 10 16]
+  X = rootpm_real(B,p); assert_small( (X^p-B)/(norm(X,1)^p + norm(B,1)) );
+  X = rootpm_sign(B,p); assert_small( (X^p-B)/(norm(X,1)^p + norm(B,1)) );
+  [X,Y] = rootpm_schur_newton(B,p);
+  assert_small( (X^p-B)/(norm(X,1)^p + norm(B,1)) )
+  assert_small( (X*Y - eye(n))/(norm(X,1)*norm(Y,1)) )
+end
+
+[S,N] = signm(A);
+assert_small( (S^2 - eye(n))/(norm(S,1)^2+1) )
+assert_small( (A-S*N)/(norm(A,1)+norm(S,1)*norm(N,1)) )
+
+[X0,alpha,condest] = sqrtm(B);
+tol = n*norm(X0,1)*condest*eps;
+
+[P,Q] = sqrtm_db(B);
+assert_small( (P^2 - B)/(norm(P,1)^2+norm(B,1)) )
+assert_small( (Q^2 - inv(B))/(norm(Q,1)^2+norm(inv(B),1)) )
+assert_small(X0-P, tol)
+[P,Q] = sqrtm_dbp(B);
+assert_small( (P^2 - B)/(norm(P,1)^2+norm(B,1)) )
+assert_small( Q - eye(n) )
+assert_small(X0-P, tol)
+
+% Following usually succeeds but can fail:
+% only local convergence conditions are known for full Newton.
+% X = sqrtm_newton_full(B);
+% assert_small( (X^2 - B)/(norm(X,1)^2+norm(B,1)) )
+
+X = sqrtm_pd(Ap);
+assert_small( (X^2 - Ap)/(norm(X,1)^2+norm(Ap,1)) )
+assert_eq(X,X')
+
+C = full(gallery('tridiag',n,1,4,1));
+D = diag(diag(C));
+X = sqrtm_pulay(C,D);
+assert_small( (X^2 - C)/(norm(X,1)^2+norm(C,1)) )
+
+X = sqrtm_real(B);
+assert_small( (X^2 - B)/(norm(X,1)^2+norm(B,1)) )
+assert_small(X0-X, tol)
+
+T = schur(A,'complex');
+R = sqrtm_triang_min_norm(T);
+assert_small( (R^2 - T)/(norm(R,1)^2+norm(T,1)) )
+
+fprintf(['MFT_TEST: All tests of the Matrix Function Toolbox passed' ...
+        ' (n = %g).\n'], n)
+
+      % Nested functions
+
+      function L = fun_frechet_exp(flag,E)
+      % Frechet derivative of exponential.
+
+      if strcmp(flag,'transp'), E = E'; end
+      L = expm_frechet_pade(A,E);
+      if strcmp(flag,'transp'), L = L'; end
+
+      end
+
+      % ---------------------------------------------------------
+      % Assertion functions.
+
+      function assert_sim(a,b,tol)
+      if nargin < 3, tol = fudge_factor*eps(superiorfloat(a,b))*length(a); end
+      if norm(a-b,1)/max( norm(a,1), norm(b,1) ) > tol
+         fprintf('%9.2e, %9.2e\n', ...
+                  norm(a-b,1)/max( norm(a,1), norm(b,1)), tol )
+         error('Failure')
+      end
+      end
+
+      function assert_small(a,tol)
+      if nargin < 2, tol = fudge_factor*eps(class(a))*length(a); end
+      if norm(a,1) > tol
+         fprintf('%9.2e, %9.2e\n',norm(a,1), tol),
+         error('Failure')
+      end
+      end
+
+      function assert_eq(a,b)
+      if norm(a-b,1)
+         error('Failure')
+      end
+      end
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/mft_tolerance.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,8 @@
+function tol = mft_tolerance(A)
+%MFT_TOLERANCE   Convergence tolerance for matrix iterations.
+%   TOL = MFT_TOLERANCE(A) returns a convergence tolerance to use in
+%   the matrix iterations in the Matrix Function Toolbox applied to the
+%   matrix A.  All functions in the toolbox call this function to set
+%   the convergence tolerance.
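+%   For example, for an 8-by-8 matrix TOL = SQRT(8)*EPS/2, which is
+%   approximately 3.1e-16 in IEEE double precision arithmetic.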
+
+tol = sqrt(length(A))*eps/2;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/polar_newton.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,43 @@
+function [U,H,k] = polar_newton(A)
+%POLAR_NEWTON Polar decomposition by scaled Newton iteration.
+%   [U,H,k] = POLAR_NEWTON(A), where the matrix A is square and
+%   nonsingular, computes a unitary U and a Hermitian positive
+%   definite H such that A = U*H.   k is the number of iterations.
+%   A Newton iteration with acceleration parameters is used.
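+%
+%   Example (illustrative check of the computed factors):
+%       A = randn(5);
+%       [U,H,k] = polar_newton(A);
+%       norm(A - U*H,1), norm(U'*U - eye(5),1)    % both should be small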
+
+accel_tol = 1e-2;  % Precise value not important.
+need_accel = 1;
+
+tol = mft_tolerance(A);
+
+X = A;
+reldiff = inf;
+maxit  = 16;
+
+for k = 1:maxit
+
+      Xold = X;
+      Xinv = inv(X);
+      if need_accel
+         g = ( norm(Xinv,1)*norm(Xinv,inf) / (norm(X,1)*norm(X,inf)) )^(1/4);
+      else
+         g = 1;
+      end
+      X = 0.5*(g*X + Xinv'/g);
+      reldiff_old = reldiff;
+      diff_F = norm(X-Xold,'fro');
+      reldiff = diff_F/norm(X,'fro');
+
+      if need_accel && (reldiff < accel_tol), need_accel = false; end
+      cged = (diff_F <= sqrt(tol)) || (reldiff > reldiff_old/2 && ~need_accel);
+      if cged, break, end
+
+      if k == maxit, error('Not converged after %2.0f iterations', maxit), end
+
+end
+
+U = X;
+if nargout >= 2
+   H = U'*A;
+   H = (H + H')/2;  % Force Hermitian by taking nearest Hermitian matrix.
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/polar_svd.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,18 @@
+function [U,H] = polar_svd(A)
+%POLAR_SVD   Canonical polar decomposition via singular value decomposition.
+%   [U,H] = POLAR_SVD(A) computes a matrix U of the same dimension
+%   (m-by-n) as A, and a Hermitian positive semi-definite matrix H,
+%   such that A = U*H.
+%   U is a partial isometry with range(U^*) = range(H).
+%   If A has full rank then U has orthonormal columns if m >= n
+%   and orthonormal rows if m <= n.
+%   U and H are computed via an SVD of A.
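+%
+%   Example (sketch, with a full rank rectangular A):
+%       A = randn(6,4);
+%       [U,H] = polar_svd(A);
+%       norm(A - U*H,1), norm(U'*U - eye(4),1)    % both should be small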
+
+[P,S,Q] = svd(A,'econ');
+U = P*Q';
+r = sum( diag(S) > norm(A,1)*eps/2 );
+U = P(:,1:r)*Q(:,1:r)';
+if nargout == 2
+   H = Q*S*Q';
+   H = (H + H')/2;      % Force Hermitian by taking nearest Hermitian matrix.
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/polyvalm_ps.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,48 @@
+function [P,s,cost] = polyvalm_ps(c,A,s)
+%POLYVALM_PS  Evaluate polynomial at matrix argument by Paterson-Stockmeyer alg.
+%   [P,S,COST] = POLYVALM_PS(C,A,S) evaluates the polynomial whose
+%   coefficients are the vector C at the matrix A using the
+%   Paterson-Stockmeyer algorithm.  If omitted, the integer parameter
+%   S is chosen automatically and its value is returned as an
+%   output argument.   COST is the number of matrix multiplications used.
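+%
+%   Example (illustrative; the built-in POLYVALM is the reference):
+%       c = randn(1,7);  A = randn(5);
+%       [P,s,cost] = polyvalm_ps(c,A);
+%       norm(P - polyvalm(c,A),1)/norm(P,1)    % should be of order EPS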
+
+m = length(c)-1; % Degree of poly.
+c = c(end:-1:1); c = c(:);
+n = length(A);
+
+if nargin < 3
+   % Determine optimum parameter s.
+   s = ceil(sqrt(m));
+end
+r = floor(m/s);
+cost = s+r-(m==r*s)-1;
+
+% Apower{i+1} = A^i;
+Apower = cell(s+1);
+Apower{1} = eye(n);
+for i=2:s+1
+    Apower{i} = A*Apower{i-1};
+end
+
+B = cell(r+1);
+for k=0:r-1
+    temp = c(s*k+1)*eye(n);
+    for j=1:s-1
+        temp = temp + c(s*k+j+1)*Apower{j+1};
+    end
+    B{k+1} = temp;
+end
+B{r+1} = c(m+1)*Apower{m-s*r+1};
+for j=m-1:-1:s*r
+    if j == s*r
+       B{r+1} = B{r+1} + c(s*r+1)*eye(n);
+    else
+       B{r+1} = B{r+1} + c(j+1)*Apower{m-s*r-(m-j)+1};
+    end
+end
+
+As = Apower{s+1};
+P = zeros(n);
+for k=r:-1:0
+    P = P*As + B{k+1};
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/power_binary.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,22 @@
+function X = power_binary(A,m)
+%POWER_BINARY   Power of matrix by binary powering (repeated squaring).
+%   X = POWER_BINARY(A,m) computes A^m for a square matrix A and a
+%   positive integer m, by binary powering.
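+%
+%   Example:
+%       A = randn(4);
+%       norm(power_binary(A,13) - A^13,1)/norm(A^13,1)   % should be small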
+
+s = double(dec2bin(m)) - 48;  % Binary representation of m as a double array.
+k = length(s);
+
+P = A;
+i = k;
+while s(i) == 0
+      P = P^2;
+      i = i-1;
+end
+X = P;
+for j = i-1:-1:1
+    P = P^2;
+    if s(j) == 1
+       X = X*P;
+    end
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/quasitriang_struct.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,31 @@
+function [m, s, k] = quasitriang_struct(R)
+%QUASITRIANG_STRUCT  Block structure of upper quasitriangular matrix.
+%   [M,S,K] = QUASITRIANG_STRUCT(R), where R is an upper
+%   quasitriangular matrix, determines that R has M diagonal blocks,
+%   the i'th of which has order S(i) and starting position K(i).
+%   Any subdiagonal elements less than the tolerance EPS*NORM(R,'FRO')
+%   are treated as zero.
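+%
+%   Example (sketch, using a real Schur factor):
+%       [Q,R] = schur(randn(6),'real');
+%       [m,s,k] = quasitriang_struct(R)    % SUM(s) equals LENGTH(R)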
+
+n = length(R);
+tol = eps*norm(R,'fro');
+
+i = 1; j = 1;
+
+while i < n
+      k(j) = i;
+      if abs(R(i+1,i)) <= tol
+         s(j) = 1;
+      else
+         s(j) = 2;
+      end
+      i = i + s(j);
+      j = j+1;
+end
+
+if i == n
+   k(j) = n;
+   s(j) = 1;
+   m = j;
+else
+   m = j-1;
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/readme.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,18 @@
+echo on
+% Welcome to the Matrix Function Toolbox.
+% The primary source for this toolbox is
+%
+%     http://www.ma.man.ac.uk/~higham/mftoolbox
+%
+% The toolbox comprises the M-files in this directory.
+% It accompanies the book
+%
+% Nicholas J. Higham, Functions of Matrices: Theory and Computation,
+% SIAM, Philadelphia, PA, USA, 2008. ISBN 978-0-898716-46-7,
+%
+% which is the documentation for the toolbox.
+% In particular, Appendix D of the book describes the toolbox.
+%
+% For a descriptive list of M-files in the toolbox type
+%     help mft
+echo off
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/riccati_xaxb.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,15 @@
+function X = riccati_xaxb(A,B)
+%RICCATI_XAXB  Solve Riccati equation XAX = B in positive definite matrices.
+%   X = RICCATI_XAXB(A,B) is the Hermitian positive definite
+%   solution to XAX = B, where A and B are Hermitian positive
+%   definite matrices.
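+%
+%   Example (illustrative check, using matrices from GALLERY):
+%       A = gallery('lehmer',5);  B = gallery('minij',5);
+%       X = riccati_xaxb(A,B);
+%       norm(X*A*X - B,1)/norm(B,1)    % should be small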
+
+R = chol(A);
+S = chol(B);
+
+U = polar_newton(S*R');
+X = R\(U'*S);
+X = (X + X')/2;
+
+% [U,H] = polar_newton(R*S'); % Variant derived in solution of Problem 6.21.
+% X = R\(U*S);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/rootpm_newton.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,29 @@
+function [X,k] = rootpm_newton(A,p,c)
+%ROOTPM_NEWTON  Coupled Newton iteration for matrix pth root.
+%   [X,k] = ROOTPM_NEWTON(A,P,C) computes the principal
+%   Pth root of the matrix A.
+%   C (default 1) is a convergence parameter.
+%   k is the number of iterations.
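+%
+%   Example (sketch; the iteration is only locally convergent, so a
+%   matrix near the identity is used):
+%       A = eye(5) + 0.1*randn(5);
+%       X = rootpm_newton(A,3);
+%       norm(X^3 - A,1)/norm(A,1)    % should be small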
+
+if nargin < 3, c = 1; end
+
+n = length(A);
+M = A/c^p;
+X = c*eye(n);
+tol = mft_tolerance(A);
+maxit = 20;
+
+relres = inf;
+
+for k=1:maxit
+
+   X = ( ((p+1)*eye(n) - M)/p )\X;
+   M = power_binary( ((p+1)*eye(n) - M)/p, p) * M;
+
+   relres_old = relres;
+   relres = norm(M-eye(n),inf);
+
+   if relres <= tol || relres > relres_old/2, return, end
+
+end
+error('Not converged after %2.0f iterations', maxit)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/rootpm_real.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,99 @@
+function X = rootpm_real(A,p)
+%ROOTPM_REAL  Pth root of real matrix via real Schur form.
+%   X = ROOTPM_REAL(A,P) is a Pth root of the nonsingular matrix A
+%   (A = X^P).  When A has no eigenvalues on the negative real axis
+%   then X is the principal pth root, which is real when A is real.
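+%
+%   Example (illustrative check):
+%       A = randn(6); B = A*A' + eye(6);   % symmetric positive definite
+%       X = rootpm_real(B,5);
+%       norm(X^5 - B,1)/norm(B,1)          % should be small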
+
+%   Implementation is complicated by use of zero (and even "-1")
+%   subscripts in algorithm.  Dealt with by increasing "k" by 1
+%   each time and moving "-1" cases in front of loops.
+
+if norm(imag(A),1), error('A must be real.'), end
+if p == 1, X = A; return; end
+
+n = length(A);
+[Q,R] = schur(A,'real');
+
+% m blocks: i'th has order s(i), starts at t(i).
+[m,s,t] = quasitriang_struct(R);
+U = zeros(n); B = cell(p); V = cell(p-1);
+V{1} = eye(n);  % U^k == V{k+1}.
+
+for j=1:m
+    rj = t(j):t(j)+s(j)-1;
+    if s(j) == 1
+       % If A is real, X will be real unless R(rj,rj)<0 on the next line.
+       U(rj,rj) = R(rj,rj)^(1/p);
+    else
+       U(rj,rj) = root_block(R(rj,rj),p);
+    end
+    for k = 0:p-2, kk = k+1; V{kk}(rj,rj) = U(rj,rj)^(k+1); end
+    for i=j-1:-1:1
+        ri = t(i):t(i)+s(i)-1;
+        for k = 0:p-2
+            kk = k+1;
+            B{kk} = zeros(s(i),s(j));
+            for ell = i+1:j-1
+                rell = t(ell):t(ell)+s(ell)-1;
+                B{kk} = B{kk} + U(ri,rell)*V{kk}(rell,rj);
+            end
+        end
+        rhs = R(ri,rj) - B{p-2 +1};
+        for k = 0:p-3
+            rhs = rhs - V{p-3-k +1}(ri,ri)*B{k+1};
+        end
+        coeff = kron( eye(s(j)), V{p-2 +1}(ri,ri) ) ...
+               + kron( V{p-2 +1}(rj,rj).', eye(s(i)) );
+        for k = 1:p-2
+            coeff = coeff + kron( V{k-1 +1}(rj,rj).', V{p-2-k +1}(ri,ri) );
+        end
+        y = coeff\rhs(:);
+        rhs(:) = y;                  % `Un-vec' the solution.
+        U(ri,rj) = rhs;
+        for k = 0:p-2
+            if k == 0
+               S = U(ri,rj);
+            else
+               S = V{k-1 +1}(ri,ri)*U(ri,rj) + U(ri,rj)*V{k-1 +1}(rj,rj) ...
+                   + B{k-1 +1};
+               for ell = 1:k-1
+                   S = S + V{k-ell-1 +1}(ri,ri)*U(ri,rj)*V{ell-1 +1}(rj,rj);
+               end
+               for ell = 0:k-2
+                   S = S + V{k-2-ell +1}(ri,ri)*B{ell +1};
+               end
+            end
+            V{k+1}(ri,rj) = S;
+        end
+   end
+end
+
+X = Q*U*Q';
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+function X = root_block(R,p)
+%ROOT_BLOCK  Pth root of a real 2x2 matrix with complex conjugate eigenvalues.
+
+if norm(imag(R)) ~= 0 || any(size(R) - [2,2])
+   error('Matrix must be real, of dimension 2.')
+end
+
+r11 = R(1,1); r12 = R(1,2);
+r21 = R(2,1); r22 = R(2,2);
+
+theta = (r11 + r22) / 2;
+musq = (-(r11 - r22)^2 - 4*r21*r12) / 4;
+mu = sqrt(musq);
+
+if musq <= 0
+   error('Matrix must have non-real complex conjugate eigenvalues.')
+end
+
+r = sqrt(theta^2+musq);
+phi = angle(complex(theta,mu));
+rootp = r^(1/p)*exp(1i*phi/p);
+
+alpha = real(rootp);
+beta = imag(rootp);
+
+X = alpha*eye(2) + (beta/mu)*(R - theta*eye(2));
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/rootpm_schur_newton.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,66 @@
+function [X,Y] = rootpm_schur_newton(A,p)
+%ROOTPM_SCHUR_NEWTON  Matrix pth root by Schur-Newton method.
+%   [X,Y] = ROOTPM_SCHUR_NEWTON(A,p) computes the principal pth root
+%   X of the real matrix A using the Schur-Newton algorithm.
+%   It also returns Y = INV(X).
+
+if norm(imag(A),1), error('A must be real.'), end
+A = real(A);   % Discard any zero imaginary part.
+[Q,R] = schur(A,'real'); % Quasitriangular R.
+
+e = eig(R);
+if any (e(find(e == real(e))) < 0 )
+   error('A has a negative real eigenvalue: principal pth root not defined')
+end
+
+f = factor(p);
+k0 = length(find(f == 2)); % Number of factors 2.
+q = p/2^(k0);
+k1 = k0;
+if q > 1
+
+   emax = max(abs(e)); emin = min(abs(e));
+   if emax > emin % Avoid log(0).
+      k1 = max(k1, ceil( log2( log2(emax/emin) ) ));
+   end
+
+   max_arg = norm(angle(e),inf);
+   if max_arg > pi/8
+      k3 = 1;
+      if max_arg > pi/2
+         k3 = 3;
+      elseif max_arg > pi/4
+         k3 = 2;
+      end
+      k1 = max(k1,k3);
+   end
+
+end
+
+for i = 1:k1, R = sqrtm_real(R); end
+
+
+if q ~= 1
+   pw = 2^(-k1); emax = emax^pw; emin = emin^pw;
+   if ~any(imag(e))
+     % Real eigenvalues.
+     if emax > emin
+        alpha = emax/emin;
+        c = ( (alpha^(1/q)*emax-emin)/( (alpha^(1/q)-1)*(q+1) ) )^(1/q);
+     else
+        c = emin^(1/q);
+     end
+   else
+     % Complex eigenvalues.
+     c = (( emax+emin)/2 )^(1/q);
+   end
+
+   X = rootpm_newton(R,q,c);
+   for i = 1:k1-k0
+       X = X*X;
+   end
+else
+   X = R;
+end
+Y = Q*(X\Q');  % Return inverse pth root, too.
+X = Q*X*Q';
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/rootpm_sign.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,47 @@
+function X = rootpm_sign(A,p)
+%ROOTPM_SIGN  Matrix Pth root via matrix sign function.
+%   X = ROOTPM_SIGN(A,P) computes the principal Pth root X
+%   of the matrix A using the matrix sign function and a block
+%   companion matrix approach.
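+%
+%   Example (sketch):
+%       A = randn(4); B = A*A' + eye(4);   % Hermitian positive definite
+%       X = rootpm_sign(B,5);
+%       norm(X^5 - B,1)/norm(B,1)          % should be small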
+
+if p == 1, X = A; return, end
+
+n = length(A);
+podd = rem(p,2);
+
+Y = A;
+
+if podd
+    p = 2*p;  % Compensate by squaring at end.
+else
+    while mod(p,4) == 0
+        Y = sqrtm(Y);
+        p = round(p/2);
+    end
+end
+
+if p == 2
+    X = sqrtm(Y);
+    return
+end
+
+% Form C, the block companion matrix.
+C = zeros(p*n);
+C(end-n+1:end, 1:n) = Y;
+C = C + diag(ones(n*(p-1),1),n);
+
+S = signm(C);
+
+X = S(n+1:2*n,1:n);
+
+% Scale factor.
+c = 0;
+for l = 1:floor(p/4)
+    c = c+cos(2*pi*l/p);
+end
+c = c*4+2;
+c = c/p;
+X = X/c;
+if podd
+    X = X*X;
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/signm.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,47 @@
+function [S,N] = signm(A)
+%SIGNM   Matrix sign decomposition.
+%   [S,N] = SIGNM(A) is the matrix sign decomposition A = S*N,
+%   computed via the Schur decomposition.
+%   S is the matrix sign function, sign(A).
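+%
+%   Example (illustrative check):
+%       A = randn(6);        % almost surely no pure imaginary eigenvalues
+%       [S,N] = signm(A);
+%       norm(S^2 - eye(6),1), norm(A - S*N,1)   % both should be small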
+
+[Q, T] = schur(A,'complex');
+S = Q * matsignt(T) * Q';
+
+if nargout == 2
+   N = S*A;
+end
+
+%%%%%%%%%%%%%%%%%%%%%%%%
+function S = matsignt(T)
+%MATSIGNT    Matrix sign function of a triangular matrix.
+%   S = MATSIGNT(T) computes the matrix sign function S of the
+%   upper triangular matrix T using a recurrence.
+
+n = length(T);
+S = diag( sign( diag(real(T)) ) );
+for p = 1:n-1
+   for i = 1:n-p
+
+      j = i+p;
+      d = T(j,j) - T(i,i);
+
+      if S(i,i) ~= -S(j,j)  % Solve via S^2 = I if we can.
+
+         % Get S(i,j) from S^2 = I.
+         k = i+1:j-1;
+         S(i,j) = -S(i,k)*S(k,j) / (S(i,i)+S(j,j));
+
+      else
+
+         % Get S(i,j) from S*T = T*S.
+         s = T(i,j)*(S(j,j)-S(i,i));
+         if p > 1
+            k = i+1:j-1;
+            s = s + T(i,k)*S(k,j) - S(i,k)*T(k,j);
+         end
+         S(i,j) = s/d;
+
+      end
+
+   end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/signm_newton.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,53 @@
+function [X,k] = signm_newton(A,scal)
+%SIGNM_NEWTON   Matrix sign function by Newton iteration.
+%   [S,k] = SIGNM_NEWTON(A,SCAL) computes the matrix sign
+%   function S of A using the scaled Newton iteration, with
+%   scale factors specified by SCAL:
+%   SCAL = 0: no scaling.
+%   SCAL = 1: determinantal scaling (default).
+%   SCAL = 2: spectral scaling.
+%   SCAL = 3: norm scaling.
+%   k is the number of iterations.
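+%
+%   Example (sketch; compares with the Schur-based SIGNM):
+%       A = randn(6);
+%       [S,k] = signm_newton(A);           % determinantal scaling
+%       norm(S - signm(A),1)/norm(S,1)     % should be small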
+
+if nargin < 2, scal = 1; end
+
+tol = mft_tolerance(A);
+accel_tol = 1e-2;  %  Precise value not important.
+need_accel = 1;
+n = length(A);
+maxit = 16;
+
+X = A;
+reldiff = inf;
+
+for k = 1:maxit
+
+      Xold = X;
+      Xinv = inv(X);
+      if need_accel && scal > 0
+         % In practice should estimate spectral radius and 2-norms;
+         % here they are computed exactly.
+         switch scal
+         case 1
+              g = abs(det(X))^(-1/n);
+         case 2
+              s1 = max(abs(eig(Xinv)));
+              s2 = max(abs(eig(X)));
+              g = sqrt(s1/s2);
+         case 3
+              g = sqrt( norm(Xinv) / (norm(X)) );
+         end
+         X = g*X; Xinv = Xinv/g;
+      end
+      X = 0.5*(X + Xinv);
+      diff_F = norm(X-Xold,'fro');
+      reldiff_old = reldiff;
+      reldiff = diff_F/norm(X,'fro');
+
+      if need_accel && (reldiff < accel_tol), need_accel = false; end
+      cged = (diff_F <= sqrt( tol*norm(X)/norm(Xinv) ) || ...
+              reldiff > reldiff_old/2 && ~need_accel);
+      if cged, return, end
+
+end
+error('Not converged after %2.0f iterations', maxit)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/sqrtm_db.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,43 @@
+function [P,Q,k] = sqrtm_db(A,scale)
+%SQRTM_DB   Matrix square root by Denman-Beavers iteration.
+%   [P,Q,k] = SQRTM_DB(A,SCAL) computes the principal square root
+%   P of the matrix A using the Denman-Beavers iteration.
+%   It also returns Q = INV(P).
+%   SCAL specifies the scaling:
+%   SCAL == 0, no scaling.
+%   SCAL == 1, determinantal scaling (default).
+%   k is the number of iterations.
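+%
+%   Example (illustrative check):
+%       A = randn(5); B = A*A' + eye(5);   % no eigenvalues on R^-
+%       [P,Q] = sqrtm_db(B);
+%       norm(P^2 - B,1)/norm(B,1), norm(Q - inv(P),1)/norm(Q,1)  % both small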
+
+n = length(A);
+if nargin < 2, scale = 1; end
+
+tol = mft_tolerance(A);
+P = A;
+Q = eye(n);
+reldiff = inf;
+maxit = 25;
+
+for k = 1:maxit
+
+   if scale == 1
+       g = (abs(det(P)*det(Q)))^(-1/(2*n));
+       P = g*P; Q = g*Q;
+   end
+
+   Pold = P;
+
+   Poldinv = inv(Pold);
+   P = (P + inv(Q))/2;
+   Q = (Q + inv(Pold))/2;
+
+   diff_F = norm(P-Pold,'fro');
+   reldiff_old = reldiff;
+   reldiff = diff_F/norm(P,'fro');
+   if reldiff < 1e-2, scale = 0; end  % Switch to no scaling.
+
+   cged = (diff_F <= sqrt( tol*norm(P)/norm(Poldinv) ) || ...
+           reldiff > reldiff_old/2 && ~scale);
+   if cged, return, end
+
+end
+error('Not converged after %2.0f iterations', maxit)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/sqrtm_dbp.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,39 @@
+function [X,M,k] = sqrtm_dbp(A,scale)
+%SQRTM_DBP  Matrix square root by product form of Denman-Beavers iteration.
+%   [X,M,k] = SQRTM_DBP(A,SCAL) computes the principal square root X
+%   of the matrix A using the product form of the Denman-Beavers
+%   iteration. The matrix M tends to EYE.
+%   SCAL specifies the scaling:
+%        SCAL == 0, no scaling.
+%        SCAL == 1, determinantal scaling (default).
+%   k is the number of iterations.
+
+n = length(A);
+if nargin < 2, scale = 1; end
+
+tol = mft_tolerance(A);
+X = A;
+M = A;
+maxit = 25;
+
+for k = 1:maxit
+
+   if scale == 1
+       g = (abs(det(M)))^(-1/(2*n));
+       X = g*X; M = g^2*M;
+   end
+
+   Xold = X; invM = inv(M);
+
+   X = X*(eye(n) + invM)/2;
+   M = 0.5*(eye(n) + (M + invM)/2);
+
+   Mres = norm(M - eye(n),'fro');
+
+   reldiff = norm(X - Xold,'fro')/norm(X,'fro');
+   if reldiff < 1e-2, scale = 0; end  % Switch to no scaling.
+
+   if Mres <= tol, return; end
+
+end
+error('Not converged after %2.0f iterations', maxit)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/sqrtm_newton.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,36 @@
+function [X,k] = sqrtm_newton(A,scal,maxit)
+%SQRTM_NEWTON  Matrix square root by Newton iteration (unstable).
+%   [X,k] = SQRTM_NEWTON(A,SCAL,MAXIT) computes the principal
+%   square root X of the matrix A by an unstable Newton iteration.
+%   SCAL specifies whether scaling is used (SCAL = 1) or not
+%   (SCAL = 0, default).
+%   MAXIT (default 25) is the maximum number of iterations allowed.
+%   k is the number of iterations.
+
+n = length(A);
+if nargin < 2, scal = 0; end
+if nargin < 3, maxit = 25; end
+
+tol = mft_tolerance(A);
+accel_tol = 1e-2;  %  Precise value not important.
+need_accel = 1;
+
+X = A;
+
+for k = 1:maxit
+
+   if need_accel && scal > 0
+      mu = (abs(det(X))/sqrt(abs(det(A))))^(-1/n);
+      X = mu*X;
+   end
+
+   Xold = X;
+   X = (X + X\A)/2;
+
+   reldiff = norm(X-Xold,'fro')/norm(X,'fro');
+   if need_accel && (reldiff < accel_tol), need_accel = false; end
+
+   if reldiff <= tol, return, end
+
+end
+error('Not converged after %2.0f iterations', maxit)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/sqrtm_newton_full.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,26 @@
+function [X,k] = sqrtm_newton_full(A, X0)
+%SQRTM_NEWTON_FULL   Matrix square root by full Newton method.
+%   [X,K] = SQRTM_NEWTON_FULL(A, X0) applies Newton's method to
+%   compute a square root X of the matrix A, with starting matrix X0.
+%   Default: X0 = A.  K is the number of iterations.
+
+if nargin < 2, X0 = A; end
+
+X = X0;
+tol = mft_tolerance(A);
+maxit = 50;
+
+for k = 1:maxit
+
+   Xold = X;
+   R = A - X^2;
+   % Solve XE + EX = R.
+   E = sylvsol(X,X,R);
+   X = X + E;
+
+   reldiff = norm(X - Xold,inf)/norm(X,inf);
+
+   if reldiff <= tol; return; end
+
+end
+error('Not converged after %2.0f iterations', maxit)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/sqrtm_pd.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,10 @@
+function [X,k] = sqrtm_pd(A)
+%SQRTM_PD    Square root of positive definite matrix via polar decomposition.
+%   [X,K] = SQRTM_PD(A) computes the Hermitian positive definite
+%   square root X of the Hermitian positive definite matrix A.
+%   It computes the Hermitian polar factor of the Cholesky factor of A.
+%   K is the number of Newton polar iterations used.
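+%
+%   Example:
+%       A = gallery('lehmer',5);      % Hermitian positive definite
+%       X = sqrtm_pd(A);
+%       norm(X^2 - A,1)/norm(A,1)     % should be small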
+
+R = chol(A);
+[U,H,k] = polar_newton(R);
+X = H;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/sqrtm_pulay.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,35 @@
+function [X,k] = sqrtm_pulay(A,D)
+%SQRTM_PULAY   Matrix square root by Pulay iteration.
+%   [X,K] = SQRTM_PULAY(A,D) computes the principal square root of the
+%   matrix A using the Pulay iteration with diagonal matrix D
+%   (default: D = DIAG(DIAG(A))).  D must have positive diagonal entries.
+%   K is the number of iterations.
+%   Note: this iteration is linearly convergent and converges only when
+%         SQRTM(D) sufficiently well approximates SQRTM(A).
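+%
+%   Example (sketch; A is diagonally dominant, so the default
+%   D = DIAG(DIAG(A)) is a good enough starting approximation):
+%       A = full(gallery('tridiag',8,1,4,1));
+%       X = sqrtm_pulay(A);
+%       norm(X^2 - A,1)/norm(A,1)     % should be small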
+
+if nargin < 2, D = diag(diag(A)); end
+
+if any(diag(D)<=0) || ~isreal(D)
+   error('D must have positive, real diagonal.')
+end
+
+n = length(A);
+dhalf = sqrt(diag(D));
+Dhalf = diag(dhalf);
+B = zeros(n);
+maxit = 50;
+
+tol = mft_tolerance(A);
+
+for k = 1:maxit
+
+   Bold = B;
+   B = (A - D - Bold^2) ./ (dhalf(:,ones(1,n)) + dhalf(:,ones(1,n))');
+   X = Dhalf + B;
+
+   reldiff = norm(B - Bold,inf)/norm(X,inf);
+
+   if reldiff <= tol, return, end
+
+end
+error('Not converged after %2.0f iterations', maxit)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/sqrtm_real.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,62 @@
+function X = sqrtm_real(A)
+%SQRTM_REAL Square root of real matrix by real Schur method.
+%   X = SQRTM_REAL(A) is the principal square root of the real matrix A,
+%   computed in real arithmetic by the real Schur method.
+%   X is real unless A has a real negative eigenvalue
+%   (in this case a real primary square root does not exist).
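+%
+%   Example (illustrative check):
+%       A = randn(6); B = A*A' + eye(6);   % symmetric positive definite
+%       X = sqrtm_real(B);
+%       norm(X^2 - B,1)/norm(B,1), norm(imag(X),1)   % both should be small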
+
+if norm(imag(A),1), error('A must be real.'), end
+A = real(A);   % Discard any zero imaginary part.
+n = length(A);
+[Q,R] = schur(A,'real'); % Quasitriangular R.
+
+% m blocks: i'th has order s(i), starts at t(i).
+[m, s, k] = quasitriang_struct(R);
+T = zeros(n);
+
+for j=1:m
+    p = k(j):k(j)+s(j)-1;
+    if s(j) == 1
+       % If A is real, X will be real unless R(p,p)<0 on the next line.
+       T(p,p) = sqrt(R(p,p));
+    else
+       T(p,p) = rsqrt2(R(p,p));
+    end
+    for r=j-1:-1:1
+        rind = k(r):k(r)+s(r)-1;
+        rj = k(r+1):k(j)-1;
+        if ~isempty(rj)
+           prod = T(rind,rj)*T(rj,p);  % Gives [] when rj = [].
+        else
+           prod = zeros(s(r),s(j));
+        end
+        B = R(rind,p) - prod;
+        % NB Unconjugated transpose on next line for complex case.
+        A = kron( eye(s(j)), T(rind,rind) ) + kron( T(p,p).', eye(s(r)) );
+        y = A\B(:);
+        B(:) = y;                      % `Un-vec' the solution.
+        T(rind,p) = B;
+   end
+end
+
+X = Q*T*Q';
+
+%%%%%%%%%%%%%%%%%%%%%%
+function X = rsqrt2(R)
+%RSQRT2  Real square root of a real 2x2 matrix with complex conjugate
+%        eigenvalues.
+
+r11 = R(1,1); r12 = R(1,2);
+r21 = R(2,1); r22 = R(2,2);
+
+theta = (r11 + r22) / 2;
+musq = (-(r11 - r22)^2 - 4*r21*r12) / 4;
+mu = sqrt(musq);
+
+if theta > 0
+   alpha = sqrt( (theta + sqrt(theta^2+musq))/2 );
+else
+   alpha = mu / sqrt( 2*(-theta + sqrt(theta^2+musq)) );
+end
+
+X = (alpha-theta/(2*alpha)) * eye(2) + R/(2*alpha);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/sqrtm_triang_min_norm.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,25 @@
+function R = sqrtm_triang_min_norm(T)
+%SQRTM_TRIANG_MIN_NORM  Estimated min norm square root of triangular matrix.
+%   R = SQRTM_TRIANG_MIN_NORM(T) computes a primary square root of the
+%   upper triangular matrix T and attempts to minimize its 1-norm.
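+%
+%   Example (sketch, using a complex Schur factor):
+%       T = schur(randn(6),'complex');
+%       R = sqrtm_triang_min_norm(T);
+%       norm(R^2 - T,1)/norm(T,1)     % should be small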
+
+if ~isequal(T,triu(T)), error('T must be upper triangular'), end
+
+n = length(T);
+rp = zeros(n,1);
+rm = zeros(n,1);
+
+R = zeros(n);
+for j=1:n
+    rp(j) = sqrt(T(j,j));
+    rm(j) = -sqrt(T(j,j));
+    for i=j-1:-1:1
+        rp(i) = (T(i,j) - R(i,i+1:j-1)*rp(i+1:j-1))/(R(i,i) + rp(j));
+        rm(i) = (T(i,j) - R(i,i+1:j-1)*rm(i+1:j-1))/(R(i,i) + rm(j));
+    end
+    if norm(rp(1:j),1) <= norm(rm(1:j),1)
+       R(1:j,j) = rp(1:j);
+    else
+       R(1:j,j) = rm(1:j);
+    end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mftoolbox/sylvsol.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,39 @@
+function X = sylvsol(A,B,C)
+%SYLVSOL  Solve Sylvester equation.
+%   X = SYLVSOL(A, B, C) is the solution to the Sylvester equation
+%   AX + XB = C, where A is m-by-m, B is n-by-n and C is m-by-n.
+%   Schur decompositions are used to convert to a (quasi)-triangular
+%   system.
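+%
+%   Example (illustrative check):
+%       A = randn(5); B = randn(3); C = randn(5,3);
+%       X = sylvsol(A,B,C);
+%       norm(A*X + X*B - C,1)    % should be small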
+
+%   Reference:
+%   R. H. Bartels and G. W. Stewart.
+%   Algorithm 432: Solution of the matrix equation AX+XB=C.
+%   Comm. ACM, 15(9):820-826, 1972.
+
+[m,m] = size(A);
+[n,n] = size(B);
+
+realdata = (isreal(A) && isreal(B) && isreal(C));
+if ~isequal(A,triu(A)) || ~isequal(B,triu(B))
+
+   [Q, T] = schur(A,'complex');
+   [P, U] = schur(B,'complex');
+   C = Q'*C*P;
+   schur_red = 1;
+
+else
+
+   schur_red = 0;
+   T = A; U = B;
+
+end
+
+X = zeros(m,n);
+
+% Forward substitution.
+for i = 1:n
+    X(:,i) = (T + U(i,i)*eye(m)) \ (C(:,i) - X(:,1:i-1)*U(1:i-1,i));
+end
+
+if schur_red, X = Q*X*P'; end
+if realdata, X = real(X); end
Binary file mftoolbox/tau_r8_zeros.mat has changed
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/augment.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,37 @@
+function C = augment(A, alpha)
+%AUGMENT  Augmented system matrix.
+%         AUGMENT(A, ALPHA) is the square matrix
+%         [ALPHA*EYE(m) A; A' ZEROS(n)] of dimension m+n, where A is m-by-n.
+%         It is the symmetric and indefinite coefficient matrix of the
+%         augmented system associated with a least squares problem
+%         minimize NORM(A*x-b).  ALPHA defaults to 1.
+%         Special case: if A is a scalar, n say, then AUGMENT(A) is the
+%                       same as AUGMENT(RANDN(p,q)) where n = p+q and
+%                       p = ROUND(n/2), that is, a random augmented matrix
+%                       of dimension n is produced.
+%         The eigenvalues of AUGMENT(A) are given in terms of the singular
+%         values s(i) of A (where m>n) by
+%                  1/2 +/- SQRT( s(i)^2 + 1/4 ),  i=1:n  (2n eigenvalues),
+%                  1,  (m-n eigenvalues).
+%         If m < n then the first expression provides 2m eigenvalues and the
+%         remaining n-m eigenvalues are zero.
+%
+%         See also SPAUGMENT.
+
+%         Reference:
+%         G.H. Golub and C.F. Van Loan, Matrix Computations, Second
+%         Edition, Johns Hopkins University Press, Baltimore, Maryland,
+%         1989, sec. 5.6.4.
+
+[m, n] = size(A);
+if nargin < 2, alpha = 1; end
+
+if max(m,n) == 1
+   n = A;
+   p = round(n/2);
+   q = n - p;
+   A = randn(p,q);
+   m = p; n = q;
+end
+
+C = [alpha*eye(m) A; A' zeros(n)];
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/bandred.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,55 @@
+function A = bandred(A, kl, ku)
+%BANDRED  Band reduction by two-sided unitary transformations.
+%         B = BANDRED(A, KL, KU) is a matrix unitarily equivalent to A
+%         with lower bandwidth KL and upper bandwidth KU
+%         (i.e. B(i,j) = 0 if i > j+KL or j > i+KU).
+%         The reduction is performed using Householder transformations.
+%         If KU is omitted it defaults to KL.
+
+%         Called by RANDSVD.
+%         This is a `standard' reduction.  Cf. reduction to bidiagonal form
+%         prior to computing the SVD.  This code is a little wasteful in that
+%         it computes certain elements which are immediately set to zero!
+%
+%         Reference:
+%         G.H. Golub and C.F. Van Loan, Matrix Computations, second edition,
+%         Johns Hopkins University Press, Baltimore, Maryland, 1989.
+%         Section 5.4.3.
+
+if nargin == 2, ku = kl; end
+
+if kl == 0 & ku == 0
+   error('You''ve asked for a diagonal matrix.  In that case use the SVD!')
+end
+
+% Check for special case where order of left/right transformations matters.
+% Easiest approach is to work on the transpose, flipping back at the end.
+flip = 0;
+if ku == 0
+   A = A';
+   temp = kl; kl = ku; ku = temp; flip = 1;
+end
+
+[m, n] = size(A);
+
+for j=1 : min( min(m,n), max(m-kl-1,n-ku-1) )
+
+    if j+kl+1 <= m
+       [v, beta] = house(A(j+kl:m,j));
+       temp = A(j+kl:m,j:n);
+       A(j+kl:m,j:n) = temp - beta*v*(v'*temp);
+       A(j+kl+1:m,j) = zeros(m-j-kl,1);
+    end
+
+    if j+ku+1 <= n
+       [v, beta] = house(A(j,j+ku:n)');
+       temp = A(j:m,j+ku:n);
+       A(j:m,j+ku:n) = temp - beta*(temp*v)*v';
+       A(j,j+ku+1:n) = zeros(1,n-j-ku);
+    end
+
+end
+
+if flip
+   A = A';
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/cauchy.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,39 @@
+function C = cauchy(x, y)
+%CAUCHY  Cauchy matrix.
+%        C = CAUCHY(X, Y), where X, Y are N-vectors, is the N-by-N matrix
+%        with C(i,j) = 1/(X(i)+Y(j)).   By default, Y = X.
+%        Special case: if X is a scalar CAUCHY(X) is the same as CAUCHY(1:X).
+%        Explicit formulas are known for DET(C) (which is nonzero if X and Y
+%        both have distinct elements) and the elements of INV(C).
+%        C is totally positive if 0 < X(1) < ... < X(N) and
+%        0 < Y(1) < ... < Y(N).
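+%        For example, CAUCHY(1:4) is the 4-by-4 matrix with (i,j) element
+%        1/(i+j).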
+
+%        References:
+%        N.J. Higham, Accuracy and Stability of Numerical Algorithms,
+%           Society for Industrial and Applied Mathematics, Philadelphia, PA,
+%           USA, 1996; sec. 26.1.
+%        D.E. Knuth, The Art of Computer Programming, Volume 1,
+%           Fundamental Algorithms, second edition, Addison-Wesley, Reading,
+%           Massachusetts, 1973, p. 36.
+%        E.E. Tyrtyshnikov, Cauchy-Toeplitz matrices and some applications,
+%           Linear Algebra and Appl., 149 (1991), pp. 1-18.
+%        O. Taussky and M. Marcus, Eigenvalues of finite matrices, in
+%           Survey of Numerical Analysis, J. Todd, ed., McGraw-Hill, New York,
+%           pp. 279-313, 1962. (States the totally positive property on p. 295.)
+
+n = max(size(x));
+%  Handle scalar x.
+if n == 1
+   n = x;
+   x = 1:n;
+end
+
+if nargin == 1, y = x; end
+
+x = x(:); y = y(:);   % Ensure x and y are column vectors.
+if any(size(x) ~= size(y))
+   error('Parameter vectors must be of same dimension.')
+end
+
+C = x*ones(1,n) + ones(n,1)*y.';
+C = ones(n) ./ C;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/chebspec.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,53 @@
+function C = chebspec(n, k)
+%CHEBSPEC  Chebyshev spectral differentiation matrix.
+%          C = CHEBSPEC(N, K) is a Chebyshev spectral differentiation
+%          matrix of order N.  K = 0 (the default) or 1.
+%          For K = 0 (`no boundary conditions'), C is nilpotent, with
+%              C^N = 0 and it has the null vector ONES(N,1).
+%              C is similar to a Jordan block of size N with eigenvalue zero.
+%          For K = 1, C is nonsingular and well-conditioned, and its eigenvalues
+%              have negative real parts.
+%          For both K, the computed eigenvector matrix X from EIG is
+%              ill-conditioned (MESH(REAL(X)) is interesting).
+
+%          References:
+%          C. Canuto, M.Y. Hussaini, A. Quarteroni and T.A. Zang, Spectral
+%             Methods in Fluid Dynamics, Springer-Verlag, Berlin, 1988; p. 69.
+%          L.N. Trefethen and M.R. Trummer, An instability phenomenon in
+%             spectral methods, SIAM J. Numer. Anal., 24 (1987), pp. 1008-1023.
+%          D. Funaro, Computing the inverse of the Chebyshev collocation
+%             derivative, SIAM J. Sci. Stat. Comput., 9 (1988), pp. 1050-1057.
+
+if nargin == 1, k = 0; end
+
+% k = 1 case obtained from k = 0 case with one bigger n.
+if k == 1, n = n + 1; end
+
+n = n-1;
+C = zeros(n+1);
+
+one = ones(n+1,1);
+x = cos( (0:n)' * (pi/n) );
+d = ones(n+1,1); d(1) = 2; d(n+1) = 2;
+
+% eye(size(C)) on next line avoids div by zero.
+C = (d * (one./d)') ./ (x*one'-one*x' + eye(size(C)));
+
+%  Now fix diagonal and signs.
+
+C(1,1) = (2*n^2+1)/6;
+for i=2:n+1
+    if rem(i,2) == 0
+       C(:,i) = -C(:,i);
+       C(i,:) = -C(i,:);
+    end
+    if i < n+1
+       C(i,i) = -x(i)/(2*(1-x(i)^2));
+    else
+       C(n+1,n+1) = -C(1,1);
+    end
+end
+
+if k == 1
+   C = C(2:n+1,2:n+1);
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/chebvand.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,34 @@
+function C = chebvand(m,p)
+%CHEBVAND  Vandermonde-like matrix for the Chebyshev polynomials.
+%          C = CHEBVAND(P), where P is a vector, produces the (primal)
+%          Chebyshev Vandermonde matrix based on the points P,
+%          i.e., C(i,j) = T_{i-1}(P(j)), where T_{i-1} is the Chebyshev
+%          polynomial of degree i-1.
+%          CHEBVAND(M,P) is a rectangular version of CHEBVAND(P) with M rows.
+%          Special case: If P is a scalar then P equally spaced points on
+%                        [0,1] are used.
+
+%           Reference:
+%           N.J. Higham, Stability analysis of algorithms for solving confluent
+%           Vandermonde-like systems, SIAM J. Matrix Anal. Appl., 11 (1990),
+%           pp. 23-41.
+
+if nargin == 1, p = m; end
+n = max(size(p));
+
+%  Handle scalar p.
+if n == 1
+   n = p;
+   p = seqa(0,1,n);
+end
+
+if nargin == 1, m = n; end
+
+p = p(:).';                    % Ensure p is a row vector.
+C = ones(m,n);
+if m == 1, return, end
+C(2,:) = p;
+%      Use Chebyshev polynomial recurrence.
+for i=3:m
+    C(i,:) = 2.*p.*C(i-1,:) - C(i-2,:);
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/cholp.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,69 @@
+function [R, P, I] = cholp(A, piv)
+%CHOLP  Cholesky factorization with pivoting of a pos. semidefinite matrix.
+%       [R, P] = CHOLP(A) returns R and a permutation matrix P such that
+%       R'*R = P'*A*P.  Only the upper triangular part of A is used.
+%       If A is not positive definite, an error message is printed.
+%
+%       [R, P, I] = CHOLP(A) never produces an error message.
+%       If A is positive definite then I = 0 and R is the Cholesky factor.
+%       If A is not positive definite then I is positive and
+%       R is (I-1)-by-N with P'*A*P - R'*R zeros in columns 1:I-1 and
+%       rows 1:I-1.
+%       [R, I] = CHOLP(A, 0) forces P = EYE(SIZE(A)), and therefore behaves
+%       like [R, I] = CHOL(A).
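+%
+%       Example (a sketch using a matrix from GALLERY):
+%           A = gallery('moler',5);          % symmetric positive definite
+%           [R, P] = cholp(A);
+%           norm(P'*A*P - R'*R, 1)           % should be small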
+
+%       This routine is based on the LINPACK routine CCHDC.  It works
+%       for both real and complex matrices.
+%
+%       Reference:
+%       G.H. Golub and C.F. Van Loan, Matrix Computations, Second
+%       Edition, Johns Hopkins University Press, Baltimore, Maryland,
+%       1989, sec. 4.2.9.
+
+if nargin == 1, piv = 1; end
+
+n = length(A);
+pp = 1:n;
+I = 0;
+
+for k = 1:n
+
+    if piv
+       d = diag(A);
+       [big, m] = max( d(k:n) );
+       m = m+k-1;
+    else
+       big = A(k,k);  m = k;
+    end
+    if big <= 0, I = k; break, end
+
+%   Symmetric row/column permutations.
+    if m ~= k
+       A(:, [k m]) = A(:, [m k]);
+       A([k m], :) = A([m k], :);
+       pp( [k m] ) = pp( [m k] );
+    end
+
+    A(k,k) = sqrt( A(k,k) );
+    if k == n, break, end
+    A(k, k+1:n) = A(k, k+1:n) / A(k,k);
+
+%   For simplicity update the whole of the remaining submatrix (rather
+%   than just the upper triangle).
+
+    j = k+1:n;
+    A(j,j) = A(j,j) - A(k,j)'*A(k,j);
+
+end
+
+R = triu(A);
+if I > 0
+    if nargout < 3, error('Matrix must be positive definite.'), end
+    R = R(1:I-1,:);
+end
+
+if piv == 0
+   P = I;
+else
+   P = eye(n); P = P(:,pp);
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/chop.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,18 @@
+function c = chop(x, t)
+%CHOP    Round matrix elements.
+%        CHOP(X, t) is the matrix obtained by rounding the elements of X
+%        to t significant binary places.
+%        Default is t = 24, corresponding to IEEE single precision.
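+%        For example, CHOP(pi, 8) = 3.140625, since rounding PI to 8
+%        significant binary digits gives 201/64.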
+
+if nargin < 2, t = 24; end
+[m, n] = size(x);
+
+%  Use the representation:
+%  x(i,j) = 2^e(i,j) * .d(1)d(2)...d(s) * sign(x(i,j))
+
+%  On the next line `+(x==0)' avoids passing a zero argument to LOG, which
+%  would cause a warning message to be generated.
+
+y = abs(x) + (x==0);
+e = floor(log2(y) + 1);
+c = pow2(round( pow2(x, t-e) ), e-t);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/chow.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,21 @@
+function A = chow(n, alpha, delta)
+%CHOW    Chow matrix - a singular Toeplitz lower Hessenberg matrix.
+%        A = CHOW(N, ALPHA, DELTA) is a Toeplitz lower Hessenberg matrix
+%        A = H(ALPHA) + DELTA*EYE, where H(i,j) = ALPHA^(i-j+1).
+%        H(ALPHA) has p = FLOOR(N/2) zero eigenvalues, the rest being
+%        4*ALPHA*COS( k*PI/(N+2) )^2, k=1:N-p.
+%        Defaults: ALPHA = 1, DELTA = 0.
+
+%        References:
+%        T.S. Chow, A class of Hessenberg matrices with known
+%           eigenvalues and inverses, SIAM Review, 11 (1969), pp. 391-395.
+%        G. Fairweather, On the eigenvalues and eigenvectors of a class of
+%           Hessenberg matrices, SIAM Review, 13 (1971), pp. 220-221.
+%        I. Singh, G. Poole and T. Boullion, A class of Hessenberg matrices
+%           with known pseudoinverse and Drazin inverse, Math. Comp.,
+%           29 (1975), pp. 615-619.
+
+if nargin < 3, delta = 0; end
+if nargin < 2, alpha = 1; end
+
+A = toeplitz( alpha.^(1:n), [alpha 1 zeros(1,n-2)] ) + delta*eye(n);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/circul.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,25 @@
+function C = circul(v)
+%CIRCUL  Circulant matrix.
+%        C = CIRCUL(V) is the circulant matrix whose first row is V.
+%        (A circulant matrix has the property that each row is obtained
+%        from the previous one by cyclically permuting the entries one step
+%        forward; it is a special Toeplitz matrix in which the diagonals
+%        `wrap round'.)
+%        Special case: if V is a scalar then C = CIRCUL(1:V).
+%        The eigensystem of C (N-by-N) is known explicitly.   If t is an Nth
+%        root of unity, then the inner product of V with W = [1 t t^2 ... t^(N-1)]
+%        is an eigenvalue of C, and W(N:-1:1) is an eigenvector of C.
+
+%        Reference:
+%        P.J. Davis, Circulant Matrices, John Wiley, 1977.
+
+n = max(size(v));
+
+if n == 1
+   n = v;
+   v = 1:n;
+end
+
+v = v(:).';   % Make sure v is a row vector.
+
+C = toeplitz( [ v(1) v(n:-1:2) ], v );
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/clement.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,38 @@
+function A = clement(n, k)
+%CLEMENT   Clement matrix - tridiagonal with zero diagonal entries.
+%          CLEMENT(N, K) is a tridiagonal matrix with zero diagonal entries
+%          and known eigenvalues.  It is singular if N is odd.  About 64
+%          percent of the entries of the inverse are zero.  The eigenvalues
+%          are plus and minus the numbers N-1, N-3, N-5, ..., (1 or 0).
+%          For K = 0 (the default) the matrix is unsymmetric, while for
+%          K = 1 it is symmetric.
+%          CLEMENT(N, 1) is diagonally similar to CLEMENT(N).
+
+%          Similar properties hold for TRIDIAG(X,Y,Z) where Y = ZEROS(N,1).
+%          The eigenvalues still come in plus/minus pairs but they are not
+%          known explicitly.
+%
+%          References:
+%          P.A. Clement, A class of triple-diagonal matrices for test
+%             purposes, SIAM Review, 1 (1959), pp. 50-52.
+%          A. Edelman and E. Kostlan, The road from Kac's matrix to Kac's
+%             random polynomials. In John~G. Lewis, editor, Proceedings of
+%             the Fifth SIAM Conference on Applied Linear Algebra Society
+%             for Industrial and Applied Mathematics, Philadelphia, 1994,
+%             pp. 503-507.
+%          O. Taussky and J. Todd, Another look at a matrix of Mark Kac,
+%             Linear Algebra and Appl., 150 (1991), pp. 341-360.
+
+if nargin == 1, k = 0; end
+
+n = n-1;
+
+x = n:-1:1;
+z = 1:n;
+
+if k == 0
+   A = diag(x, -1) + diag(z, 1);
+else
+   y = sqrt(x.*z);
+   A = diag(y, -1) + diag(y, 1);
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/cod.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,38 @@
+function [U, R, V] = cod(A, tol)
+%COD    Complete orthogonal decomposition.
+%       [U, R, V] = COD(A, TOL) computes a decomposition A = U*T*V,
+%       where U and V are unitary, T = [R 0; 0 0] has the same dimensions as
+%       A, and R is upper triangular and nonsingular of dimension rank(A).
+%       Rank decisions are made using TOL, which defaults to approximately
+%       MAX(SIZE(A))*NORM(A)*EPS.
+%       By itself, COD(A, TOL) returns R.
+
+%       Reference:
+%       G.H. Golub and C.F. Van Loan, Matrix Computations, Second
+%       Edition, Johns Hopkins University Press, Baltimore, Maryland,
+%       1989, sec. 5.4.2.
+
+[m, n] = size(A);
+
+% QR decomposition.
+[U, R, P] = qr(A);    % AP = UR
+V = P';               % A = URV;
+if nargin == 1, tol = max(m,n)*eps*abs(R(1,1)); end  % |R(1,1)| approx NORM(A).
+
+% Determine r = effective rank.
+r = sum(abs(diag(R)) > tol);
+r = r(1);             % Fix for case where R is vector.
+R = R(1:r,:);         % Throw away negligible rows (incl. all zero rows, m>n).
+
+if r ~= n
+
+   % Reduce nxr R' =  r  [L]  to lower triangular form: QR' = [Lbar].
+   %                 n-r [M]                                  [0]
+
+   [Q, R] = trap2tri(R');
+   V = Q*V;
+   R = R';
+
+end
+
+if nargout <= 1, U = R; end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/comp.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,41 @@
+function C = comp(A, k)
+%COMP    Comparison matrices.
+%        COMP(A) is DIAG(B) - TRIL(B,-1) - TRIU(B,1), where B = ABS(A).
+%        COMP(A, 1) is A with each diagonal element replaced by its
+%        absolute value, and each off-diagonal element replaced by minus
+%        the absolute value of the largest element in absolute value in
+%        its row.  However, if A is triangular COMP(A, 1) is too.
+%        COMP(A, 0) is the same as COMP(A).
+%        COMP(A) is often denoted by M(A) in the literature.
+
+%        Reference (e.g.):
+%        N.J. Higham, A survey of condition number estimation for
+%        triangular matrices, SIAM Review, 29 (1987), pp. 575-596.
+
+if nargin == 1, k = 0; end
+[m, n] = size(A);
+p = min(m, n);
+
+if k == 0
+
+% This code uses less temporary storage than the `high level' definition above.
+   C = -abs(A);
+   for j=1:p
+     C(j,j) = abs(A(j,j));
+   end
+
+elseif k == 1
+
+   C = A';
+   for j=1:p
+       C(j,j) = 0;
+   end
+   mx = max(abs(C));
+   C = -mx'*ones(1,n);
+   for j=1:p
+       C(j,j) = abs(A(j,j));
+   end
+   if all( A == tril(A) ), C = tril(C); end
+   if all( A == triu(A) ), C = triu(C); end
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/compan.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,45 @@
+function A = compan(p)
+%COMPAN  Companion matrix.
+%        COMPAN(P) is a companion matrix.  There are three cases.
+%        If P is a scalar then COMPAN(P) is the P-by-P matrix COMPAN(1:P+1).
+%        If P is an (n+1)-vector, COMPAN(P) is the n-by-n companion matrix
+%           whose first row is -P(2:n+1)/P(1).
+%        If P is a square matrix, COMPAN(P) is the companion matrix
+%           of the characteristic polynomial of P, computed as
+%           COMPAN(POLY(P)).
+
+%        References:
+%        J.H. Wilkinson, The Algebraic Eigenvalue Problem,
+%           Oxford University Press, 1965, p. 12.
+%        G.H. Golub and C.F. Van Loan, Matrix Computations, second edition,
+%           Johns Hopkins University Press, Baltimore, Maryland, 1989,
+%           sec 7.4.6.
+%        C. Kenney and A.J. Laub, Controllability and stability radii for
+%          companion form systems, Math. Control Signals Systems, 1 (1988),
+%          pp. 239-256. (Gives explicit formulas for the singular values of
+%          COMPAN(P).)
+
+[n,m] = size(p);
+
+if n == m & n > 1
+   % Matrix argument.
+   A = compan(poly(p));
+   return
+end
+
+n = max(n,m);
+%  Handle scalar p.
+if n == 1
+   n = p+1;
+   p = 1:n;
+end
+
+p = p(:)';                    % Ensure p is a row vector.
+
+% Construct matrix of order n-1.
+if n == 2
+   A = 1;
+else
+    A = diag(ones(1,n-2),-1);
+    A(1,:) = -p(2:n)/p(1);
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/cond.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,38 @@
+function y = cond(A, p)
+%COND   Matrix condition number in 1, 2, Frobenius, or infinity norm.
+%       For p = 1, 2, 'fro', inf,  COND(A,p) = NORM(A,p) * NORM(INV(A),p).
+%       If p is omitted then p = 2 is used.
+%       A may be a rectangular matrix if p = 2; in this case COND(A)
+%       is the ratio of the largest singular value of A to the smallest
+%       (and hence is infinite if A is rank deficient).
+
+%	See also RCOND, NORM, CONDEST, NORMEST.
+%       Generalises and incorporates MATFUN/COND.M from Matlab 4.
+
+if length(A) == 0  % Handle null matrix.
+    y = NaN;
+    return
+end
+if issparse(A)
+    error('Matrix must be non-sparse.')
+end
+
+if nargin == 1, p = 2; end
+
+[m, n] = size(A);
+if m ~= n & p ~= 2
+   error('A is rectangular.  Use the 2 norm.')
+end
+
+if p == 2
+   s = svd(A);
+   if any(s == 0)   % Handle singular matrix
+        disp('Condition is infinite')
+        y = Inf;
+        return
+   end
+   y = max(s)./min(s);
+else
+%  We'll let NORM pick up any invalid p argument.
+   y = norm(A, p) * norm(inv(A), p);
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/condex.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,63 @@
+function A = condex(n, k, theta)
+%CONDEX   `Counterexamples' to matrix condition number estimators.
+%         CONDEX(N, K, THETA) is a `counterexample' matrix to a condition
+%         estimator.  It has order N and scalar parameter THETA (default 100).
+%         If N is not equal to the `natural' size of the matrix then
+%         the matrix is padded out with an identity matrix to order N.
+%         The matrix, its natural size, and the estimator to which it applies
+%         are specified by K (default K = 4) as follows:
+%             K = 1:   4-by-4,     LINPACK (RCOND)
+%             K = 2:   3-by-3,     LINPACK (RCOND)
+%             K = 3:   arbitrary,  LINPACK (RCOND) (independent of THETA)
+%             K = 4:   N >= 4,     SONEST (Higham 1988)
+%         (Note that in practice the K = 4 matrix is not usually a
+%          counterexample because of the rounding errors in forming it.)
+
+%         References:
+%         A.K. Cline and R.K. Rew, A set of counter-examples to three
+%            condition number estimators, SIAM J. Sci. Stat. Comput.,
+%            4 (1983), pp. 602-611.
+%         N.J. Higham, FORTRAN codes for estimating the one-norm of a real or
+%            complex matrix, with applications to condition estimation
+%            (Algorithm 674), ACM Trans. Math. Soft., 14 (1988), pp. 381-396.
+
+if nargin < 3, theta = 100; end
+if nargin < 2, k = 4; end
+
+if k == 1    % Cline and Rew (1983), Example B.
+
+   A = [1  -1  -2*theta     0
+        0   1     theta  -theta
+        0   1   1+theta  -(theta+1)
+        0   0   0         theta];
+
+elseif k == 2   % Cline and Rew (1983), Example C.
+
+   A = [1   1-2/theta^2  -2
+        0   1/theta      -1/theta
+        0   0             1];
+
+elseif k == 3    % Cline and Rew (1983), Example D.
+
+    A = triw(n,-1)';
+    A(n,n) = -1;
+
+elseif k == 4    % Higham (1988), p. 390.
+
+    x = ones(n,3);            %  First col is e
+    x(2:n,2) = zeros(n-1,1);  %  Second col is e(1)
+
+    % Third col is special vector b in SONEST
+    x(:, 3) = (-1).^[0:n-1]' .* ( 1 + [0:n-1]'/(n-1) );
+
+    Q = orth(x);  %  Q*Q' is now the orthogonal projector onto span(e(1),e,b).
+    P = eye(n) - Q*Q';
+    A = eye(n) + theta*P;
+
+end
+
+% Pad out with identity as necessary.
+[m, m] = size(A);
+if m < n
+   for i=n:-1:m+1, A(i,i) = 1; end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/contents.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,114 @@
+% Test Matrix Toolbox.
+% Version 3.0, September 19 1995
+% Copyright (c) 1995 by N. J. Higham
+%
+% Demonstration
+%TMTDEMO   Demonstration of Test Matrix Toolbox.
+%
+% Test Matrices
+%AUGMENT   Augmented system matrix.
+%CAUCHY    Cauchy matrix.
+%CHEBSPEC  Chebyshev spectral differentiation matrix.
+%CHEBVAND  Vandermonde-like matrix for the Chebyshev polynomials.
+%CHOW      Chow matrix - a singular Toeplitz lower Hessenberg matrix.
+%CIRCUL    Circulant matrix.
+%CLEMENT   Clement matrix - tridiagonal with zero diagonal entries.
+%COMPAN    Companion matrix.
+%CONDEX    `Counterexamples' to matrix condition estimators.
+%CYCOL     Matrix whose columns repeat cyclically.
+%DINGDONG  Dingdong matrix - a symmetric Hankel matrix.
+%DORR      Dorr matrix - diag. dominant, ill-conditioned, tridiagonal (sparse).
+%DRAMADAH  A (0,1) matrix with large inverse.
+%FIEDLER   Fiedler matrix - symmetric.
+%FORSYTHE  Forsythe matrix - a perturbed Jordan block.
+%FRANK     Frank matrix - ill-conditioned eigenvalues.
+%GALLERY   Famous, and not so famous, test matrices.
+%GEARM     Gear matrix.
+%GFPP      Matrix giving maximal growth factor for Gaussian elim. with pivoting.
+%GRCAR     Grcar matrix - a Toeplitz matrix with sensitive eigenvalues.
+%HADAMARD  Hadamard matrix.
+%HANOWA    Hanowa matrix.
+%HILB      Hilbert matrix.
+%INVHESS   Inverse of an upper Hessenberg matrix.
+%INVOL     An involutory matrix.
+%IPJFACT   A Hankel matrix with factorial elements.
+%JORDBLOC  Jordan block.
+%KAHAN     Kahan matrix - upper trapezoidal.
+%KMS       Kac-Murdock-Szego Toeplitz matrix.
+%KRYLOV    Krylov matrix.
+%LAUCHLI   Lauchli matrix.
+%LEHMER    Lehmer matrix - symmetric positive definite.
+%LESP      A tridiagonal matrix with real, sensitive eigenvalues.
+%LOTKIN    Lotkin matrix.
+%MAKEJCF   A matrix with given Jordan canonical form.
+%MINIJ     Symmetric positive definite matrix MIN(i,j).
+%MOLER     Moler matrix - symmetric positive definite.
+%NEUMANN   Singular matrix from the discrete Neumann problem.
+%OHESS     Random, orthogonal upper Hessenberg matrix.
+%ORTHOG    Orthogonal and nearly orthogonal matrices.
+%PARTER    Parter matrix - a Toeplitz matrix with singular values near PI.
+%PASCAL    Pascal matrix.
+%PDTOEP    Symmetric positive definite Toeplitz matrix.
+%PEI       Pei matrix.
+%PENTOEP   Pentadiagonal Toeplitz matrix (sparse).
+%POISSON   Block tridiagonal matrix from Poisson's equation (sparse).
+%PROLATE   Prolate matrix - symmetric, ill-conditioned Toeplitz matrix.
+%RANDO     Random matrix with elements -1, 0 or 1.
+%RANDSVD   Random matrices with pre-assigned singular values.
+%REDHEFF   A matrix of 0s and 1s of Redheffer.
+%RIEMANN   A matrix associated with the Riemann hypothesis.
+%RSCHUR    An upper quasi-triangular matrix.
+%SMOKE     Smoke matrix - complex, with a `smoke ring' pseudospectrum.
+%TRIDIAG   Tridiagonal matrix (sparse).
+%TRIW      Upper triangular matrix discussed by Wilkinson and others.
+%VAND      Vandermonde matrix.
+%WATHEN    Wathen matrix - a finite element matrix (sparse, random entries).
+%WILK      Various specific matrices devised/discussed by Wilkinson.
+%
+% Visualization
+%FV        Field of values (or numerical range).
+%GERSH     Gershgorin disks.
+%PS        Approximation to the pseudospectrum.
+%PSCONT    Contours and colour pictures of pseudospectra.
+%SEE       Pictures of a matrix and its (pseudo-) inverse.
+%
+% Decompositions and factorizations.
+%CGS       Gram-Schmidt QR factorization.
+%CHOLP     Cholesky factorization with pivoting of a pos. semidefinite matrix.
+%COD       Complete orthogonal decomposition.
+%DIAGPIV   Diagonal pivoting factorization with partial pivoting.
+%GE        Gaussian elimination without pivoting.
+%GECP      Gaussian elimination with complete pivoting.
+%GJ        Gauss-Jordan elimination to solve Ax = b.
+%MGS       Modified Gram-Schmidt QR factorization.
+%POLDEC    Polar decomposition.
+%SIGNM     Matrix sign decomposition.
+%
+% Direct Search Optimization.
+%ADSMAX  Alternating directions direct search method.
+%MDSMAX  Multidirectional search method for direct search optimization.
+%NMSMAX  Nelder-Mead simplex method for direct search optimization.
+%
+% Miscellaneous
+%BANDRED   Band reduction by two-sided orthogonal transformations.
+%CHOP      Round matrix elements.
+%COMP      Comparison matrices.
+%COND      Matrix condition number in 1, 2, Frobenius, or infinity norm.
+%CPLTAXES  Determine suitable AXIS for plot of complex vector.
+%DUAL      Dual vector with respect to Holder p-norm.
+%EIGSENS   Eigenvalue condition numbers.
+%HOUSE     Householder matrix.
+%MATRIX    Test Matrix Toolbox information and access by number.
+%MATSIGNT  Matrix sign function of a triangular matrix.
+%PNORM     Estimate of matrix p-norm (1 <= p <= inf).
+%QMULT     Pre-multiply by random orthogonal matrix.
+%RQ        Rayleigh quotient.
+%SEQA      An additive sequence.
+%SEQCHEB   Sequence of points related to Chebyshev polynomials, T_N.
+%SEQM      A multiplicative sequence.
+%SHOW      Display signs of matrix elements.
+%SKEWPART  Skew-symmetric (Hermitian) part.
+%SPARSIFY  Randomly sets matrix elements to zero.
+%SUB       Principal submatrix.
+%SYMMPART  Symmetric (Hermitian) part.
+%TRAP2TRI  Trapezoidal matrix to triangular form.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/cpltaxes.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,37 @@
+function x = cpltaxes(z)
+%CPLTAXES   Determine suitable AXIS for plot of complex vector.
+%           X = CPLTAXES(Z), where Z is a complex vector,
+%           determines a 4-vector X such that AXIS(X) sets axes for a plot
+%           of Z that has axes of equal length and leaves a reasonable amount
+%           of space around the edge of the plot.
+
+%           Called by FV, GERSH, PS and PSCONT.
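+%
+%           Example (an illustrative sketch added in editing):
+%               z = exp(sqrt(-1)*linspace(0, 2*pi, 50));
+%               plot(real(z), imag(z)), axis(cpltaxes(z))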
+
+% Set x and y axis ranges so both have the same length.
+
+xmin = min(real(z)); xmax = max(real(z));
+ymin = min(imag(z)); ymax = max(imag(z));
+
+% Fix for rare case of `trivial data'.
+if xmin == xmax, xmin = xmin - 1/2; xmax = xmax + 1/2; end
+if ymin == ymax, ymin = ymin - 1/2; ymax = ymax + 1/2; end
+
+if xmax-xmin >= ymax-ymin
+   ymid = (ymin + ymax)/2;
+   ymin =  ymid - (xmax-xmin)/2; ymax = ymid + (xmax-xmin)/2;
+else
+   xmid = (xmin + xmax)/2;
+   xmin = xmid - (ymax-ymin)/2; xmax = xmid + (ymax-ymin)/2;
+end
+axis('square')
+
+% Scale ranges by 1+2*alpha to give extra space around edges of plot.
+
+alpha = 0.1;
+x(1) = xmin - alpha*(xmax-xmin);
+x(2) = xmax + alpha*(xmax-xmin);
+x(3) = ymin - alpha*(ymax-ymin);
+x(4) = ymax + alpha*(ymax-ymin);
+
+if x(1) == x(2), x(2) = x(2) + 0.1; end
+if x(3) == x(4), x(4) = x(3) + 0.1; end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/cycol.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,22 @@
+function A = cycol(n, k)
+%CYCOL   Matrix whose columns repeat cyclically.
+%        A = CYCOL([M N], K) is an M-by-N matrix of the form A = B(1:M,1:N)
+%        where B = [C C C...] and C = RANDN(M, K).  Thus A's columns repeat
+%        cyclically, and A has rank at most K.   K need not divide N.
+%        K defaults to ROUND(N/4).
+%        CYCOL(N, K), where N is a scalar, is the same as CYCOL([N N], K).
+%
+%        This type of matrix can lead to underflow problems for Gaussian
+%        elimination: see NA Digest Volume 89, Issue 3 (January 22, 1989).
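+%
+%        Example (an illustrative sketch added in editing):
+%            A = cycol([4 10], 2);   % 4-by-10 matrix whose columns repeat with period 2.
+%            rank(A)                 % At most 2.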
+
+m = n(1);              % Parameter n specifies dimension: m-by-n.
+n = n(max(size(n)));
+
+if nargin < 2, k = max(round(n/4),1); end
+
+A = randn(m, k);
+for i=2:ceil(n/k)
+    A = [A A(:,1:k)];
+end
+
+A = A(:, 1:n);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/diagpiv.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,108 @@
+function [L, D, P, rho] = diagpiv(A)
+%DIAGPIV     Diagonal pivoting factorization with partial pivoting.
+%            Given a Hermitian matrix A,
+%            [L, D, P, rho] = DIAGPIV(A) computes a permutation P,
+%            a unit lower triangular L, and a real block diagonal D
+%            with 1x1 and 2x2 diagonal blocks, such that
+%            P*A*P' = L*D*L'.
+%            The Bunch-Kaufman partial pivoting strategy is used.
+%            Rho is the growth factor.
+
+%            Reference:
+%            J.R. Bunch and L. Kaufman, Some stable methods for calculating
+%            inertia and solving symmetric linear systems, Math. Comp.,
+%            31(137):163-179, 1977.
+
+%            This routine does not exploit symmetry and is not designed to be
+%            efficient.
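+%
+%            Example (an illustrative sketch added in editing):
+%                A = randn(6); A = A + A';        % Random real symmetric matrix.
+%                [L, D, P, rho] = diagpiv(A);
+%                norm(P*A*P' - L*D*L', 1)         % Small: of order EPS*NORM(A,1).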
+
+if norm(triu(A,1)'-tril(A,-1),1), error('Must supply Hermitian matrix.'), end
+
+n = max(size(A));
+k = 1;
+D = eye(n);
+L = eye(n);
+pp = 1:n;
+normA = norm(A(:),inf);
+rho = normA;
+
+alpha = (1 + sqrt(17))/8;
+
+while k < n
+      [lambda, r] = max( abs(A(k+1:n,k)) );
+      r = r(1) + k;
+
+      if lambda > 0
+          swap = 0;
+          if abs(A(k,k)) >= alpha*lambda
+             s = 1;
+          else
+             temp = A(k:n,r); temp(r-k+1) = 0;
+             sigma = norm(temp, inf);
+             if alpha*lambda^2 <= abs(A(k,k))*sigma
+                s = 1;
+             elseif abs(A(r,r)) >= alpha*sigma
+                swap = 1;
+                m1 = k; m2 = r;
+                s = 1;
+             else
+                swap = 1;
+                m1 = k+1; m2 = r;
+                s = 2;
+             end
+          end
+
+          if swap
+             A( [m1, m2], : ) = A( [m2, m1], : );
+             L( [m1, m2], : ) = L( [m2, m1], : );
+             A( :, [m1, m2] ) = A( :, [m2, m1] );
+             L( :, [m1, m2] ) = L( :, [m2, m1] );
+             pp( [m1, m2] ) = pp( [m2, m1] );
+          end
+
+          if s == 1
+
+             D(k,k) = A(k,k);
+             A(k+1:n,k) = A(k+1:n,k)/A(k,k);
+             L(k+1:n,k) = A(k+1:n,k);
+             i = k+1:n;
+             A(i,i) = A(i,i) - A(i,k) * A(k,i);
+
+          elseif s == 2
+
+             E = A(k:k+1,k:k+1);
+             D(k:k+1,k:k+1) = E;
+             C = A(k+2:n,k:k+1);
+             temp = C/E;
+             L(k+2:n,k:k+1) = temp;
+             A(k+2:n,k+2:n) = A(k+2:n,k+2:n) - temp*C';
+
+         end
+
+         % Make diagonal real (see LINPACK User's Guide, p. 5.17).
+         for i=k+s:n
+             A(i,i) = real(A(i,i));
+         end
+
+         if k+s <= n
+            rho = max(rho, max(max(abs(A(k+s:n,k+s:n)))) );
+         end
+
+      else  % Nothing to do.
+
+         s = 1;
+         D(k,k) = A(k,k);
+
+      end
+
+      k = k + s;
+
+      if k == n
+         D(n,n) = A(n,n);
+         break
+      end
+
+end
+
+if nargout >= 3, P = eye(n); P = P(pp,:); end
+rho = rho/normA;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/dingdong.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,15 @@
+function A = dingdong(n)
+%DINGDONG  Dingdong matrix - a symmetric Hankel matrix.
+%          A = DINGDONG(N) is the symmetric N-by-N Hankel matrix with
+%                         A(i,j) = 0.5/(N-i-j+1.5).
+%          The eigenvalues of A cluster around PI/2 and -PI/2.
+
+%          Invented by F.N. Ris.
+%
+%          Reference:
+%          J.C. Nash, Compact Numerical Methods for Computers: Linear
+%          Algebra and Function Minimisation, second edition, Adam Hilger,
+%          Bristol, 1990 (Appendix 1).
+
+p = -2*(1:n) + (n+1.5);
+A = cauchy(p);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/dorr.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,41 @@
+function [c, d, e] = dorr(n, theta)
+%DORR  Dorr matrix - diagonally dominant, ill conditioned, tridiagonal.
+%      [C, D, E] = DORR(N, THETA) returns the vectors defining a row diagonally
+%      dominant, tridiagonal M-matrix that is ill conditioned for small
+%      values of the parameter THETA >= 0.
+%      If only one output parameter is supplied then
+%      C = TRIDIAG(C,D,E), i.e., the matrix itself is returned (sparse).
+%      The columns of INV(C) vary greatly in norm.  THETA defaults to 0.01.
+%      The amount of diagonal dominance is given by (ignoring rounding errors):
+%            COMP(C)*ONES(N,1) = THETA*(N+1)^2 * [1 0 0 ... 0 1]'.
+
+%      Reference:
+%      F.W. Dorr, An example of ill-conditioning in the numerical
+%      solution of singular perturbation problems, Math. Comp., 25 (1971),
+%      pp. 271-283.
+
+if nargin < 2, theta = 0.01; end
+
+c = zeros(n,1); e = c; d = c;
+% All length n for convenience.  Make c, e of length n-1 later.
+
+h = 1/(n+1);
+m = floor( (n+1)/2 );
+term = theta/h^2;
+
+i = (1:m)';
+    c(i) = -term*ones(m,1);
+    e(i) = c(i) - (0.5-i*h)/h;
+    d(i) = -(c(i) + e(i));
+
+i = (m+1:n)';
+    e(i) = -term*ones(n-m,1);
+    c(i) = e(i) + (0.5-i*h)/h;
+    d(i) = -(c(i) + e(i));
+
+c = c(2:n);
+e = e(1:n-1);
+
+if nargout <= 1
+   c = tridiag(c, d, e);
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/dramadah.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,55 @@
+function A = dramadah(n, k)
+%DRAMADAH  A (0,1) matrix whose inverse has large integer entries.
+%          An anti-Hadamard matrix A is a matrix with elements 0 or 1 for
+%          which MU(A) := NORM(INV(A),'FRO') is maximal.
+%          A = DRAMADAH(N, K) is an N-by-N (0,1) matrix for which MU(A) is
+%          relatively large, although not necessarily maximal.
+%          Available types (the default is K = 1):
+%          K = 1: A is Toeplitz, with ABS(DET(A)) = 1, and MU(A) > c*(1.75)^N,
+%                 where c is a constant.
+%          K = 2: A is upper triangular and Toeplitz.
+%          The inverses of both types have integer entries.
+%
+%          Another interesting (0,1) matrix:
+%          K = 3: A has maximal determinant among (0,1) lower Hessenberg
+%          matrices: det(A) = the n'th Fibonacci number.  A is Toeplitz.
+%          The eigenvalues have an interesting distribution in the complex
+%          plane.
+
+%          References:
+%          R.L. Graham and N.J.A. Sloane, Anti-Hadamard matrices,
+%             Linear Algebra and Appl., 62 (1984), pp. 113-137.
+%          L. Ching, The maximum determinant of an nxn lower Hessenberg
+%             (0,1) matrix, Linear Algebra and Appl., 183 (1993), pp. 147-153.
+
+if nargin < 2, k = 1; end
+
+if k == 1  % Toeplitz
+
+   c = ones(n,1);
+   for i=2:4:n
+       m = min(1,n-i);
+       c(i:i+m) = zeros(m+1,1);
+   end
+   r = zeros(n,1);
+   r(1:4) = [1 1 0 1];
+   if n < 4, r = r(1:n); end
+   A = toeplitz(c,r);
+
+elseif k == 2  % Upper triangular and Toeplitz
+
+   c = zeros(n,1);
+   c(1) = 1;
+   r = ones(n,1);
+   for i=3:2:n
+       r(i) = 0;
+   end
+   A = toeplitz(c,r);
+
+elseif k == 3  % Lower Hessenberg.
+
+   c = ones(n,1);
+   for i=2:2:n, c(i)=0; end;
+   A = toeplitz(c, [1 1 zeros(1,n-2)]);
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/dual.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,47 @@
+function y = dual(x, p)
+%DUAL    Dual vector with respect to Holder p-norm.
+%        Y = DUAL(X, p), where 1 <= p <= inf, is a vector of unit q-norm
+%        that is dual to X with respect to the p-norm, that is,
+%        norm(Y, q) = 1 where 1/p + 1/q = 1 and there is
+%        equality in the Holder inequality: X'*Y = norm(X, p)*norm(Y, q).
+%        Special case: DUAL(X), where X >= 1 is a scalar, returns Y such
+%                      that 1/X + 1/Y = 1.
+
+%        Called by PNORM.
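+%
+%        Example (an illustrative sketch added in editing):
+%            x = randn(5,1); p = 3; q = 1/(1-1/p);
+%            y = dual(x, p);
+%            [x'*y  norm(x,p)*norm(y,q)]   % The two values agree (Holder equality).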
+
+if max(size(x)) == 1 & nargin == 1
+   p = x;
+end
+
+% The following test avoids a `division by zero message' when p = 1.
+if p == 1
+   q = inf;
+else
+   q = 1/(1-1/p);
+end
+
+if max(size(x)) == 1 & nargin == 1
+   y = q;
+   return
+end
+
+if norm(x,inf) == 0, y = x; return, end
+
+if p == 1
+
+   y = sign(x) + (x == 0);   % y(i) = +1 or -1 (if x(i) real).
+
+elseif p == inf
+
+   [xmax, k] = max(abs(x));
+   f = find(abs(x)==xmax); k = f(1);
+   y = zeros(size(x));
+   y(k) = sign(x(k));        % y is a multiple of unit vector e_k.
+
+else  % 1 < p < inf.  Dual is unique in this case.
+
+  x = x/norm(x,inf);         % This scaling helps to avoid under/over-flow.
+  y = abs(x).^(p-1) .* ( sign(x) + (x==0) );
+  y = y / norm(y,q);         % Normalize to unit q-norm.
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/eigsens.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,25 @@
+function [X, D, s] = eigsens(A)
+%EIGSENS   Eigenvalue condition numbers.
+%          EIGSENS(A) is a vector of condition numbers for the eigenvalues
+%          of A (reciprocals of the Wilkinson s(lambda) numbers).
+%          These condition numbers are the reciprocals of the cosines of the
+%          angles between the left and right eigenvectors.
+%          [V, D, s] = EIGSENS(A) is equivalent to
+%                      [V, D] = EIG(A); s = EIGSENS(A);
+
+%          Reference:
+%          G.H. Golub and C.F. Van Loan, Matrix Computations, Second
+%          Edition, Johns Hopkins University Press, Baltimore, Maryland,
+%          1989, sec. 7.2.2.
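+%
+%          Example (an illustrative sketch added in editing):
+%              s = eigsens(frank(8))   % The smallest eigenvalues have the largest
+%                                      % condition numbers.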
+
+n = max(size(A));
+s = zeros(n,1);
+
+[X, D] = eig(A);
+Y = inv(X);
+
+for i=1:n
+    s(i) = norm(Y(i,:)) * norm(X(:,i)) / abs( Y(i,:)*X(:,i) );
+end
+
+if nargout <= 1, X = s; end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/fdemo.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,7 @@
+function f = fdemo(A)
+%FDEMO   Demonstration function for direct search maximizers.
+%        FDEMO(A) is the reciprocal of the underestimation ratio for RCOND
+%        applied to the square matrix A.
+%        Demonstration function for ADSMAX, MDSMAX and NMSMAX.
+
+f = norm(A,1)*norm(inv(A),1)*rcond(A);  % f >= 1
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/fiedler.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,33 @@
+function A = fiedler(c)
+%FIEDLER  Fiedler matrix - symmetric.
+%         A = FIEDLER(C), where C is an n-vector, is the n-by-n symmetric
+%         matrix with elements ABS(C(i)-C(j)).
+%         Special case: if C is a scalar, then A = FIEDLER(1:C)
+%                       (i.e. A(i,j) = ABS(i-j)).
+%         Properties:
+%           FIEDLER(N) has a dominant positive eigenvalue and all the other
+%                      eigenvalues are negative (Szego, 1936).
+%           Explicit formulas for INV(A) and DET(A) are given by Todd (1977)
+%           and attributed to Fiedler.  These indicate that INV(A) is
+%           tridiagonal except for nonzero (1,n) and (n,1) elements.
+%           [I think these formulas are valid only if the elements of
+%           C are in increasing or decreasing order---NJH.]
+
+%           References:
+%           G. Szego, Solution to problem 3705, Amer. Math. Monthly,
+%              43 (1936), pp. 246-259.
+%           J. Todd, Basic Numerical Mathematics, Vol. 2: Numerical Algebra,
+%              Birkhauser, Basel, and Academic Press, New York, 1977, p. 159.
+
+n = max(size(c));
+
+%  Handle scalar c.
+if n == 1
+   n = c;
+   c = 1:n;
+end
+
+c = c(:).';                    % Ensure c is a row vector.
+
+A = ones(n,1)*c;
+A = abs(A - A.');              % NB. array transpose.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/forsythe.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,13 @@
+function A = forsythe(n, alpha, lambda)
+%FORSYTHE  Forsythe matrix - a perturbed Jordan block.
+%          FORSYTHE(N, ALPHA, LAMBDA) is the N-by-N matrix equal to
+%          JORDBLOC(N, LAMBDA) except it has an ALPHA in the (N,1) position.
+%          It has the characteristic polynomial
+%                  DET(A-t*EYE) = (LAMBDA-t)^N - (-1)^N ALPHA.
+%          ALPHA defaults to SQRT(EPS) and LAMBDA to 0.
+
+if nargin < 2, alpha = sqrt(eps); end
+if nargin < 3, lambda = 0; end
+
+A = jordbloc(n, lambda);
+A(n,1) = alpha;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/frank.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,45 @@
+function F = frank(n, k)
+%FRANK   Frank matrix---ill conditioned eigenvalues.
+%        F = FRANK(N, K) is the Frank matrix of order N.  It is upper
+%        Hessenberg with determinant 1.  K = 0 is the default; if K = 1 the
+%        elements are reflected about the anti-diagonal (1,N)--(N,1).
+%        F has all positive eigenvalues and they occur in reciprocal pairs
+%        (so that 1 is an eigenvalue if N is odd).
+%        The eigenvalues of F may be obtained in terms of the zeros of the
+%        Hermite polynomials.
+%        The FLOOR(N/2) smallest eigenvalues of F are ill conditioned,
+%        the more so for bigger N.
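+%
+%        Example (an illustrative sketch added in editing):
+%            e = sort(eig(frank(10)));
+%            e .* e(10:-1:1)           % Reciprocal pairing: entries approximately 1.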
+
+%        DET(FRANK(N)') comes out far from 1 for large N---see Frank (1958)
+%        and Wilkinson (1960) for discussions.
+%
+%        This version incorporates improvements suggested by W. Kahan.
+%
+%        References:
+%        W.L. Frank, Computing eigenvalues of complex matrices by determinant
+%           evaluation and by methods of Danilewski and Wielandt, J. Soc.
+%           Indust. Appl. Math., 6 (1958), pp. 378-392 (see pp. 385, 388).
+%        G.H. Golub and J.H. Wilkinson, Ill-conditioned eigensystems and the
+%           computation of the Jordan canonical form, SIAM Review, 18 (1976),
+%           pp. 578-619 (Section 13).
+%        H. Rutishauser, On test matrices, Programmation en Mathematiques
+%           Numeriques, Editions Centre Nat. Recherche Sci., Paris, 165,
+%           1966, pp. 349-365.  Section 9.
+%        J.H. Wilkinson, Error analysis of floating-point computation,
+%           Numer. Math., 2 (1960), pp. 319-340 (Section 8).
+%        J.H. Wilkinson, The Algebraic Eigenvalue Problem, Oxford University
+%           Press, 1965 (pp. 92-93).
+%        The next two references give details of the eigensystem, as does
+%        Rutishauser (see above).
+%        P.J. Eberlein, A note on the matrices denoted by B_n, SIAM J. Appl.
+%           Math., 20 (1971), pp. 87-92.
+%        J.M. Varah, A generalization of the Frank matrix, SIAM J. Sci. Stat.
+%           Comput., 7 (1986), pp. 835-839.
+
+if nargin == 1, k = 0; end
+
+p = n:-1:1;
+F = triu( p( ones(n,1), :) - diag( ones(n-1,1), -1), -1 );
+if k ~= 0
+   F = F(p,p)';
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/fv.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,95 @@
+function [f, e] = fv(B, nk, thmax, noplot)
+%FV     Field of values (or numerical range).
+%       FV(A, NK, THMAX) evaluates and plots the field of values of the
+%       NK largest leading principal submatrices of A, using THMAX
+%       equally spaced angles in the complex plane.
+%       The defaults are NK = 1 and THMAX = 16.
+%       (For a `publication quality' picture, set THMAX higher, say 32.)
+%       The eigenvalues of A are displayed as `x'.
+%       Alternative usage: [F, E] = FV(A, NK, THMAX, 1) suppresses the
+%       plot and returns the field of values plot data in F, with A's 
+%       eigenvalues in E.   Note that NORM(F,INF) approximates the
+%       numerical radius,
+%                 max {abs(z): z is in the field of values of A}.
+
+%       Theory:
+%       Field of values FV(A) = set of all Rayleigh quotients. FV(A) is a
+%       convex set containing the eigenvalues of A.  When A is normal FV(A) is
+%       the convex hull of the eigenvalues of A (but not vice versa).
+%               z = x'Ax/(x'x),  z' = x'A'x/(x'x)
+%               => REAL(z) = x'Hx/(x'x),   H = (A+A')/2
+%       so      MIN(EIG(H)) <= REAL(z) <= MAX(EIG(H))
+%       with equality for x = corresponding eigenvectors of H.  For these x,
+%       RQ(A,x) is on the boundary of FV(A).
+%
+%       Based on an original routine by A. Ruhe.
+%
+%       References:
+%       R.A. Horn and C.R. Johnson, Topics in Matrix Analysis, Cambridge
+%            University Press, 1991, Section 1.5.
+%       A.S. Householder, The Theory of Matrices in Numerical Analysis,
+%            Blaisdell, New York, 1964, Section 3.3.
+%       C.R. Johnson, Numerical determination of the field of values of a
+%            general complex matrix, SIAM J. Numer. Anal., 15 (1978),
+%            pp. 595-602.
+
+if nargin < 2, nk = 1; end
+if nargin < 3, thmax = 16; end
+thmax = thmax - 1;  % Because code below uses thmax + 1 angles.
+
+iu = sqrt(-1);
+[n, p] = size(B);
+if n ~= p, error('Matrix must be square.'), end
+f = [];
+z = zeros(2*thmax+1,1);
+e = eig(B);
+
+% Filter out cases where B is Hermitian or skew-Hermitian, for efficiency.
+if norm(skewpart(B),1) == 0
+
+   f = [min(e) max(e)];
+
+elseif norm(symmpart(B),1) == 0
+
+   e = imag(e);
+   f = [min(e) max(e)];
+   e = iu*e; f = iu*f;
+
+else
+
+for m = 1:nk
+
+   ns = n+1-m;
+   A = B(1:ns, 1:ns);
+
+   for i = 0:thmax
+      th = i/thmax*pi;
+      Ath = exp(iu*th)*A;               % Rotate A through angle th.
+      H = 0.5*(Ath + Ath');             % Hermitian part of rotated A.
+      [X, D] = eig(H);
+      [lmbh, k] = sort(real(diag(D)));
+      z(1+i) = rq(A,X(:,k(1)));         % RQ's of A corr. to eigenvalues of H
+      z(1+i+thmax) = rq(A,X(:,k(ns)));  % with smallest/largest real part.
+   end
+
+   f = [f; z];
+
+end
+% Next line ensures boundary is `joined up' (needed for orthogonal matrices).
+f = [f; f(1,:)];
+
+end
+if thmax == 0; f = e; end
+
+if nargin < 4
+
+   ax = cpltaxes(f);
+   plot(real(f), imag(f),'k')      % Plot the field of values
+   axis(ax);
+   axis('square');
+
+   hold on
+   plot(real(e), imag(e), 'xb')    % Plot the eigenvalues too.
+   hold off
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/gallery.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,65 @@
+function [A, e] = gallery(n)
+%GALLERY    Famous, and not so famous, test matrices.
+%       A = GALLERY(N) is an N-by-N matrix with some special property.
+%       The following values of N are currently available:
+%         N = 3 is badly conditioned.
+%         N = 4 is the Wilson matrix.  Symmetric pos def, integer inverse.
+%         N = 5 is an interesting eigenvalue problem: defective, nilpotent.
+%         N = 8 is the Rosser matrix, a classic symmetric eigenvalue problem.
+%               [A, e] = GALLERY(8) returns the exact eigenvalues in e.
+%         N = 21 is Wilkinson's tridiagonal W21+, another eigenvalue problem.
+
+%       Original version supplied with MATLAB.  Modified by N.J. Higham.
+%
+%       References:
+%       J.R. Westlake, A Handbook of Numerical Matrix Inversion and Solution
+%          of Linear Equations, John Wiley, New York, 1968.
+%       J.H. Wilkinson, The Algebraic Eigenvalue Problem, Oxford University
+%          Press, 1965.
+
+if n == 3
+   A = [ -149   -50  -154
+          537   180   546
+          -27    -9   -25 ];
+
+elseif n == 4
+   A = [10     7     8     7
+         7     5     6     5
+         8     6    10     9
+         7     5     9    10];
+
+elseif n == 5
+%   disp('Try to find the EXACT eigenvalues and eigenvectors.')
+%   Matrix devised by Cleve Moler.  Its Jordan form has just one block, with
+%   eigenvalue zero.  Proof: A^k is nonzero for k<5, zero for k=5.
+%   TRACE(A)=0.  No simple form for null vector.
+   A = [  -9     11    -21     63    -252
+          70    -69    141   -421    1684
+        -575    575  -1149   3451  -13801
+        3891  -3891   7782 -23345   93365
+        1024  -1024   2048  -6144   24572 ];
+
+elseif n == 8
+   A  = [ 611.  196. -192.  407.   -8.  -52.  -49.   29.
+          196.  899.  113. -192.  -71.  -43.   -8.  -44.
+         -192.  113.  899.  196.   61.   49.    8.   52.
+          407. -192.  196.  611.    8.   44.   59.  -23.
+           -8.  -71.   61.    8.  411. -599.  208.  208.
+          -52.  -43.   49.   44. -599.  411.  208.  208.
+          -49.   -8.    8.   59.  208.  208.   99. -911.
+           29.  -44.   52.  -23.  208.  208. -911.   99.  ];
+
+   %  Exact eigenvalues from Westlake (1968), p.150 (ei'vectors given too):
+   a = sqrt(10405); b = sqrt(26);
+   e = [-10*a,   0,   510-100*b,  1000,   1000,   510+100*b, ...
+        1020,   10*a]';
+
+elseif n == 21
+   % W21+, Wilkinson (1965), p.308.
+   E = diag(ones(n-1,1),1);
+   m = (n-1)/2;
+   A = diag(abs(-m:m)) + E + E';
+
+else
+   error('Sorry, that value of N is not available.')
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/gearm.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,26 @@
+function A = gearm(n, i, j)
+%GEARM   Gear matrix.
+%        A = GEARM(N,I,J) is the N-by-N matrix with ones on the sub- and
+%        super-diagonals, SIGN(I) in the (1,ABS(I)) position, SIGN(J)
+%        in the (N,N+1-ABS(J)) position, and zeros everywhere else.
+%        Defaults: I = N, J = -N.
+%        All eigenvalues are of the form 2*COS(a) and the eigenvectors
+%        are of the form [SIN(w+a), SIN(w+2a), ..., SIN(w+Na)].
+%        The values of a and w are given in the reference below.
+%        A can have double and triple eigenvalues and can be defective.
+%        GEARM(N) is singular.
+
+%        (GEAR is a Simulink function, hence GEARM for Gear matrix.)
+%        Reference:
+%        C.W. Gear, A simple set of test matrices for eigenvalue programs,
+%        Math. Comp., 23 (1969), pp. 119-125.
+
+if nargin == 1, i = n; j = -n; end
+
+if ~(i~=0 & abs(i)<=n & j~=0 & abs(j)<=n)
+     error('Invalid I and J parameters')
+end
+
+A = diag(ones(n-1,1),-1) + diag(ones(n-1,1),1);
+A(1, abs(i)) = sign(i);
+A(n, n+1-abs(j)) = sign(j);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/gersh.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,38 @@
+function  [G, e] = gersh(A, noplot)
+%GERSH    Gershgorin disks.
+%         GERSH(A) draws the Gershgorin disks for the matrix A.
+%         The eigenvalues are plotted as crosses `x'.
+%         Alternative usage: [G, E] = GERSH(A, 1) suppresses the plot
+%         and returns the data in G, with A's eigenvalues in E.
+%
+%         Try GERSH(LESP(N)) and GERSH(SMOKE(N,1)).
+
+if diff(size(A)), error('Matrix must be square.'), end
+
+n = max(size(A));
+m = 40;
+G = zeros(m,n);
+
+d = diag(A);
+r = sum( abs( A.'-diag(d) ) )';
+e = eig(A);
+
+radvec = exp(i * seqa(0,2*pi,m)');
+
+for j=1:n
+    G(:,j) = d(j)*ones(size(radvec)) + r(j)*radvec;
+end
+
+if nargin < 2
+
+   ax = cpltaxes(G(:));
+   for j=1:n
+       plot(real(G(:,j)), imag(G(:,j)),'-c5')      % Plot the disks.
+       hold on
+   end
+   plot(real(e), imag(e), 'xg')    % Plot the eigenvalues too.
+   axis(ax);
+   axis('square');
+   hold off
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/gfpp.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,40 @@
+function A = gfpp(T, c)
+%GFPP   Matrix giving maximal growth factor for Gaussian elim. with pivoting.
+%       GFPP(T) is a matrix of order N for which Gaussian elimination
+%       with partial pivoting yields a growth factor 2^(N-1).
+%       T is an arbitrary nonsingular upper triangular matrix of order N-1.
+%       GFPP(T, C) sets all the multipliers to C  (0 <= C <= 1)
+%       and gives growth factor (1+C)^(N-1).
+%       GFPP(N, C) (a special case) is the same as GFPP(EYE(N-1), C) and
+%       generates the well-known example of Wilkinson.
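+%
+%       Example (an illustrative sketch added in editing):
+%           A = gfpp(8);                     % Wilkinson-type example of order 8.
+%           [L, U, P] = lu(A);
+%           max(abs(U(:)))/max(abs(A(:)))    % Growth factor, approximately 2^7.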
+
+%       Reference:
+%       N.J. Higham and D.J. Higham, Large growth factors in
+%       Gaussian elimination with pivoting, SIAM J. Matrix Analysis and
+%       Appl., 10 (1989), pp. 155-164.
+
+if norm(T-triu(T),1) | any(~diag(T))
+   error('First argument must be a nonsingular upper triangular matrix.')
+end
+
+if nargin == 1, c = 1; end
+
+if c < 0 | c > 1
+   error('Second parameter must be a scalar between 0 and 1 inclusive.')
+end
+
+[m, m] = size(T);
+if m == 1    % Handle the special case T = scalar
+   n = T;
+   m = n-1;
+   T = eye(n-1);
+else
+   n = m+1;
+end
+
+d = 1+c;
+L =  eye(n) - c*tril(ones(n), -1);
+U = [T  (d.^[0:n-2])'; zeros(1,m) d^(n-1)];
+A = L*U;
+theta = max(abs(A(:)));
+A(:,n) = (theta/norm(A(:,n),inf)) * A(:,n);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/gj.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,31 @@
+function x = gj(A, b, piv)
+%GJ        Gauss-Jordan elimination to solve Ax = b.
+%          x = GJ(A, b, PIV) solves Ax = b by Gauss-Jordan elimination,
+%          where A is a square, nonsingular matrix.
+%          PIV determines the form of pivoting:
+%              PIV = 0:           no pivoting,
+%              PIV = 1 (default): partial pivoting.
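+%
+%          Example (an illustrative sketch added in editing):
+%              A = pascal(4); b = A*ones(4,1);
+%              x = gj(A, b)             % x is close to ones(4,1).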
+
+n = max(size(A));
+if nargin < 3, piv = 1; end
+
+for k=1:n
+    if piv
+       % Partial pivoting (below the diagonal).
+       [colmax, i] = max( abs(A(k:n, k)) );
+       i = k+i-1;
+       if i ~= k
+          A( [k, i], : ) = A( [i, k], : );
+          b( [k, i] ) = b( [i, k] );
+       end
+    end
+
+    irange = [1:k-1 k+1:n];
+    jrange = k:n;
+    mult = A(irange,k)/A(k,k); % Multipliers.
+    A(irange, jrange) =  A(irange, jrange) - mult*A(k, jrange);
+    b(irange) =  b(irange) - mult*b(k);
+
+end
+
+x = diag(diag(A))\b;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/grcar.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,18 @@
+function G = grcar(n, k)
+%GRCAR     Grcar matrix - a Toeplitz matrix with sensitive eigenvalues.
+%          GRCAR(N, K) is an N-by-N matrix with -1s on the
+%          subdiagonal, 1s on the diagonal, and K superdiagonals of 1s.
+%          The default is K = 3.  The eigenvalues of this matrix form an
+%          interesting pattern in the complex plane (try PS(GRCAR(32))).
+
+%          References:
+%          J.F. Grcar, Operator coefficient methods for linear equations,
+%               Report SAND89-8691, Sandia National Laboratories, Albuquerque,
+%               New Mexico, 1989 (Appendix 2).
+%          N.M. Nachtigal, L. Reichel and L.N. Trefethen, A hybrid GMRES
+%               algorithm for nonsymmetric linear systems, SIAM J. Matrix Anal.
+%               Appl., 13 (1992), pp. 796-825.
+
+if nargin == 1, k = 3; end
+
+G = tril(triu(ones(n)), k) - diag(ones(n-1,1), -1);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/hadamard.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,41 @@
+function H = hadamard(n)
+%HADAMARD  Hadamard matrix.
+%          HADAMARD(N) is a Hadamard matrix of order N, that is,
+%          a matrix H with elements 1 or -1 such that H*H' = N*EYE(N).
+%          An N-by-N Hadamard matrix with N>2 exists only if REM(N,4) = 0.
+%          This function handles only the cases where N, N/12 or N/20
+%          is a power of 2.
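+%
+%          Example (an illustrative sketch added in editing):
+%              H = hadamard(8); norm(H*H' - 8*eye(8), 1)   % Exactly zero.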
+
+%          Reference:
+%          S.W. Golomb and L.D. Baumert, The search for Hadamard matrices,
+%          Amer. Math. Monthly, 70 (1963) pp. 12-17.
+
+%          History:
+%          NJH (11/14/91), revised by CBM, 6/24/92,
+%          comment lines revised by NJH, August 1993.
+
+[f,e] = log2([n n/12 n/20]);
+k = find(f==1/2 & e>0);
+if isempty(k)
+   error(['N, N/12 or N/20 must be a power of 2.']);
+end
+e = e(k)-1;
+
+if k == 1        % N = 1 * 2^e;
+   H = [1];
+
+elseif k == 2    % N = 12 * 2^e;
+   H = [ones(1,12); ones(11,1) ...
+        toeplitz([-1 -1 1 -1 -1 -1 1 1 1 -1 1],[-1 1 -1 1 1 1 -1 -1 -1 1 -1])];
+
+elseif k == 3    % N = 20 * 2^e;
+   H = [ones(1,20); ones(19,1)   ...
+        hankel([-1 -1 1 1 -1 -1 -1 -1 1 -1 1 -1 1 1 1 1 -1 -1 1], ...
+               [1 -1 -1 1 1 -1 -1 -1 -1 1 -1 1 -1 1 1 1 1 -1 -1])];
+end
+
+%  Kronecker product construction.
+for i = 1:e
+    H = [H  H
+         H -H];
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/hanowa.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,22 @@
+function A = hanowa(n, d)
+%HANOWA  A matrix whose eigenvalues lie on a vertical line in the complex plane.
+%        HANOWA(N, d) is the N-by-N block 2x2 matrix (thus N = 2M must be even)
+%                      [d*EYE(M)   -DIAG(1:M)
+%                       DIAG(1:M)   d*EYE(M)]
+%        It has complex eigenvalues lambda(k) = d +/- k*i  (1 <= k <= M).
+%        Parameter d defaults to -1.
+
+%        Reference:
+%        E. Hairer, S.P. Norsett and G. Wanner, Solving Ordinary
+%        Differential Equations I: Nonstiff Problems, Springer-Verlag,
+%        Berlin, 1987. (pp. 86-87)
+
+if nargin == 1, d = -1; end
+
+m = n/2;
+if round(m) ~= m
+   error('N must be even.')
+end
+
+A = [ d*eye(m) -diag(1:m)
+      diag(1:m)  d*eye(m)];
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/hilb.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,27 @@
+function H = hilb(n)
+%HILB   Hilbert matrix.
+%       HILB(N) is the N-by-N matrix with elements 1/(i+j-1).
+%       It is a famous example of a badly conditioned matrix.
+%       COND(HILB(N)) grows like EXP(3.5*N).
+%       See INVHILB (standard MATLAB routine) for the exact inverse, which
+%       has integer entries.
+%       HILB(N) is symmetric positive definite, totally positive, and a
+%       Hankel matrix.
+
+%       References:
+%       M.-D. Choi, Tricks or treats with the Hilbert matrix, Amer. Math.
+%           Monthly, 90 (1983), pp. 301-312.
+%       N.J. Higham, Accuracy and Stability of Numerical Algorithms,
+%           Society for Industrial and Applied Mathematics, Philadelphia, PA,
+%           USA, 1996; sec. 26.1.
+%       M. Newman and J. Todd, The evaluation of matrix inversion
+%           programs, J. Soc. Indust. Appl. Math., 6 (1958), pp. 466-476.
+%       D.E. Knuth, The Art of Computer Programming,
+%           Volume 1, Fundamental Algorithms, second edition, Addison-Wesley,
+%           Reading, Massachusetts, 1973, p. 37.
+
+if n == 1
+   H = 1;
+else
+    H = cauchy( (1:n) - .5);
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/house.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,36 @@
+function [v, beta] = house(x)
+%HOUSE   Householder matrix.
+%        If [v, beta] = HOUSE(x) then H = EYE - beta*v*v' is a Householder
+%        matrix such that Hx = -sign(x(1))*norm(x)*e_1.
+%        NB: If x = 0 then v = 0, beta = 1 is returned.
+%            x can be real or complex.
+%            sign(x) := exp(i*arg(x)) ( = x./abs(x) when x ~= 0).
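+%
+%        Example (an illustrative sketch added in editing):
+%            x = [3 4 0]'; [v, beta] = house(x);
+%            (eye(3) - beta*v*v')*x     % Approximately [-5 0 0]'.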
+
+%        Theory: (textbook references Golub & Van Loan 1989, 38-43;
+%                 Stewart 1973, 231-234, 262; Wilkinson 1965, 48-50).
+%        Hx = y: (I - beta*v*v')x = -s*e_1.
+%        Must have |s| = norm(x), v = x+s*e_1, and
+%        x'y = x'Hx =(x'Hx)' real => arg(s) = arg(x(1)).
+%        So take s = sign(x(1))*norm(x) (which avoids cancellation).
+%        v'v = (x(1)+s)^2 + x(2)^2 + ... + x(n)^2
+%            = 2*norm(x)*(norm(x) + |x(1)|).
+%
+%        References:
+%        G.H. Golub and C.F. Van Loan, Matrix Computations, second edition,
+%           Johns Hopkins University Press, Baltimore, Maryland, 1989.
+%        G.W. Stewart, Introduction to Matrix Computations, Academic Press,
+%           New York, 1973,
+%        J.H. Wilkinson, The Algebraic Eigenvalue Problem, Oxford University
+%           Press, 1965.
+
+[m, n] = size(x);
+if n > 1, error('Argument must be a column vector.'), end
+
+s = norm(x) * (sign(x(1)) + (x(1)==0));    % Modification for sign(0)=1.
+v = x;
+if s == 0, beta = 1; return, end           % Quit if x is the zero vector.
+v(1) = v(1) + s;
+beta = 1/(s'*v(1));                        % NB the conjugated s.
+
+% beta = 1/(abs(s)*(abs(s)+abs(x(1)))) would guarantee beta real.
+% But beta as above can be non-real (due to rounding) only when x is complex.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/invhess.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,39 @@
+function A = invhess(x, y)
+%INVHESS  Inverse of an upper Hessenberg matrix.
+%         INVHESS(X, Y), where X is an N-vector and Y an N-1 vector,
+%         is the matrix whose lower triangle agrees with that of
+%         ONES(N,1)*X' and whose strict upper triangle agrees with
+%         that of [1 Y]*ONES(1,N).
+%         The matrix is nonsingular if X(1) ~= 0 and X(i+1) ~= Y(i)
+%         for all i, and its inverse is an upper Hessenberg matrix.
+%         If Y is omitted it defaults to -X(1:N-1).
+%         Special case: if X is a scalar INVHESS(X) is the same as
+%         INVHESS(1:X).
+
+%         References:
+%         F.N. Valvi and V.S. Geroyannis, Analytic inverses and
+%             determinants for a class of matrices, IMA Journal of Numerical
+%             Analysis, 7 (1987), pp. 123-128.
+%         W.-L. Cao and W.J. Stewart, A note on inverses of Hessenberg-like
+%             matrices, Linear Algebra and Appl., 76 (1986), pp. 233-240.
+%         Y. Ikebe, On inverses of Hessenberg matrices, Linear Algebra and
+%             Appl., 24 (1979), pp. 93-97.
+%         P. Rozsa, On the inverse of band matrices, Integral Equations and
+%             Operator Theory, 10 (1987), pp. 82-95.
+
+n = max(size(x));
+%  Handle scalar x.
+if n == 1
+   n = x;
+   x = 1:n;
+end
+x = x(:);
+
+if nargin < 2, y = -x; end
+y = y(:);
+
+% On next line, z = x'; A = z(ones(n,1),:) would be more efficient.
+A = ones(n,1)*x';  
+for j=2:n
+    A(1:j-1,j) = y(1:j-1);
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/invol.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,21 @@
+function A = invol(n)
+%INVOL   An involutory matrix.
+%        A = INVOL(N) is an N-by-N involutory (A*A = EYE(N)) and
+%        ill-conditioned matrix.
+%        It is a diagonally scaled version of HILB(N).
+%        NB: B = (EYE(N)-A)/2 and B = (EYE(N)+A)/2 are idempotent (B*B = B).
+
+%        Reference:
+%        A.S. Householder and J.A. Carpenter, The singular values
+%        of involutory and of idempotent matrices, Numer. Math. 5 (1963),
+%        pp. 234-237.
+
+A = hilb(n);
+
+d = -n;
+A(:, 1) = d*A(:, 1);
+
+for i = 1:n-1
+    d = -(n+i)*(n-i)*d/(i*i);
+    A(i+1, :) = d*A(i+1, :);
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/ipjfact.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,42 @@
+function [A, detA] = ipjfact(n, k)
+%IPJFACT   A Hankel matrix with factorial elements.
+%          A = IPJFACT(N, K) is the matrix with
+%                    A(i,j) = (i+j)!    (K = 0, default)
+%                    A(i,j) = 1/(i+j)!  (K = 1)
+%          Both are Hankel matrices.
+%          The determinant and inverse are known explicitly.
+%          If a second output argument is present, d = DET(A) is returned:
+%          [A, d] = IPJFACT(N, K);
+
+%          Suggested by P. R. Graves-Morris.
+%
+%          Reference:
+%          M.J.C. Gover, The explicit inverse of factorial Hankel matrices,
+%          Dept. of Mathematics, University of Bradford, 1993.
+
+if nargin == 1, k = 0; end
+
+c = cumprod(2:n+1);
+d = cumprod(n+1:2*n) * c(n-1);
+
+A = hankel(c, d);
+
+if k == 1
+   A = ones(n)./A;
+end
+
+if nargout == 2
+   d = 1;
+   if k == 0
+      for i=1:n-1
+          d = d*prod(1:i+1)*prod(1:n-i);
+      end
+      d = d*prod(1:n+1);
+   else
+      for i=0:n-1
+          d = d*prod(1:i)/prod(1:n+1+i);
+      end
+      if rem(n*(n-1)/2,2), d = -d; end
+   end
+   detA = d;
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/jordbloc.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,8 @@
+function J = jordbloc(n, lambda)
+%JORDBLOC  Jordan block.
+%          JORDBLOC(N, LAMBDA) is the N-by-N Jordan block with eigenvalue
+%          LAMBDA.  LAMBDA = 1 is the default.
+
+if nargin == 1, lambda = 1; end
+
+J = lambda*eye(n) + diag(ones(n-1,1),1);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/kahan.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,43 @@
+function U = kahan(n, theta, pert)
+%KAHAN  Kahan matrix - upper trapezoidal.
+%       KAHAN(N, THETA) is an upper trapezoidal matrix
+%       that has some interesting properties regarding estimation of
+%       condition and rank.
+%       The matrix is N-by-N unless N is a 2-vector, in which case it
+%       is N(1)-by-N(2).
+%       The parameter THETA defaults to 1.2.
+%       The useful range of THETA is 0 < THETA < PI.
+%
+%       To ensure that the QR factorization with column pivoting does not
+%       interchange columns in the presence of rounding errors, the diagonal
+%       is perturbed by PERT*EPS*diag( [N:-1:1] ).
+%       The default is PERT = 25, which ensures no interchanges for KAHAN(N)
+%       up to at least N = 90 in IEEE arithmetic.
+%       KAHAN(N, THETA, PERT) uses the given value of PERT.
+
+%       The inverse of KAHAN(N, THETA) is known explicitly: see
+%       Higham (1987, p. 588), for example.
+%       The diagonal perturbation was suggested by Christian Bischof.
+%
+%       References:
+%       W. Kahan, Numerical linear algebra, Canadian Math. Bulletin,
+%          9 (1966), pp. 757-801.
+%       N.J. Higham, A survey of condition number estimation for
+%          triangular matrices, SIAM Review, 29 (1987), pp. 575-596.
+
+if nargin < 3, pert = 25; end
+if nargin < 2, theta = 1.2; end
+
+r = n(1);              % Parameter n specifies dimension: r-by-n.
+n = n(max(size(n)));
+
+s = sin(theta);
+c = cos(theta);
+
+U = eye(n) - c*triu(ones(n), 1);
+U = diag(s.^[0:n-1])*U + pert*eps*diag( [n:-1:1] );
+if r > n
+   U(r,n) = 0;         % Extend to an r-by-n matrix.
+else
+   U = U(1:r,:);       % Reduce to an r-by-n matrix.
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/kms.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,27 @@
+function A = kms(n, rho)
+%KMS   Kac-Murdock-Szego Toeplitz matrix.
+%      A = KMS(N, RHO) is the N-by-N Kac-Murdock-Szego Toeplitz matrix with
+%      A(i,j) = RHO^(ABS((i-j))) (for real RHO).
+%      If RHO is complex, then the same formula holds except that elements
+%      below the diagonal are conjugated.
+%      RHO defaults to 0.5.
+%      Properties:
+%         A has an LDL' factorization with
+%                  L = INV(TRIW(N,-RHO,1)'),
+%                  D = (1-ABS(RHO)^2)*EYE(N), except that D(1,1) = 1.
+%         A is positive definite if and only if 0 < ABS(RHO) < 1.
+%         INV(A) is tridiagonal.
+
+%       Reference:
+%       W.F. Trench, Numerical solution of the eigenvalue problem
+%       for Hermitian Toeplitz matrices, SIAM J. Matrix Analysis and Appl.,
+%       10 (1989), pp. 135-146 (and see the references therein).
+
+if nargin < 2, rho = 0.5; end
+
+A = (1:n)'*ones(1,n);
+A = abs(A - A');
+A = rho .^ A;
+if imag(rho)
+   A = conj(tril(A,-1)) + triu(A);
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/krylov.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,28 @@
+function B = krylov(A, x, j)
+%KRYLOV    Krylov matrix.
+%          KRYLOV(A, x, j) is the Krylov matrix
+%               [x, Ax, A^2x, ..., A^(j-1)x],
+%          where A is an n-by-n matrix and x is an n-vector.
+%          Defaults: x = ONES(n,1), j = n.
+%          KRYLOV(n) is the same as KRYLOV(RANDN(n)).
+
+%          Reference:
+%          G.H. Golub and C.F. Van Loan, Matrix Computations, second edition,
+%          Johns Hopkins University Press, Baltimore, Maryland, 1989, p. 369.
+
+[n, n] = size(A);
+
+if n == 1   % Handle special case A = scalar.
+   n = A;
+   A = randn(n);
+end
+
+if nargin < 3, j = n; end
+if nargin < 2, x = ones(n,1); end
+
+
+B = ones(n,j);
+B(:,1) = x(:);
+for i=2:j
+    B(:,i) = A*B(:,i-1);
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/lauchli.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,14 @@
+function A = lauchli(n, mu)
+%LAUCHLI   Lauchli matrix - rectangular.
+%          LAUCHLI(N, MU) is the (N+1)-by-N matrix [ONES(1,N); MU*EYE(N)].
+%          It is a well-known example in least squares and other problems
+%          that indicates the dangers of forming A'*A.
+%          MU defaults to SQRT(EPS).
+
+%          Reference:
+%          P. Lauchli, Jordan-Elimination und Ausgleichung nach
+%          kleinsten Quadraten, Numer. Math, 3 (1961), pp. 226-240.
+
+if nargin < 2, mu = sqrt(eps); end
+A = [ones(1,n);
+     mu*eye(n)];
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/lehmer.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,19 @@
+function A = lehmer(n)
+%LEHMER  Lehmer matrix - symmetric positive definite.
+%        A = LEHMER(N) is the symmetric positive definite N-by-N matrix with
+%                         A(i,j) = i/j for j >= i.
+%        A is totally nonnegative.  INV(A) is tridiagonal, and explicit
+%        formulas are known for its entries.
+%        N <= COND(A) <= 4*N*N.
+
+%        References:
+%        M. Newman and J. Todd, The evaluation of matrix inversion
+%           programs, J. Soc. Indust. Appl. Math., 6 (1958), pp. 466-476.
+%        Solutions to problem E710 (proposed by D.H. Lehmer): The inverse
+%           of a matrix, Amer. Math. Monthly, 53 (1946), pp. 534-535.
+%        J. Todd, Basic Numerical Mathematics, Vol. 2: Numerical Algebra,
+%           Birkhauser, Basel, and Academic Press, New York, 1977, p. 154.
+
+A = ones(n,1)*(1:n);
+A = A./A';
+A = tril(A) + tril(A,-1)';
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/lesp.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,22 @@
+function T = lesp(n)
+%LESP   A tridiagonal matrix with real, sensitive eigenvalues.
+%       LESP(N) is an N-by-N matrix whose eigenvalues are real and smoothly
+%       distributed in the interval approximately [-2*N-3.5, -4.5].
+%       The sensitivities of the eigenvalues increase exponentially as
+%       the eigenvalues grow more negative.
+%       The matrix is similar to the symmetric tridiagonal matrix with
+%       the same diagonal entries and with off-diagonal entries 1,
+%       via a similarity transformation with D = diag(1!,2!,...,N!).
+
+%       References:
+%       H.W.J. Lenferink and M.N. Spijker, On the use of stability regions in
+%            the numerical analysis of initial value problems,
+%            Math. Comp., 57 (1991), pp. 221-237.
+%       L.N. Trefethen, Pseudospectra of matrices, in Numerical Analysis 1991,
+%            Proceedings of the 14th Dundee Conference,
+%            D.F. Griffiths and G.A. Watson, eds, Pitman Research Notes in
+%            Mathematics, volume 260, Longman Scientific and Technical, Essex,
+%            UK, 1992, pp. 234-266.
+
+x = 2:n;
+T = full(tridiag( ones(size(x))./x, -(2*[x n+1]+1), x));
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/lotkin.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,12 @@
+function A = lotkin(n)
+%LOTKIN  Lotkin matrix.
+%        A = LOTKIN(N) is the Hilbert matrix with its first row altered to
+%        all ones.  A is unsymmetric, ill-conditioned, and has many negative
+%        eigenvalues of small magnitude.
+%        The inverse has integer entries and is known explicitly.
+
+%        Reference:
+%        M. Lotkin, A set of test matrices, MTAC, 9 (1955), pp. 153-161.
+
+A = hilb(n);
+A(1,:) = ones(1,n);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/makejcf.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,39 @@
+function A = makejcf(n, e, m, X)
+%MAKEJCF   A matrix with given Jordan canonical form.
+%          MAKEJCF(N, E, M) is a matrix having the Jordan canonical form
+%          whose i'th Jordan block is of dimension M(i) with eigenvalue E(i),
+%          and where N = SUM(M).
+%          Defaults: E = 1:N, M = ONES(SIZE(E)) with M(1) so that SUM(M) = N.
+%          The matrix is constructed by applying a random similarity
+%          transformation to the Jordan form.
+%          Alternatively, the matrix used in the similarity transformation
+%          can be specified as a fourth parameter.
+%          In particular, MAKEJCF(N, E, M, EYE(N)) returns the Jordan form
+%          itself.
+%          NB: The JCF is very sensitive to rounding errors.
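+%
+%          Example (an illustrative sketch added in editing):
+%              A = makejcf(5, [2 3], [3 2]);   % One 3x3 block for 2, one 2x2 block for 3.
+%              eig(A)                          % Approximately 2 (three times) and 3 (twice);
+%                                              % the multiple eigenvalue is sensitive.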
+
+if nargin < 2, e = 1:n; end
+if nargin < 3, m = ones(size(e)); m(1) = m(1) + n - sum(m); end
+
+if any( size(e(:)) ~= size(m(:)) )
+   error('Parameters E and M must be of same dimension.')
+end
+
+if sum(m) ~= n, error('Block dimensions must add up to N.'), end
+
+A = zeros(n);
+j = 1;
+for i=1:max(size(m))
+    if m(i) > 1
+        Jb = jordbloc(m(i),e(i));
+    else
+        Jb = e(i);  % JORDBLOC fails in n = 1 case.
+    end
+    A(j:j+m(i)-1,j:j+m(i)-1) = Jb;
+    j = j + m(i);
+end
+
+if nargin < 4
+   X = randn(n);
+end
+A = X\A*X;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/matrix.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,99 @@
+function A = matrix(k, n)
+%MATRIX    Test Matrix Toolbox information and matrix access by number.
+%          MATRIX(K, N) is the N-by-N instance of the matrix number K in
+%          the Test Matrix Toolbox (including some of the matrices provided
+%          with MATLAB), with all other parameters set to their default.
+%          N.B. Only those matrices which take an arbitrary dimension N
+%               are included (thus GALLERY is omitted, for example).
+%          MATRIX(K) is a string containing the name of the K'th matrix.
+%          MATRIX(0) is the number of matrices, i.e. the upper limit for K.
+%          Thus to set A to each N-by-N test matrix in turn use a loop like
+%                for k=1:matrix(0)
+%                    A = matrix(k, N);
+%                    Aname = matrix(k);   % The name of the matrix
+%                end
+%          MATRIX(-1) returns the version number and date of the toolbox.
+%          MATRIX with no arguments lists the names of the M-files in the
+%          collection.
+
+%          References:
+%          N.J. Higham. The Test Matrix Toolbox for Matlab (version 3.0),
+%             Numerical Analysis Report No. 276, Manchester Centre for
+%             Computational Mathematics, Manchester, England, September 1995.
+%          N.J. Higham, Algorithm 694: A collection of test matrices in
+%             MATLAB, ACM Trans. Math. Soft., 17 (1991), pp. 289-305.
+%
+%          Matrices omitted are: gallery, hadamard, hanowa, lauchli,
+%          neumann, wathen, wilk.
+%          Matrices provided with MATLAB that are included here: invhilb,
+%          magic.
+
+% Set up string array a few lines at a time to avoid `input buffer line
+% overflow'.
+
+matrices = '';
+
+matrices = [matrices
+'augment '; 'cauchy  '; 'chebspec'; 'chebvand';
+'chow    '; 'circul  '; 'clement '; 'compan  ';
+'condex  '; 'cycol   '; 'dingdong'; 'dorr    ';
+'dramadah'; 'fiedler '; 'forsythe'; 'frank   ';];
+
+matrices = [matrices
+'gearm   '; 'gfpp    '; 'grcar   '; 'hilb    ';
+'invhess '; 'invol   '; 'ipjfact '; 'jordbloc';
+'kahan   '; 'kms     '; 'krylov  '; 'lehmer  ';
+'lesp    '; 'lotkin  '; 'makejcf '; 'minij   ';];
+
+matrices = [matrices
+'moler   '; 'ohess   '; 'orthog  '; 'parter  ';
+'pascal  '; 'pdtoep  '; 'pei     '; 'pentoep ';
+'prolate '; 'rando   '; 'randsvd ';
+'redheff '; 'riemann '; 'rschur  '; 'smoke   ';
+'tridiag '; 'triw    '; 'vand    ';];
+
+if nargin == 0
+
+fprintf('Test matrices:                                                    \n')
+fprintf('                                                                  \n')
+fprintf('augment  cycol    gfpp     kahan   moler    poisson  triw         \n')
+fprintf('cauchy   dingdong grcar    kms     neumann  prolate  vand         \n')
+fprintf('chebspec dorr     hadamard krylov  ohess    rando    wathen       \n')
+fprintf('chebvand dramadah hanowa   lauchli orthog   randsvd  wilk         \n')
+fprintf('chow     fiedler  hilb     lehmer  parter   redheff               \n')
+fprintf('circul   forsythe invhess  lesp    pascal   riemann               \n')
+fprintf('clement  frank    invol    lotkin  pdtoep   rschur                \n')
+fprintf('compan   gallery  ipjfact  makejcf pei      smoke                 \n')
+fprintf('condex   gearm    jordbloc minij   pentoep  tridiag               \n')
+fprintf('                                                                  \n')
+fprintf('Visualization:   Decompositions:     Miscellaneous:              \n')
+fprintf('                                                                 \n')
+fprintf('fv               cgs      gj         bandred  matrix   show    \n')
+fprintf('gersh            cholp    mgs        chop     matsignt skewpart\n')
+fprintf('ps               cod      poldec     comp     pnorm    sparsify\n')
+fprintf('pscont           diagpiv  signm      cond     qmult    sub     \n')
+fprintf('see              ge                  cpltaxes rq       symmpart\n')
+fprintf('                 gecp                dual     seqa     trap2tri\n')
+fprintf('                                     eigsens  seqcheb          \n')
+fprintf('                                     house    seqm             \n\n')
+fprintf('Direct search optimization: adsmax, mdsmax, nmsmax             \n')
+fprintf('Demonstration: tmtdemo                                         \n')
+
+elseif nargin == 1
+   if k == 0
+      [A, temp] = size(matrices);
+   elseif k > 0
+      A = matrices(k,:);
+   else
+      % Version number and date of collection.
+%     A = 'Version 1.0, July 4 1989';
+%     A = 'Version 1.1, November 15 1989';
+%     A = 'Version 1.2, May 30 1990';
+%     A = 'Version 1.3, November 14 1991';
+%     A = 'Version 2.0, November 14 1993';
+      A = 'Version 3.0, September 19 1995';
+   end
+else
+   A = eval( [matrices(k,:) '(n)'] );
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/matsignt.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,41 @@
+function S = matsignt(T)
+%MATSIGNT    Matrix sign function of a triangular matrix.
+%            S = MATSIGNT(T) computes the matrix sign function S of the
+%            upper triangular matrix T using a recurrence.
+
+%            Adapted from FUNM.  Called by SIGNM.
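+%
+%            Example (an illustrative sketch added in editing):
+%                T = triu(randn(5));
+%                S = matsignt(T); norm(S*S - eye(5), 1)   % S squares to the identity,
+%                                                         % up to roundoff.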
+
+if norm(tril(T,-1),1), error('Matrix must be upper triangular.'), end
+
+n = max(size(T));
+
+S = diag( sign( diag(real(T)) ) );
+tol = 0;
+for p = 1:n-1
+   for i = 1:n-p
+
+      j = i+p;
+      d = T(j,j) - T(i,i);
+
+      if S(i,i) ~= -S(j,j)  % Solve via S^2 = I if we can.
+
+         % Get S(i,j) from S^2 = I.
+         k = i+1:j-1;
+         RHS = 0;
+         if k, RHS = RHS - S(i,k)*S(k,j); end
+         S(i,j) = RHS  / (S(i,i)+S(j,j));
+
+      else
+
+         % Get S(i,j) from S*T = T*S.
+         s = T(i,j)*(S(j,j)-S(i,i));
+         if p > 1
+            k = i+1:j-1;
+            s = s + T(i,k)*S(k,j) - S(i,k)*T(k,j);
+         end
+         S(i,j) = s/d;
+
+      end
+
+   end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/minij.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,25 @@
+function A = minij(n)
+%MINIJ   Symmetric positive definite matrix MIN(i,j).
+%        A = MINIJ(N) is the N-by-N symmetric positive definite matrix with
+%        A(i,j) = MIN(i,j).
+%        Properties, variations:
+%        A has eigenvalues .25*sec^2(r*PI/(2*N+1)), r=1:N, and the eigenvectors
+%                    are also known explicitly.
+%        INV(A) is tridiagonal: it is minus the second difference matrix
+%                    except its (N,N) element is 1.
+%        2*A-ONES(N) (Givens' matrix) has tridiagonal inverse and
+%                    eigenvalues .5*sec^2((2r-1)PI/4N), r=1:N.
+%        (N+1)*ONES(N)-A also has a tridiagonal inverse.
+%        FLIPUD(TRIW(N,1)) is a square root of A.
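+%
+%        Example (an illustrative sketch added in editing):
+%            A = minij(5); inv(A)   % Tridiagonal, with (5,5) element 1.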
+
+%        References:
+%        J. Fortiana and C. M. Cuadras, A family of matrices, the discretized
+%           Brownian bridge, and distance-based regression, Linear Algebra
+%           Appl., 264 (1997), 173-188.  (For the eigensystem of A.)
+%        J. Todd, Basic Numerical Mathematics, Vol. 2: Numerical Algebra,
+%           Birkhauser, Basel, and Academic Press, New York, 1977, p. 158.
+%        D.E. Rutherford, Some continuant determinants arising in physics and
+%           chemistry---II, Proc. Royal Soc. Edin., 63, A (1952), pp. 232-241.
+%           (For the eigenvalues of Givens' matrix.)
+
+A = min( ones(n,1)*(1:n), (1:n)'*ones(1,n) );
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/moler.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,17 @@
+function A = moler(n, alpha)
+%MOLER   Moler matrix - symmetric positive definite.
+%        A = MOLER(N, ALPHA) is the symmetric positive definite N-by-N matrix
+%        U'*U where U = TRIW(N, ALPHA).
+%        For ALPHA = -1 (the default) A(i,j) = MIN(i,j)-2, A(i,i) = i.
+%        A has one small eigenvalue.
+
+%        Nash (1990) attributes the ALPHA = -1 matrix to Moler.
+%
+%        Reference:
+%        J.C. Nash, Compact Numerical Methods for Computers: Linear
+%        Algebra and Function Minimisation, second edition, Adam Hilger,
+%        Bristol, 1990 (Appendix 1).
+
+if nargin == 1, alpha = -1; end
+
+A = triw(n, alpha)'*triw(n, alpha);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/neumann.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,24 @@
+function [A, T] = neumann(n)
+%NEUMANN  Singular matrix from the discrete Neumann problem (sparse).
+%         NEUMANN(N) is the singular, row diagonally dominant matrix resulting
+%         from discretizing the Neumann problem with the usual five point
+%         operator on a regular mesh.
+%         It has a one-dimensional null space with null vector ONES(N,1).
+%         The dimension N should be a perfect square, or else a 2-vector,
+%         in which case the dimension of the matrix is N(1)*N(2).
+
+%         Reference:
+%         R.J. Plemmons, Regular splittings and the discrete Neumann
+%         problem, Numer. Math., 25 (1976), pp. 153-161.
+
+if max(size(n)) == 1
+   m = sqrt(n);
+   if m^2 ~= n, error('N must be a perfect square.'), end
+   n(1) = m; n(2) = m;
+end
+
+T = tridiag(n(1), -1, 2, -1);
+T(1,2) = -2;
+T(n(1),n(1)-1) = -2;
+
+A = kron(T, eye(n(2))) + kron(eye(n(2)), T);
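The singularity and the null vector stated above are easy to confirm; a minimal sketch, assuming neumann.m and tridiag.m are on the MATLAB path:

   A = neumann(16);          % 16 is a perfect square.
   norm(A*ones(16,1))        % ONES(16,1) spans the null space, so this is exactly 0.
   rank(full(A))             % One-dimensional null space: rank is N-1 = 15.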
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/ohess.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,43 @@
+function H = ohess(x)
+%OHESS  Random, orthogonal upper Hessenberg matrix.
+%       H = OHESS(N) is an N-by-N real, random, orthogonal
+%       upper Hessenberg matrix.
+%       Alternatively, H = OHESS(X), where X is an arbitrary real
+%       N-vector (N > 1) constructs H non-randomly using the elements
+%       of X as parameters.
+%       In both cases H is constructed via a product of N-1 Givens rotations.
+
+%       Note: See Gragg (1986) for how to represent an N-by-N (complex)
+%       unitary Hessenberg matrix with positive subdiagonal elements in terms
+%       of 2N-1 real parameters (the Schur parametrization).
+%       This M-file handles the real case only and is intended simply as a
+%       convenient way to generate random or non-random orthogonal Hessenberg
+%       matrices.
+%
+%       Reference:
+%       W.B. Gragg, The QR algorithm for unitary Hessenberg matrices,
+%       J. Comp. Appl. Math., 16 (1986), pp. 1-8.
+
+if any(imag(x)), error('Parameter must be real.'), end
+
+n = max(size(x));
+
+if n == 1
+%  Handle scalar x.
+   n = x;
+   x = rand(n-1,1)*2*pi;
+   H = eye(n);
+   H(n,n) = sign(randn);
+else
+   H = eye(n);
+   H(n,n) = sign(x(n)) + (x(n)==0);   % Second term ensures H(n,n) nonzero.
+end
+
+for i=n:-1:2
+    % Apply Givens rotation through angle x(i-1).
+    theta = x(i-1);
+    c = cos(theta);
+    s = sin(theta);
+    H( [i-1 i], : ) = [ c*H(i-1,:)+s*H(i,:)
+                       -s*H(i-1,:)+c*H(i,:) ];
+end
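A minimal sketch checking the two advertised properties, orthogonality and upper Hessenberg form (ohess.m on the path):

   H = ohess(8);             % Random orthogonal upper Hessenberg matrix.
   norm(H'*H - eye(8))       % Orthogonality: of order EPS.
   norm(tril(H,-2))          % Upper Hessenberg: zero below the first subdiagonal.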
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/orthog.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,80 @@
+function Q = orthog(n, k)
+%ORTHOG Orthogonal and nearly orthogonal matrices.
+%       Q = ORTHOG(N, K) selects the K'th type of matrix of order N.
+%       K > 0 for exactly orthogonal matrices, K < 0 for diagonal scalings of
+%       orthogonal matrices.
+%       Available types: (K = 1 is the default)
+%       K = 1:  Q(i,j) = SQRT(2/(n+1)) * SIN( i*j*PI/(n+1) )
+%               Symmetric eigenvector matrix for second difference matrix.
+%       K = 2:  Q(i,j) = 2/SQRT(2*n+1) * SIN( 2*i*j*PI/(2*n+1) )
+%               Symmetric.
+%       K = 3:  Q(r,s) = EXP(2*PI*i*(r-1)*(s-1)/n) / SQRT(n)  (i=SQRT(-1))
+%               Unitary, the Fourier matrix.  Q^4 is the identity.
+%               This is essentially the same matrix as FFT(EYE(N))/SQRT(N)!
+%       K = 4:  Helmert matrix: a permutation of a lower Hessenberg matrix,
+%               whose first row is ONES(1:N)/SQRT(N).
+%       K = 5:  Q(i,j) = SIN( 2*PI*(i-1)*(j-1)/n ) + COS( 2*PI*(i-1)*(j-1)/n ).
+%               Symmetric matrix arising in the Hartley transform.
+%       K = -1: Q(i,j) = COS( (i-1)*(j-1)*PI/(n-1) )
+%               Chebyshev Vandermonde-like matrix, based on extrema of T(n-1).
+%       K = -2: Q(i,j) = COS( (i-1)*(j-1/2)*PI/n )
+%               Chebyshev Vandermonde-like matrix, based on zeros of T(n).
+
+%       References:
+%       N.J. Higham and D.J. Higham, Large growth factors in Gaussian
+%            elimination with pivoting, SIAM J. Matrix Analysis and  Appl.,
+%            10 (1989), pp. 155-164.
+%       P. Morton, On the eigenvectors of Schur's matrix, J. Number Theory,
+%            12 (1980), pp. 122-127. (Re. ORTHOG(N, 3))
+%       H.O. Lancaster, The Helmert Matrices, Amer. Math. Monthly, 72 (1965),
+%            pp. 4-12.
+%       D. Bini and P. Favati, On a matrix algebra related to the discrete
+%            Hartley transform, SIAM J. Matrix Anal. Appl., 14 (1993),
+%            pp. 500-507.
+
+if nargin == 1, k = 1; end
+
+if k == 1
+                                       % E'vectors second difference matrix
+   m = (1:n)'*(1:n) * (pi/(n+1));
+   Q = sin(m) * sqrt(2/(n+1));
+
+elseif k == 2
+
+   m = (1:n)'*(1:n) * (2*pi/(2*n+1));
+   Q = sin(m) * (2/sqrt(2*n+1));
+
+elseif k == 3                          %  Vandermonde based on roots of unity
+
+   m = 0:n-1;
+   Q = exp(m'*m*2*pi*sqrt(-1)/n) / sqrt(n);
+
+elseif k == 4                          %  Helmert matrix
+
+   Q = tril(ones(n));
+   Q(1,2:n) = ones(1,n-1);
+   for i=2:n
+       Q(i,i) = -(i-1);
+   end
+   Q = diag( sqrt( [n 1:n-1] .* [1:n] ) ) \ Q;
+
+elseif k == 5                          %  Hartley matrix
+
+   m = (0:n-1)'*(0:n-1) * (2*pi/n);
+   Q = (cos(m) + sin(m))/sqrt(n);
+
+elseif k == -1
+                                       %  extrema of T(n-1)
+   m = (0:n-1)'*(0:n-1) * (pi/(n-1));
+   Q = cos(m);
+
+elseif k == -2
+                                       % zeros of T(n)
+   m = (0:n-1)'*(.5:n-.5) * (pi/n);
+   Q = cos(m);
+
+else
+
+   error('Illegal value for second parameter.')
+
+end
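For K > 0 each type is orthogonal (or unitary) to working accuracy, which a short loop confirms; a sketch assuming orthog.m is on the MATLAB path:

   for k = 1:5
       Q = orthog(8, k);
       fprintf('k = %g: norm(Q''*Q - I) = %9.2e\n', k, norm(Q'*Q - eye(8)))
   end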
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/parter.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,18 @@
+function A = parter(n)
+%PARTER    Parter matrix - a Toeplitz matrix with singular values near PI.
+%          PARTER(N) is the matrix with (i,j) element 1/(i-j+0.5).
+%          It is a Cauchy matrix and a Toeplitz matrix.
+
+%          At the Second SIAM Conference on Linear Algebra, Raleigh, N.C.,
+%          1985, Cleve Moler noted that most of the singular values of
+%          PARTER(N) are very close to PI.  An explanation of the phenomenon
+%          was given by Parter; see also the paper by Tyrtyshnikov.
+%
+%          References:
+%          The MathWorks Newsletter, Volume 1, Issue 1, March 1986, page 2.
+%          S.V. Parter, On the distribution of the singular values of Toeplitz
+%               matrices, Linear Algebra and Appl., 80 (1986), pp. 115-130.
+%          E.E. Tyrtyshnikov, Cauchy-Toeplitz matrices and some applications,
+%               Linear Algebra and Appl., 149 (1991), pp. 1-18.
+
+A = cauchy( (1:n)+0.5, -(1:n) );
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/pascal.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,58 @@
+function P = pascal(n, k)
+%PASCAL  Pascal matrix.
+%        P = PASCAL(N) is the Pascal matrix of order N: a symmetric positive
+%        definite matrix with integer entries taken from Pascal's
+%        triangle.
+%        The Pascal matrix is totally positive and its inverse has
+%        integer entries.  Its eigenvalues occur in reciprocal pairs.
+%        COND(P) is approximately 16^N/(N*PI) for large N.
+%        PASCAL(N,1) is the lower triangular Cholesky factor (up to signs
+%        of columns) of the Pascal matrix.   It is involutory (is its own
+%        inverse).
+%        PASCAL(N,2) is a transposed and permuted version of PASCAL(N,1)
+%        which is a cube root of the identity.
+
+%        References:
+%        R. Brawer and M. Pirovino, The linear algebra of the Pascal matrix,
+%           Linear Algebra and Appl., 174 (1992), pp. 13-23 (this paper
+%           gives a factorization of L = PASCAL(N,1) and a formula for the
+%           elements of L^k).
+%        N.J. Higham, Accuracy and Stability of Numerical Algorithms,
+%           Society for Industrial and Applied Mathematics, Philadelphia, PA,
+%           USA, 1996; sec. 26.4.
+%        S. Karlin, Total Positivity, Volume 1, Stanford University Press,
+%           1968.  (Page 137: shows i+j-1 choose j is TP (i,j=0,1,...).
+%                   PASCAL(N) is a submatrix of this matrix.)
+%        M. Newman and J. Todd, The evaluation of matrix inversion programs,
+%           J. Soc. Indust. Appl. Math., 6(4):466--476, 1958.
+%        H. Rutishauser, On test matrices, Programmation en Mathematiques
+%           Numeriques, Editions Centre Nat. Recherche Sci., Paris, 165,
+%           1966, pp. 349-365.  (Gives an integral formula for the
+%           elements of PASCAL(N).)
+%        J. Todd, Basic Numerical Mathematics, Vol. 2: Numerical Algebra,
+%           Birkhauser, Basel, and Academic Press, New York, 1977, p. 172.
+%        H.W. Turnbull, The Theory of Determinants, Matrices, and Invariants,
+%           Blackie, London and Glasgow, 1929.  (PASCAL(N,2) on page 332.)
+
+if nargin == 1, k = 0; end
+
+P = diag( (-1).^[0:n-1] );
+P(:, 1) = ones(n,1);
+
+%  Generate the Pascal Cholesky factor (up to signs).
+for j=2:n-1
+    for i=j+1:n
+        P(i,j) = P(i-1,j) - P(i-1,j-1);
+    end
+end
+
+if k == 0
+
+   P = P*P';
+
+elseif k == 2
+
+   P = rot90(P,3);
+   if n/2 == round(n/2), P = -P; end
+
+end
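The involutory and cube-root-of-identity claims above can be verified directly; a minimal sketch (pascal.m on the path):

   n = 6;
   L = pascal(n, 1);
   norm(L*L - eye(n))        % PASCAL(N,1) is involutory.
   X = pascal(n, 2);
   norm(X^3 - eye(n))        % PASCAL(N,2) is a cube root of the identity.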
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/pdtoep.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,29 @@
+function T = pdtoep(n, m, w, theta)
+%PDTOEP   Symmetric positive definite Toeplitz matrix.
+%         PDTOEP(N, M, W, THETA) is an N-by-N symmetric positive (semi-)
+%         definite (SPD) Toeplitz matrix, comprised of the sum of M rank 2
+%         (or, for certain THETA, rank 1) SPD Toeplitz matrices.
+%         Specifically,
+%                 T = W(1)*T(THETA(1)) + ... + W(M)*T(THETA(M)),
+%         where T(THETA(k)) has (i,j) element COS(2*PI*THETA(k)*(i-j)).
+%         Defaults: M = N, W = RAND(M,1), THETA = RAND(M,1).
+
+%         Reference:
+%         G. Cybenko and C.F. Van Loan, Computing the minimum eigenvalue of
+%         a symmetric positive definite Toeplitz matrix, SIAM J. Sci. Stat.
+%         Comput., 7 (1986), pp. 123-131.
+
+if nargin < 2, m = n; end
+if nargin < 3, w = rand(m,1); end
+if nargin < 4, theta = rand(m,1); end
+
+if max(size(w)) ~= m | max(size(theta)) ~= m
+   error('Arguments W and THETA must be vectors of length M.')
+end
+
+T = zeros(n);
+E = 2*pi*( (1:n)'*ones(1,n) - ones(n,1)*(1:n) );
+
+for i=1:m
+    T = T + w(i) * cos( theta(i)*E );
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/pei.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,14 @@
+function P = pei(n, alpha)
+%PEI    Pei matrix.
+%       PEI(N, ALPHA), where ALPHA is a scalar, is the symmetric matrix
+%       ALPHA*EYE(N) + ONES(N).
+%       If ALPHA is omitted then ALPHA = 1 is used.
+%       The matrix is singular for ALPHA = 0, -N.
+
+%       Reference:
+%       M.L. Pei, A test matrix for inversion procedures,
+%       Comm. ACM, 5 (1962), p. 508.
+
+if nargin == 1, alpha = 1; end
+
+P = alpha*eye(n) + ones(n);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/pentoep.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,26 @@
+function P = pentoep(n, a, b, c, d, e)
+%PENTOEP   Pentadiagonal Toeplitz matrix (sparse).
+%          P = PENTOEP(N, A, B, C, D, E) is the N-by-N pentadiagonal
+%          Toeplitz matrix with diagonals composed of the numbers
+%          A =: P(3,1), B =: P(2,1), C =: P(1,1), D =: P(1,2), E =: P(1,3).
+%          Default: (A,B,C,D,E) = (1,-10,0,10,1) (a matrix of Rutishauser).
+%                    This matrix has eigenvalues lying approximately on
+%                    the line segment 2*cos(2*t) + 20*i*sin(t).
+%
+%          Interesting plots are
+%          PS(FULL(PENTOEP(32,0,1,0,0,1/4)))  - `triangle'
+%          PS(FULL(PENTOEP(32,0,1/2,0,0,1)))  - `propeller'
+%          PS(FULL(PENTOEP(32,0,1/2,1,1,1)))  - `fish'
+
+%          References:
+%          R.M. Beam and R.F. Warming, The asymptotic spectra of
+%             banded Toeplitz and quasi-Toeplitz matrices, SIAM J. Sci.
+%             Comput. 14 (4), 1993, pp. 971-1006.
+%          H. Rutishauser, On test matrices, Programmation en Mathematiques
+%             Numeriques, Editions Centre Nat. Recherche Sci., Paris, 165,
+%             1966, pp. 349-365.
+
+if nargin == 1, a = 1; b = -10; c = 0; d = 10; e = 1; end
+
+P = spdiags([ a*ones(n,1) b*ones(n,1) c*ones(n,1) d*ones(n,1) ...
+              e*ones(n,1) ], -2:2, n, n);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/pnorm.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,102 @@
+function [est, x, k] = pnorm(A, p, tol, noprint)
+%PNORM   Estimate of matrix p-norm (1 <= p <= inf).
+%        [EST, x, k] = PNORM(A, p, TOL) estimates the Holder p-norm of a
+%        matrix A, using the p-norm power method with a specially
+%        chosen starting vector.
+%        TOL is a relative convergence tolerance (default 1E-4).
+%        Returned are the norm estimate EST (which is a lower bound for the
+%        exact p-norm), the corresponding approximate maximizing vector x,
+%        and the number of power method iterations k.
+%        A nonzero fourth argument causes trace output to the screen.
+%        If A is a vector, this routine simply returns NORM(A, p).
+%
+%        See also NORM, NORMEST.
+
+%        Note: The estimate is exact for p = 1, but is not always exact for
+%        p = 2 or p = inf.  Code could be added to treat p = 2 and p = inf
+%        separately.
+%
+%        Calls DUAL and SEQA.
+%
+%        Reference:
+%        N.J. Higham, Estimating the matrix p-norm,
+%        Numer. Math., 62 (1992), pp. 539-555.
+
+if nargin < 2, error('Must specify norm via second parameter.'), end
+[m,n] = size(A);
+if min(m,n) == 1, est = norm(A,p); return, end
+
+if nargin < 4, noprint = 0; end
+if nargin < 3, tol = 1e-4; end
+
+% Stage I.  Use Algorithm OSE to get starting vector x for power method.
+% Form y = B*x, at each stage choosing x(k) = c and scaling previous
+% x(k+1:n) by s, where norm([c s],p)=1.
+
+sm = 9;  % Number of samples.
+y = zeros(m,1); x = zeros(n,1);
+
+for k=1:n
+
+    if k == 1
+       c = 1; s = 0;
+    else
+       W = [A(:,k) y];
+
+       if p == 2   % Special case.  Solve exactly for 2-norm.   
+          [U,S,V] = svd(full(W));
+          c = V(1,1); s = V(2,1);
+
+       else
+
+          fopt = 0;
+          for th=seqa(0,pi,sm)
+              c1 = cos(th); s1 = sin(th);
+              nrm = norm([c1 s1],p);
+              c1 = c1/nrm; s1 = s1/nrm;   % [c1 s1] has unit p-norm.
+              f = norm( W*[c1 s1]', p );
+              if f > fopt
+                 fopt = f;
+                 c = c1; s = s1;
+              end
+          end
+
+       end
+    end
+
+    x(k) = c;
+    y = x(k)*A(:,k) + s*y;
+    if k > 1, x(1:k-1) = s*x(1:k-1); end
+
+end
+
+est = norm(y,p);
+if noprint, fprintf('Alg OSE: %9.4e\n', est), end
+
+% Stage II.  Apply Algorithm PM (the power method).
+
+q = dual(p);
+k = 1;
+
+while 1
+
+    y = A*x;
+    est_old = est;
+    est = norm(y,p);
+
+    z = A' * dual(y,p);
+
+    if noprint
+        fprintf('%2.0f: norm(y) = %9.4e,  norm(z) = %9.4e', ...
+                 k, norm(y,p), norm(z,q))
+        fprintf('  rel_incr(est) = %9.4e\n', (est-est_old)/est)
+    end
+
+    if ( norm(z,q) <= z'*x | abs(est-est_old)/est <= tol ) & k > 1
+       return
+    end
+
+    x = dual(z,q);
+    k = k + 1;
+
+end
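Since the p = 1 estimate is exact and the others are lower bounds, the routine is easy to sanity-check against NORM; a sketch, assuming the toolbox (including DUAL and SEQA) is on the path:

   A = hilb(10);
   [pnorm(A,1)  norm(A,1)]   % Agree: the p = 1 estimate is exact.
   [pnorm(A,2)  norm(A,2)]   % Lower bound; typically agrees to several digits.
   pnorm(A, 3)               % A p-norm that NORM itself does not provide.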
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/poisson.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,14 @@
+function A = poisson(n)
+%POISSON   Block tridiagonal matrix from Poisson's equation (sparse).
+%          POISSON(N) is the block tridiagonal matrix of order N^2
+%          resulting from discretizing Poisson's equation with the
+%          5-point operator on an N-by-N mesh.
+
+%          Reference:
+%          G.H. Golub and C.F. Van Loan, Matrix Computations, second edition,
+%          Johns Hopkins University Press, Baltimore, Maryland, 1989
+%          (Section 4.5.4).
+
+S = tridiag(n,-1,2,-1);
+I = speye(n);
+A = kron(I,S) + kron(S,I);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/poldec.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,28 @@
+function [U, H] = poldec(A)
+%POLDEC   Polar decomposition.
+%         [U, H] = POLDEC(A) computes a matrix U of the same dimension
+%         (m-by-n) as A, and a Hermitian positive semi-definite matrix H,
+%         such that A = U*H.
+%         U has orthonormal columns if m >= n, and orthonormal rows if m <= n.
+%         U and H are computed via an SVD of A.
+%         U is a nearest unitary matrix to A in both the 2-norm and the
+%         Frobenius norm.
+
+%         Reference:
+%         N.J. Higham, Computing the polar decomposition---with applications,
+%         SIAM J. Sci. Stat. Comput., 7(4):1160--1174, 1986.
+%
+%         (The name `polar' is reserved for a graphics routine.)
+
+[m, n] = size(A);
+
+[P, S, Q] = svd(A, 0);  % Economy size.
+if m < n                % svd(A,0) is not economy-sized when m < n, so trim.
+   S = S(:, 1:m);
+   Q = Q(:, 1:m);
+end
+U = P*Q';
+if nargout == 2
+   H = Q*S*Q';
+   H = (H + H')/2;      % Force Hermitian by taking nearest Hermitian matrix.
+end
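A minimal sketch verifying the defining properties A = U*H, orthonormal columns of U, and Hermitian positive semidefinite H (poldec.m on the path):

   A = randn(5,3);
   [U, H] = poldec(A);
   norm(A - U*H)                     % Residual of order EPS.
   norm(U'*U - eye(3))               % U has orthonormal columns (here m >= n).
   [norm(H - H')   min(eig(H))]      % H is Hermitian positive semidefinite.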
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/prolate.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,21 @@
+function A = prolate(n, w)
+%PROLATE   Prolate matrix - symmetric, ill-conditioned Toeplitz matrix.
+%          A = PROLATE(N, W) is the N-by-N prolate matrix with parameter W.
+%          It is a symmetric Toeplitz matrix.
+%          If 0 < W < 0.5 then
+%             - A is positive definite
+%             - the eigenvalues of A are distinct, lie in (0, 1), and
+%               tend to cluster around 0 and 1.
+%          W defaults to 0.25.
+
+%          Reference:
+%          J.M. Varah. The Prolate matrix. Linear Algebra and Appl.,
+%          187:269--278, 1993.
+
+if nargin == 1, w = 0.25; end
+
+a = zeros(n,1);
+a(1) = 2*w;
+a(2:n) = sin( 2*pi*w*(1:n-1) ) ./ ( pi*(1:n-1) );
+
+A = toeplitz(a);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/ps.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,94 @@
+function y = ps(A, m, tol, rl, marksize)
+%PS     Dot plot of a pseudospectrum.
+%       PS(A, M, TOL, RL) plots an approximation to a pseudospectrum
+%       of the square matrix A, using M random perturbations of size TOL.
+%       M defaults to a SIZE(A)-dependent value and TOL to 1E-3.
+%       RL defines the type of perturbation:
+%         RL =  0 (default): absolute complex perturbations of 2-norm TOL.
+%         RL =  1:           absolute real perturbations of 2-norm TOL.
+%         RL = -1:           componentwise real perturbations of size TOL.
+%       The eigenvalues of A are plotted as crosses `x'.
+%       PS(A, M, TOL, RL, MARKSIZE) uses the specified marker size instead
+%       of a size that depends on the figure size, the matrix order, and M.
+%       If MARKSIZE < 0, the plot is suppressed and the plot data is returned
+%       as an output argument.
+%       PS(A, 0) plots just the eigenvalues of A.
+
+%       For a given TOL, the pseudospectrum of A is the set of
+%       pseudo-eigenvalues of A, that is, the set
+%       { e : e is an eigenvalue of A+E, for some E with NORM(E) <= TOL }.
+%
+%       Reference:
+%       L.N. Trefethen, Pseudospectra of matrices, in D.F. Griffiths and
+%            G.A. Watson, eds, Numerical Analysis 1991, Proceedings of the 14th
+%            Dundee Conference, vol. 260, Pitman Research Notes in Mathematics,
+%            Longman Scientific and Technical, Essex, UK, 1992, pp. 234-266.
+
+if diff(size(A)), error('Matrix must be square.'), end
+n = max(size(A));
+
+if nargin < 5, marksize = 0; end
+if nargin < 4, rl = 0; end
+if nargin < 3, tol = 1e-3; end
+if nargin < 2, m = max(1, round( 25*exp(-0.047*n) )); end
+
+if m == 0
+   e = eig(A);
+   ax = cpltaxes(e);
+   plot(real(e), imag(e), 'xg')
+   axis(ax);
+   axis('square');
+   return
+end
+
+x = zeros(m*n,1);
+i = sqrt(-1);
+
+for j = 1:m
+   if rl == -1     % Componentwise.
+      dA = -ones(n) + 2*rand(n);   % Uniform random numbers on [-1,1].
+      dA = tol * A .* dA;
+   else
+      if rl == 0   % Complex absolute.
+         dA = randn(n) + i*randn(n);
+      else         % Real absolute.
+         dA = randn(n);
+      end
+      dA = tol/norm(dA)*dA;
+   end
+   e = eig(A + dA);
+   x((j-1)*n+1:j*n) = e;
+end
+
+if marksize >= 0
+
+   ax = cpltaxes(x);
+   h = plot(real(x),imag(x),'.');
+   axis(ax);
+   axis('square');
+
+   % Next block adapted from SPY.M.
+   if marksize == 0
+      units = get(gca,'units');
+      set(gca,'units','points');
+      pos = get(gca,'position');
+      nps = 2.4*sqrt(n*m);  % Factor based on number of pseudo-ei'vals plotted.
+      myguess = round(3*min(pos(3:4))/nps);
+%      [nps myguess]
+      marksize = max(1,myguess);
+      set(gca,'units',units);
+   end
+   set(h,'markersize',marksize);
+%   set(h,'linemarkersize',marksize);
+
+   hold on
+   e = eig(A);
+   plot(real(e),imag(e),'xw');
+   set(h,'markersize',marksize);
+   hold off
+
+else
+
+  y = x;
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/pscont.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,104 @@
+function [x, y, z, m] = pscont(A, k, npts, ax, levels)
+%PSCONT   Contours and colour pictures of pseudospectra.
+%         PSCONT(A, K, NPTS, AX, LEVELS) plots LOG10(1/NORM(R(z))),
+%         where R(z) = INV(z*I-A) is the resolvent of the square matrix A,
+%         over an NPTS-by-NPTS grid.
+%         NPTS defaults to a SIZE(A)-dependent value.
+%         The limits are AX(1) and AX(2) on the x-axis and
+%                        AX(3) and AX(4) on the y-axis.
+%         If AX is omitted, suitable limits are guessed based on the
+%         eigenvalues of A.
+%         The eigenvalues of A are plotted as crosses `x'.
+%         K determines the type of plot:
+%             K = 0 (default) PCOLOR and CONTOUR
+%             K = 1           PCOLOR only
+%             K = 2           SURFC (SURF and CONTOUR)
+%             K = 3           SURF only
+%             K = 4           CONTOUR only
+%         The contours levels are specified by the vector LEVELS, which
+%         defaults to -10:-1 (recall we are plotting log10 of the data).
+%         Thus, by default, the contour lines trace out the boundaries of
+%         the epsilon pseudospectra for epsilon = 1e-10, ..., 1e-1.
+%         [X, Y, Z, NPTS] = PSCONT(A, ...) returns the plot data X, Y, Z
+%         and the value of NPTS used.
+%
+%         After calling this function you may want to change the
+%         color map (e.g., type COLORMAP HOT - see HELP COLOR) and the
+%         shading (e.g., type SHADING INTERP - see HELP INTERP).
+%         For an explanation of the term `pseudospectra' see PS.M.
+%         When A is real and the grid is symmetric about the x-axis, this
+%         routine exploits symmetry to halve the computational work.
+
+%         Colour pseudospectral pictures of this type are referred to as
+%         `spectral portraits' by Godunov, Kostin, and colleagues.
+%         References:
+%         V. I. Kostin, Linear algebra algorithms with guaranteed accuracy,
+%            Technical Report TR/PA/93/05, CERFACS, Toulouse, France, 1993.
+%         L.N. Trefethen, Pseudospectra of matrices, in D.F. Griffiths and
+%            G.A. Watson, eds, Numerical Analysis 1991, Proceedings of the 14th
+%            Dundee Conference, vol. 260, Pitman Research Notes in Mathematics,
+%            Longman Scientific and Technical, Essex, UK, 1992, pp. 234-266.
+
+Areal = ~norm(imag(A),1);
+
+if nargin < 5, levels = -10:-1; end
+e = eig(A);
+if nargin < 4
+   ax = cpltaxes(e);
+   if Areal, ax(3) = -ax(4); end  % Make sure region symmetric about x-axis.
+end
+n = max(size(A));
+if nargin < 3, npts = round( min(max(5, sqrt(20^2*10^3/n^3) ), 30)); end
+if nargin < 2, k = 0; end
+
+nptsx = npts; nptsy = npts;
+Ysymmetry = (Areal & ax(3) == -ax(4));
+
+x = seqa(ax(1), ax(2), npts);
+y = seqa(ax(3), ax(4), npts);
+if Ysymmetry                    % Exploit symmetry about x-axis.
+   nptsy = ceil(npts/2);
+   y1 = y;
+   y = y(1:nptsy);
+end
+
+[xx, yy] = meshgrid(x,y);
+z = xx + sqrt(-1)*yy;
+I = eye(n);
+Smin = zeros(nptsy, nptsx);
+
+for j=1:nptsx
+    for i=1:nptsy
+        Smin(i,j) = min( svd( z(i,j)*I-A ) );
+    end
+end
+
+z = log10( Smin + eps );
+if Ysymmetry
+   z = [z; z(nptsy-rem(npts,2):-1:1,:)];
+   y = y1;
+end
+
+if k == 0 | k == 1
+   pcolor(x, y, z); hold on
+elseif k == 2
+   surfc(x, y, z); hold on
+elseif k == 3
+   surf(x, y, z); hold on
+end
+
+if k == 0 | k == 4
+   contour(x, y, z, levels); hold on
+end
+
+if k ~= 2 & k ~= 3
+   if k == 0 | k == 1
+      s = 'k';   % Black.
+   else
+      s = 'w';   % White.
+   end
+   plot(real(e),imag(e),['x' s]);
+end
+
+axis('square');
+hold off
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/qmult.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,47 @@
+function B = qmult(A)
+%QMULT  Pre-multiply by random orthogonal matrix.
+%       QMULT(A) is Q*A where Q is a random real orthogonal matrix from
+%       the Haar distribution, of dimension the number of rows in A.
+%       Special case: if A is a scalar then QMULT(A) is the same as
+%                     QMULT(EYE(A)).
+
+%       Called by RANDSVD.
+%
+%       Reference:
+%       G.W. Stewart, The efficient generation of random
+%       orthogonal matrices with an application to condition estimators,
+%       SIAM J. Numer. Anal., 17 (1980), 403-409.
+
+[n, m] = size(A);
+
+%  Handle scalar A.
+if max(n,m) == 1
+   n = A;
+   A = eye(n);
+end
+
+d = zeros(n);
+
+for k = n-1:-1:1
+
+    % Generate random Householder transformation.
+    x = randn(n-k+1,1);
+    s = norm(x);
+    sgn = sign(x(1)) + (x(1)==0);    % Modification for sign(1)=1.
+    s = sgn*s;
+    d(k) = -sgn;
+    x(1) = x(1) + s;
+    beta = s*x(1);
+
+    % Apply the transformation to A.
+    y = x'*A(k:n,:);
+    A(k:n,:) = A(k:n,:) - x*(y/beta);
+
+end
+
+% Tidy up signs.
+for i=1:n-1
+    A(i,:) = d(i)*A(i,:);
+end
+A(n,:) = A(n,:)*sign(randn);
+B = A;
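The scalar form returns the random orthogonal factor itself, which makes the claimed properties easy to check; a minimal sketch (qmult.m on the path):

   Q = qmult(6);             % Same as QMULT(EYE(6)): a random orthogonal Q.
   norm(Q'*Q - eye(6))       % Of order EPS.
   abs(det(Q))               % Equals 1: Q is orthogonal.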
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/rando.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,21 @@
+function A = rando(n, k)
+%RANDO   Random matrix with elements -1, 0 or 1.
+%        A = RANDO(N, K) is a random N-by-N matrix with elements from
+%        one of the following discrete distributions (default K = 1):
+%          K = 1:  A(i,j) =  0 or 1    with equal probability,
+%          K = 2:  A(i,j) = -1 or 1    with equal probability,
+%          K = 3:  A(i,j) = -1, 0 or 1 with equal probability.
+%        N may be a 2-vector, in which case the matrix is N(1)-by-N(2).
+
+if nargin < 2, k = 1; end
+
+m = n(1);                    % Parameter n specifies dimension: m-by-n.
+n = n(max(size(n)));
+
+if k == 1                    % {0, 1}
+   A = floor( rand(m,n) + .5 );
+elseif k == 2                % {-1, 1}
+   A = 2*floor( rand(m,n) + .5 ) - 1;
+elseif k == 3                % {-1, 0, 1}
+   A = round( 3*rand(m,n) - 1.5 );
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/randsvd.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,104 @@
+function A = randsvd(n, kappa, mode, kl, ku)
+%RANDSVD  Random matrix with pre-assigned singular values.
+%      RANDSVD(N, KAPPA, MODE, KL, KU) is a (banded) random matrix of order N
+%      with COND(A) = KAPPA and singular values from the distribution MODE.
+%      N may be a 2-vector, in which case the matrix is N(1)-by-N(2).
+%      Available types:
+%             MODE = 1:   one large singular value,
+%             MODE = 2:   one small singular value,
+%             MODE = 3:   geometrically distributed singular values,
+%             MODE = 4:   arithmetically distributed singular values,
+%             MODE = 5:   random singular values with unif. dist. logarithm.
+%      If omitted, MODE defaults to 3, and KAPPA defaults to SQRT(1/EPS).
+%      If MODE < 0 then the effect is as for ABS(MODE) except that in the
+%      original matrix of singular values the order of the diagonal entries
+%      is reversed: small to large instead of large to small.
+%      KL and KU are the lower and upper bandwidths respectively; if they
+%      are omitted a full matrix is produced.
+%      If only KL is present, KU defaults to KL.
+%      Special case: if KAPPA < 0 then a random full symmetric positive
+%                    definite matrix is produced with COND(A) = -KAPPA and
+%                    eigenvalues distributed according to MODE.
+%                    KL and KU, if present, are ignored.
+
+%      Reference:
+%      N.J. Higham, Accuracy and Stability of Numerical Algorithms,
+%         Society for Industrial and Applied Mathematics, Philadelphia, PA,
+%         USA, 1996; sec. 26.3.
+
+%      This routine is similar to the more comprehensive Fortran routine xLATMS
+%      in the following reference:
+%      J.W. Demmel and A. McKenney, A test matrix generation suite,
+%      LAPACK Working Note #9, Courant Institute of Mathematical Sciences,
+%      New York, 1989.
+
+if nargin < 2, kappa = sqrt(1/eps); end
+if nargin < 3, mode = 3; end
+if nargin < 4, kl = n-1; end  % Full matrix.
+if nargin < 5, ku = kl; end   % Same upper and lower bandwidths.
+
+if abs(kappa) < 1, error('Condition number must be at least 1!'), end
+posdef = 0; if kappa < 0, posdef = 1; kappa = -kappa; end  % Special case.
+
+p = min(n);
+m = n(1);              % Parameter n specifies dimension: m-by-n.
+n = n(max(size(n)));
+
+if p == 1              % Handle case where A is a vector.
+   A = randn(m, n);
+   A = A/norm(A);
+   return
+end
+
+j = abs(mode);
+
+% Set up vector sigma of singular values.
+if j == 3
+   factor = kappa^(-1/(p-1));
+   sigma = factor.^[0:p-1];
+
+elseif j == 4
+   sigma = ones(p,1) - (0:p-1)'/(p-1)*(1-1/kappa);
+
+elseif j == 5    % In this case cond(A) <= kappa.
+   sigma = exp( -rand(p,1)*log(kappa) );
+
+elseif j == 2
+   sigma = ones(p,1);
+   sigma(p) = 1/kappa;
+
+elseif j == 1
+   sigma = ones(p,1)./kappa;
+   sigma(1) = 1;
+end
+
+% Convert to diagonal matrix of singular values.
+if mode < 0
+  sigma = sigma(p:-1:1);
+end
+sigma = diag(sigma);
+
+if posdef                % Handle special case.
+   Q = qmult(p);
+   A = Q'*sigma*Q;
+   A = (A + A')/2;       % Ensure matrix is symmetric.
+   return
+end
+
+if m ~= n
+   sigma(m, n) = 0;      % Expand to m-by-n diagonal matrix.
+end
+
+if kl == 0 & ku == 0     % Diagonal matrix requested - nothing more to do.
+   A = sigma;
+   return
+end
+
+% A = U*sigma*V, where U, V are random orthogonal matrices from the
+% Haar distribution.
+A = qmult(sigma');
+A = qmult(A');
+
+if kl < n-1 | ku < n-1   % Bandwidth reduction.
+   A = bandred(A, kl, ku);
+end
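A sketch of the condition number and bandwidth controls described above, assuming the toolbox (including QMULT and BANDRED) is on the path:

   A = randsvd(10, 1e6);             % Full matrix, geometric singular value distribution.
   cond(A)                           % Approximately 1e6.
   B = randsvd(10, 1e3, 3, 1, 1);    % Tridiagonal, COND(B) = 1e3.
   [norm(tril(B,-2),1)  cond(B)]     % Entries outside the band are negligible.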
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/redheff.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,28 @@
+function A = redheff(n)
+%REDHEFF    A (0,1) matrix of Redheffer associated with the Riemann hypothesis.
+%           A = REDHEFF(N) is an N-by-N matrix of 0s and 1s defined by
+%               A(i,j) = 1 if j = 1 or if i divides j,
+%               A(i,j) = 0 otherwise.
+%           It has N - FLOOR(LOG2(N)) - 1 eigenvalues equal to 1,
+%           a real eigenvalue (the spectral radius) approximately SQRT(N),
+%           a negative eigenvalue approximately -SQRT(N),
+%           and the remaining eigenvalues are provably ``small''.
+%           Barrett and Jarvis (1992) conjecture that
+%             ``the small eigenvalues all lie inside the unit circle
+%               ABS(Z) = 1'',
+%           and a proof of this conjecture, together with a proof that some
+%           eigenvalue tends to zero as N tends to infinity, would yield
+%           a new proof of the prime number theorem.
+%           The Riemann hypothesis is true if and only if
+%           DET(A) = O( N^(1/2+epsilon) )
+%           for every epsilon > 0.
+%           See also RIEMANN.
+
+%           Reference:
+%           W.W. Barrett and T.J. Jarvis,
+%           Spectral Properties of a Matrix of Redheffer,
+%           Linear Algebra and Appl., 162 (1992), pp. 673-683.
+
+i = (1:n)'*ones(1,n);
+A = ~rem(i',i);
+A(:,1) = ones(n,1);
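The eigenvalue structure quoted above can be explored numerically; a sketch (the count below is the theoretical value; eigenvalue 1 may be defective, so the computed copies can scatter slightly):

   n = 64;
   e = eig(redheff(n));
   [max(real(e))  min(real(e))  sqrt(n)]   % Extreme eigenvalues are roughly +/- SQRT(N).
   sum(abs(e - 1) < 1e-2)                  % Ideally n - floor(log2(n)) - 1 = 57.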
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/riemann.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,23 @@
+function A = riemann(n)
+%RIEMANN    A matrix associated with the Riemann hypothesis.
+%           A = RIEMANN(N) is an N-by-N matrix for which the
+%           Riemann hypothesis is true if and only if
+%           DET(A) = O( N! N^(-1/2+epsilon) ) for every epsilon > 0
+%                                             (`!' denotes factorial).
+%           A = B(2:N+1, 2:N+1), where
+%           B(i,j) = i-1 if i divides j and -1 otherwise.
+%           Properties include, with M = N+1:
+%              Each eigenvalue E(i) satisfies ABS(E(i)) <= M - 1/M.
+%              i <= E(i) <= i+1 with at most M-SQRT(M) exceptions.
+%              All integers in the interval (M/3, M/2] are eigenvalues.
+%
+%           See also REDHEFF.
+
+%           Reference:
+%           F. Roesler, Riemann's hypothesis as an eigenvalue problem,
+%           Linear Algebra and Appl., 81 (1986), pp. 153-198.
+
+n = n+1;
+i = (2:n)'*ones(1,n-1);
+j = i';
+A = i .* (~rem(j,i)) - ones(n-1);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/rq.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,7 @@
+function z = rq(A,x)
+%RQ      Rayleigh quotient.
+%        RQ(A,x) is the Rayleigh quotient of A and x, x'*A*x/(x'*x).
+
+%        Called by FV.
+
+z = x'*A*x/(x'*x);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/rschur.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,33 @@
+function A = rschur(n, mu, x, y)
+%RSCHUR   An upper quasi-triangular matrix.
+%         A = RSCHUR(N, MU, X, Y) is an N-by-N matrix in real Schur form.
+%         All the diagonal blocks are 2-by-2 (except for the last one, if N
+%         is odd) and the k'th has the form [x(k) y(k); -y(k) x(k)].
+%         Thus the eigenvalues of A are x(k) +/- i*y(k).
+%         MU (default 1) controls the departure from normality.
+%         Defaults: X(k) = -k^2/10, Y(k) = -k, i.e., the eigenvalues
+%                   lie on the parabola x = -y^2/10.
+
+%         References:
+%         F. Chatelin, Eigenvalues of Matrices, John Wiley, Chichester, 1993;
+%            Section 4.2.7.
+%         F. Chatelin and V. Fraysse, Qualitative computing: Elements
+%            of a theory for finite precision computation, Lecture notes,
+%            CERFACS, Toulouse, France and THOMSON-CSF, Orsay, France,
+%            June 1993.
+
+m = floor(n/2)+1;
+alpha = 10; beta = 1;
+
+if nargin < 4, y = -(1:m)/beta; end
+if nargin < 3, x = -(1:m).^2/alpha; end
+if nargin < 2, mu = 1; end
+
+A = diag( mu*ones(n-1,1), 1 );
+for i=1:2:2*(m-1)
+    j = (i+1)/2;
+    A(i:i+1,i:i+1) = [x(j) y(j); -y(j) x(j)];
+end
+if 2*(m-1) ~= n   % True only when N is odd: add the trailing 1-by-1 block.
+   A(n,n) = x(m);
+end
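With the default parameters the complex eigenvalues lie on the parabola x = -y^2/10, as stated above; a minimal sketch (rschur.m on the path):

   A = rschur(7);
   e = eig(A);
   e = e(imag(e) ~= 0);              % The complex conjugate pairs x(k) +/- i*y(k).
   norm(real(e) + imag(e).^2/10)     % On the parabola: of order EPS.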
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/see.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,64 @@
+function see(A, k)
+%SEE    Pictures of a matrix and its (pseudo-) inverse.
+%       SEE(A) displays MESH(A), MESH(PINV(A)), SEMILOGY(SVD(A),'o'),
+%       and (if A is square) FV(A) in four subplot windows.
+%       SEE(A, 1) plots an approximation to the pseudospectrum in the
+%       third window instead of the singular values.
+%       SEE(A, -1) plots only the eigenvalues in the fourth window,
+%       which is much quicker than plotting the field of values.
+%       If A is complex, only real parts are used for the mesh plots.
+%       If A is sparse, just SPY(A) is shown.
+
+if nargin < 2, k = 0; end
+[m, n] = size(A);
+square = (m == n);
+clf
+
+if issparse(A)
+
+   spy(A);
+
+else
+
+   B = pinv(A);
+   s = svd(A);
+   zs = (s == zeros(size(s)));
+   if any( zs )
+      s( zs ) = [];  % Remove zero singular values for semilogy plot.
+   end
+
+   subplot(2,2,1)
+   mesh(real(A)), axis('ij'),  drawnow
+   subplot(2,2,2)
+   mesh(real(B)), axis('ij'),  drawnow
+
+   if k <= 0
+      subplot(2,2,3)
+      semilogy(s, 'og')
+      hold on, semilogy(s, '-'), hold off, drawnow
+      if any(zs), subplot(2,2,3), title('Zero(s) omitted'), subplot(2,2,4), end
+   elseif k == 1
+      subplot(2,2,3)
+      ps(A);  drawnow
+   end
+
+   if square
+      if k == -1
+         subplot(2,2,4)
+         ps(A, 0);
+      else
+         subplot(2,2,4)
+         fv(A);
+      end
+   else
+      if k == 0
+         subplot(2,2,4)
+         axis off
+      else
+         clf
+      end
+      text(0,0,'Matrix not square.')
+   end
+   subplot;
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/seqa.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,13 @@
+function y = seqa(a, b, n)
+%SEQA   Additive sequence.
+%       Y = SEQA(A, B, N) produces a row vector comprising N equally
+%       spaced numbers starting at A and finishing at B.
+%       If N is omitted then 10 points are generated.
+
+if nargin == 2, n = 10; end
+
+if n <= 1
+   y = a;
+   return
+end
+y = [a+(0:n-2)*(b-a)/(n-1), b];
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/seqcheb.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,17 @@
+function x = seqcheb(n, k)
+%SEQCHEB   Sequence of points related to Chebyshev polynomials.
+%          X = SEQCHEB(N, K) produces a row vector of length N.
+%          There are two choices:
+%              K = 1:  zeros of T_N,         (the default)
+%              K = 2:  extrema of T_{N-1},
+%          where T_k is the Chebyshev polynomial of degree k.
+
+if nargin == 1, k = 1; end
+
+if k == 1                     %  Zeros of T_n
+   i = 1:n; j = .5*ones(1,n);
+   x = cos( (i-j) * (pi/n) );
+elseif k == 2                 %  Extrema of T_(n-1)
+   i = 0:n-1;
+   x = cos( i * (pi/(n-1)) );
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/seqm.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,17 @@
+function y = seqm(a, b, n)
+%SEQM   Multiplicative sequence.
+%       Y = SEQM(A, B, N) produces a row vector comprising N
+%       logarithmically equally spaced numbers, starting at A ~= 0
+%       and finishing at B ~= 0.
+%       If A*B < 0 and N > 2 then complex results are produced.
+%       If N is omitted then 10 points are generated.
+
+if nargin == 2, n = 10; end
+
+if n <= 1
+   y = a;
+   return
+end
+p = [0:n-2]/(n-1);
+r = (b/a).^p;
+y = [a*r, b];
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/show.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,9 @@
+function show(x)
+%SHOW   Display signs of matrix elements.
+%       SHOW(X) displays X in `FORMAT +' form, that is,
+%       with `+', `-' and  blank representing positive, negative
+%       and zero elements respectively.
+
+format +
+disp(x)
+format
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/signm.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,21 @@
+function [S, N] = signm(A)
+%SIGNM   Matrix sign decomposition.
+%        [S, N] = SIGNM(A) is the matrix sign decomposition A = S*N,
+%        computed via the Schur decomposition.
+%        S is the matrix sign function, sign(A).
+
+%        Reference:
+%        N.J. Higham, The matrix sign decomposition and its relation to the
+%        polar decomposition, Linear Algebra and Appl., 212/213:3-20, 1994.
+
+[Q, T] = schur(A);
+[Q, T] = rsf2csf(Q, T);
+S = Q * matsignt(T) * Q';
+
+% Only problem with Schur method is possible nonzero imaginary part when
+% A is real.  Next line takes care of that.
+if ~norm(imag(A),1), S = real(S); end
+
+if nargout == 2
+   N = S*A;
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/skewpart.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,8 @@
+function S = skewpart(A)
+%SKEWPART  Skew-symmetric (skew-Hermitian) part.
+%          SKEWPART(A) is the skew-symmetric (skew-Hermitian) part of A,
+%          (A - A')/2.
+%          It is the nearest skew-symmetric (skew-Hermitian) matrix to A in
+%          both the 2- and the Frobenius norms.
+
+S = (A - A')./2;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/smoke.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,22 @@
+function A = smoke(n, k)
+%SMOKE     Smoke matrix - complex, with a `smoke ring' pseudospectrum.
+%          SMOKE(N) is an N-by-N matrix with 1s on the
+%          superdiagonal, 1 in the (N,1) position, and powers of
+%          roots of unity along the diagonal.
+%          SMOKE(N, 1) is the same except for a zero (N,1) element.
+%          The eigenvalues of SMOKE(N, 1) are the N'th roots of unity;
+%          those of SMOKE(N) are the N'th roots of unity times 2^(1/N).
+%
+%          Try PS(SMOKE(32)).  For SMOKE(N, 1) the pseudospectrum looks
+%          like a sausage folded back on itself.
+%          GERSH(SMOKE(N, 1)) is interesting.
+
+%          Reference:
+%          L. Reichel and L.N. Trefethen, Eigenvalues and pseudo-eigenvalues of
+%          Toeplitz matrices, Linear Algebra and Appl., 162-164:153-185, 1992.
+
+if nargin < 2, k = 0; end
+
+w = exp(2*pi*i/n);
+A = diag( [w.^(1:n-1) 1] ) + diag(ones(n-1,1),1);
+if k == 0, A(n,1) = 1; end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/sparsify.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,24 @@
+function A = sparsify(A, p)
+%SPARSIFY   Randomly sets matrix elements to zero.
+%           S = SPARSIFY(A, P) is A with elements randomly set to zero
+%           (S = S' if A is square and A = A', i.e. symmetry is preserved).
+%           Each element has probability P of being zeroed.
+%           Thus on average 100*P percent of the elements of A will be zeroed.
+%           Default: P = 0.25.
+
+if nargin < 2, p = 0.25; end
+if p<0 | p>1, error('Second parameter must be between 0 and 1 inclusive.'), end
+
+% Is A square and symmetric?
+symm = 0;
+if min(size(A)) == max(size(A))
+   if norm(A-A',1) == 0, symm = 1; end
+end
+
+if ~symm
+   A = A .* (rand(size(A)) > p);        % Unsymmetric case
+else
+   d = diag(A);  A = triu(A,1) .* (rand(size(A)) > p);  % Preserve symmetry; save diagonal first.
+   A = A + A';
+   A = A + diag( d .* (rand(size(d)) > p) );
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/sub.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,17 @@
+function S = sub(A, i, j)
+%SUB     Principal submatrix.
+%        SUB(A,i,j) is A(i:j,i:j).
+%        SUB(A,i)  is the leading principal submatrix of order i,
+%        A(1:i,1:i), if i>0, and the trailing principal submatrix
+%        of order ABS(i) if i<0.
+
+if nargin == 2
+   if i >= 0
+      S = A(1:i, 1:i);
+   else
+      n = min(size(A));
+      S = A(n+i+1:n, n+i+1:n);
+   end
+else
+   S = A(i:j, i:j);
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/symmpart.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,7 @@
+function S = symmpart(A)
+%SYMMPART  Symmetric (Hermitian) part.
+%          SYMMPART(A) is the symmetric (Hermitian) part of A, (A + A')/2.
+%          It is the nearest symmetric (Hermitian) matrix to A in both the
+%          2- and the Frobenius norms.
+
+S = (A + A')./2;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/tmtdemo.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,263 @@
+clc
+format compact
+echo on
+
+%TMTDEMO       Demonstration of Test Matrix Toolbox.
+%              N. J. Higham.
+
+% The Test Matrix Toolbox contains test matrices, visualization routines,
+% and other miscellaneous routines.
+
+% The version of the toolbox is
+
+matrix(-1)
+echo on
+
+% For this demonstration you will need to view both the command window
+% and one figure window.
+% This demonstration emphasises graphics and shows only
+% some of the features of the toolbox.
+
+pause  % Press any key to continue after pauses.
+
+% A list of the available M-files is obtained by typing `matrix':
+
+matrix
+
+pause
+
+% The FV command plots the boundary of the field of values of a matrix
+% (the set of all Rayleigh quotients) and plots the eigenvalues as
+% crosses (`x').  Here are some examples:
+
+% The Grcar matrix is a Toeplitz matrix of the following form:
+
+grcar(5)
+
+% Here is the field of values of the 10-by-10 Grcar matrix:
+
+fv(grcar(10));
+title('fv(grcar(10))')
+
+pause
+
+% Next, we form a random orthogonal matrix and look at its field of values.
+% The boundary is the convex hull of the eigenvalues since A is normal.
+
+A = randsvd(10, 1);
+fv(A);
+title('randsvd(10, 1)')
+pause
+
+% The RANDSVD command generates random matrices with pre-assigned
+% condition number, and various singular value distributions:
+
+A = randsvd(6, 1e6, 3);  % Exponential distribution.
+
+format short e
+svd(A)'
+cond(A)
+pause
+
+% The PS command plots an approximation to a pseudospectrum of A,
+% which is the set of complex numbers that are eigenvalues of some
+% perturbed matrix A + E, with the norm of E at most epsilon
+% (default: epsilon = 1E-3).
+% The eigenvalues of A are plotted as crosses (`x').
+% Here are some interesting PS plots.
+
+% First, we use the KAHAN matrix, a triangular matrix made up of sines and
+% cosines.  Here is an approximate pseudospectrum of the 10-by-10 matrix:
+
+ps(kahan(10),25);
+title('ps(kahan(10))')
+pause
+
+% Next, a different way of looking at pseudospectra, via norms of
+% the resolvent.  (The resolvent of A is INV(z*I-A), where z is a complex
+% variable).  PSCONT gives a color map with a superimposed contour
+% plot.  Here we specify a region of the complex plane in
+% which the 8-by-8 Kahan matrix is interesting to look at.
+
+pscont(kahan(8), 0, 20, [0.2 1.2 -0.5 0.5]);
+title('pscont(kahan(8))')
+pause
+
+% The TRIW matrix is upper triangular, made up of 1s and -1s:
+
+triw(4)
+
+% Here is a combined surface and contour plot of its resolvent.
+% Notice how the repeated eigenvalue 1 `sucks in' the resolvent.
+
+pscont(triw(11), 2, 15, [-2 2 -2 2]);
+title('pscont(triw(11))')
+pause
+
+% The next PSCONT plot is for the companion matrix of the characteristic
+% polynomial of the CHEBSPEC matrix:
+
+A = chebspec(8); C = compan(A);
+
+% The SHOW command shows the +/- pattern of the elements of a matrix, with
+% blanks for zero elements:
+
+show(C)
+
+pscont(C, 2, 20, [-.1 .1 -.1 .1]);
+title('pscont(compan(chebspec(8)))')
+pause
+
+% The following matrix has a pseudospectrum in the form of a limacon.
+
+n = 25; A = triw(n,1,2) - eye(n);
+sub(A, 6)               % Leading principal 6-by-6 submatrix of A.
+ps(A);
+pause
+
+% Here is the 8-by-8 Frank matrix.
+A = frank(8)
+
+% We can get a visual representation of the matrix using the SEE
+% command, which produces subplots with the following layout:
+%     /---------------------------------\
+%     | MESH(A)            MESH(INV(A)) |
+%     | SEMILOGY(SVD(A))   FV(A)        |
+%     \---------------------------------/
+% where FV is the field of values.
+
+see(A)
+
+pause
+
+% The Frank matrix is well-known for having ill-conditioned eigenvalues.
+% Here are the eigenvalues (in column 1) together with the corresponding
+% eigenvalue condition numbers (in column 2):
+
+format short e
+[V, D, c] = eigsens(A);
+[diag(D) c 1./diag(D)]
+
+% In the last column are shown the reciprocals of the eigenvalues.
+% Notice that if LAMBDA is an eigenvalue, so is 1/LAMBDA!
+
+pause
+
+% Matlab's MAGIC function produces magic squares:
+
+A = magic(5)
+
+% Using the toolbox routine PNORM we can estimate the matrix p-norm
+% for any value of p.
+
+[pnorm(A,1) pnorm(A,1.5) pnorm(A,2) pnorm(A,pi) pnorm(A,inf)]
+
+% As this example suggests, the p-norm of a magic square is
+% constant for all p!
+
+pause
+
+% GERSH plots Gershgorin disks.  Here are some interesting examples.
+
+gersh(lesp(12));
+title('gersh(lesp(12))')
+pause
+
+gersh(hanowa(10));
+title('gersh(hanowa(10))')
+pause
+
+gersh(ipjfact(6,1));
+title('gersh(ipjfact(6,1))')
+pause
+
+gersh(smoke(16,1));
+title('gersh(smoke(16,1))')
+pause
+
+% A Hadamard matrix has elements 1 or -1 and mutually orthogonal rows:
+
+show(hadamard(16))
+
+% A CONTOUR plot of this matrix is interesting:
+
+contour(hadamard(16))
+pause
+
+% There are a few sparse matrices in the toolbox.
+% WATHEN is a finite element matrix with random entries.
+
+spy(wathen(6,6));  % SPY plot of sparsity pattern.
+
+pause
+
+% GFPP generates matrices for which Gaussian elimination with partial
+% pivoting produces a large growth factor.
+
+gfpp(6)
+pause
+
+% Let's find the growth factor for partial pivoting and complete pivoting
+% for a bigger matrix:
+
+A = gfpp(20);
+[L, U] = lu(A);    % Partial pivoting.
+max(max(abs(U))) / max(max(abs(A)))
+
+[L, U, P, Q, rho] = gecp(A);  % Complete pivoting using toolbox routine GECP.
+rho
+% As expected, complete pivoting does not produce large growth here.
+pause
+
+% The toolbox function MATRIX allows the test matrices to be accessed
+% by number.  The following piece of code steps through all the
+% square matrices of arbitrary dimension, setting A to each 10-by-10
+% matrix in turn. It evaluates the 2-norm condition number and the
+% ratio of the largest to smallest eigenvalue (in absolute values).
+c = []; e = []; j = 1;
+for i=1:matrix(0)
+    A = full(matrix(i, 10));
+    if norm(skewpart(A),1)  % If not Hermitian...
+       c1 = cond(A);
+       eg = eig(A);
+       e1 = max(abs(eg)) / min(abs(eg));
+       % Filter out extremely ill-conditioned matrices.
+       if c1 <= 1e10, c(j) = c1; e(j) = e1; j = j + 1; end
+    end
+end
+
+% The following plots confirm that the condition number can be much
+% larger than the extremal eigenvalue ratio.
+echo off
+j = max(size(c));
+subplot(2,1,1)
+semilogy(1:j, c, 'x', 1:j, e, 'o'), hold on
+semilogy(1:j, c, '-', 1:j, e, '--'), hold off
+title('cond: x, eig_ratio: o')
+subplot(2,1,2)
+semilogy(1:j, c./e)
+title('cond/eig_ratio')
+echo on
+pause
+
+% Finally, here are three interesting pseudospectra based on pentadiagonal
+% Toeplitz matrices:
+
+A = full(pentoep(6,0,1/2,0,0,1))
+
+subplot(1,1,1)
+ps(full(pentoep(32,0,1/2,0,0,1)));            % Propeller
+title('ps(full(pentoep(32,0,1/2,0,0,1)))')
+pause
+
+ps(inv(full(pentoep(32,0,1,1,0,.25))));       % Man in the moon
+title('ps(inv(full(pentoep(32,0,1,1,0,.25))))')
+pause
+
+ps(full(pentoep(32,0,1/2,1,1,1)));            % Fish
+title('ps(full(pentoep(32,0,1/2,1,1,1)))')
+pause
+
+echo off
+clear A L U P Q V D
+format
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/trap2tri.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,56 @@
+function [Q, T] = trap2tri(L)
+%TRAP2TRI  Unitary reduction of trapezoidal matrix to triangular form.
+%          [Q, T] = TRAP2TRI(L), where L is an m-by-n lower trapezoidal
+%          matrix with m >= n, produces a unitary Q such that QL = [T; 0],
+%          where T is n-by-n and lower triangular.
+%          Q is a product of Householder transformations.
+
+%          Called by RANDSVD.
+%
+%          Reference:
+%          G.H. Golub and C.F. Van Loan, Matrix Computations, second edition,
+%          Johns Hopkins University Press, Baltimore, Maryland, 1989.
+%          P5.2.5, p. 220.
+
+[n, r] = size(L);
+
+if r > n  | norm(L-tril(L),1)
+   error('Matrix must be lower trapezoidal and m-by-n with m >= n.')
+end
+
+Q = eye(n);  % To hold product of H.T.s
+
+if r ~= n
+
+   % Reduce nxr L =   r  [L1]  to lower triangular form: QL = [T].
+   %                 n-r [L2]                                 [0]
+
+   for j=r:-1:1
+       % x is the vector to be reduced, which we overwrite with the H.T. vector.
+       x = L(j:n,j);
+       x(2:r-j+1) = zeros(r-j,1);  % These elts of column left unchanged.
+       s = norm(x)*(sign(x(1)) + (x(1)==0));    % Modification for sign(1)=1.
+
+       % Nothing to do if x is zero (or x=a*e_1, but we don't check for that).
+       if s ~= 0
+          x(1) = x(1) + s;
+          beta = s'*x(1);
+
+          %  Implicitly apply H.T. to pivot column.
+          % L(r+1:n,j) = zeros(n-r,1); % We throw these elts away at the end.
+          L(j,j) = -s;
+
+          % Apply H.T. to rest of matrix.
+          if j > 1
+             y = x'*L(j:n, 1:j-1);
+             L(j:n, 1:j-1) = L(j:n, 1:j-1) - x*(y/beta);
+          end
+
+          % Update H.T. product.
+          y = x'*Q(j:n,:);
+          Q(j:n,:) = Q(j:n,:) - x*(y/beta);
+       end
+   end
+end
+
+T = L(1:r,:);   % Rows r+1:n have been zeroed out.
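A minimal sketch of the documented use (the routine is also exercised by RANDSVD):

   L = tril(randn(6,4));             % Lower trapezoidal, m = 6 >= n = 4.
   [Q, T] = trap2tri(L);
   norm(Q*L - [T; zeros(2,4)])       % Q*L = [T; 0]: residual of order EPS.
   norm(Q'*Q - eye(6))               % Q is orthogonal.
   norm(triu(T,1))                   % T is lower triangular.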
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/tridiag.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,41 @@
+function T = tridiag(n, x, y, z)
+%TRIDIAG  Tridiagonal matrix (sparse).
+%         TRIDIAG(X, Y, Z) is the tridiagonal matrix with subdiagonal X,
+%         diagonal Y, and superdiagonal Z.
+%         X and Z must be vectors of dimension one less than Y.
+%         Alternatively TRIDIAG(N, C, D, E), where C, D, and E are all
+%         scalars, yields the Toeplitz tridiagonal matrix of order N
+%         with subdiagonal elements C, diagonal elements D, and superdiagonal
+%         elements E.   This matrix has eigenvalues (Todd 1977)
+%                  D + 2*SQRT(C*E)*COS(k*PI/(N+1)), k=1:N.
+%         TRIDIAG(N) is the same as TRIDIAG(N,-1,2,-1), which is
+%         a symmetric positive definite M-matrix (the negative of the
+%         second difference matrix).
+
+%         References:
+%         J. Todd, Basic Numerical Mathematics, Vol. 2: Numerical Algebra,
+%           Birkhauser, Basel, and Academic Press, New York, 1977, p. 155.
+%         D.E. Rutherford, Some continuant determinants arising in physics and
+%           chemistry---II, Proc. Royal Soc. Edin., 63, A (1952), pp. 232-241.
+
+if nargin == 1, x = -1; y = 2; z = -1; end
+if nargin == 3, z = y; y = x; x = n; end
+
+x = x(:); y = y(:); z = z(:);   % Force column vectors.
+
+if max( [ size(x) size(y) size(z) ] ) == 1
+   x = x*ones(n-1,1);
+   z = z*ones(n-1,1);
+   y = y*ones(n,1);
+else
+   [nx, m] = size(x);
+   [ny, m] = size(y);
+   [nz, m] = size(z);
+   if (ny - nx - 1) | (ny - nz -1)
+      error('Dimensions of vector arguments are incorrect.')
+   end
+end
+
+% T = diag(x, -1) + diag(y) + diag(z, 1);  % For non-sparse matrix.
+n = max(size(y));
+T = spdiags([ [x;0] y [0;z] ], -1:1, n, n);
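The eigenvalue formula quoted above is easy to confirm for the default second difference matrix; a sketch (the result is sparse, so FULL is needed before EIG):

   n = 8;
   T = full(tridiag(n));                    % TRIDIAG(N,-1,2,-1): C = E = -1, D = 2.
   lam = 2 + 2*cos((1:n)'*pi/(n+1));        % D + 2*SQRT(C*E)*COS(k*PI/(N+1)).
   norm(sort(eig(T)) - sort(lam))           % Of order EPS.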
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/triw.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,41 @@
+function t = triw(n, alpha, k)
+%TRIW   Upper triangular matrix discussed by Wilkinson and others.
+%       TRIW(N, ALPHA, K) is the upper triangular matrix with ones on
+%       the diagonal and ALPHAs on the first K >= 0 superdiagonals.
+%       N may be a 2-vector, in which case the matrix is N(1)-by-N(2) and
+%       upper trapezoidal.
+%       Defaults: ALPHA = -1,
+%                 K = N - 1     (full upper triangle).
+%       TRIW(N) is a matrix discussed by Kahan, Golub and Wilkinson.
+%
+%       Ostrowski (1954) shows that
+%         COND(TRIW(N,2)) = COT(PI/(4*N))^2,
+%       and for large ABS(ALPHA),
+%         COND(TRIW(N,ALPHA)) is approximately ABS(ALPHA)^N*SIN(PI/(4*N-2)).
+%
+%       Adding -2^(2-N) to the (N,1) element makes TRIW(N) singular,
+%       as does adding -2^(1-N) to all elements in the first column.
+
+%       References:
+%       G.H. Golub and J.H. Wilkinson, Ill-conditioned eigensystems and the
+%          computation of the Jordan canonical form, SIAM Review,
+%          18(4), 1976, pp. 578-619.
+%       W. Kahan, Numerical linear algebra, Canadian Math. Bulletin,
+%          9 (1966), pp. 757-801.
+%       A.M. Ostrowski, On the spectrum of a one-parametric family of
+%          matrices, J. Reine Angew. Math., 193 (3/4), 1954, pp. 143-160.
+%       J.H. Wilkinson, Singular-value decomposition---basic aspects,
+%          in D.A.H. Jacobs, ed., Numerical Software---Needs and Availability,
+%          Academic Press, London, 1978, pp. 109-135.
+
+m = n(1);              % Parameter n specifies dimension: m-by-n.
+n = n(max(size(n)));
+
+if nargin < 3, k = n-1; end
+if nargin < 2, alpha = -1; end
+
+if max(size(alpha)) ~= 1
+   error('Second argument must be a scalar.')
+end 
+
+t = tril( eye(m,n) + alpha*triu(ones(m,n), 1), k);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/vand.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,29 @@
+function V = vand(m, p)
+%VAND   Vandermonde matrix.
+%       V = VAND(P), where P is a vector, produces the (primal)
+%       Vandermonde matrix based on the points P, i.e. V(i,j) = P(j)^(i-1).
+%       VAND(M,P) is a rectangular version of VAND(P) with M rows.
+%       Special case: If P is a scalar then P equally spaced points on [0,1]
+%                     are used.
+
+%       Reference:
+%       N.J. Higham, Stability analysis of algorithms for solving
+%       confluent Vandermonde-like systems, SIAM J. Matrix Anal. Appl.,
+%       11 (1990), pp. 23-41.
+
+if nargin == 1, p = m; end
+n = max(size(p));
+
+%  Handle scalar p.
+if n == 1
+   n = p;
+   p = seqa(0,1,n);
+end
+
+if nargin == 1, m = n; end
+
+p = p(:).';                    % Ensure p is a row vector.
+V = ones(m,n);
+for i=2:m
+    V(i,:) = p.*V(i-1,:);
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/wathen.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,54 @@
+function A = wathen(nx, ny, k)
+%WATHEN  Wathen matrix - a finite element matrix (sparse, random entries).
+%        A = WATHEN(NX,NY) is a sparse random N-by-N finite element matrix
+%        where N = 3*NX*NY + 2*NX + 2*NY + 1.
+%        A is precisely the `consistent mass matrix' for a regular NX-by-NY
+%        grid of 8-node (serendipity) elements in 2 space dimensions.
+%        A is symmetric positive definite for any (positive) values of
+%        the `density', RHO(NX,NY), which is chosen randomly in this routine.
+%        In particular, if D = DIAG(DIAG(A)), then
+%              0.25 <= EIG(INV(D)*A) <= 4.5
+%        for any positive integers NX and NY and any densities RHO(NX,NY).
+%        This diagonally scaled matrix is returned by WATHEN(NX,NY,1).
+
+%        Reference:
+%        A.J. Wathen, Realistic eigenvalue bounds for the Galerkin
+%        mass matrix, IMA J. Numer. Anal., 7 (1987), pp. 449-457.
+
+if nargin < 2, error('Two dimensioning arguments must be specified.'), end
+if nargin < 3, k = 0; end
+
+e1 = [6 -6 2 -8;-6 32 -6 20;2 -6 6 -6;-8 20 -6 32];
+e2 = [3 -8 2 -6;-8 16 -8 20;2 -8 3 -8;-6 20 -8 16];
+e = [e1 e2; e2' e1]/45;
+n = 3*nx*ny+2*nx+2*ny+1;
+A = sparse(n,n);
+
+RHO = 100*rand(nx,ny);
+
+ for j=1:ny
+     for i=1:nx
+
+      nn(1) = 3*j*nx+2*i+2*j+1;
+      nn(2) = nn(1)-1;
+      nn(3) = nn(2)-1;
+      nn(4) = (3*j-1)*nx+2*j+i-1;
+      nn(5) = 3*(j-1)*nx+2*i+2*j-3;
+      nn(6) = nn(5)+1;
+      nn(7) = nn(6)+1;
+      nn(8) = nn(4)+1;
+
+      em = e*RHO(i,j);
+
+         for krow=1:8
+             for kcol=1:8
+                 A(nn(krow),nn(kcol)) = A(nn(krow),nn(kcol))+em(krow,kcol);
+             end
+         end
+
+      end
+  end
+
+if k == 1
+   A = diag(diag(A)) \ A;
+end
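A minimal sketch checking the eigenvalue bounds stated above for the diagonally scaled matrix (wathen.m on the path):

   B = wathen(4, 4, 1);              % The diagonally scaled matrix INV(D)*A.
   e = eig(full(B));
   [min(real(e))  max(real(e))]      % Should lie within [0.25, 4.5] for any NX, NY, RHO.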
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/wilk.m	Wed May 06 14:56:53 2015 +0200
@@ -0,0 +1,45 @@
+function [A, b] = wilk(n)
+%WILK   Various specific matrices devised/discussed by Wilkinson.
+%       [A, b] = WILK(N) is the matrix or system of order N.
+%       N = 3: upper triangular system Ux=b illustrating inaccurate solution.
+%       N = 4: lower triangular system Lx=b, ill-conditioned.
+%       N = 5: HILB(6)(1:5,2:6)*1.8144.  Symmetric positive definite.
+%       N = 21: W21+, tridiagonal.   Eigenvalue problem.
+
+%       References:
+%       J.H. Wilkinson, Error analysis of direct methods of matrix inversion,
+%          J. Assoc. Comput. Mach., 8 (1961),  pp. 281-330.
+%       J.H. Wilkinson, Rounding Errors in Algebraic Processes, Notes on Applied
+%          Science No. 32, Her Majesty's Stationery Office, London, 1963.
+%       J.H. Wilkinson, The Algebraic Eigenvalue Problem, Oxford University
+%          Press, 1965.
+
+if n == 3
+   % Wilkinson (1961) p.323.
+   A = [ 1e-10   .9  -.4
+           0     .9  -.4
+           0      0  1e-10];
+   b = [   0      0    1]';
+
+elseif n == 4
+   % Wilkinson (1963) p.105.
+   A = [0.9143e-4  0          0          0
+        0.8762     0.7156e-4  0          0
+        0.7943     0.8143     0.9504e-4  0
+        0.8017     0.6123     0.7165     0.7123e-4];
+   b = [0.6524     0.3127     0.4186     0.7853]';
+
+elseif n == 5
+   % Wilkinson (1965), p.234.
+   A = hilb(6);
+   A = A(1:5, 2:6)*1.8144;
+
+elseif n == 21
+   % Taken from gallery.m.  Wilkinson (1965), p.308.
+   E = diag(ones(n-1,1),1);
+   m = (n-1)/2;
+   A = diag(abs(-m:m)) + E + E';
+
+else
+   error('Sorry, that value of N is not available.')
+end