changeset 1:e471a92d17be draft

Remove the toolbox folder containing The Test Matrix Toolbox (Higham, 1995), because it is not GPL-licensed.
author Antonio Pino Robles <data.script93@gmail.com>
date Wed, 06 May 2015 15:59:22 +0200
parents 8f23314345f4
children c124219d7bfa
files toolbox/augment.m toolbox/bandred.m toolbox/cauchy.m toolbox/chebspec.m toolbox/chebvand.m toolbox/cholp.m toolbox/chop.m toolbox/chow.m toolbox/circul.m toolbox/clement.m toolbox/cod.m toolbox/comp.m toolbox/compan.m toolbox/cond.m toolbox/condex.m toolbox/contents.m toolbox/cpltaxes.m toolbox/cycol.m toolbox/diagpiv.m toolbox/dingdong.m toolbox/dorr.m toolbox/dramadah.m toolbox/dual.m toolbox/eigsens.m toolbox/fdemo.m toolbox/fiedler.m toolbox/forsythe.m toolbox/frank.m toolbox/fv.m toolbox/gallery.m toolbox/gearm.m toolbox/gersh.m toolbox/gfpp.m toolbox/gj.m toolbox/grcar.m toolbox/hadamard.m toolbox/hanowa.m toolbox/hilb.m toolbox/house.m toolbox/invhess.m toolbox/invol.m toolbox/ipjfact.m toolbox/jordbloc.m toolbox/kahan.m toolbox/kms.m toolbox/krylov.m toolbox/lauchli.m toolbox/lehmer.m toolbox/lesp.m toolbox/lotkin.m toolbox/makejcf.m toolbox/matrix.m toolbox/matsignt.m toolbox/minij.m toolbox/moler.m toolbox/neumann.m toolbox/ohess.m toolbox/orthog.m toolbox/parter.m toolbox/pascal.m toolbox/pdtoep.m toolbox/pei.m toolbox/pentoep.m toolbox/pnorm.m toolbox/poisson.m toolbox/poldec.m toolbox/prolate.m toolbox/ps.m toolbox/pscont.m toolbox/qmult.m toolbox/rando.m toolbox/randsvd.m toolbox/redheff.m toolbox/riemann.m toolbox/rq.m toolbox/rschur.m toolbox/see.m toolbox/seqa.m toolbox/seqcheb.m toolbox/seqm.m toolbox/show.m toolbox/signm.m toolbox/skewpart.m toolbox/smoke.m toolbox/sparsify.m toolbox/sub.m toolbox/symmpart.m toolbox/tmtdemo.m toolbox/trap2tri.m toolbox/tridiag.m toolbox/triw.m toolbox/vand.m toolbox/wathen.m toolbox/wilk.m
diffstat 94 files changed, 0 insertions(+), 3777 deletions(-)
line diff
--- a/toolbox/augment.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,37 +0,0 @@
-function C = augment(A, alpha)
-%AUGMENT  Augmented system matrix.
-%         AUGMENT(A, ALPHA) is the square matrix
-%         [ALPHA*EYE(m) A; A' ZEROS(n)] of dimension m+n, where A is m-by-n.
-%         It is the symmetric and indefinite coefficient matrix of the
-%         augmented system associated with a least squares problem
-%         minimize NORM(A*x-b).  ALPHA defaults to 1.
-%         Special case: if A is a scalar, n say, then AUGMENT(A) is the
-%                       same as AUGMENT(RANDN(p,q)) where n = p+q and
-%                       p = ROUND(n/2), that is, a random augmented matrix
-%                       of dimension n is produced.
-%         The eigenvalues of AUGMENT(A) are given in terms of the singular
-%         values s(i) of A (where m>n) by
-%                  1/2 +/- SQRT( s(i)^2 + 1/4 ),  i=1:n  (2n eigenvalues),
-%                  1,  (m-n eigenvalues).
-%         If m < n then the first expression provides 2m eigenvalues and the
-%         remaining n-m eigenvalues are zero.
-%
-%         See also SPAUGMENT.
-
-%         Reference:
-%         G.H. Golub and C.F. Van Loan, Matrix Computations, Second
-%         Edition, Johns Hopkins University Press, Baltimore, Maryland,
-%         1989, sec. 5.6.4.
-
-[m, n] = size(A);
-if nargin < 2, alpha = 1; end
-
-if max(m,n) == 1
-   n = A;
-   p = round(n/2);
-   q = n - p;
-   A = randn(p,q);
-   m = p; n = q;
-end
-
-C = [alpha*eye(m) A; A' zeros(n)];
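
A quick sketch checking the eigenvalue formula above (assuming augment.m is on the path, with m > n):

    A = randn(5,3);                                % m = 5 > n = 3
    s = svd(A);
    lam_pos = 1/2 + sqrt(s.^2 + 1/4);
    lam_neg = 1/2 - sqrt(s.^2 + 1/4);
    lam = sort([lam_pos; lam_neg; ones(5-3,1)]);   % 2n values from the formula plus m-n ones
    norm(lam - sort(eig(augment(A))))              % small, of order eps
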
--- a/toolbox/bandred.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,55 +0,0 @@
-function A = bandred(A, kl, ku)
-%BANDRED  Band reduction by two-sided unitary transformations.
-%         B = BANDRED(A, KL, KU) is a matrix unitarily equivalent to A
-%         with lower bandwidth KL and upper bandwidth KU
-%         (i.e. B(i,j) = 0 if i > j+KL or j > i+KU).
-%         The reduction is performed using Householder transformations.
-%         If KU is omitted it defaults to KL.
-
-%         Called by RANDSVD.
-%         This is a `standard' reduction.  Cf. reduction to bidiagonal form
-%         prior to computing the SVD.  This code is a little wasteful in that
-%         it computes certain elements which are immediately set to zero!
-%
-%         Reference:
-%         G.H. Golub and C.F. Van Loan, Matrix Computations, second edition,
-%         Johns Hopkins University Press, Baltimore, Maryland, 1989.
-%         Section 5.4.3.
-
-if nargin == 2, ku = kl; end
-
-if kl == 0 & ku == 0
-   error('You''ve asked for a diagonal matrix.  In that case use the SVD!')
-end
-
-% Check for special case where order of left/right transformations matters.
-% Easiest approach is to work on the transpose, flipping back at the end.
-flip = 0;
-if ku == 0
-   A = A';
-   temp = kl; kl = ku; ku = temp; flip = 1;
-end
-
-[m, n] = size(A);
-
-for j=1 : min( min(m,n), max(m-kl-1,n-ku-1) )
-
-    if j+kl+1 <= m
-       [v, beta] = house(A(j+kl:m,j));
-       temp = A(j+kl:m,j:n);
-       A(j+kl:m,j:n) = temp - beta*v*(v'*temp);
-       A(j+kl+1:m,j) = zeros(m-j-kl,1);
-    end
-
-    if j+ku+1 <= n
-       [v, beta] = house(A(j,j+ku:n)');
-       temp = A(j:m,j+ku:n);
-       A(j:m,j+ku:n) = temp - beta*(temp*v)*v';
-       A(j,j+ku+1:n) = zeros(1,n-j-ku);
-    end
-
-end
-
-if flip
-   A = A';
-end
--- a/toolbox/cauchy.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-function C = cauchy(x, y)
-%CAUCHY  Cauchy matrix.
-%        C = CAUCHY(X, Y), where X, Y are N-vectors, is the N-by-N matrix
-%        with C(i,j) = 1/(X(i)+Y(j)).   By default, Y = X.
-%        Special case: if X is a scalar CAUCHY(X) is the same as CAUCHY(1:X).
-%        Explicit formulas are known for DET(C) (which is nonzero if X and Y
-%        both have distinct elements) and the elements of INV(C).
-%        C is totally positive if 0 < X(1) < ... < X(N) and
-%        0 < Y(1) < ... < Y(N).
-
-%        References:
-%        N.J. Higham, Accuracy and Stability of Numerical Algorithms,
-%           Society for Industrial and Applied Mathematics, Philadelphia, PA,
-%           USA, 1996; sec. 26.1.
-%        D.E. Knuth, The Art of Computer Programming, Volume 1,
-%           Fundamental Algorithms, second edition, Addison-Wesley, Reading,
-%           Massachusetts, 1973, p. 36.
-%        E.E. Tyrtyshnikov, Cauchy-Toeplitz matrices and some applications,
-%           Linear Algebra and Appl., 149 (1991), pp. 1-18.
-%        O. Taussky and M. Marcus, Eigenvalues of finite matrices, in
-%           Survey of Numerical Analysis, J. Todd, ed., McGraw-Hill, New York,
-%           pp. 279-313, 1962. (States the totally positive property on p. 295.)
-
-n = max(size(x));
-%  Handle scalar x.
-if n == 1
-   n = x;
-   x = 1:n;
-end
-
-if nargin == 1, y = x; end
-
-x = x(:); y = y(:);   % Ensure x and y are column vectors.
-if any(size(x) ~= size(y))
-   error('Parameter vectors must be of same dimension.')
-end
-
-C = x*ones(1,n) + ones(n,1)*y.';
-C = ones(n) ./ C;
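
One concrete instance of C(i,j) = 1/(X(i)+Y(j)): choosing X = Y = (1:N)-0.5 should reproduce the Hilbert matrix (a sketch, assuming cauchy.m is on the path):

    n = 6;
    x = (1:n) - 0.5;
    norm(cauchy(x) - hilb(n), 1)          % 0: 1/(x(i)+x(j)) = 1/(i+j-1)
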
--- a/toolbox/chebspec.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,53 +0,0 @@
-function C = chebspec(n, k)
-%CHEBSPEC  Chebyshev spectral differentiation matrix.
-%          C = CHEBSPEC(N, K) is a Chebyshev spectral differentiation
-%          matrix of order N.  K = 0 (the default) or 1.
-%          For K = 0 (`no boundary conditions'), C is nilpotent, with
-%              C^N = 0 and it has the null vector ONES(N,1).
-%              C is similar to a Jordan block of size N with eigenvalue zero.
-%          For K = 1, C is nonsingular and well-conditioned, and its eigenvalues
-%              have negative real parts.
-%          For both K, the computed eigenvector matrix X from EIG is
-%              ill-conditioned (MESH(REAL(X)) is interesting).
-
-%          References:
-%          C. Canuto, M.Y. Hussaini, A. Quarteroni and T.A. Zang, Spectral
-%             Methods in Fluid Dynamics, Springer-Verlag, Berlin, 1988; p. 69.
-%          L.N. Trefethen and M.R. Trummer, An instability phenomenon in
-%             spectral methods, SIAM J. Numer. Anal., 24 (1987), pp. 1008-1023.
-%          D. Funaro, Computing the inverse of the Chebyshev collocation
-%             derivative, SIAM J. Sci. Stat. Comput., 9 (1988), pp. 1050-1057.
-
-if nargin == 1, k = 0; end
-
-% k = 1 case obtained from k = 0 case with one bigger n.
-if k == 1, n = n + 1; end
-
-n = n-1;
-C = zeros(n+1);
-
-one = ones(n+1,1);
-x = cos( (0:n)' * (pi/n) );
-d = ones(n+1,1); d(1) = 2; d(n+1) = 2;
-
-% eye(size(C)) on next line avoids div by zero.
-C = (d * (one./d)') ./ (x*one'-one*x' + eye(size(C)));
-
-%  Now fix diagonal and signs.
-
-C(1,1) = (2*n^2+1)/6;
-for i=2:n+1
-    if rem(i,2) == 0
-       C(:,i) = -C(:,i);
-       C(i,:) = -C(i,:);
-    end
-    if i < n+1
-       C(i,i) = -x(i)/(2*(1-x(i)^2));
-    else
-       C(n+1,n+1) = -C(1,1);
-    end
-end
-
-if k == 1
-   C = C(2:n+1,2:n+1);
-end
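
A sketch checking the K = 0 properties stated above (nilpotency and the null vector), assuming chebspec.m is on the path; the exact zeros become roundoff-sized in floating point:

    n = 8;
    C = chebspec(n);                      % K = 0: `no boundary conditions'
    norm(C*ones(n,1))                     % ONES(N,1) is a null vector (tiny)
    norm(C^n) / norm(C)^n                 % C^N = 0 in exact arithmetic; roundoff-sized here
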
--- a/toolbox/chebvand.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,34 +0,0 @@
-function C = chebvand(m,p)
-%CHEBVAND  Vandermonde-like matrix for the Chebyshev polynomials.
-%          C = CHEBVAND(P), where P is a vector, produces the (primal)
-%          Chebyshev Vandermonde matrix based on the points P,
-%          i.e., C(i,j) = T_{i-1}(P(j)), where T_{i-1} is the Chebyshev
-%          polynomial of degree i-1.
-%          CHEBVAND(M,P) is a rectangular version of CHEBVAND(P) with M rows.
-%          Special case: If P is a scalar then P equally spaced points on
-%                        [0,1] are used.
-
-%           Reference:
-%           N.J. Higham, Stability analysis of algorithms for solving confluent
-%           Vandermonde-like systems, SIAM J. Matrix Anal. Appl., 11 (1990),
-%           pp. 23-41.
-
-if nargin == 1, p = m; end
-n = max(size(p));
-
-%  Handle scalar p.
-if n == 1
-   n = p;
-   p = seqa(0,1,n);
-end
-
-if nargin == 1, m = n; end
-
-p = p(:).';                    % Ensure p is a row vector.
-C = ones(m,n);
-if m == 1, return, end
-C(2,:) = p;
-%      Use Chebyshev polynomial recurrence.
-for i=3:m
-    C(i,:) = 2.*p.*C(i-1,:) - C(i-2,:);
-end
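
The recurrence builds C(i,j) = T_{i-1}(P(j)); for points in [-1,1] this can be compared with the closed form T_k(x) = cos(k*acos(x)). A sketch, assuming chebvand.m is on the path:

    p = linspace(-1, 1, 7);
    C = chebvand(5, p);                   % rows are T_0,...,T_4 evaluated at p
    T = cos((0:4)' * acos(p));            % closed form T_k(x) = cos(k*acos(x))
    norm(C - T, 1)                        % agreement to roundoff
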
--- a/toolbox/cholp.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,69 +0,0 @@
-function [R, P, I] = cholp(A, piv)
-%CHOLP  Cholesky factorization with pivoting of a pos. semidefinite matrix.
-%       [R, P] = CHOLP(A) returns R and a permutation matrix P such that
-%       R'*R = P'*A*P.  Only the upper triangular part of A is used.
-%       If A is not positive definite, an error message is printed.
-%
-%       [R, P, I] = CHOLP(A) never produces an error message.
-%       If A is positive definite then I = 0 and R is the Cholesky factor.
-%       If A is not positive definite then I is positive and
-%       R is (I-1)-by-N with P'*A*P - R'*R zeros in columns 1:I-1 and
-%       rows 1:I-1.
-%       [R, I] = CHOLP(A, 0) forces P = EYE(SIZE(A)), and therefore behaves
-%       like [R, I] = CHOL(A).
-
-%       This routine is based on the LINPACK routine CCHDC.  It works
-%       for both real and complex matrices.
-%
-%       Reference:
-%       G.H. Golub and C.F. Van Loan, Matrix Computations, Second
-%       Edition, Johns Hopkins University Press, Baltimore, Maryland,
-%       1989, sec. 4.2.9.
-
-if nargin == 1, piv = 1; end
-
-n = length(A);
-pp = 1:n;
-I = 0;
-
-for k = 1:n
-
-    if piv
-       d = diag(A);
-       [big, m] = max( d(k:n) );
-       m = m+k-1;
-    else
-       big = A(k,k);  m = k;
-    end
-    if big <= 0, I = k; break, end
-
-%   Symmetric row/column permutations.
-    if m ~= k
-       A(:, [k m]) = A(:, [m k]);
-       A([k m], :) = A([m k], :);
-       pp( [k m] ) = pp( [m k] );
-    end
-
-    A(k,k) = sqrt( A(k,k) );
-    if k == n, break, end
-    A(k, k+1:n) = A(k, k+1:n) / A(k,k);
-
-%   For simplicity update the whole of the remaining submatrix (rather
-%   than just the upper triangle).
-
-    j = k+1:n;
-    A(j,j) = A(j,j) - A(k,j)'*A(k,j);
-
-end
-
-R = triu(A);
-if I > 0
-    if nargout < 3, error('Matrix must be positive definite.'), end
-    R = R(1:I-1,:);
-end
-
-if piv == 0
-   P = I;
-else
-   P = eye(n); P = P(:,pp);
-end
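
A minimal check of the factorization R'*R = P'*A*P for a positive definite matrix (a sketch, assuming cholp.m is on the path):

    A = hilb(6);                          % symmetric positive definite
    [R, P] = cholp(A);
    norm(P'*A*P - R'*R, 1)                % small relative to NORM(A,1)
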
--- a/toolbox/chop.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,18 +0,0 @@
-function c = chop(x, t)
-%CHOP    Round matrix elements.
-%        CHOP(X, t) is the matrix obtained by rounding the elements of X
-%        to t significant binary places.
-%        Default is t = 24, corresponding to IEEE single precision.
-
-if nargin < 2, t = 24; end
-[m, n] = size(x);
-
-%  Use the representation:
-%  x(i,j) = 2^e(i,j) * .d(1)d(2)...d(s) * sign(x(i,j))
-
-%  On the next line `+(x==0)' avoids passing a zero argument to LOG, which
-%  would cause a warning message to be generated.
-
-y = abs(x) + (x==0);
-e = floor(log2(y) + 1);
-c = pow2(round( pow2(x, t-e) ), e-t);
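
A small sketch of the rounding behaviour; the relative change should not exceed about 2^(-t):

    x = randn(4);
    c = chop(x, 10);                      % keep 10 significant bits
    max(max(abs(c - x) ./ abs(x)))        % no larger than about 2^(-10)
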
--- a/toolbox/chow.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,21 +0,0 @@
-function A = chow(n, alpha, delta)
-%CHOW    Chow matrix - a singular Toeplitz lower Hessenberg matrix.
-%        A = CHOW(N, ALPHA, DELTA) is a Toeplitz lower Hessenberg matrix
-%        A = H(ALPHA) + DELTA*EYE, where H(i,j) = ALPHA^(i-j+1).
-%        H(ALPHA) has p = FLOOR(N/2) zero eigenvalues, the rest being
-%        4*ALPHA*COS( k*PI/(N+2) )^2, k=1:N-p.
-%        Defaults: ALPHA = 1, DELTA = 0.
-
-%        References:
-%        T.S. Chow, A class of Hessenberg matrices with known
-%           eigenvalues and inverses, SIAM Review, 11 (1969), pp. 391-395.
-%        G. Fairweather, On the eigenvalues and eigenvectors of a class of
-%           Hessenberg matrices, SIAM Review, 13 (1971), pp. 220-221.
-%        I. Singh, G. Poole and T. Boullion, A class of Hessenberg matrices
-%           with known pseudoinverse and Drazin inverse, Math. Comp.,
-%           29 (1975), pp. 615-619.
-
-if nargin < 3, delta = 0; end
-if nargin < 2, alpha = 1; end
-
-A = toeplitz( alpha.^(1:n), [alpha 1 zeros(1,n-2)] ) + delta*eye(n);
--- a/toolbox/circul.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,25 +0,0 @@
-function C = circul(v)
-%CIRCUL  Circulant matrix.
-%        C = CIRCUL(V) is the circulant matrix whose first row is V.
-%        (A circulant matrix has the property that each row is obtained
-%        from the previous one by cyclically permuting the entries one step
-%        forward; it is a special Toeplitz matrix in which the diagonals
-%        `wrap round'.)
-%        Special case: if V is a scalar then C = CIRCUL(1:V).
-%        The eigensystem of C (N-by-N) is known explicitly.   If t is an Nth
-%        root of unity, then the inner product of V with W = [1 t t^2 ... t^(N-1)]
-%        is an eigenvalue of C, and W(N:-1:1) is an eigenvector of C.
-
-%        Reference:
-%        P.J. Davis, Circulant Matrices, John Wiley, 1977.
-
-n = max(size(v));
-
-if n == 1
-   n = v;
-   v = 1:n;
-end
-
-v = v(:).';   % Make sure v is a row vector.
-
-C = toeplitz( [ v(1) v(n:-1:2) ], v );
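
A spot check of the eigenvalue description above (a sketch, assuming circul.m is on the path): for an Nth root of unity t, the inner product V*W.' should match one of the eigenvalues.

    v = 1:5;  n = 5;
    t = exp(2*sqrt(-1)*pi/n);             % a primitive nth root of unity
    w = t.^(0:n-1);                       % w = [1 t t^2 ... t^(n-1)]
    min(abs(eig(circul(v)) - v*w.'))      % essentially zero
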
--- a/toolbox/clement.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,38 +0,0 @@
-function A = clement(n, k)
-%CLEMENT   Clement matrix - tridiagonal with zero diagonal entries.
-%          CLEMENT(N, K) is a tridiagonal matrix with zero diagonal entries
-%          and known eigenvalues.  It is singular if N is odd.  About 64
-%          percent of the entries of the inverse are zero.  The eigenvalues
-%          are plus and minus the numbers N-1, N-3, N-5, ..., (1 or 0).
-%          For K = 0 (the default) the matrix is unsymmetric, while for
-%          K = 1 it is symmetric.
-%          CLEMENT(N, 1) is diagonally similar to CLEMENT(N).
-
-%          Similar properties hold for TRIDIAG(X,Y,Z) where Y = ZEROS(N,1).
-%          The eigenvalues still come in plus/minus pairs but they are not
-%          known explicitly.
-%
-%          References:
-%          P.A. Clement, A class of triple-diagonal matrices for test
-%             purposes, SIAM Review, 1 (1959), pp. 50-52.
-%          A. Edelman and E. Kostlan, The road from Kac's matrix to Kac's
-%             random polynomials. In John~G. Lewis, editor, Proceedings of
-%             the Fifth SIAM Conference on Applied Linear Algebra Society
-%             for Industrial and Applied Mathematics, Philadelphia, 1994,
-%             pp. 503-507.
-%          O. Taussky and J. Todd, Another look at a matrix of Mark Kac,
-%             Linear Algebra and Appl., 150 (1991), pp. 341-360.
-
-if nargin == 1, k = 0; end
-
-n = n-1;
-
-x = n:-1:1;
-z = 1:n;
-
-if k == 0
-   A = diag(x, -1) + diag(z, 1);
-else
-   y = sqrt(x.*z);
-   A = diag(y, -1) + diag(y, 1);
-end
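
A quick verification of the stated eigenvalues (a sketch, assuming clement.m is on the path):

    n = 6;
    e = sort(eig(clement(n, 1)));         % K = 1: symmetric form, same eigenvalues
    norm(e - (-(n-1):2:n-1)')             % eigenvalues are -5, -3, -1, 1, 3, 5
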
--- a/toolbox/cod.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,38 +0,0 @@
-function [U, R, V] = cod(A, tol)
-%COD    Complete orthogonal decomposition.
-%       [U, R, V] = COD(A, TOL) computes a decomposition A = U*T*V,
-%       where U and V are unitary, T = [R 0; 0 0] has the same dimensions as
-%       A, and R is upper triangular and nonsingular of dimension rank(A).
-%       Rank decisions are made using TOL, which defaults to approximately
-%       MAX(SIZE(A))*NORM(A)*EPS.
-%       By itself, COD(A, TOL) returns R.
-
-%       Reference:
-%       G.H. Golub and C.F. Van Loan, Matrix Computations, Second
-%       Edition, Johns Hopkins University Press, Baltimore, Maryland,
-%       1989, sec. 5.4.2.
-
-[m, n] = size(A);
-
-% QR decomposition.
-[U, R, P] = qr(A);    % AP = UR
-V = P';               % A = URV;
-if nargin == 1, tol = max(m,n)*eps*abs(R(1,1)); end  % |R(1,1)| approx NORM(A).
-
-% Determine r = effective rank.
-r = sum(abs(diag(R)) > tol);
-r = r(1);             % Fix for case where R is vector.
-R = R(1:r,:);         % Throw away negligible rows (incl. all zero rows, m>n).
-
-if r ~= n
-
-   % Reduce nxr R' =  r  [L]  to lower triangular form: QR' = [Lbar].
-   %                 n-r [M]                                  [0]
-
-   [Q, R] = trap2tri(R');
-   V = Q*V;
-   R = R';
-
-end
-
-if nargout <= 1, U = R; end
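
A sketch of the advertised decomposition A = U*T*V with T = [R 0; 0 0] (assuming cod.m and trap2tri.m are on the path):

    A = randn(7,4) * randn(4,6);          % 7-by-6, rank 4
    [U, R, V] = cod(A);
    [r, c] = size(R);
    T = zeros(size(A));  T(1:r,1:c) = R;  % T = [R 0; 0 0], same size as A
    norm(A - U*T*V, 1)                    % small relative to NORM(A,1)
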
--- a/toolbox/comp.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,41 +0,0 @@
-function C = comp(A, k)
-%COMP    Comparison matrices.
-%        COMP(A) is DIAG(B) - TRIL(B,-1) - TRIU(B,1), where B = ABS(A).
-%        COMP(A, 1) is A with each diagonal element replaced by its
-%        absolute value, and each off-diagonal element replaced by minus
-%        the absolute value of the largest element in absolute value in
-%        its row.  However, if A is triangular COMP(A, 1) is too.
-%        COMP(A, 0) is the same as COMP(A).
-%        COMP(A) is often denoted by M(A) in the literature.
-
-%        Reference (e.g.):
-%        N.J. Higham, A survey of condition number estimation for
-%        triangular matrices, SIAM Review, 29 (1987), pp. 575-596.
-
-if nargin == 1, k = 0; end
-[m, n] = size(A);
-p = min(m, n);
-
-if k == 0
-
-% This code uses less temporary storage than the `high level' definition above.
-   C = -abs(A);
-   for j=1:p
-     C(j,j) = abs(A(j,j));
-   end
-
-elseif k == 1
-
-   C = A';
-   for j=1:p
-       C(j,j) = 0;
-   end
-   mx = max(abs(C));
-   C = -mx'*ones(1,n);
-   for j=1:p
-       C(j,j) = abs(A(j,j));
-   end
-   if all( A == tril(A) ), C = tril(C); end
-   if all( A == triu(A) ), C = triu(C); end
-
-end
--- a/toolbox/compan.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,45 +0,0 @@
-function A = compan(p)
-%COMPAN  Companion matrix.
-%        COMPAN(P) is a companion matrix.  There are three cases.
-%        If P is a scalar then COMPAN(P) is the P-by-P matrix COMPAN(1:P+1).
-%        If P is an (n+1)-vector, COMPAN(P) is the n-by-n companion matrix
-%           whose first row is -P(2:n+1)/P(1).
-%        If P is a square matrix, COMPAN(P) is the companion matrix
-%           of the characteristic polynomial of P, computed as
-%           COMPAN(POLY(P)).
-
-%        References:
-%        J.H. Wilkinson, The Algebraic Eigenvalue Problem,
-%           Oxford University Press, 1965, p. 12.
-%        G.H. Golub and C.F. Van Loan, Matrix Computations, second edition,
-%           Johns Hopkins University Press, Baltimore, Maryland, 1989,
-%           sec 7.4.6.
-%        C. Kenney and A.J. Laub, Controllability and stability radii for
-%          companion form systems, Math. Control Signals Systems, 1 (1988),
-%          pp. 239-256. (Gives explicit formulas for the singular values of
-%          COMPAN(P).)
-
-[n,m] = size(p);
-
-if n == m & n > 1
-   % Matrix argument.
-   A = compan(poly(p));
-   return
-end
-
-n = max(n,m);
-%  Handle scalar p.
-if n == 1
-   n = p+1;
-   p = 1:n;
-end
-
-p = p(:)';                    % Ensure p is a row vector.
-
-% Construct matrix of order n-1.
-if n == 2
-   A = 1;
-else
-    A = diag(ones(1,n-2),-1);
-    A(1,:) = -p(2:n)/p(1);
-end
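
Since the eigenvalues of the companion matrix are the roots of the polynomial, a one-line sanity check is possible (a sketch, assuming this compan.m is used):

    p = [1 -6 11 -6];                     % (x-1)*(x-2)*(x-3)
    sort(eig(compan(p)))                  % approximately [1 2 3]'
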
--- a/toolbox/cond.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,38 +0,0 @@
-function y = cond(A, p)
-%COND   Matrix condition number in 1, 2, Frobenius, or infinity norm.
-%       For p = 1, 2, 'fro', inf,  COND(A,p) = NORM(A,p) * NORM(INV(A),p).
-%       If p is omitted then p = 2 is used.
-%       A may be a rectangular matrix if p = 2; in this case COND(A)
-%       is the ratio of the largest singular value of A to the smallest
-%       (and hence is infinite if A is rank deficient).
-
-%	See also RCOND, NORM, CONDEST, NORMEST.
-%       Generalises and incorporates MATFUN/COND.M from Matlab 4.
-
-if length(A) == 0  % Handle null matrix.
-    y = NaN;
-    return
-end
-if issparse(A)
-    error('Matrix must be non-sparse.')
-end
-
-if nargin == 1, p = 2; end
-
-[m, n] = size(A);
-if m ~= n & p ~= 2
-   error('A is rectangular.  Use the 2 norm.')
-end
-
-if p == 2
-   s = svd(A);
-   if any(s == 0)   % Handle singular matrix
-        disp('Condition is infinite')
-        y = Inf;
-        return
-   end
-   y = max(s)./min(s);
-else
-%  We'll let NORM pick up any invalid p argument.
-   y = norm(A, p) * norm(inv(A), p);
-end
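
For a rectangular matrix the 2-norm condition number is the ratio of the extreme singular values, as a short sketch illustrates (using this version of COND):

    A = randn(8,5);
    s = svd(A);
    [cond(A), max(s)/min(s)]              % identical by construction
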
--- a/toolbox/condex.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,63 +0,0 @@
-function A = condex(n, k, theta)
-%CONDEX   `Counterexamples' to matrix condition number estimators.
-%         CONDEX(N, K, THETA) is a `counterexample' matrix to a condition
-%         estimator.  It has order N and scalar parameter THETA (default 100).
-%         If N is not equal to the `natural' size of the matrix then
-%         the matrix is padded out with an identity matrix to order N.
-%         The matrix, its natural size, and the estimator to which it applies
-%         are specified by K (default K = 4) as follows:
-%             K = 1:   4-by-4,     LINPACK (RCOND)
-%             K = 2:   3-by-3,     LINPACK (RCOND)
-%             K = 3:   arbitrary,  LINPACK (RCOND) (independent of THETA)
-%             K = 4:   N >= 4,     SONEST (Higham 1988)
-%         (Note that in practice the K = 4 matrix is not usually a
-%          counterexample because of the rounding errors in forming it.)
-
-%         References:
-%         A.K. Cline and R.K. Rew, A set of counter-examples to three
-%            condition number estimators, SIAM J. Sci. Stat. Comput.,
-%            4 (1983), pp. 602-611.
-%         N.J. Higham, FORTRAN codes for estimating the one-norm of a real or
-%            complex matrix, with applications to condition estimation
-%            (Algorithm 674), ACM Trans. Math. Soft., 14 (1988), pp. 381-396.
-
-if nargin < 3, theta = 100; end
-if nargin < 2, k = 4; end
-
-if k == 1    % Cline and Rew (1983), Example B.
-
-   A = [1  -1  -2*theta     0
-        0   1     theta  -theta
-        0   1   1+theta  -(theta+1)
-        0   0   0         theta];
-
-elseif k == 2   % Cline and Rew (1983), Example C.
-
-   A = [1   1-2/theta^2  -2
-        0   1/theta      -1/theta
-        0   0             1];
-
-elseif k == 3    % Cline and Rew (1983), Example D.
-
-    A = triw(n,-1)';
-    A(n,n) = -1;
-
-elseif k == 4    % Higham (1988), p. 390.
-
-    x = ones(n,3);            %  First col is e
-    x(2:n,2) = zeros(n-1,1);  %  Second col is e(1)
-
-    % Third col is special vector b in SONEST
-    x(:, 3) = (-1).^[0:n-1]' .* ( 1 + [0:n-1]'/(n-1) );
-
-    Q = orth(x);  %  Q*Q' is now the orthogonal projector onto span(e(1),e,b).
-    P = eye(n) - Q*Q';
-    A = eye(n) + theta*P;
-
-end
-
-% Pad out with identity as necessary.
-[m, m] = size(A);
-if m < n
-   for i=n:-1:m+1, A(i,i) = 1; end
-end
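
A sketch of how the K = 1 counterexample might be exercised, comparing RCOND's estimate with the true 1-norm condition number (assuming condex.m is on the path):

    A = condex(4, 1, 100);                % 4-by-4 LINPACK (RCOND) counterexample
    [1/rcond(A), cond(A,1)]               % RCOND's estimate vs. the true value
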
--- a/toolbox/contents.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,114 +0,0 @@
-% Test Matrix Toolbox.
-% Version 3.0, September 19 1995
-% Copyright (c) 1995 by N. J. Higham
-%
-% Demonstration
-%TMTDEMO   Demonstration of Test Matrix Toolbox.
-%
-% Test Matrices
-%AUGMENT   Augmented system matrix.
-%CAUCHY    Cauchy matrix.
-%CHEBSPEC  Chebyshev spectral differentiation matrix.
-%CHEBVAND  Vandermonde-like matrix for the Chebyshev polynomials.
-%CHOW      Chow matrix - a singular Toeplitz lower Hessenberg matrix.
-%CIRCUL    Circulant matrix.
-%CLEMENT   Clement matrix - tridiagonal with zero diagonal entries.
-%COMPAN    Companion matrix.
-%CONDEX    `Counterexamples' to matrix condition estimators.
-%CYCOL     Matrix whose columns repeat cyclically.
-%DINGDONG  Dingdong matrix - a symmetric Hankel matrix.
-%DORR      Dorr matrix - diag. dominant, ill-conditioned, tridiagonal (sparse).
-%DRAMADAH  A (0,1) matrix with large inverse.
-%FIEDLER   Fiedler matrix - symmetric.
-%FORSYTHE  Forsythe matrix - a perturbed Jordan block.
-%FRANK     Frank matrix - ill-conditioned eigenvalues.
-%GALLERY   Famous, and not so famous, test matrices.
-%GEARM     Gear matrix.
-%GFPP      Matrix giving maximal growth factor for Gaussian elim. with pivoting.
-%GRCAR     Grcar matrix - a Toeplitz matrix with sensitive eigenvalues.
-%HADAMARD  Hadamard matrix.
-%HANOWA    Hanowa matrix.
-%HILB      Hilbert matrix.
-%INVHESS   Inverse of an upper Hessenberg matrix.
-%INVOL     An involutory matrix.
-%IPJFACT   A Hankel matrix with factorial elements.
-%JORDBLOC  Jordan block.
-%KAHAN     Kahan matrix - upper trapezoidal.
-%KMS       Kac-Murdock-Szego Toeplitz matrix.
-%KRYLOV    Krylov matrix.
-%LAUCHLI   Lauchli matrix.
-%LEHMER    Lehmer matrix - symmetric positive definite.
-%LESP      A tridiagonal matrix with real, sensitive eigenvalues.
-%LOTKIN    Lotkin matrix.
-%MAKEJCF   A matrix with given Jordan canonical form.
-%MINIJ     Symmetric positive definite matrix MIN(i,j).
-%MOLER     Moler matrix - symmetric positive definite.
-%NEUMANN   Singular matrix from the discrete Neumann problem.
-%OHESS     Random, orthogonal upper Hessenberg matrix.
-%ORTHOG    Orthogonal and nearly orthogonal matrices.
-%PARTER    Parter matrix - a Toeplitz matrix with singular values near PI.
-%PASCAL    Pascal matrix.
-%PDTOEP    Symmetric positive definite Toeplitz matrix.
-%PEI       Pei matrix.
-%PENTOEP   Pentadiagonal Toeplitz matrix (sparse).
-%POISSON   Block tridiagonal matrix from Poisson's equation (sparse).
-%PROLATE   Prolate matrix - symmetric, ill-conditioned Toeplitz matrix.
-%RANDO     Random matrix with elements -1, 0 or 1.
-%RANDSVD   Random matrices with pre-assigned singular values.
-%REDHEFF   A matrix of 0s and 1s of Redheffer.
-%RIEMANN   A matrix associated with the Riemann hypothesis.
-%RSCHUR    An upper quasi-triangular matrix.
-%SMOKE     Smoke matrix - complex, with a `smoke ring' pseudospectrum.
-%TRIDIAG   Tridiagonal matrix (sparse).
-%TRIW      Upper triangular matrix discussed by Wilkinson and others.
-%VAND      Vandermonde matrix.
-%WATHEN    Wathen matrix - a finite element matrix (sparse, random entries).
-%WILK      Various specific matrices devised/discussed by Wilkinson.
-%
-% Visualization
-%FV        Field of values (or numerical range).
-%GERSH     Gershgorin disks.
-%PS        Approximation to the pseudospectrum.
-%PSCONT    Contours and colour pictures of pseudospectra.
-%SEE       Pictures of a matrix and its (pseudo-) inverse.
-%
-% Decompositions and factorizations.
-%CGS       Gram-Schmidt QR factorization.
-%CHOLP     Cholesky factorization with pivoting of a pos. semidefinite matrix.
-%COD       Complete orthogonal decomposition.
-%DIAGPIV   Diagonal pivoting factorization with partial pivoting.
-%GE        Gaussian elimination without pivoting.
-%GECP      Gaussian elimination with complete pivoting.
-%GJ        Gauss-Jordan elimination to solve Ax = b.
-%MGS       Modified Gram-Schmidt QR factorization.
-%POLDEC    Polar decomposition.
-%SIGNM     Matrix sign decomposition.
-%
-% Direct Search Optimization.
-%ADSMAX  Alternating directions direct search method.
-%MDSMAX  Multidirectional search method for direct search optimization.
-%NMSMAX  Nelder-Mead simplex method for direct search optimization.
-%
-% Miscellaneous
-%BANDRED   Band reduction by two-sided orthogonal transformations.
-%CHOP      Round matrix elements.
-%COMP      Comparison matrices.
-%COND      Matrix condition number in 1, 2, Frobenius, or infinity norm.
-%CPLTAXES  Determine suitable AXIS for plot of complex vector.
-%DUAL      Dual vector with respect to Holder p-norm.
-%EIGSENS   Eigenvalue condition numbers.
-%HOUSE     Householder matrix.
-%MATRIX    Test Matrix Collection information and access by number.
-%MATSIGNT  Matrix sign function of a triangular matrix.
-%PNORM     Estimate of matrix p-norm (1 <= p <= inf).
-%QMULT     Pre-multiply by random orthogonal matrix.
-%RQ        Rayleigh quotient.
-%SEQA      An additive sequence.
-%SEQCHEB   Sequence of points related to Chebyshev polynomials, T_N.
-%SEQM      A multiplicative sequence.
-%SHOW      Display signs of matrix elements.
-%SKEWPART  Skew-symmetric (Hermitian) part.
-%SPARSIFY  Randomly sets matrix elements to zero.
-%SUB       Principal submatrix.
-%SYMMPART  Symmetric (Hermitian) part.
-%TRAP2TRI  Trapezoidal matrix to triangular form.
--- a/toolbox/cpltaxes.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,37 +0,0 @@
-function x = cpltaxes(z)
-%CPLTAXES   Determine suitable AXIS for plot of complex vector.
-%           X = CPLTAXES(Z), where Z is a complex vector,
-%           determines a 4-vector X such that AXIS(X) sets axes for a plot
-%           of Z that has axes of equal length and leaves a reasonable amount
-%           of space around the edge of the plot.
-
-%           Called by FV, GERSH, PS and PSCONT.
-
-% Set x and y axis ranges so both have the same length.
-
-xmin = min(real(z)); xmax = max(real(z));
-ymin = min(imag(z)); ymax = max(imag(z));
-
-% Fix for rare case of `trivial data'.
-if xmin == xmax, xmin = xmin - 1/2; xmax = xmax + 1/2; end
-if ymin == ymax, ymin = ymin - 1/2; ymax = ymax + 1/2; end
-
-if xmax-xmin >= ymax-ymin
-   ymid = (ymin + ymax)/2;
-   ymin =  ymid - (xmax-xmin)/2; ymax = ymid + (xmax-xmin)/2;
-else
-   xmid = (xmin + xmax)/2;
-   xmin = xmid - (ymax-ymin)/2; xmax = xmid + (ymax-ymin)/2;
-end
-axis('square')
-
-% Scale ranges by 1+2*alpha to give extra space around edges of plot.
-
-alpha = 0.1;
-x(1) = xmin - alpha*(xmax-xmin);
-x(2) = xmax + alpha*(xmax-xmin);
-x(3) = ymin - alpha*(ymax-ymin);
-x(4) = ymax + alpha*(ymax-ymin);
-
-if x(1) == x(2), x(2) = x(2) + 0.1; end
-if x(3) == x(4), x(4) = x(3) + 0.1; end
--- a/toolbox/cycol.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,22 +0,0 @@
-function A = cycol(n, k)
-%CYCOL   Matrix whose columns repeat cyclically.
-%        A = CYCOL([M N], K) is an M-by-N matrix of the form A = B(1:M,1:N)
-%        where B = [C C C...] and C = RANDN(M, K).  Thus A's columns repeat
-%        cyclically, and A has rank at most K.   K need not divide N.
-%        K defaults to ROUND(N/4).
-%        CYCOL(N, K), where N is a scalar, is the same as CYCOL([N N], K).
-%
-%        This type of matrix can lead to underflow problems for Gaussian
-%        elimination: see NA Digest Volume 89, Issue 3 (January 22, 1989).
-
-m = n(1);              % Parameter n specifies dimension: m-by-n.
-n = n(max(size(n)));
-
-if nargin < 2, k = max(round(n/4),1); end
-
-A = randn(m, k);
-for i=2:ceil(n/k)
-    A = [A A(:,1:k)];
-end
-
-A = A(:, 1:n);
--- a/toolbox/diagpiv.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,108 +0,0 @@
-function [L, D, P, rho] = diagpiv(A)
-%DIAGPIV     Diagonal pivoting factorization with partial pivoting.
-%            Given a Hermitian matrix A,
-%            [L, D, P, rho] = DIAGPIV(A) computes a permutation P,
-%            a unit lower triangular L, and a real block diagonal D
-%            with 1x1 and 2x2 diagonal blocks, such that
-%            P*A*P' = L*D*L'.
-%            The Bunch-Kaufman partial pivoting strategy is used.
-%            Rho is the growth factor.
-
-%            Reference:
-%            J.R. Bunch and L. Kaufman, Some stable methods for calculating
-%            inertia and solving symmetric linear systems, Math. Comp.,
-%            31(137):163-179, 1977.
-
-%            This routine does not exploit symmetry and is not designed to be
-%            efficient.
-
-if norm(triu(A,1)'-tril(A,-1),1), error('Must supply Hermitian matrix.'), end
-
-n = max(size(A));
-k = 1;
-D = eye(n);
-L = eye(n);
-pp = 1:n;
-normA = norm(A(:),inf);
-rho = normA;
-
-alpha = (1 + sqrt(17))/8;
-
-while k < n
-      [lambda, r] = max( abs(A(k+1:n,k)) );
-      r = r(1) + k;
-
-      if lambda > 0
-          swap = 0;
-          if abs(A(k,k)) >= alpha*lambda
-             s = 1;
-          else
-             temp = A(k:n,r); temp(r-k+1) = 0;
-             sigma = norm(temp, inf);
-             if alpha*lambda^2 <= abs(A(k,k))*sigma
-                s = 1;
-             elseif abs(A(r,r)) >= alpha*sigma
-                swap = 1;
-                m1 = k; m2 = r;
-                s = 1;
-             else
-                swap = 1;
-                m1 = k+1; m2 = r;
-                s = 2;
-             end
-          end
-
-          if swap
-             A( [m1, m2], : ) = A( [m2, m1], : );
-             L( [m1, m2], : ) = L( [m2, m1], : );
-             A( :, [m1, m2] ) = A( :, [m2, m1] );
-             L( :, [m1, m2] ) = L( :, [m2, m1] );
-             pp( [m1, m2] ) = pp( [m2, m1] );
-          end
-
-          if s == 1
-
-             D(k,k) = A(k,k);
-             A(k+1:n,k) = A(k+1:n,k)/A(k,k);
-             L(k+1:n,k) = A(k+1:n,k);
-             i = k+1:n;
-             A(i,i) = A(i,i) - A(i,k) * A(k,i);
-
-          elseif s == 2
-
-             E = A(k:k+1,k:k+1);
-             D(k:k+1,k:k+1) = E;
-             C = A(k+2:n,k:k+1);
-             temp = C/E;
-             L(k+2:n,k:k+1) = temp;
-             A(k+2:n,k+2:n) = A(k+2:n,k+2:n) - temp*C';
-
-         end
-
-         % Make diagonal real (see LINPACK User's Guide, p. 5.17).
-         for i=k+s:n
-             A(i,i) = real(A(i,i));
-         end
-
-         if k+s <= n
-            rho = max(rho, max(max(abs(A(k+s:n,k+s:n)))) );
-         end
-
-      else  % Nothing to do.
-
-         s = 1;
-         D(k,k) = A(k,k);
-
-      end
-
-      k = k + s;
-
-      if k == n
-         D(n,n) = A(n,n);
-         break
-      end
-
-end
-
-if nargout >= 3, P = eye(n); P = P(pp,:); end
-rho = rho/normA;
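
A minimal check of the factorization P*A*P' = L*D*L' (a sketch, assuming diagpiv.m is on the path):

    A = randn(6);  A = A + A';            % real symmetric test matrix
    [L, D, P, rho] = diagpiv(A);
    norm(P*A*P' - L*D*L', 1)              % small relative to NORM(A,1)
    rho                                   % growth factor, bounded by about 2.57^(n-1)
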
--- a/toolbox/dingdong.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,15 +0,0 @@
-function A = dingdong(n)
-%DINGDONG  Dingdong matrix - a symmetric Hankel matrix.
-%          A = DINGDONG(N) is the symmetric N-by-N Hankel matrix with
-%                         A(i,j) = 0.5/(N-i-j+1.5).
-%          The eigenvalues of A cluster around PI/2 and -PI/2.
-
-%          Invented by F.N. Ris.
-%
-%          Reference:
-%          J.C. Nash, Compact Numerical Methods for Computers: Linear
-%          Algebra and Function Minimisation, second edition, Adam Hilger,
-%          Bristol, 1990 (Appendix 1).
-
-p= -2*(1:n) + (n+1.5);
-A = cauchy(p);
--- a/toolbox/dorr.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,41 +0,0 @@
-function [c, d, e] = dorr(n, theta)
-%DORR  Dorr matrix - diagonally dominant, ill conditioned, tridiagonal.
-%      [C, D, E] = DORR(N, THETA) returns the vectors defining a row diagonally
-%      dominant, tridiagonal M-matrix that is ill conditioned for small
-%      values of the parameter THETA >= 0.
-%      If only one output parameter is supplied then
-%      C = FULL(TRIDIAG(C,D,E)), i.e., the matrix itself is returned.
-%      The columns of INV(C) vary greatly in norm.  THETA defaults to 0.01.
-%      The amount of diagonal dominance is given by (ignoring rounding errors):
-%            COMP(C)*ONES(N,1) = THETA*(N+1)^2 * [1 0 0 ... 0 1]'.
-
-%      Reference:
-%      F.W. Dorr, An example of ill-conditioning in the numerical
-%      solution of singular perturbation problems, Math. Comp., 25 (1971),
-%      pp. 271-283.
-
-if nargin < 2, theta = 0.01; end
-
-c = zeros(n,1); e = c; d = c;
-% All length n for convenience.  Make c, e of length n-1 later.
-
-h = 1/(n+1);
-m = floor( (n+1)/2 );
-term = theta/h^2;
-
-i = (1:m)';
-    c(i) = -term*ones(m,1);
-    e(i) = c(i) - (0.5-i*h)/h;
-    d(i) = -(c(i) + e(i));
-
-i = (m+1:n)';
-    e(i) = -term*ones(n-m,1);
-    c(i) = e(i) + (0.5-i*h)/h;
-    d(i) = -(c(i) + e(i));
-
-c = c(2:n);
-e = e(1:n-1);
-
-if nargout <= 1
-   c = tridiag(c, d, e);
-end
--- a/toolbox/dramadah.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,55 +0,0 @@
-function A = dramadah(n, k)
-%DRAMADAH  A (0,1) matrix whose inverse has large integer entries.
-%          An anti-Hadamard matrix A is a matrix with elements 0 or 1 for
-%          which MU(A) := NORM(INV(A),'FRO') is maximal.
-%          A = DRAMADAH(N, K) is an N-by-N (0,1) matrix for which MU(A) is
-%          relatively large, although not necessarily maximal.
-%          Available types (the default is K = 1):
-%          K = 1: A is Toeplitz, with ABS(DET(A)) = 1, and MU(A) > c(1.75)^N,
-%                 where c is a constant.
-%          K = 2: A is upper triangular and Toeplitz.
-%          The inverses of both types have integer entries.
-%
-%          Another interesting (0,1) matrix:
-%          K = 3: A has maximal determinant among (0,1) lower Hessenberg
-%          matrices: det(A) = the n'th Fibonacci number.  A is Toeplitz.
-%          The eigenvalues have an interesting distribution in the complex
-%          plane.
-
-%          References:
-%          R.L. Graham and N.J.A. Sloane, Anti-Hadamard matrices,
-%             Linear Algebra and Appl., 62 (1984), pp. 113-137.
-%          L. Ching, The maximum determinant of an nxn lower Hessenberg
-%             (0,1) matrix, Linear Algebra and Appl., 183 (1993), pp. 147-153.
-
-if nargin < 2, k = 1; end
-
-if k == 1  % Toeplitz
-
-   c = ones(n,1);
-   for i=2:4:n
-       m = min(1,n-i);
-       c(i:i+m) = zeros(m+1,1);
-   end
-   r = zeros(n,1);
-   r(1:4) = [1 1 0 1];
-   if n < 4, r = r(1:n); end
-   A = toeplitz(c,r);
-
-elseif k == 2  % Upper triangular and Toeplitz
-
-   c = zeros(n,1);
-   c(1) = 1;
-   r = ones(n,1);
-   for i=3:2:n
-       r(i) = 0;
-   end
-   A = toeplitz(c,r);
-
-elseif k == 3  % Lower Hessenberg.
-
-   c = ones(n,1);
-   for i=2:2:n, c(i)=0; end;
-   A = toeplitz(c, [1 1 zeros(1,n-2)]);
-
-end
--- a/toolbox/dual.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,47 +0,0 @@
-function y = dual(x, p)
-%DUAL    Dual vector with respect to Holder p-norm.
-%        Y = DUAL(X, p), where 1 <= p <= inf, is a vector of unit q-norm
-%        that is dual to X with respect to the p-norm, that is,
-%        norm(Y, q) = 1 where 1/p + 1/q = 1 and there is
-%        equality in the Holder inequality: X'*Y = norm(X, p)*norm(Y, q).
-%        Special case: DUAL(X), where X >= 1 is a scalar, returns Y such
-%                      that 1/X + 1/Y = 1.
-
-%        Called by PNORM.
-
-if max(size(x)) == 1 & nargin == 1
-   p = x;
-end
-
-% The following test avoids a `division by zero message' when p = 1.
-if p == 1
-   q = inf;
-else
-   q = 1/(1-1/p);
-end
-
-if max(size(x)) == 1 & nargin == 1
-   y = q;
-   return
-end
-
-if norm(x,inf) == 0, y = x; return, end
-
-if p == 1
-
-   y = sign(x) + (x == 0);   % y(i) = +1 or -1 (if x(i) real).
-
-elseif p == inf
-
-   [xmax, k] = max(abs(x));
-   f = find(abs(x)==xmax); k = f(1);
-   y = zeros(size(x));
-   y(k) = sign(x(k));        % y is a multiple of unit vector e_k.
-
-else  % 1 < p < inf.  Dual is unique in this case.
-
-  x = x/norm(x,inf);         % This scaling helps to avoid under/over-flow.
-  y = abs(x).^(p-1) .* ( sign(x) + (x==0) );
-  y = y / norm(y,q);         % Normalize to unit q-norm.
-
-end
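
A quick check of the Holder-equality property described above (a sketch, assuming dual.m is on the path):

    x = randn(8,1);  p = 3;  q = 1/(1 - 1/p);
    y = dual(x, p);
    [x'*y, norm(x, p)]                    % equal: equality in the Holder inequality
    norm(y, q)                            % 1 by construction
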
--- a/toolbox/eigsens.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,25 +0,0 @@
-function [X, D, s] = eigsens(A)
-%EIGSENS   Eigenvalue condition numbers.
-%          EIGSENS(A) is a vector of condition numbers for the eigenvalues
-%          of A (reciprocals of the Wilkinson s(lambda) numbers).
-%          These condition numbers are the reciprocals of the cosines of the
-%          angles between the left and right eigenvectors.
-%          [V, D, s] = EIGSENS(A) is equivalent to
-%                      [V, D] = EIG(A); s = EIGSENS(A);
-
-%          Reference:
-%          G.H. Golub and C.F. Van Loan, Matrix Computations, Second
-%          Edition, Johns Hopkins University Press, Baltimore, Maryland,
-%          1989, sec. 7.2.2.
-
-n = max(size(A));
-s = zeros(n,1);
-
-[X, D] = eig(A);
-Y = inv(X);
-
-for i=1:n
-    s(i) = norm(Y(i,:)) * norm(X(:,i)) / abs( Y(i,:)*X(:,i) );
-end
-
-if nargout <= 1, X = s; end
--- a/toolbox/fdemo.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,7 +0,0 @@
-function f = fdemo(A)
-%FDEMO   Demonstration function for direct search maximizers.
-%        FDEMO(A) is the reciprocal of the underestimation ratio for RCOND
-%        applied to the square matrix A.
-%        Demonstration function for ADSMAX, MDSMAX and NMSMAX.
-
-f = norm(A,1)*norm(inv(A),1)*rcond(A);  % f >= 1
--- a/toolbox/fiedler.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,33 +0,0 @@
-function A = fiedler(c)
-%FIEDLER  Fiedler matrix - symmetric.
-%         A = FIEDLER(C), where C is an n-vector, is the n-by-n symmetric
-%         matrix with elements ABS(C(i)-C(j)).
-%         Special case: if C is a scalar, then A = FIEDLER(1:C)
-%                       (i.e. A(i,j) = ABS(i-j)).
-%         Properties:
-%           FIEDLER(N) has a dominant positive eigenvalue and all the other
-%                      eigenvalues are negative (Szego, 1936).
-%           Explicit formulas for INV(A) and DET(A) are given by Todd (1977)
-%           and attributed to Fiedler.  These indicate that INV(A) is
-%           tridiagonal except for nonzero (1,n) and (n,1) elements.
-%           [I think these formulas are valid only if the elements of
-%           C are in increasing or decreasing order---NJH.]
-
-%           References:
-%           G. Szego, Solution to problem 3705, Amer. Math. Monthly,
-%              43 (1936), pp. 246-259.
-%           J. Todd, Basic Numerical Mathematics, Vol. 2: Numerical Algebra,
-%              Birkhauser, Basel, and Academic Press, New York, 1977, p. 159.
-
-n = max(size(c));
-
-%  Handle scalar c.
-if n == 1
-   n = c;
-   c = 1:n;
-end
-
-c = c(:).';                    % Ensure c is a row vector.
-
-A = ones(n,1)*c;
-A = abs(A - A.');              % NB. array transpose.
--- a/toolbox/forsythe.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,13 +0,0 @@
-function A = forsythe(n, alpha, lambda)
-%FORSYTHE  Forsythe matrix - a perturbed Jordan block.
-%          FORSYTHE(N, ALPHA, LAMBDA) is the N-by-N matrix equal to
-%          JORDBLOC(N, LAMBDA) except it has an ALPHA in the (N,1) position.
-%          It has the characteristic polynomial
-%                  DET(A-t*EYE) = (LAMBDA-t)^N - (-1)^N ALPHA.
-%          ALPHA defaults to SQRT(EPS) and LAMBDA to 0.
-
-if nargin < 2, alpha = sqrt(eps); end
-if nargin < 3, lambda = 0; end
-
-A = jordbloc(n, lambda);
-A(n,1) = alpha;
--- a/toolbox/frank.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,45 +0,0 @@
-function F = frank(n, k)
-%FRANK   Frank matrix---ill conditioned eigenvalues.
-%        F = FRANK(N, K) is the Frank matrix of order N.  It is upper
-%        Hessenberg with determinant 1.  K = 0 is the default; if K = 1 the
-%        elements are reflected about the anti-diagonal (1,N)--(N,1).
-%        F has all positive eigenvalues and they occur in reciprocal pairs
-%        (so that 1 is an eigenvalue if N is odd).
-%        The eigenvalues of F may be obtained in terms of the zeros of the
-%        Hermite polynomials.
-%        The FLOOR(N/2) smallest eigenvalues of F are ill conditioned,
-%        the more so for bigger N.
-
-%        DET(FRANK(N)') comes out far from 1 for large N---see Frank (1958)
-%        and Wilkinson (1960) for discussions.
-%
-%        This version incorporates improvements suggested by W. Kahan.
-%
-%        References:
-%        W.L. Frank, Computing eigenvalues of complex matrices by determinant
-%           evaluation and by methods of Danilewski and Wielandt, J. Soc.
-%           Indust. Appl. Math., 6 (1958), pp. 378-392 (see pp. 385, 388).
-%        G.H. Golub and J.H. Wilkinson, Ill-conditioned eigensystems and the
-%           computation of the Jordan canonical form, SIAM Review, 18 (1976),
-%             pp. 578-619 (Section 13).
-%        H. Rutishauser, On test matrices, Programmation en Mathematiques
-%           Numeriques, Editions Centre Nat. Recherche Sci., Paris, 165,
-%           1966, pp. 349-365.  Section 9.
-%        J.H. Wilkinson, Error analysis of floating-point computation,
-%           Numer. Math., 2 (1960), pp. 319-340 (Section 8).
-%        J.H. Wilkinson, The Algebraic Eigenvalue Problem, Oxford University
-%           Press, 1965 (pp. 92-93).
-%        The next two references give details of the eigensystem, as does
-%        Rutishauser (see above).
-%        P.J. Eberlein, A note on the matrices denoted by B_n, SIAM J. Appl.
-%           Math., 20 (1971), pp. 87-92.
-%        J.M. Varah, A generalization of the Frank matrix, SIAM J. Sci. Stat.
-%           Comput., 7 (1986), pp. 835-839.
-
-if nargin == 1, k = 0; end
-
-p = n:-1:1;
-F = triu( p( ones(n,1), :) - diag( ones(n-1,1), -1), -1 );
-if k ~= 0
-   F = F(p,p)';
-end
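
A spot check of the reciprocal-pair property (a sketch, assuming frank.m is on the path; N is kept small because the smallest eigenvalues soon become ill conditioned):

    e = sort(eig(frank(7)));
    norm(sort(1./e) - e)                  % eigenvalues occur in reciprocal pairs
    prod(e)                               % determinant is 1
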
--- a/toolbox/fv.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,95 +0,0 @@
-function [f, e] = fv(B, nk, thmax, noplot)
-%FV     Field of values (or numerical range).
-%       FV(A, NK, THMAX) evaluates and plots the field of values of the
-%       NK largest leading principal submatrices of A, using THMAX
-%       equally spaced angles in the complex plane.
-%       The defaults are NK = 1 and THMAX = 16.
-%       (For a `publication quality' picture, set THMAX higher, say 32.)
-%       The eigenvalues of A are displayed as `x'.
-%       Alternative usage: [F, E] = FV(A, NK, THMAX, 1) suppresses the
-%       plot and returns the field of values plot data in F, with A's 
-%       eigenvalues in E.   Note that NORM(F,INF) approximates the
-%       numerical radius,
-%                 max {abs(z): z is in the field of values of A}.
-
-%       Theory:
-%       Field of values FV(A) = set of all Rayleigh quotients. FV(A) is a
-%       convex set containing the eigenvalues of A.  When A is normal FV(A) is
-%       the convex hull of the eigenvalues of A (but not vice versa).
-%               z = x'Ax/(x'x),  z' = x'A'x/(x'x)
-%               => REAL(z) = x'Hx/(x'x),   H = (A+A')/2
-%       so      MIN(EIG(H)) <= REAL(z) <= MAX(EIG(H))
-%       with equality for x = corresponding eigenvectors of H.  For these x,
-%       RQ(A,x) is on the boundary of FV(A).
-%
-%       Based on an original routine by A. Ruhe.
-%
-%       References:
-%       R.A. Horn and C.R. Johnson, Topics in Matrix Analysis, Cambridge
-%            University Press, 1991, Section 1.5.
-%       A.S. Householder, The Theory of Matrices in Numerical Analysis,
-%            Blaisdell, New York, 1964, Section 3.3.
-%       C.R. Johnson, Numerical determination of the field of values of a
-%            general complex matrix, SIAM J. Numer. Anal., 15 (1978),
-%            pp. 595-602.
-
-if nargin < 2, nk = 1; end
-if nargin < 3, thmax = 16; end
-thmax = thmax - 1;  % Because code below uses thmax + 1 angles.
-
-iu = sqrt(-1);
-[n, p] = size(B);
-if n ~= p, error('Matrix must be square.'), end
-f = [];
-z = zeros(2*thmax+1,1);
-e = eig(B);
-
-% Filter out cases where B is Hermitian or skew-Hermitian, for efficiency.
-if norm(skewpart(B),1) == 0
-
-   f = [min(e) max(e)];
-
-elseif norm(symmpart(B),1) == 0
-
-   e = imag(e);
-   f = [min(e) max(e)];
-   e = iu*e; f = iu*f;
-
-else
-
-for m = 1:nk
-
-   ns = n+1-m;
-   A = B(1:ns, 1:ns);
-
-   for i = 0:thmax
-      th = i/thmax*pi;
-      Ath = exp(iu*th)*A;               % Rotate A through angle th.
-      H = 0.5*(Ath + Ath');             % Hermitian part of rotated A.
-      [X, D] = eig(H);
-      [lmbh, k] = sort(real(diag(D)));
-      z(1+i) = rq(A,X(:,k(1)));         % RQ's of A corr. to eigenvalues of H
-      z(1+i+thmax) = rq(A,X(:,k(ns)));  % with smallest/largest real part.
-   end
-
-   f = [f; z];
-
-end
-% Next line ensures boundary is `joined up' (needed for orthogonal matrices).
-f = [f; f(1,:)];
-
-end
-if thmax == 0; f = e; end
-
-if nargin < 4
-
-   ax = cpltaxes(f);
-   plot(real(f), imag(f),'k')      % Plot the field of values
-   axis(ax);
-   axis('square');
-
-   hold on
-   plot(real(e), imag(e), 'xb')    % Plot the eigenvalues too.
-   hold off
-
-end
--- a/toolbox/gallery.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,65 +0,0 @@
-function [A, e] = gallery(n)
-%GALLERY    Famous, and not so famous, test matrices.
-%       A = GALLERY(N) is an N-by-N matrix with some special property.
-%       The following values of N are currently available:
-%         N = 3 is badly conditioned.
-%         N = 4 is the Wilson matrix.  Symmetric pos def, integer inverse.
-%         N = 5 is an interesting eigenvalue problem: defective, nilpotent.
-%         N = 8 is the Rosser matrix, a classic symmetric eigenvalue problem.
-%               [A, e] = GALLERY(8) returns the exact eigenvalues in e.
-%         N = 21 is Wilkinson's tridiagonal W21+, another eigenvalue problem.
-
-%       Original version supplied with MATLAB.  Modified by N.J. Higham.
-%
-%       References:
-%       J.R. Westlake, A Handbook of Numerical Matrix Inversion and Solution
-%          of Linear Equations, John Wiley, New York, 1968.
-%       J.H. Wilkinson, The Algebraic Eigenvalue Problem, Oxford University
-%          Press, 1965.
-
-if n == 3
-   A = [ -149   -50  -154
-          537   180   546
-          -27    -9   -25 ];
-
-elseif n == 4
-   A = [10     7     8     7
-         7     5     6     5
-         8     6    10     9
-         7     5     9    10];
-
-elseif n == 5
-%   disp('Try to find the EXACT eigenvalues and eigenvectors.')
-%   Matrix devised by Cleve Moler.  Its Jordan form has just one block, with
-%   eigenvalue zero.  Proof: A^k is nonzero for k<5, zero for k=5.
-%   TRACE(A)=0.  No simple form for null vector.
-   A = [  -9     11    -21     63    -252
-          70    -69    141   -421    1684
-        -575    575  -1149   3451  -13801
-        3891  -3891   7782 -23345   93365
-        1024  -1024   2048  -6144   24572 ];
-
-elseif n == 8
-   A  = [ 611.  196. -192.  407.   -8.  -52.  -49.   29.
-          196.  899.  113. -192.  -71.  -43.   -8.  -44.
-         -192.  113.  899.  196.   61.   49.    8.   52.
-          407. -192.  196.  611.    8.   44.   59.  -23.
-           -8.  -71.   61.    8.  411. -599.  208.  208.
-          -52.  -43.   49.   44. -599.  411.  208.  208.
-          -49.   -8.    8.   59.  208.  208.   99. -911.
-           29.  -44.   52.  -23.  208.  208. -911.   99.  ];
-
-   %  Exact eigenvalues from Westlake (1968), p.150 (ei'vectors given too):
-   a = sqrt(10405); b = sqrt(26);
-   e = [-10*a,   0,   510-100*b,  1000,   1000,   510+100*b, ...
-        1020,   10*a]';
-
-elseif n == 21
-   % W21+, Wilkinson (1965), p.308.
-   E = diag(ones(n-1,1),1);
-   m = (n-1)/2;
-   A = diag(abs(-m:m)) + E + E';
-
-else
-   error('Sorry, that value of N is not available.')
-end
--- a/toolbox/gearm.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,26 +0,0 @@
-function A = gearm(n, i, j)
-%GEARM   Gear matrix.
-%        A = GEARM(N,I,J) is the N-by-N matrix with ones on the sub- and
-%        super-diagonals, SIGN(I) in the (1,ABS(I)) position, SIGN(J)
-%        in the (N,N+1-ABS(J)) position, and zeros everywhere else.
-%        Defaults: I = N, j = -N.
-%        All eigenvalues are of the form 2*COS(a) and the eigenvectors
-%        are of the form [SIN(w+a), SIN(w+2a), ..., SIN(w+Na)].
-%        The values of a and w are given in the reference below.
-%        A can have double and triple eigenvalues and can be defective.
-%        GEARM(N) is singular.
-
-%        (GEAR is a Simulink function, hence GEARM for Gear matrix.)
-%        Reference:
-%        C.W. Gear, A simple set of test matrices for eigenvalue programs,
-%        Math. Comp., 23 (1969), pp. 119-125.
-
-if nargin == 1, i = n; j = -n; end
-
-if ~(i~=0 & abs(i)<=n & j~=0 & abs(j)<=n)
-     error('Invalid I and J parameters')
-end
-
-A = diag(ones(n-1,1),-1) + diag(ones(n-1,1),1);
-A(1, abs(i)) = sign(i);
-A(n, n+1-abs(j)) = sign(j);
--- a/toolbox/gersh.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,38 +0,0 @@
-function  [G, e] = gersh(A, noplot)
-%GERSH    Gershgorin disks.
-%         GERSH(A) draws the Gershgorin disks for the matrix A.
-%         The eigenvalues are plotted as crosses `x'.
-%         Alternative usage: [G, E] = GERSH(A, 1) suppresses the plot
-%         and returns the data in G, with A's eigenvalues in E.
-%
-%         Try GERSH(LESP(N)) and GERSH(SMOKE(N,1)).
-
-if diff(size(A)), error('Matrix must be square.'), end
-
-n = max(size(A));
-m = 40;
-G = zeros(m,n);
-
-d = diag(A);
-r = sum( abs( A.'-diag(d) ) )';
-e = eig(A);
-
-radvec = exp(i * seqa(0,2*pi,m)');
-
-for j=1:n
-    G(:,j) = d(j)*ones(size(radvec)) + r(j)*radvec;
-end
-
-if nargin < 2
-
-   ax = cpltaxes(G(:));
-   for j=1:n
-       plot(real(G(:,j)), imag(G(:,j)),'-c5')      % Plot the disks.
-       hold on
-   end
-   plot(real(e), imag(e), 'xg')    % Plot the eigenvalues too.
-   axis(ax);
-   axis('square');
-   hold off
-
-end
--- a/toolbox/gfpp.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-function A = gfpp(T, c)
-%GFPP   Matrix giving maximal growth factor for Gaussian elim. with pivoting.
-%       GFPP(T) is a matrix of order N for which Gaussian elimination
-%       with partial pivoting yields a growth factor 2^(N-1).
-%       T is an arbitrary nonsingular upper triangular matrix of order N-1.
-%       GFPP(T, C) sets all the multipliers to C  (0 <= C <= 1)
-%       and gives growth factor (1+C)^(N-1).
-%       GFPP(N, C) (a special case) is the same as GFPP(EYE(N-1), C) and
-%       generates the well-known example of Wilkinson.
-
-%       Reference:
-%       N.J. Higham and D.J. Higham, Large growth factors in
-%       Gaussian elimination with pivoting, SIAM J. Matrix Analysis and
-%       Appl., 10 (1989), pp. 155-164.
-
-if norm(T-triu(T),1) | any(~diag(T))
-   error('First argument must be a nonsingular upper triangular matrix.')
-end
-
-if nargin == 1, c = 1; end
-
-if c < 0 | c > 1
-   error('Second parameter must be a scalar between 0 and 1 inclusive.')
-end
-
-[m, m] = size(T);
-if m == 1    % Handle the special case T = scalar
-   n = T;
-   m = n-1;
-   T = eye(n-1);
-else
-   n = m+1;
-end
-
-d = 1+c;
-L =  eye(n) - c*tril(ones(n), -1);
-U = [T  (d.^[0:n-2])'; zeros(1,m) d^(n-1)];
-A = L*U;
-theta = max(abs(A(:)));
-A(:,n) = (theta/norm(A(:,n),inf)) * A(:,n);
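
A sketch of the growth-factor claim for the classical Wilkinson example (assuming gfpp.m is on the path; LU here is ordinary Gaussian elimination with partial pivoting):

    A = gfpp(eye(7));                     % the classical example, order n = 8
    [L, U] = lu(A);                       % Gaussian elimination with partial pivoting
    max(abs(U(:))) / max(abs(A(:)))       % growth factor 2^(n-1) = 128
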
--- a/toolbox/gj.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,31 +0,0 @@
-function x = gj(A, b, piv)
-%GJ        Gauss-Jordan elimination to solve Ax = b.
-%          x = GJ(A, b, PIV) solves Ax = b by Gauss-Jordan elimination,
-%          where A is a square, nonsingular matrix.
-%          PIV determines the form of pivoting:
-%              PIV = 0:           no pivoting,
-%              PIV = 1 (default): partial pivoting.
-
-n = max(size(A));
-if nargin < 3, piv = 1; end
-
-for k=1:n
-    if piv
-       % Partial pivoting (below the diagonal).
-       [colmax, i] = max( abs(A(k:n, k)) );
-       i = k+i-1;
-       if i ~= k
-          A( [k, i], : ) = A( [i, k], : );
-          b( [k, i] ) = b( [i, k] );
-       end
-    end
-
-    irange = [1:k-1 k+1:n];
-    jrange = k:n;
-    mult = A(irange,k)/A(k,k); % Multipliers.
-    A(irange, jrange) =  A(irange, jrange) - mult*A(k, jrange);
-    b(irange) =  b(irange) - mult*b(k);
-
-end
-
-x = diag(diag(A))\b;
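
A small sanity check, assuming gj.m above is on the path: solve a random well-conditioned system and compare with backslash.

    n = 6;
    A = randn(n);  b = randn(n,1);
    x = gj(A, b);                        % Gauss-Jordan with partial pivoting
    disp(norm(x - A\b) / norm(A\b))      % relative difference, of order EPS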
--- a/toolbox/grcar.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,18 +0,0 @@
-function G = grcar(n, k)
-%GRCAR     Grcar matrix - a Toeplitz matrix with sensitive eigenvalues.
-%          GRCAR(N, K) is an N-by-N matrix with -1s on the
-%          subdiagonal, 1s on the diagonal, and K superdiagonals of 1s.
-%          The default is K = 3.  The eigenvalues of this matrix form an
-%          interesting pattern in the complex plane (try PS(GRCAR(32))).
-
-%          References:
-%          J.F. Grcar, Operator coefficient methods for linear equations,
-%               Report SAND89-8691, Sandia National Laboratories, Albuquerque,
-%               New Mexico, 1989 (Appendix 2).
-%          N.M. Nachtigal, L. Reichel and L.N. Trefethen, A hybrid GMRES
-%               algorithm for nonsymmetric linear systems, SIAM J. Matrix Anal.
-%               Appl., 13 (1992), pp. 796-825.
-
-if nargin == 1, k = 3; end
-
-G = tril(triu(ones(n)), k) - diag(ones(n-1,1), -1);
--- a/toolbox/hadamard.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,41 +0,0 @@
-function H = hadamard(n)
-%HADAMARD  Hadamard matrix.
-%          HADAMARD(N) is a Hadamard matrix of order N, that is,
-%          a matrix H with elements 1 or -1 such that H*H' = N*EYE(N).
-%          An N-by-N Hadamard matrix with N>2 exists only if REM(N,4) = 0.
-%          This function handles only the cases where N, N/12 or N/20
-%          is a power of 2.
-
-%          Reference:
-%          S.W. Golomb and L.D. Baumert, The search for Hadamard matrices,
-%          Amer. Math. Monthly, 70 (1963) pp. 12-17.
-
-%          History:
-%          NJH (11/14/91), revised by CBM, 6/24/92,
-%          comment lines revised by NJH, August 1993.
-
-[f,e] = log2([n n/12 n/20]);
-k = find(f==1/2 & e>0);
-if isempty(k)
-   error(['N, N/12 or N/20 must be a power of 2.']);
-end
-e = e(k)-1;
-
-if k == 1        % N = 1 * 2^e;
-   H = [1];
-
-elseif k == 2    % N = 12 * 2^e;
-   H = [ones(1,12); ones(11,1) ...
-        toeplitz([-1 -1 1 -1 -1 -1 1 1 1 -1 1],[-1 1 -1 1 1 1 -1 -1 -1 1 -1])];
-
-elseif k == 3    % N = 20 * 2^e;
-   H = [ones(1,20); ones(19,1)   ...
-        hankel([-1 -1 1 1 -1 -1 -1 -1 1 -1 1 -1 1 1 1 1 -1 -1 1], ...
-               [1 -1 -1 1 1 -1 -1 -1 -1 1 -1 1 -1 1 1 1 1 -1 -1])];
-end
-
-%  Kronecker product construction.
-for i = 1:e
-    H = [H  H
-         H -H];
-end
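
A brief check of the defining property H*H' = N*EYE(N) for the orders this function supports, assuming hadamard.m above (or the stock MATLAB version) is used:

    for n = [2 4 8 12 20 24]
        H = hadamard(n);
        assert(isequal(H*H', n*eye(n)))   % exact: the entries are small integers
    end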
--- a/toolbox/hanowa.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,22 +0,0 @@
-function A = hanowa(n, d)
-%HANOWA  A matrix whose eigenvalues lie on a vertical line in the complex plane.
-%        HANOWA(N, d) is the N-by-N block 2x2 matrix (thus N = 2M must be even)
-%                      [d*EYE(M)   -DIAG(1:M)
-%                       DIAG(1:M)   d*EYE(M)]
-%        It has complex eigenvalues lambda(k) = d +/- k*i  (1 <= k <= M).
-%        Parameter d defaults to -1.
-
-%        Reference:
-%        E. Hairer, S.P. Norsett and G. Wanner, Solving Ordinary
-%        Differential Equations I: Nonstiff Problems, Springer-Verlag,
-%        Berlin, 1987. (pp. 86-87)
-
-if nargin == 1, d = -1; end
-
-m = n/2;
-if round(m) ~= m
-   error('N must be even.')
-end
-
-A = [ d*eye(m) -diag(1:m)
-      diag(1:m)  d*eye(m)];
--- a/toolbox/hilb.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,27 +0,0 @@
-function H = hilb(n)
-%HILB   Hilbert matrix.
-%       HILB(N) is the N-by-N matrix with elements 1/(i+j-1).
-%       It is a famous example of a badly conditioned matrix.
-%       COND(HILB(N)) grows like EXP(3.5*N).
-%       See INVHILB (standard MATLAB routine) for the exact inverse, which
-%       has integer entries.
-%       HILB(N) is symmetric positive definite, totally positive, and a
-%       Hankel matrix.
-
-%       References:
-%       M.-D. Choi, Tricks or treats with the Hilbert matrix, Amer. Math.
-%           Monthly, 90 (1983), pp. 301-312.
-%       N.J. Higham, Accuracy and Stability of Numerical Algorithms,
-%           Society for Industrial and Applied Mathematics, Philadelphia, PA,
-%           USA, 1996; sec. 26.1.
-%       M. Newman and J. Todd, The evaluation of matrix inversion
-%           programs, J. Soc. Indust. Appl. Math., 6 (1958), pp. 466-476.
-%       D.E. Knuth, The Art of Computer Programming,
-%           Volume 1, Fundamental Algorithms, second edition, Addison-Wesley,
-%           Reading, Massachusetts, 1973, p. 37.
-
-if n == 1
-   H = 1;
-else
-    H = cauchy( (1:n) - .5);
-end
--- a/toolbox/house.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,36 +0,0 @@
-function [v, beta] = house(x)
-%HOUSE   Householder matrix.
-%        If [v, beta] = HOUSE(x) then H = EYE - beta*v*v' is a Householder
-%        matrix such that Hx = -sign(x(1))*norm(x)*e_1.
-%        NB: If x = 0 then v = 0, beta = 1 is returned.
-%            x can be real or complex.
-%            sign(x) := exp(i*arg(x)) ( = x./abs(x) when x ~= 0).
-
-%        Theory: (textbook references Golub & Van Loan 1989, 38-43;
-%                 Stewart 1973, 231-234, 262; Wilkinson 1965, 48-50).
-%        Hx = y: (I - beta*v*v')x = -s*e_1.
-%        Must have |s| = norm(x), v = x+s*e_1, and
-%        x'y = x'Hx =(x'Hx)' real => arg(s) = arg(x(1)).
-%        So take s = sign(x(1))*norm(x) (which avoids cancellation).
-%        v'v = (x(1)+s)^2 + x(2)^2 + ... + x(n)^2
-%            = 2*norm(x)*(norm(x) + |x(1)|).
-%
-%        References:
-%        G.H. Golub and C.F. Van Loan, Matrix Computations, second edition,
-%           Johns Hopkins University Press, Baltimore, Maryland, 1989.
-%        G.W. Stewart, Introduction to Matrix Computations, Academic Press,
-%           New York, 1973,
-%        J.H. Wilkinson, The Algebraic Eigenvalue Problem, Oxford University
-%           Press, 1965.
-
-[m, n] = size(x);
-if n > 1, error('Argument must be a column vector.'), end
-
-s = norm(x) * (sign(x(1)) + (x(1)==0));    % Modification for sign(0)=1.
-v = x;
-if s == 0, beta = 1; return, end           % Quit if x is the zero vector.
-v(1) = v(1) + s;
-beta = 1/(s'*v(1));                        % NB the conjugated s.
-
-% beta = 1/(abs(s)*(abs(s)+abs(x(1)))) would guarantee beta real.
-% But beta as above can be non-real (due to rounding) only when x is complex.
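
A short check of the reflection property stated in the help text, assuming house.m is on the path; a real 5-vector is used for simplicity:

    x = randn(5,1);
    [v, beta] = house(x);
    H = eye(5) - beta*(v*v');              % the Householder reflector
    y = H*x;
    disp(norm(y(2:end)))                   % ~EPS*norm(x): Hx is a multiple of e_1
    disp([y(1)  -sign(x(1))*norm(x)])      % these two should agree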
--- a/toolbox/invhess.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-function A = invhess(x, y)
-%INVHESS  Inverse of an upper Hessenberg matrix.
-%         INVHESS(X, Y), where X is an N-vector and Y an N-1 vector,
-%         is the matrix whose lower triangle agrees with that of
-%         ONES(N,1)*X' and whose strict upper triangle agrees with
-%         that of [1 Y]*ONES(1,N).
-%         The matrix is nonsingular if X(1) ~= 0 and X(i+1) ~= Y(i)
-%         for all i, and its inverse is an upper Hessenberg matrix.
-%         If Y is omitted it defaults to -X(1:N-1).
-%         Special case: if X is a scalar INVHESS(X) is the same as
-%         INVHESS(1:X).
-
-%         References:
-%         F.N. Valvi and V.S. Geroyannis, Analytic inverses and
-%             determinants for a class of matrices, IMA Journal of Numerical
-%             Analysis, 7 (1987), pp. 123-128.
-%         W.-L. Cao and W.J. Stewart, A note on inverses of Hessenberg-like
-%             matrices, Linear Algebra and Appl., 76 (1986), pp. 233-240.
-%         Y. Ikebe, On inverses of Hessenberg matrices, Linear Algebra and
-%             Appl., 24 (1979), pp. 93-97.
-%         P. Rozsa, On the inverse of band matrices, Integral Equations and
-%             Operator Theory, 10 (1987), pp. 82-95.
-
-n = max(size(x));
-%  Handle scalar x.
-if n == 1
-   n = x;
-   x = 1:n;
-end
-x = x(:);
-
-if nargin < 2, y = -x; end
-y = y(:);
-
-% On next line, z = x'; A = z(ones(n,1),:) would be more efficient.
-A = ones(n,1)*x';  
-for j=2:n
-    A(1:j-1,j) = y(1:j-1);
-end
--- a/toolbox/invol.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,21 +0,0 @@
-function A = invol(n)
-%INVOL   An involutory matrix.
-%        A = INVOL(N) is an N-by-N involutory (A*A = EYE(N)) and
-%        ill-conditioned matrix.
-%        It is a diagonally scaled version of HILB(N).
-%        NB: B = (EYE(N)-A)/2 and B = (EYE(N)+A)/2 are idempotent (B*B = B).
-
-%        Reference:
-%        A.S. Householder and J.A. Carpenter, The singular values
-%        of involutory and of idempotent matrices, Numer. Math. 5 (1963),
-%        pp. 234-237.
-
-A = hilb(n);
-
-d = -n;
-A(:, 1) = d*A(:, 1);
-
-for i = 1:n-1
-    d = -(n+i)*(n-i)*d/(i*i);
-    A(i+1, :) = d*A(i+1, :);
-end
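
A minimal check of the involutory property, assuming invol.m (and hilb.m, which it calls) are on the path; the residual is small but not at EPS level for larger N, because the matrix is very ill conditioned:

    n = 6;
    A = invol(n);
    disp(norm(A*A - eye(n), 1))     % A*A = I in exact arithmetic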
--- a/toolbox/ipjfact.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-function [A, detA] = ipjfact(n, k)
-%IPJFACT   A Hankel matrix with factorial elements.
-%          A = IPJFACT(N, K) is the matrix with
-%                    A(i,j) = (i+j)!    (K = 0, default)
-%                    A(i,j) = 1/(i+j)!  (K = 1)
-%          Both are Hankel matrices.
-%          The determinant and inverse are known explicitly.
-%          If a second output argument is present, d = DET(A) is returned:
-%          [A, d] = IPJFACT(N, K);
-
-%          Suggested by P. R. Graves-Morris.
-%
-%          Reference:
-%          M.J.C. Gover, The explicit inverse of factorial Hankel matrices,
-%          Dept. of Mathematics, University of Bradford, 1993.
-
-if nargin == 1, k = 0; end
-
-c = cumprod(2:n+1);
-d = cumprod(n+1:2*n) * c(n-1);
-
-A = hankel(c, d);
-
-if k == 1
-   A = ones(n)./A;
-end
-
-if nargout == 2
-   d = 1;
-   if k == 0
-      for i=1:n-1
-          d = d*prod(1:i+1)*prod(1:n-i);
-      end
-      d = d*prod(1:n+1);
-   else
-      for i=0:n-1
-          d = d*prod(1:i)/prod(1:n+1+i);
-      end
-      if rem(n*(n-1)/2,2), d = -d; end
-   end
-   detA = d;
-end
--- a/toolbox/jordbloc.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,8 +0,0 @@
-function J = jordbloc(n, lambda)
-%JORDBLOC  Jordan block.
-%          JORDBLOC(N, LAMBDA) is the N-by-N Jordan block with eigenvalue
-%          LAMBDA.  LAMBDA = 1 is the default.
-
-if nargin == 1, lambda = 1; end
-
-J = lambda*eye(n) + diag(ones(n-1,1),1);
--- a/toolbox/kahan.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,43 +0,0 @@
-function U = kahan(n, theta, pert)
-%KAHAN  Kahan matrix - upper trapezoidal.
-%       KAHAN(N, THETA) is an upper trapezoidal matrix
-%       that has some interesting properties regarding estimation of
-%       condition and rank.
-%       The matrix is N-by-N unless N is a 2-vector, in which case it
-%       is N(1)-by-N(2).
-%       The parameter THETA defaults to 1.2.
-%       The useful range of THETA is 0 < THETA < PI.
-%
-%       To ensure that the QR factorization with column pivoting does not
-%       interchange columns in the presence of rounding errors, the diagonal
-%       is perturbed by PERT*EPS*diag( [N:-1:1] ).
-%       The default is PERT = 25, which ensures no interchanges for KAHAN(N)
-%       up to at least N = 90 in IEEE arithmetic.
-%       KAHAN(N, THETA, PERT) uses the given value of PERT.
-
-%       The inverse of KAHAN(N, THETA) is known explicitly: see
-%       Higham (1987, p. 588), for example.
-%       The diagonal perturbation was suggested by Christian Bischof.
-%
-%       References:
-%       W. Kahan, Numerical linear algebra, Canadian Math. Bulletin,
-%          9 (1966), pp. 757-801.
-%       N.J. Higham, A survey of condition number estimation for
-%          triangular matrices, SIAM Review, 29 (1987), pp. 575-596.
-
-if nargin < 3, pert = 25; end
-if nargin < 2, theta = 1.2; end
-
-r = n(1);              % Parameter n specifies dimension: r-by-n.
-n = n(max(size(n)));
-
-s = sin(theta);
-c = cos(theta);
-
-U = eye(n) - c*triu(ones(n), 1);
-U = diag(s.^[0:n-1])*U + pert*eps*diag( [n:-1:1] );
-if r > n
-   U(r,n) = 0;         % Extend to an r-by-n matrix.
-else
-   U = U(1:r,:);       % Reduce to an r-by-n matrix.
-end
--- a/toolbox/kms.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,27 +0,0 @@
-function A = kms(n, rho)
-%KMS   Kac-Murdock-Szego Toeplitz matrix.
-%      A = KMS(N, RHO) is the N-by-N Kac-Murdock-Szego Toeplitz matrix with
-%      A(i,j) = RHO^(ABS((i-j))) (for real RHO).
-%      If RHO is complex, then the same formula holds except that elements
-%      below the diagonal are conjugated.
-%      RHO defaults to 0.5.
-%      Properties:
-%         A has an LDL' factorization with
-%                  L = INV(TRIW(N,-RHO,1)'),
-%                  D(i,i) = (1-ABS(RHO)^2)*EYE(N) except D(1,1) = 1.
-%         A is positive definite if and only if 0 < ABS(RHO) < 1.
-%         INV(A) is tridiagonal.
-
-%       Reference:
-%       W.F. Trench, Numerical solution of the eigenvalue problem
-%       for Hermitian Toeplitz matrices, SIAM J. Matrix Analysis and Appl.,
-%       10 (1989), pp. 135-146 (and see the references therein).
-
-if nargin < 2, rho = 0.5; end
-
-A = (1:n)'*ones(1,n);
-A = abs(A - A');
-A = rho .^ A;
-if imag(rho)
-   A = conj(tril(A,-1)) + triu(A);
-end
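
A small sketch of the tridiagonal-inverse property, assuming kms.m is available; entries of INV(A) off the three central diagonals should be at roundoff level:

    n = 7;  rho = 0.5;
    A = kms(n, rho);
    B = inv(A);
    offband = B - triu(tril(B, 1), -1);   % remove the tridiagonal part
    disp(norm(offband, 1))                % ~EPS: inv(A) is tridiagonal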
--- a/toolbox/krylov.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,28 +0,0 @@
-function B = krylov(A, x, j)
-%KRYLOV    Krylov matrix.
-%          KRYLOV(A, x, j) is the Krylov matrix
-%               [x, Ax, A^2x, ..., A^(j-1)x],
-%          where A is an n-by-n matrix and x is an n-vector.
-%          Defaults: x = ONES(n,1), j = n.
-%          KRYLOV(n) is the same as KRYLOV(RANDN(n)).
-
-%          Reference:
-%          G.H. Golub and C.F. Van Loan, Matrix Computations, second edition,
-%          Johns Hopkins University Press, Baltimore, Maryland, 1989, p. 369.
-
-[n, n] = size(A);
-
-if n == 1   % Handle special case A = scalar.
-   n = A;
-   A = randn(n);
-end
-
-if nargin < 3, j = n; end
-if nargin < 2, x = ones(n,1); end
-
-
-B = ones(n,j);
-B(:,1) = x(:);
-for i=2:j
-    B(:,i) = A*B(:,i-1);
-end
--- a/toolbox/lauchli.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,14 +0,0 @@
-function A = lauchli(n, mu)
-%LAUCHLI   Lauchli matrix - rectangular.
-%          LAUCHLI(N, MU) is the (N+1)-by-N matrix [ONES(1,N); MU*EYE(N))].
-%          It is a well-known example in least squares and other problems
-%          that indicates the dangers of forming A'*A.
-%          MU defaults to SQRT(EPS).
-
-%          Reference:
-%          P. Lauchli, Jordan-Elimination und Ausgleichung nach
-%          kleinsten Quadraten, Numer. Math, 3 (1961), pp. 226-240.
-
-if nargin < 2, mu = sqrt(eps); end
-A = [ones(1,n);
-     mu*eye(n)];
--- a/toolbox/lehmer.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,19 +0,0 @@
-function A = lehmer(n)
-%LEHMER  Lehmer matrix - symmetric positive definite.
-%        A = LEHMER(N) is the symmetric positive definite N-by-N matrix with
-%                         A(i,j) = i/j for j >= i.
-%        A is totally nonnegative.  INV(A) is tridiagonal, and explicit
-%        formulas are known for its entries.
-%        N <= COND(A) <= 4*N*N.
-
-%        References:
-%        M. Newman and J. Todd, The evaluation of matrix inversion
-%           programs, J. Soc. Indust. Appl. Math., 6 (1958), pp. 466-476.
-%        Solutions to problem E710 (proposed by D.H. Lehmer): The inverse
-%           of a matrix, Amer. Math. Monthly, 53 (1946), pp. 534-535.
-%        J. Todd, Basic Numerical Mathematics, Vol. 2: Numerical Algebra,
-%           Birkhauser, Basel, and Academic Press, New York, 1977, p. 154.
-
-A = ones(n,1)*(1:n);
-A = A./A';
-A = tril(A) + tril(A,-1)';
--- a/toolbox/lesp.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,22 +0,0 @@
-function T = lesp(n)
-%LESP   A tridiagonal matrix with real, sensitive eigenvalues.
-%       LESP(N) is an N-by-N matrix whose eigenvalues are real and smoothly
-%       distributed in the interval approximately [-2*N-3.5, -4.5].
-%       The sensitivities of the eigenvalues increase exponentially as
-%       the eigenvalues grow more negative.
-%       The matrix is similar to the symmetric tridiagonal matrix with
-%       the same diagonal entries and with off-diagonal entries 1,
-%       via a similarity transformation with D = diag(1!,2!,...,N!).
-
-%       References:
-%       H.W.J. Lenferink and M.N. Spijker, On the use of stability regions in
-%            the numerical analysis of initial value problems,
-%            Math. Comp., 57 (1991), pp. 221-237.
-%       L.N. Trefethen, Pseudospectra of matrices, in Numerical Analysis 1991,
-%            Proceedings of the 14th Dundee Conference,
-%            D.F. Griffiths and G.A. Watson, eds, Pitman Research Notes in
-%            Mathematics, volume 260, Longman Scientific and Technical, Essex,
-%            UK, 1992, pp. 234-266.
-
-x = 2:n;
-T = full(tridiag( ones(size(x))./x, -(2*[x n+1]+1), x));
--- a/toolbox/lotkin.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,12 +0,0 @@
-function A = lotkin(n)
-%LOTKIN  Lotkin matrix.
-%        A = LOTKIN(N) is the Hilbert matrix with its first row altered to
-%        all ones.  A is unsymmetric, ill-conditioned, and has many negative
-%        eigenvalues of small magnitude.
-%        The inverse has integer entries and is known explicitly.
-
-%        Reference:
-%        M. Lotkin, A set of test matrices, MTAC, 9 (1955), pp. 153-161.
-
-A = hilb(n);
-A(1,:) = ones(1,n);
--- a/toolbox/makejcf.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-function A = makejcf(n, e, m, X)
-%MAKEJCF   A matrix with given Jordan canonical form.
-%          MAKEJCF(N, E, M) is a matrix having the Jordan canonical form
-%          whose i'th Jordan block is of dimension M(i) with eigenvalue E(i),
-%          and where N = SUM(M).
-%          Defaults: E = 1:N, M = ONES(SIZE(E)) with M(1) so that SUM(M) = N.
-%          The matrix is constructed by applying a random similarity
-%          transformation to the Jordan form.
-%          Alternatively, the matrix used in the similarity transformation
-%          can be specified as a fourth parameter.
-%          In particular, MAKEJCF(N, E, M, EYE(N)) returns the Jordan form
-%          itself.
-%          NB: The JCF is very sensitive to rounding errors.
-
-if nargin < 2, e = 1:n; end
-if nargin < 3, m = ones(size(e)); m(1) = m(1) + n - sum(m); end
-
-if any( size(e(:)) ~= size(m(:)) )
-   error('Parameters E and M must be of same dimension.')
-end
-
-if sum(m) ~= n, error('Block dimensions must add up to N.'), end
-
-A = zeros(n);
-j = 1;
-for i=1:max(size(m))
-    if m(i) > 1
-        Jb = jordbloc(m(i),e(i));
-    else
-        Jb = e(i);  % JORDBLOC fails in n = 1 case.
-    end
-    A(j:j+m(i)-1,j:j+m(i)-1) = Jb;
-    j = j + m(i);
-end
-
-if nargin < 4
-   X = randn(n);
-end
-A = X\A*X;
--- a/toolbox/matrix.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,99 +0,0 @@
-function A = matrix(k, n)
-%MATRIX    Test Matrix Toolbox information and matrix access by number.
-%          MATRIX(K, N) is the N-by-N instance of the matrix number K in
-%          the Test Matrix Toolbox (including some of the matrices provided
-%          with MATLAB), with all other parameters set to their default.
-%          N.B. Only those matrices which take an arbitrary dimension N
-%               are included (thus GALLERY is omitted, for example).
-%          MATRIX(K) is a string containing the name of the K'th matrix.
-%          MATRIX(0) is the number of matrices, i.e. the upper limit for K.
-%          Thus to set A to each N-by-N test matrix in turn use a loop like
-%                for k=1:matrix(0)
-%                    A = matrix(k, N);
-%                    Aname = matrix(k);   % The name of the matrix
-%                end
-%          MATRIX(-1) returns the version number and date of the toolbox.
-%          MATRIX with no arguments lists the names of the M-files in the
-%          collection.
-
-%          References:
-%          N.J. Higham. The Test Matrix Toolbox for Matlab (version 3.0),
-%             Numerical Analysis Report No. 276, Manchester Centre for
-%             Computational Mathematics, Manchester, England, September 1995.
-%          N.J. Higham, Algorithm 694: A collection of test matrices in
-%             MATLAB, ACM Trans. Math. Soft., 17 (1991), pp. 289-305.
-%
-%          Matrices omitted are: gallery, hadamard, hanowa, lauchli,
-%          neumann, wathen, wilk.
-%          Matrices provided with MATLAB that are included here: invhilb,
-%          magic.
-
-% Set up string array a few lines at a time to avoid `input buffer line
-% overflow'.
-
-matrices = '';
-
-matrices = [matrices
-'augment '; 'cauchy  '; 'chebspec'; 'chebvand';
-'chow    '; 'circul  '; 'clement '; 'compan  ';
-'condex  '; 'cycol   '; 'dingdong'; 'dorr    ';
-'dramadah'; 'fiedler '; 'forsythe'; 'frank   ';];
-
-matrices = [matrices
-'gearm   '; 'gfpp    '; 'grcar   '; 'hilb    ';
-'invhess '; 'invol   '; 'ipjfact '; 'jordbloc';
-'kahan   '; 'kms     '; 'krylov  '; 'lehmer  ';
-'lesp    '; 'lotkin  '; 'makejcf '; 'minij   ';];
-
-matrices = [matrices
-'moler   '; 'ohess   '; 'orthog  '; 'parter  ';
-'pascal  '; 'pdtoep  '; 'pei     '; 'pentoep ';
-'prolate '; 'rando   '; 'randsvd ';
-'redheff '; 'riemann '; 'rschur  '; 'smoke   ';
-'tridiag '; 'triw    '; 'vand    ';];
-
-if nargin == 0
-
-fprintf('Test matrices:                                                    \n')
-fprintf('                                                                  \n')
-fprintf('augment  cycol    gfpp     kahan   moler    poisson  triw         \n')
-fprintf('cauchy   dingdong grcar    kms     neumann  prolate  vand         \n')
-fprintf('chebspec dorr     hadamard krylov  ohess    rando    wathen       \n')
-fprintf('chebvand dramadah hanowa   lauchli orthog   randsvd  wilk         \n')
-fprintf('chow     fiedler  hilb     lehmer  parter   redheff               \n')
-fprintf('circul   forsythe invhess  lesp    pascal   riemann               \n')
-fprintf('clement  frank    invol    lotkin  pdtoep   rschur                \n')
-fprintf('compan   gallery  ipjfact  makejcf pei      smoke                 \n')
-fprintf('condex   gearm    jordbloc minij   pentoep  tridiag               \n')
-fprintf('                                                                  \n')
-fprintf('Visualization:   Decompositions:     Miscellaneous:              \n')
-fprintf('                                                                 \n')
-fprintf('fv               cgs      gj         bandred  matrix   show    \n')
-fprintf('gersh            cholp    mgs        chop     matsignt skewpart\n')
-fprintf('ps               cod      poldec     comp     pnorm    sparsify\n')
-fprintf('pscont           diagpiv  signm      cond     qmult    sub     \n')
-fprintf('see              ge                  cpltaxes rq       symmpart\n')
-fprintf('                 gecp                dual     seqa     trap2tri\n')
-fprintf('                                     eigsens  seqcheb          \n')
-fprintf('                                     house    seqm             \n\n')
-fprintf('Direct search optimization: adsmax, mdsmax, nmsmax             \n')
-fprintf('Demonstration: tmtdemo                                         \n')
-
-elseif nargin == 1
-   if k == 0
-      [A, temp] = size(matrices);
-   elseif k > 0
-      A = matrices(k,:);
-   else
-      % Version number and date of collection.
-%     A = 'Version 1.0, July 4 1989';
-%     A = 'Version 1.1, November 15 1989';
-%     A = 'Version 1.2, May 30 1990';
-%     A = 'Version 1.3, November 14 1991';
-%     A = 'Version 2.0, November 14 1993';
-      A = 'Version 3.0, September 19 1995';
-   end
-else
-   A = eval( [matrices(k,:) '(n)'] );
-end
--- a/toolbox/matsignt.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,41 +0,0 @@
-function S = matsignt(T)
-%MATSIGNT    Matrix sign function of a triangular matrix.
-%            S = MATSIGN(T) computes the matrix sign function S of the
-%            upper triangular matrix T using a recurrence.
-
-%            Adapted from FUNM.  Called by SIGNM.
-
-if norm(tril(T,-1),1), error('Matrix must be upper triangular.'), end
-
-n = max(size(T));
-
-S = diag( sign( diag(real(T)) ) );
-tol = 0;
-for p = 1:n-1
-   for i = 1:n-p
-
-      j = i+p;
-      d = T(j,j) - T(i,i);
-
-      if S(i,i) ~= -S(j,j)  % Solve via S^2 = I if we can.
-
-         % Get S(i,j) from S^2 = I.
-         k = i+1:j-1;
-         RHS = 0;
-         if k, RHS = RHS - S(i,k)*S(k,j); end
-         S(i,j) = RHS  / (S(i,i)+S(j,j));
-
-      else
-
-         % Get S(i,j) from S*T = T*S.
-         s = T(i,j)*(S(j,j)-S(i,i));
-         if p > 1
-            k = i+1:j-1;
-            s = s + T(i,k)*S(k,j) - S(i,k)*T(k,j);
-         end
-         S(i,j) = s/d;
-
-      end
-
-   end
-end
--- a/toolbox/minij.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,25 +0,0 @@
-function A = minij(n)
-%MINIJ   Symmetric positive definite matrix MIN(i,j).
-%        A = MINIJ(N) is the N-by-N symmetric positive definite matrix with
-%        A(i,j) = MIN(i,j).
-%        Properties, variations:
-%        A has eigenvalues .25*sec^2(r*PI/(2*N+1)), r=1:N, and the eigenvectors
-%                    are also known explicitly.
-%        INV(A) is tridiagonal: it is minus the second difference matrix
-%                    except its (N,N) element is 1.
-%        2*A-ONES(N) (Givens' matrix) has tridiagonal inverse and
-%                    eigenvalues .5*sec^2((2r-1)PI/4N), r=1:N.
-%        (N+1)*ONES(N)-A also has a tridiagonal inverse.
-%        FLIPUD(TRIW(N,1)) is a square root of A.
-
-%        References:
-%        J. Fortiana and C. M. Cuadras, A family of matrices, the discretized
-%           Brownian bridge, and distance-based regression, Linear Algebra
-%           Appl., 264 (1997), 173-188.  (For the eigensystem of A.)
-%        J. Todd, Basic Numerical Mathematics, Vol. 2: Numerical Algebra,
-%           Birkhauser, Basel, and Academic Press, New York, 1977, p. 158.
-%        D.E. Rutherford, Some continuant determinants arising in physics and
-%           chemistry---II, Proc. Royal Soc. Edin., 63, A (1952), pp. 232-241.
-%           (For the eigenvalues of Givens' matrix.)
-
-A = min( ones(n,1)*(1:n), (1:n)'*ones(1,n) );
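
A quick numerical check of the stated eigenvalues, assuming minij.m is on the path:

    n = 6;
    A = minij(n);
    lam = sort(eig(A));
    r = (1:n)';
    lam_exact = sort(0.25 ./ cos(r*pi/(2*n+1)).^2);   % .25*sec^2(r*PI/(2*N+1))
    disp(norm(lam - lam_exact, inf))                  % ~EPS*norm(A)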
--- a/toolbox/moler.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,17 +0,0 @@
-function A = moler(n, alpha)
-%MOLER   Moler matrix - symmetric positive definite.
-%        A = MOLER(N, ALPHA) is the symmetric positive definite N-by-N matrix
-%        U'*U where U = TRIW(N, ALPHA).
-%        For ALPHA = -1 (the default) A(i,j) = MIN(i,j)-2, A(i,i) = i.
-%        A has one small eigenvalue.
-
-%        Nash (1990) attributes the ALPHA = -1 matrix to Moler.
-%
-%        Reference:
-%        J.C. Nash, Compact Numerical Methods for Computers: Linear
-%        Algebra and Function Minimisation, second edition, Adam Hilger,
-%        Bristol, 1990 (Appendix 1).
-
-if nargin == 1, alpha = -1; end
-
-A = triw(n, alpha)'*triw(n, alpha);
--- a/toolbox/neumann.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,24 +0,0 @@
-function [A, T] = neumann(n)
-%NEUMANN  Singular matrix from the discrete Neumann problem (sparse).
-%         NEUMANN(N) is the singular, row diagonally dominant matrix resulting
-%         from discretizing the Neumann problem with the usual five point
-%         operator on a regular mesh.
-%         It has a one-dimensional null space with null vector ONES(N,1).
-%         The dimension N should be a perfect square, or else a 2-vector,
-%         in which case the dimension of the matrix is N(1)*N(2).
-
-%         Reference:
-%         R.J. Plemmons, Regular splittings and the discrete Neumann
-%         problem, Numer. Math., 25 (1976), pp. 153-161.
-
-if max(size(n)) == 1
-   m = sqrt(n);
-   if m^2 ~= n, error('N must be a perfect square.'), end
-   n(1) = m; n(2) = m;
-end
-
-T = tridiag(n(1), -1, 2, -1);
-T(1,2) = -2;
-T(n(1),n(1)-1) = -2;
-
-A = kron(T, eye(n(2))) + kron(eye(n(2)), T);
--- a/toolbox/ohess.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,43 +0,0 @@
-function H = ohess(x)
-%OHESS  Random, orthogonal upper Hessenberg matrix.
-%       H = OHESS(N) is an N-by-N real, random, orthogonal
-%       upper Hessenberg matrix.
-%       Alternatively, H = OHESS(X), where X is an arbitrary real
-%       N-vector (N > 1) constructs H non-randomly using the elements
-%       of X as parameters.
-%       In both cases H is constructed via a product of N-1 Givens rotations.
-
-%       Note: See Gragg (1986) for how to represent an N-by-N (complex)
-%       unitary Hessenberg matrix with positive subdiagonal elements in terms
-%       of 2N-1 real parameters (the Schur parametrization).
-%       This M-file handles the real case only and is intended simply as a
-%       convenient way to generate random or non-random orthogonal Hessenberg
-%       matrices.
-%
-%       Reference:
-%       W.B. Gragg, The QR algorithm for unitary Hessenberg matrices,
-%       J. Comp. Appl. Math., 16 (1986), pp. 1-8.
-
-if any(imag(x)), error('Parameter must be real.'), end
-
-n = max(size(x));
-
-if n == 1
-%  Handle scalar x.
-   n = x;
-   x = rand(n-1,1)*2*pi;
-   H = eye(n);
-   H(n,n) = sign(randn);
-else
-   H = eye(n);
-   H(n,n) = sign(x(n)) + (x(n)==0);   % Second term ensures H(n,n) nonzero.
-end
-
-for i=n:-1:2
-    % Apply Givens rotation through angle x(i-1).
-    theta = x(i-1);
-    c = cos(theta);
-    s = sin(theta);
-    H( [i-1 i], : ) = [ c*H(i-1,:)+s*H(i,:)
-                       -s*H(i-1,:)+c*H(i,:) ];
-end
--- a/toolbox/orthog.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,80 +0,0 @@
-function Q = orthog(n, k)
-%ORTHOG Orthogonal and nearly orthogonal matrices.
-%       Q = ORTHOG(N, K) selects the K'th type of matrix of order N.
-%       K > 0 for exactly orthogonal matrices, K < 0 for diagonal scalings of
-%       orthogonal matrices.
-%       Available types: (K = 1 is the default)
-%       K = 1:  Q(i,j) = SQRT(2/(n+1)) * SIN( i*j*PI/(n+1) )
-%               Symmetric eigenvector matrix for second difference matrix.
-%       K = 2:  Q(i,j) = 2/SQRT(2*n+1) * SIN( 2*i*j*PI/(2*n+1) )
-%               Symmetric.
-%       K = 3:  Q(r,s) = EXP(2*PI*i*(r-1)*(s-1)/n) / SQRT(n)  (i=SQRT(-1))
-%               Unitary, the Fourier matrix.  Q^4 is the identity.
-%               This is essentially the same matrix as FFT(EYE(N))/SQRT(N)!
-%       K = 4:  Helmert matrix: a permutation of a lower Hessenberg matrix,
-%               whose first row is ONES(1:N)/SQRT(N).
-%       K = 5:  Q(i,j) = SIN( 2*PI*(i-1)*(j-1)/n ) + COS( 2*PI*(i-1)*(j-1)/n ).
-%               Symmetric matrix arising in the Hartley transform.
-%       K = -1: Q(i,j) = COS( (i-1)*(j-1)*PI/(n-1) )
-%               Chebyshev Vandermonde-like matrix, based on extrema of T(n-1).
-%       K = -2: Q(i,j) = COS( (i-1)*(j-1/2)*PI/n )
-%               Chebyshev Vandermonde-like matrix, based on zeros of T(n).
-
-%       References:
-%       N.J. Higham and D.J. Higham, Large growth factors in Gaussian
-%            elimination with pivoting, SIAM J. Matrix Analysis and  Appl.,
-%            10 (1989), pp. 155-164.
-%       P. Morton, On the eigenvectors of Schur's matrix, J. Number Theory,
-%            12 (1980), pp. 122-127. (Re. ORTHOG(N, 3))
-%       H.O. Lancaster, The Helmert Matrices, Amer. Math. Monthly, 72 (1965),
-%            pp. 4-12.
-%       D. Bini and P. Favati, On a matrix algebra related to the discrete
-%            Hartley transform, SIAM J. Matrix Anal. Appl., 14 (1993),
-%            pp. 500-507.
-
-if nargin == 1, k = 1; end
-
-if k == 1
-                                       % E'vectors second difference matrix
-   m = (1:n)'*(1:n) * (pi/(n+1));
-   Q = sin(m) * sqrt(2/(n+1));
-
-elseif k == 2
-
-   m = (1:n)'*(1:n) * (2*pi/(2*n+1));
-   Q = sin(m) * (2/sqrt(2*n+1));
-
-elseif k == 3                          %  Vandermonde based on roots of unity
-
-   m = 0:n-1;
-   Q = exp(m'*m*2*pi*sqrt(-1)/n) / sqrt(n);
-
-elseif k == 4                          %  Helmert matrix
-
-   Q = tril(ones(n));
-   Q(1,2:n) = ones(1,n-1);
-   for i=2:n
-       Q(i,i) = -(i-1);
-   end
-   Q = diag( sqrt( [n 1:n-1] .* [1:n] ) ) \ Q;
-
-elseif k == 5                          %  Hartley matrix
-
-   m = (0:n-1)'*(0:n-1) * (2*pi/n);
-   Q = (cos(m) + sin(m))/sqrt(n);
-
-elseif k == -1
-                                       %  extrema of T(n-1)
-   m = (0:n-1)'*(0:n-1) * (pi/(n-1));
-   Q = cos(m);
-
-elseif k == -2
-                                       % zeros of T(n)
-   m = (0:n-1)'*(.5:n-.5) * (pi/n);
-   Q = cos(m);
-
-else
-
-   error('Illegal value for second parameter.')
-
-end
--- a/toolbox/parter.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,18 +0,0 @@
-function A = parter(n)
-%PARTER    Parter matrix - a Toeplitz matrix with singular values near PI.
-%          PARTER(N) is the matrix with (i,j) element 1/(i-j+0.5).
-%          It is a Cauchy matrix and a Toeplitz matrix.
-
-%          At the Second SIAM Conference on Linear Algebra, Raleigh, N.C.,
-%          1985, Cleve Moler noted that most of the singular values of
-%          PARTER(N) are very close to PI.  An explanation of the phenomenon
-%          was given by Parter; see also the paper by Tyrtyshnikov.
-%
-%          References:
-%          The MathWorks Newsletter, Volume 1, Issue 1, March 1986, page 2.
-%          S.V. Parter, On the distribution of the singular values of Toeplitz
-%               matrices, Linear Algebra and Appl., 80 (1986), pp. 115-130.
-%          E.E. Tyrtyshnikov, Cauchy-Toeplitz matrices and some applications,
-%               Linear Algebra and Appl., 149 (1991), pp. 1-18.
-
-A = cauchy( (1:n)+0.5, -(1:n) );
--- a/toolbox/pascal.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,58 +0,0 @@
-function P = pascal(n, k)
-%PASCAL  Pascal matrix.
-%        P = PASCAL(N) is the Pascal matrix of order N: a symmetric positive
-%        definite matrix with integer entries taken from Pascal's
-%        triangle.
-%        The Pascal matrix is totally positive and its inverse has
-%        integer entries.  Its eigenvalues occur in reciprocal pairs.
-%        COND(P) is approximately 16^N/(N*PI) for large N.
-%        PASCAL(N,1) is the lower triangular Cholesky factor (up to signs
-%        of columns) of the Pascal matrix.   It is involutary (is its own
-%        inverse).
-%        PASCAL(N,2) is a transposed and permuted version of PASCAL(N,1)
-%        which is a cube root of the identity.
-
-%        References:
-%        R. Brawer and M. Pirovino, The linear algebra of the Pascal matrix,
-%           Linear Algebra and Appl., 174 (1992), pp. 13-23 (this paper
-%           gives a factorization of L = PASCAL(N,1) and a formula for the
-%           elements of L^k).
-%        N.J. Higham, Accuracy and Stability of Numerical Algorithms,
-%           Society for Industrial and Applied Mathematics, Philadelphia, PA,
-%           USA, 1996; sec. 26.4.
-%        S. Karlin, Total Positivity, Volume 1, Stanford University Press,
-%           1968.  (Page 137: shows i+j-1 choose j is TP (i,j=0,1,...).
-%                   PASCAL(N) is a submatrix of this matrix.)
-%        M. Newman and J. Todd, The evaluation of matrix inversion programs,
-%           J. Soc. Indust. Appl. Math., 6(4):466--476, 1958.
-%        H. Rutishauser, On test matrices, Programmation en Mathematiques
-%           Numeriques, Editions Centre Nat. Recherche Sci., Paris, 165,
-%           1966, pp. 349-365.  (Gives an integral formula for the
-%           elements of PASCAL(N).)
-%        J. Todd, Basic Numerical Mathematics, Vol. 2: Numerical Algebra,
-%           Birkhauser, Basel, and Academic Press, New York, 1977, p. 172.
-%        H.W. Turnbull, The Theory of Determinants, Matrices, and Invariants,
-%           Blackie, London and Glasgow, 1929.  (PASCAL(N,2) on page 332.)
-
-if nargin == 1, k = 0; end
-
-P = diag( (-1).^[0:n-1] );
-P(:, 1) = ones(n,1);
-
-%  Generate the Pascal Cholesky factor (up to signs).
-for j=2:n-1
-    for i=j+1:n
-        P(i,j) = P(i-1,j) - P(i-1,j-1);
-    end
-end
-
-if k == 0
-
-   P = P*P';
-
-elseif k == 2
-
-   P = rot90(P,3);
-   if n/2 == round(n/2), P = -P; end
-
-end
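
A brief check of the two special factors described in the help text, assuming the pascal.m above is the version being called; the residuals should be zero since only small integers are involved:

    n = 6;
    L = pascal(n, 1);                  % lower triangular, involutory factor
    S = pascal(n, 2);                  % claimed cube root of the identity
    disp(norm(L*L - eye(n), 1))
    disp(norm(S*S*S - eye(n), 1))
    disp(norm(L*L' - pascal(n), 1))    % L is a (signed) Cholesky factor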
--- a/toolbox/pdtoep.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,29 +0,0 @@
-function T = pdtoep(n, m, w, theta)
-%PDTOEP   Symmetric positive definite Toeplitz matrix.
-%         PDTOEP(N, M, W, THETA) is an N-by-N symmetric positive (semi-)
-%         definite (SPD) Toeplitz matrix, comprised of the sum of M rank 2
-%         (or, for certain THETA, rank 1) SPD Toeplitz matrices.
-%         Specifically,
-%                 T = W(1)*T(THETA(1)) + ... + W(M)*T(THETA(M)),
-%         where T(THETA(k)) has (i,j) element COS(2*PI*THETA(k)*(i-j)).
-%         Defaults: M = N, W = RAND(M,1), THETA = RAND(M,1).
-
-%         Reference:
-%         G. Cybenko and C.F. Van Loan, Computing the minimum eigenvalue of
-%         a symmetric positive definite Toeplitz matrix, SIAM J. Sci. Stat.
-%         Comput., 7 (1986), pp. 123-131.
-
-if nargin < 2, m = n; end
-if nargin < 3, w = rand(m,1); end
-if nargin < 4, theta = rand(m,1); end
-
-if max(size(w)) ~= m | max(size(theta)) ~= m
-   error('Arguments W and THETA must be vectors of length M.')
-end
-
-T = zeros(n);
-E = 2*pi*( (1:n)'*ones(1,n) - ones(n,1)*(1:n) );
-
-for i=1:m
-    T = T + w(i) * cos( theta(i)*E );
-end
--- a/toolbox/pei.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,14 +0,0 @@
-function P = pei(n, alpha)
-%PEI    Pei matrix.
-%       PEI(N, ALPHA), where ALPHA is a scalar, is the symmetric matrix
-%       ALPHA*EYE(N) + ONES(N).
-%       If ALPHA is omitted then ALPHA = 1 is used.
-%       The matrix is singular for ALPHA = 0, -N.
-
-%       Reference:
-%       M.L. Pei, A test matrix for inversion procedures,
-%       Comm. ACM, 5 (1962), p. 508.
-
-if nargin == 1, alpha = 1; end
-
-P = alpha*eye(n) + ones(n);
--- a/toolbox/pentoep.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,26 +0,0 @@
-function P = pentoep(n, a, b, c, d, e)
-%PENTOEP   Pentadiagonal Toeplitz matrix (sparse).
-%          P = PENTOEP(N, A, B, C, D, E) is the N-by-N pentadiagonal
-%          Toeplitz matrix with diagonals composed of the numbers
-%          A =: P(3,1), B =: P(2,1), C =: P(1,1), D =: P(1,2), E =: P(1,3).
-%          Default: (A,B,C,D,E) = (1,-10,0,10,1) (a matrix of Rutishauser).
-%                    This matrix has eigenvalues lying approximately on
-%                    the line segment 2*cos(2*t) + 20*i*sin(t).
-%
-%          Interesting plots are
-%          PS(FULL(PENTOEP(32,0,1,0,0,1/4)))  - `triangle'
-%          PS(FULL(PENTOEP(32,0,1/2,0,0,1)))  - `propeller'
-%          PS(FULL(PENTOEP(32,0,1/2,1,1,1)))  - `fish'
-
-%          References:
-%          R.M. Beam and R.F. Warming, The asymptotic spectra of
-%             banded Toeplitz and quasi-Toeplitz matrices, SIAM J. Sci.
-%             Comput. 14 (4), 1993, pp. 971-1006.
-%          H. Rutishauser, On test matrices, Programmation en Mathematiques
-%             Numeriques, Editions Centre Nat. Recherche Sci., Paris, 165,
-%             1966, pp. 349-365.
-
-if nargin == 1, a = 1; b = -10; c = 0; d = 10; e = 1; end
-
-P = spdiags([ a*ones(n,1) b*ones(n,1) c*ones(n,1) d*ones(n,1) ...
-              e*ones(n,1) ], -2:2, n, n);
--- a/toolbox/pnorm.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,102 +0,0 @@
-function [est, x, k] = pnorm(A, p, tol, noprint)
-%PNORM   Estimate of matrix p-norm (1 <= p <= inf).
-%        [EST, x, k] = PNORM(A, p, TOL) estimates the Holder p-norm of a
-%        matrix A, using the p-norm power method with a specially
-%        chosen starting vector.
-%        TOL is a relative convergence tolerance (default 1E-4).
-%        Returned are the norm estimate EST (which is a lower bound for the
-%        exact p-norm), the corresponding approximate maximizing vector x,
-%        and the number of power method iterations k.
-%        A nonzero fourth argument causes trace output to the screen.
-%        If A is a vector, this routine simply returns NORM(A, p).
-%
-%        See also NORM, NORMEST.
-
-%        Note: The estimate is exact for p = 1, but is not always exact for
-%        p = 2 or p = inf.  Code could be added to treat p = 2 and p = inf
-%        separately.
-%
-%        Calls DUAL and SEQA.
-%
-%        Reference:
-%        N.J. Higham, Estimating the matrix p-norm,
-%        Numer. Math., 62 (1992), pp. 539-555.
-
-if nargin < 2, error('Must specify norm via second parameter.'), end
-[m,n] = size(A);
-if min(m,n) == 1, est = norm(A,p); return, end
-
-if nargin < 4, noprint = 0; end
-if nargin < 3, tol = 1e-4; end
-
-% Stage I.  Use Algorithm OSE to get starting vector x for power method.
-% Form y = B*x, at each stage choosing x(k) = c and scaling previous
-% x(k+1:n) by s, where norm([c s],p)=1.
-
-sm = 9;  % Number of samples.
-y = zeros(m,1); x = zeros(n,1);
-
-for k=1:n
-
-    if k == 1
-       c = 1; s = 0;
-    else
-       W = [A(:,k) y];
-
-       if p == 2   % Special case.  Solve exactly for 2-norm.   
-          [U,S,V] = svd(full(W));
-          c = V(1,1); s = V(2,1);
-
-       else
-
-          fopt = 0;
-          for th=seqa(0,pi,sm)
-              c1 = cos(th); s1 = sin(th);
-              nrm = norm([c1 s1],p);
-              c1 = c1/nrm; s1 = s1/nrm;   % [c1 s1] has unit p-norm.
-              f = norm( W*[c1 s1]', p );
-              if f > fopt
-                 fopt = f;
-                 c = c1; s = s1;
-              end
-          end
-
-       end
-    end
-
-    x(k) = c;
-    y = x(k)*A(:,k) + s*y;
-    if k > 1, x(1:k-1) = s*x(1:k-1); end
-
-end
-
-est = norm(y,p);
-if noprint, fprintf('Alg OSE: %9.4e\n', est), end
-
-% Stage II.  Apply Algorithm PM (the power method).
-
-q = dual(p);
-k = 1;
-
-while 1
-
-    y = A*x;
-    est_old = est;
-    est = norm(y,p);
-
-    z = A' * dual(y,p);
-
-    if noprint
-        fprintf('%2.0f: norm(y) = %9.4e,  norm(z) = %9.4e', ...
-                 k, norm(y,p), norm(z,q))
-        fprintf('  rel_incr(est) = %9.4e\n', (est-est_old)/est)
-    end
-
-    if ( norm(z,q) <= z'*x | abs(est-est_old)/est <= tol ) & k > 1
-       return
-    end
-
-    x = dual(z,q);
-    k = k + 1;
-
-end
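
A usage sketch, assuming pnorm.m and the helpers it calls (dual.m, seqa.m, also deleted in this changeset) are all still on the path; the p = 1 estimate is exact, so it can be compared with NORM:

    A = randn(10, 8);
    est1 = pnorm(A, 1);                 % exact for p = 1
    disp([est1  norm(A, 1)])            % should agree
    est3 = pnorm(A, 3);                 % lower bound on the Holder 3-norm
    fprintf('||A||_3 >= %g (power-method lower bound)\n', est3)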
--- a/toolbox/poisson.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,14 +0,0 @@
-function A = poisson(n)
-%POISSON   Block tridiagonal matrix from Poisson's equation (sparse).
-%          POISSON(N) is the block tridiagonal matrix of order N^2
-%          resulting from discretizing Poisson's equation with the
-%          5-point operator on an N-by-N mesh.
-
-%          Reference:
-%          G.H. Golub and C.F. Van Loan, Matrix Computations, second edition,
-%          Johns Hopkins University Press, Baltimore, Maryland, 1989
-%          (Section 4.5.4).
-
-S = tridiag(n,-1,2,-1);
-I = speye(n);
-A = kron(I,S) + kron(S,I);
--- a/toolbox/poldec.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,28 +0,0 @@
-function [U, H] = poldec(A)
-%POLDEC   Polar decomposition.
-%         [U, H] = POLDEC(A) computes a matrix U of the same dimension
-%         (m-by-n) as A, and a Hermitian positive semi-definite matrix H,
-%         such that A = U*H.
-%         U has orthonormal columns if m >= n, and orthonormal rows if m <= n.
-%         U and H are computed via an SVD of A.
-%         U is a nearest unitary matrix to A in both the 2-norm and the
-%         Frobenius norm.
-
-%         Reference:
-%         N.J. Higham, Computing the polar decomposition---with applications,
-%         SIAM J. Sci. Stat. Comput., 7(4):1160--1174, 1986.
-%
-%         (The name `polar' is reserved for a graphics routine.)
-
-[m, n] = size(A);
-
-[P, S, Q] = svd(A, 0);  % Economy size.
-if m < n                % SVD(A,0) economizes only for m >= n; trim for m < n.
-   S = S(:, 1:m);
-   Q = Q(:, 1:m);
-end
-U = P*Q';
-if nargout == 2
-   H = Q*S*Q';
-   H = (H + H')/2;      % Force Hermitian by taking nearest Hermitian matrix.
-end
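
A minimal check of the factorization, assuming poldec.m is on the path; here m > n, so U should have orthonormal columns:

    A = randn(5, 3);
    [U, H] = poldec(A);
    disp(norm(A - U*H, 1))            % A = U*H up to roundoff
    disp(norm(U'*U - eye(3), 1))      % orthonormal columns
    disp(min(eig(H)))                 % H positive semi-definite (>= -EPS)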
--- a/toolbox/prolate.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,21 +0,0 @@
-function A = prolate(n, w)
-%PROLATE   Prolate matrix - symmetric, ill-conditioned Toeplitz matrix.
-%          A = PROLATE(N, W) is the N-by-N prolate matrix with parameter W.
-%          It is a symmetric Toeplitz matrix.
-%          If 0 < W < 0.5 then
-%             - A is positive definite
-%             - the eigenvalues of A are distinct, lie in (0, 1), and
-%               tend to cluster around 0 and 1.
-%          W defaults to 0.25.
-
-%          Reference:
-%          J.M. Varah. The Prolate matrix. Linear Algebra and Appl.,
-%          187:269--278, 1993.
-
-if nargin == 1, w = 0.25; end
-
-a = zeros(n,1);
-a(1) = 2*w;
-a(2:n) = sin( 2*pi*w*(1:n-1) ) ./ ( pi*(1:n-1) );
-
-A = toeplitz(a);
--- a/toolbox/ps.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,94 +0,0 @@
-function y = ps(A, m, tol, rl, marksize)
-%PS     Dot plot of a pseudospectrum.
-%       PS(A, M, TOL, RL) plots an approximation to a pseudospectrum
-%       of the square matrix A, using M random perturbations of size TOL.
-%       M defaults to a SIZE(A)-dependent value and TOL to 1E-3.
-%       RL defines the type of perturbation:
-%         RL =  0 (default): absolute complex perturbations of 2-norm TOL.
-%         RL =  1:           absolute real perturbations of 2-norm TOL.
-%         RL = -1:           componentwise real perturbations of size TOL.
-%       The eigenvalues of A are plotted as crosses `x'.
-%       PS(A, M, TOL, RL, MARKSIZE) uses the specified marker size instead
-%       of a size that depends on the figure size, the matrix order, and M.
-%       If MARKSIZE < 0, the plot is suppressed and the plot data is returned
-%       as an output argument.
-%       PS(A, 0) plots just the eigenvalues of A.
-
-%       For a given TOL, the pseudospectrum of A is the set of
-%       pseudo-eigenvalues of A, that is, the set
-%       { e : e is an eigenvalue of A+E, for some E with NORM(E) <= TOL }.
-%
-%       Reference:
-%       L.N. Trefethen, Pseudospectra of matrices, in D.F. Griffiths and
-%            G.A. Watson, eds, Numerical Analysis 1991, Proceedings of the 14th
-%            Dundee Conference, vol. 260, Pitman Research Notes in Mathematics,
-%            Longman Scientific and Technical, Essex, UK, 1992, pp. 234-266.
-
-if diff(size(A)), error('Matrix must be square.'), end
-n = max(size(A));
-
-if nargin < 5, marksize = 0; end
-if nargin < 4, rl = 0; end
-if nargin < 3, tol = 1e-3; end
-if nargin < 2, m = max(1, round( 25*exp(-0.047*n) )); end
-
-if m == 0
-   e = eig(A);
-   ax = cpltaxes(e);
-   plot(real(e), imag(e), 'xg')
-   axis(ax);
-   axis('square');
-   return
-end
-
-x = zeros(m*n,1);
-i = sqrt(-1);
-
-for j = 1:m
-   if rl == -1     % Componentwise.
-      dA = -ones(n) + 2*rand(n);   % Uniform random numbers on [-1,1].
-      dA = tol * A .* dA;
-   else
-      if rl == 0   % Complex absolute.
-         dA = randn(n) + i*randn(n);
-      else         % Real absolute.
-         dA = randn(n);
-      end
-      dA = tol/norm(dA)*dA;
-   end
-   e = eig(A + dA);
-   x((j-1)*n+1:j*n) = e;
-end
-
-if marksize >= 0
-
-   ax = cpltaxes(x);
-   h = plot(real(x),imag(x),'.');
-   axis(ax);
-   axis('square');
-
-   % Next block adapted from SPY.M.
-   if marksize == 0
-      units = get(gca,'units');
-      set(gca,'units','points');
-      pos = get(gca,'position');
-      nps = 2.4*sqrt(n*m);  % Factor based on number of pseudo-ei'vals plotted.
-      myguess = round(3*min(pos(3:4))/nps);
-%      [nps myguess]
-      marksize = max(1,myguess);
-      set(gca,'units',units);
-   end
-   set(h,'markersize',marksize);
-%   set(h,'linemarkersize',marksize);
-
-   hold on
-   e = eig(A);
-   plot(real(e),imag(e),'xw');
-   set(h,'markersize',marksize);
-   hold off
-
-else
-
-  y = x;
-
-end
--- a/toolbox/pscont.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,104 +0,0 @@
-function [x, y, z, m] = pscont(A, k, npts, ax, levels)
-%PSCONT   Contours and colour pictures of pseudospectra.
-%         PSCONT(A, K, NPTS, AX, LEVELS) plots LOG10(1/NORM(R(z))),
-%         where R(z) = INV(z*I-A) is the resolvent of the square matrix A,
-%         over an NPTS-by-NPTS grid.
-%         NPTS defaults to a SIZE(A)-dependent value.
-%         The limits are AX(1) and AX(2) on the x-axis and
-%                        AX(3) and AX(4) on the y-axis.
-%         If AX is omitted, suitable limits are guessed based on the
-%         eigenvalues of A.
-%         The eigenvalues of A are plotted as crosses `x'.
-%         K determines the type of plot:
-%             K = 0 (default) PCOLOR and CONTOUR
-%             K = 1           PCOLOR only
-%             K = 2           SURFC (SURF and CONTOUR)
-%             K = 3           SURF only
-%             K = 4           CONTOUR only
-%         The contours levels are specified by the vector LEVELS, which
-%         defaults to -10:-1 (recall we are plotting log10 of the data).
-%         Thus, by default, the contour lines trace out the boundaries of
-%         the epsilon pseudospectra for epsilon = 1e-10, ..., 1e-1.
-%         [X, Y, Z, NPTS] = PSCONT(A, ...) returns the plot data X, Y, Z
-%         and the value of NPTS used.
-%
-%         After calling this function you may want to change the
-%         color map (e.g., type COLORMAP HOT - see HELP COLOR) and the
-%         shading (e.g., type SHADING INTERP - see HELP SHADING).
-%         For an explanation of the term `pseudospectra' see PS.M.
-%         When A is real and the grid is symmetric about the x-axis, this
-%         routine exploits symmetry to halve the computational work.
-
-%         Colour pseudospectral pictures of this type are referred to as
-%         `spectral portraits' by Godunov, Kostin, and colleagues.
-%         References:
-%         V. I. Kostin, Linear algebra algorithms with guaranteed accuracy,
-%            Technical Report TR/PA/93/05, CERFACS, Toulouse, France, 1993.
-%         L.N. Trefethen, Pseudospectra of matrices, in D.F. Griffiths and
-%            G.A. Watson, eds, Numerical Analysis 1991, Proceedings of the 14th
-%            Dundee Conference, vol. 260, Pitman Research Notes in Mathematics,
-%            Longman Scientific and Technical, Essex, UK, 1992, pp. 234-266.
-
-Areal = ~norm(imag(A),1);
-
-if nargin < 5, levels = -10:-1; end
-e = eig(A);
-if nargin < 4
-   ax = cpltaxes(e);
-   if Areal, ax(3) = -ax(4); end  % Make sure region symmetric about x-axis.
-end
-n = max(size(A));
-if nargin < 3, npts = round( min(max(5, sqrt(20^2*10^3/n^3) ), 30)); end
-if nargin < 2, k = 0; end
-
-nptsx = npts; nptsy = npts;
-Ysymmetry = (Areal & ax(3) == -ax(4));
-
-x = seqa(ax(1), ax(2), npts);
-y = seqa(ax(3), ax(4), npts);
-if Ysymmetry                    % Exploit symmetry about x-axis.
-   nptsy = ceil(npts/2);
-   y1 = y;
-   y = y(1:nptsy);
-end
-
-[xx, yy] = meshgrid(x,y);
-z = xx + sqrt(-1)*yy;
-I = eye(n);
-Smin = zeros(nptsy, nptsx);
-
-for j=1:nptsx
-    for i=1:nptsy
-        Smin(i,j) = min( svd( z(i,j)*I-A ) );
-    end
-end
-
-z = log10( Smin + eps );
-if Ysymmetry
-   z = [z; z(nptsy-rem(npts,2):-1:1,:)];
-   y = y1;
-end
-
-if k == 0 | k == 1
-   pcolor(x, y, z); hold on
-elseif k == 2
-   surfc(x, y, z); hold on
-elseif k == 3
-   surf(x, y, z); hold on
-end
-
-if k == 0 | k == 4
-   contour(x, y, z, levels); hold on
-end
-
-if k ~= 2 & k ~= 3
-   if k == 0 | k == 1
-      s = 'k';   % Black.
-   else
-      s = 'w';   % White.
-   end
-   plot(real(e),imag(e),['x' s]);
-end
-
-axis('square');
-hold off
--- a/toolbox/qmult.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,47 +0,0 @@
-function B = qmult(A)
-%QMULT  Pre-multiply by random orthogonal matrix.
-%       QMULT(A) is Q*A where Q is a random real orthogonal matrix from
-%       the Haar distribution, of dimension the number of rows in A.
-%       Special case: if A is a scalar then QMULT(A) is the same as
-%                     QMULT(EYE(A)).
-
-%       Called by RANDSVD.
-%
-%       Reference:
-%       G.W. Stewart, The efficient generation of random
-%       orthogonal matrices with an application to condition estimators,
-%       SIAM J. Numer. Anal., 17 (1980), 403-409.
-
-[n, m] = size(A);
-
-%  Handle scalar A.
-if max(n,m) == 1
-   n = A;
-   A = eye(n);
-end
-
-d = zeros(n);
-
-for k = n-1:-1:1
-
-    % Generate random Householder transformation.
-    x = randn(n-k+1,1);
-    s = norm(x);
-    sgn = sign(x(1)) + (x(1)==0);    % Modification for sign(0)=1.
-    s = sgn*s;
-    d(k) = -sgn;
-    x(1) = x(1) + s;
-    beta = s*x(1);
-
-    % Apply the transformation to A.
-    y = x'*A(k:n,:);
-    A(k:n,:) = A(k:n,:) - x*(y/beta);
-
-end
-
-% Tidy up signs.
-for i=1:n-1
-    A(i,:) = d(i)*A(i,:);
-end
-A(n,:) = A(n,:)*sign(randn);
-B = A;
--- a/toolbox/rando.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,21 +0,0 @@
-function A = rando(n, k)
-%RANDO   Random matrix with elements -1, 0 or 1.
-%        A = RANDO(N, K) is a random N-by-N matrix with elements from
-%        one of the following discrete distributions (default K = 1):
-%          K = 1:  A(i,j) =  0 or 1    with equal probability,
-%          K = 2:  A(i,j) = -1 or 1    with equal probability,
-%          K = 3:  A(i,j) = -1, 0 or 1 with equal probability.
-%        N may be a 2-vector, in which case the matrix is N(1)-by-N(2).
-
-if nargin < 2, k = 1; end
-
-m = n(1);                    % Parameter n specifies dimension: m-by-n.
-n = n(max(size(n)));
-
-if k == 1                    % {0, 1}
-   A = floor( rand(m,n) + .5 );
-elseif k == 2                % {-1, 1}
-   A = 2*floor( rand(m,n) + .5 ) - 1;
-elseif k == 3                % {-1, 0, 1}
-   A = round( 3*rand(m,n) - 1.5 );
-end
--- a/toolbox/randsvd.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,104 +0,0 @@
-function A = randsvd(n, kappa, mode, kl, ku)
-%RANDSVD  Random matrix with pre-assigned singular values.
-%      RANDSVD(N, KAPPA, MODE, KL, KU) is a (banded) random matrix of order N
-%      with COND(A) = KAPPA and singular values from the distribution MODE.
-%      N may be a 2-vector, in which case the matrix is N(1)-by-N(2).
-%      Available types:
-%             MODE = 1:   one large singular value,
-%             MODE = 2:   one small singular value,
-%             MODE = 3:   geometrically distributed singular values,
-%             MODE = 4:   arithmetically distributed singular values,
-%             MODE = 5:   random singular values with unif. dist. logarithm.
-%      If omitted, MODE defaults to 3, and KAPPA defaults to SQRT(1/EPS).
-%      If MODE < 0 then the effect is as for ABS(MODE) except that in the
-%      original matrix of singular values the order of the diagonal entries
-%      is reversed: small to large instead of large to small.
-%      KL and KU are the lower and upper bandwidths respectively; if they
-%      are omitted a full matrix is produced.
-%      If only KL is present, KU defaults to KL.
-%      Special case: if KAPPA < 0 then a random full symmetric positive
-%                    definite matrix is produced with COND(A) = -KAPPA and
-%                    eigenvalues distributed according to MODE.
-%                    KL and KU, if present, are ignored.
-
-%      Reference:
-%      N.J. Higham, Accuracy and Stability of Numerical Algorithms,
-%         Society for Industrial and Applied Mathematics, Philadelphia, PA,
-%         USA, 1996; sec. 26.3.
-
-%      This routine is similar to the more comprehensive Fortran routine xLATMS
-%      in the following reference:
-%      J.W. Demmel and A. McKenney, A test matrix generation suite,
-%      LAPACK Working Note #9, Courant Institute of Mathematical Sciences,
-%      New York, 1989.
-
-if nargin < 2, kappa = sqrt(1/eps); end
-if nargin < 3, mode = 3; end
-if nargin < 4, kl = n-1; end  % Full matrix.
-if nargin < 5, ku = kl; end   % Same upper and lower bandwidths.
-
-if abs(kappa) < 1, error('Condition number must be at least 1!'), end
-posdef = 0; if kappa < 0, posdef = 1; kappa = -kappa; end  % Special case.
-
-p = min(n);
-m = n(1);              % Parameter n specifies dimension: m-by-n.
-n = n(max(size(n)));
-
-if p == 1              % Handle case where A is a vector.
-   A = randn(m, n);
-   A = A/norm(A);
-   return
-end
-
-j = abs(mode);
-
-% Set up vector sigma of singular values.
-if j == 3
-   factor = kappa^(-1/(p-1));
-   sigma = factor.^[0:p-1];
-
-elseif j == 4
-   sigma = ones(p,1) - (0:p-1)'/(p-1)*(1-1/kappa);
-
-elseif j == 5    % In this case cond(A) <= kappa.
-   sigma = exp( -rand(p,1)*log(kappa) );
-
-elseif j == 2
-   sigma = ones(p,1);
-   sigma(p) = 1/kappa;
-
-elseif j == 1
-   sigma = ones(p,1)./kappa;
-   sigma(1) = 1;
-end
-
-% Convert to diagonal matrix of singular values.
-if mode < 0
-  sigma = sigma(p:-1:1);
-end
-sigma = diag(sigma);
-
-if posdef                % Handle special case.
-   Q = qmult(p);
-   A = Q'*sigma*Q;
-   A = (A + A')/2;       % Ensure matrix is symmetric.
-   return
-end
-
-if m ~= n
-   sigma(m, n) = 0;      % Expand to m-by-n diagonal matrix.
-end
-
-if kl == 0 & ku == 0     % Diagonal matrix requested - nothing more to do.
-   A = sigma;
-   return
-end
-
-% A = U*sigma*V, where U, V are random orthogonal matrices from the
-% Haar distribution.
-A = qmult(sigma');
-A = qmult(A');
-
-if kl < n-1 | ku < n-1   % Bandwidth reduction.
-   A = bandred(A, kl, ku);
-end
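
A short sketch of two calling modes described in the help above (assuming randsvd.m, together with the qmult.m routine it calls, is on the path; sizes and KAPPA values are illustrative):

    A = randsvd(8, 1e3);        % default MODE = 3: geometric singular value distribution
    cond(A)                     % expected to be close to 1e3
    B = randsvd(8, -100);       % negative KAPPA: symmetric positive definite matrix
    [min(eig(B)) max(eig(B))]   % eigenvalue ratio expected to be about 100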
--- a/toolbox/redheff.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,28 +0,0 @@
-function A = redheff(n)
-%REDHEFF    A (0,1) matrix of Redheffer associated with the Riemann hypothesis.
-%           A = REDHEFF(N) is an N-by-N matrix of 0s and 1s defined by
-%               A(i,j) = 1 if j = 1 or if i divides j,
-%               A(i,j) = 0 otherwise.
-%           It has N - FLOOR(LOG2(N)) - 1 eigenvalues equal to 1,
-%           a real eigenvalue (the spectral radius) approximately SQRT(N),
-%           a negative eigenvalue approximately -SQRT(N),
-%           and the remaining eigenvalues are provably ``small''.
-%           Barrett and Jarvis (1992) conjecture that
-%             ``the small eigenvalues all lie inside the unit circle
-%               ABS(Z) = 1'',
-%           and a proof of this conjecture, together with a proof that some
-%           eigenvalue tends to zero as N tends to infinity, would yield
-%           a new proof of the prime number theorem.
-%           The Riemann hypothesis is true if and only if
-%           DET(A) = O( N^(1/2+epsilon) ) for every epsilon > 0.
-%           See also RIEMANN.
-
-%           Reference:
-%           W.W. Barrett and T.J. Jarvis,
-%           Spectral Properties of a Matrix of Redheffer,
-%           Linear Algebra and Appl., 162 (1992), pp. 673-683.
-
-i = (1:n)'*ones(1,n);
-A = ~rem(i',i);
-A(:,1) = ones(n,1);
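
An illustrative check of the spectral properties quoted in the help text (assuming redheff.m is available; N = 32 is arbitrary):

    n = 32;
    A = redheff(n);             % A(i,j) = 1 if j = 1 or i divides j
    e = eig(A);
    [max(real(e)) min(real(e))] % expected to be roughly sqrt(n) and -sqrt(n)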
--- a/toolbox/riemann.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,23 +0,0 @@
-function A = riemann(n)
-%RIEMANN    A matrix associated with the Riemann hypothesis.
-%           A = RIEMANN(N) is an N-by-N matrix for which the
-%           Riemann hypothesis is true if and only if
-%           DET(A) = O( N! N^(-1/2+epsilon) ) for every epsilon > 0
-%                                             (`!' denotes factorial).
-%           A = B(2:N+1, 2:N+1), where
-%           B(i,j) = i-1 if i divides j and -1 otherwise.
-%           Properties include, with M = N+1:
-%              Each eigenvalue E(i) satisfies ABS(E(i)) <= M - 1/M.
-%              i <= E(i) <= i+1 with at most M-SQRT(M) exceptions.
-%              All integers in the interval (M/3, M/2] are eigenvalues.
-%
-%           See also REDHEFF.
-
-%           Reference:
-%           F. Roesler, Riemann's hypothesis as an eigenvalue problem,
-%           Linear Algebra and Appl., 81 (1986), pp. 153-198.
-
-n = n+1;
-i = (2:n)'*ones(1,n-1);
-j = i';
-A = i .* (~rem(j,i)) - ones(n-1);
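
A quick sketch of the eigenvalue bound stated above (assuming riemann.m is on the path; N = 20 is arbitrary):

    n = 20; M = n + 1;
    A = riemann(n);
    max(abs(eig(A))) <= M - 1/M % bound from the help text; expected to return 1 (true)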
--- a/toolbox/rq.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,7 +0,0 @@
-function z = rq(A,x)
-%RQ      Rayleigh quotient.
-%        RQ(A,x) is the Rayleigh quotient of A and x, x'*A*x/(x'*x).
-
-%        Called by FV.
-
-z = x'*A*x/(x'*x);
--- a/toolbox/rschur.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,33 +0,0 @@
-function A = rschur(n, mu, x, y)
-%RSCHUR   An upper quasi-triangular matrix.
-%         A = RSCHUR(N, MU, X, Y) is an N-by-N matrix in real Schur form.
-%         All the diagonal blocks are 2-by-2 (except for the last one, if N
-%         is odd) and the k'th has the form [x(k) y(k); -y(k) x(k)].
-%         Thus the eigenvalues of A are x(k) +/- i*y(k).
-%         MU (default 1) controls the departure from normality.
-%         Defaults: X(k) = -k^2/10, Y(k) = -k, i.e., the eigenvalues
-%                   lie on the parabola x = -y^2/10.
-
-%         References:
-%         F. Chatelin, Eigenvalues of Matrices, John Wiley, Chichester, 1993;
-%            Section 4.2.7.
-%         F. Chatelin and V. Fraysse, Qualitative computing: Elements
-%            of a theory for finite precision computation, Lecture notes,
-%            CERFACS, Toulouse, France and THOMSON-CSF, Orsay, France,
-%            June 1993.
-
-m = floor(n/2)+1;
-alpha = 10; beta = 1;
-
-if nargin < 4, y = -(1:m)/beta; end
-if nargin < 3, x = -(1:m).^2/alpha; end
-if nargin < 2, mu = 1; end
-
-A = diag( mu*ones(n-1,1), 1 );
-for i=1:2:2*(m-1)
-    j = (i+1)/2;
-    A(i:i+1,i:i+1) = [x(j) y(j); -y(j) x(j)];
-end
-if 2*(m-1) ~= n   % Final 1-by-1 block is needed only when n is odd.
-   A(n,n) = x(m);
-end
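
For reference, a small odd-order example (assuming rschur.m is available) showing the documented eigenvalue placement:

    A = rschur(7);              % defaults: mu = 1, x(k) = -k^2/10, y(k) = -k
    eig(A)                      % expected: -k^2/10 +/- k*i for k = 1:3, plus the real eigenvalue -1.6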
--- a/toolbox/see.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,64 +0,0 @@
-function see(A, k)
-%SEE    Pictures of a matrix and its (pseudo-) inverse.
-%       SEE(A) displays MESH(A), MESH(PINV(A)), SEMILOGY(SVD(A),'o'),
-%       and (if A is square) FV(A) in four subplot windows.
-%       SEE(A, 1) plots an approximation to the pseudospectrum in the
-%       third window instead of the singular values.
-%       SEE(A, -1) plots only the eigenvalues in the fourth window,
-%       which is much quicker than plotting the field of values.
-%       If A is complex, only real parts are used for the mesh plots.
-%       If A is sparse, just SPY(A) is shown.
-
-if nargin < 2, k = 0; end
-[m, n] = size(A);
-square = (m == n);
-clf
-
-if issparse(A)
-
-   spy(A);
-
-else
-
-   B = pinv(A);
-   s = svd(A);
-   zs = (s == zeros(size(s)));
-   if any( zs )
-      s( zs ) = [];  % Remove zero singular values for semilogy plot.
-   end
-
-   subplot(2,2,1)
-   mesh(real(A)), axis('ij'),  drawnow
-   subplot(2,2,2)
-   mesh(real(B)), axis('ij'),  drawnow
-
-   if k <= 0
-      subplot(2,2,3)
-      semilogy(s, 'og')
-      hold on, semilogy(s, '-'), hold off, drawnow
-      if any(zs), subplot(2,2,3), title('Zero(s) omitted'), subplot(2,2,4), end
-   elseif k == 1
-      subplot(2,2,3)
-      ps(A);  drawnow
-   end
-
-   if square
-      if k == -1
-         subplot(2,2,4)
-         ps(A, 0);
-      else
-         subplot(2,2,4)
-         fv(A);
-      end
-   else
-      if k == 0
-         subplot(2,2,4)
-         axis off
-      else
-         clf
-      end
-      text(0,0,'Matrix not square.')
-   end
-   subplot;
-
-end
--- a/toolbox/seqa.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,13 +0,0 @@
-function y = seqa(a, b, n)
-%SEQA   Additive sequence.
-%       Y = SEQA(A, B, N) produces a row vector comprising N equally
-%       spaced numbers starting at A and finishing at B.
-%       If N is omitted then 10 points are generated.
-
-if nargin == 2, n = 10; end
-
-if n <= 1
-   y = a;
-   return
-end
-y = [a+(0:n-2)*(b-a)/(n-1), b];
--- a/toolbox/seqcheb.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,17 +0,0 @@
-function x = seqcheb(n, k)
-%SEQCHEB   Sequence of points related to Chebyshev polynomials.
-%          X = SEQCHEB(N, K) produces a row vector of length N.
-%          There are two choices:
-%              K = 1:  zeros of T_N,         (the default)
-%              K = 2:  extrema of T_{N-1},
-%          where T_k is the Chebyshev polynomial of degree k.
-
-if nargin == 1, k = 1; end
-
-if k == 1                     %  Zeros of T_n
-   i = 1:n; j = .5*ones(1,n);
-   x = cos( (i-j) * (pi/n) );
-elseif k == 2                 %  Extrema of T_(n-1)
-   i = 0:n-1;
-   x = cos( i * (pi/(n-1)) );
-end
--- a/toolbox/seqm.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,17 +0,0 @@
-function y = seqm(a, b, n)
-%SEQM   Multiplicative sequence.
-%       Y = SEQM(A, B, N) produces a row vector comprising N
-%       logarithmically equally spaced numbers, starting at A ~= 0
-%       and finishing at B ~= 0.
-%       If A*B < 0 and N > 2 then complex results are produced.
-%       If N is omitted then 10 points are generated.
-
-if nargin == 2, n = 10; end
-
-if n <= 1
-   y = a;
-   return
-end
-p = [0:n-2]/(n-1);
-r = (b/a).^p;
-y = [a*r, b];
--- a/toolbox/show.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,9 +0,0 @@
-function show(x)
-%SHOW   Display signs of matrix elements.
-%       SHOW(X) displays X in `FORMAT +' form, that is,
-%       with `+', `-' and  blank representing positive, negative
-%       and zero elements respectively.
-
-format +
-disp(x)
-format
--- a/toolbox/signm.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,21 +0,0 @@
-function [S, N] = signm(A)
-%SIGNM   Matrix sign decomposition.
-%        [S, N] = SIGNM(A) is the matrix sign decomposition A = S*N,
-%        computed via the Schur decomposition.
-%        S is the matrix sign function, sign(A).
-
-%        Reference:
-%        N.J. Higham, The matrix sign decomposition and its relation to the
-%        polar decomposition, Linear Algebra and Appl., 212/213:3-20, 1994.
-
-[Q, T] = schur(A);
-[Q, T] = rsf2csf(Q, T);
-S = Q * matsignt(T) * Q';
-
-% Only problem with Schur method is possible nonzero imaginary part when
-% A is real.  Next line takes care of that.
-if ~norm(imag(A),1), S = real(S); end
-
-if nargout == 2
-   N = S*A;
-end
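
A minimal check of the decomposition (assuming signm.m and the matsignt.m routine it calls are on the path; the random matrix is only an example, not a recommended test case):

    A = randn(6);               % real matrix; eigenvalues are off the imaginary axis almost surely
    [S, N] = signm(A);
    norm(S^2 - eye(6))          % S is involutory, so this should be small
    norm(S*N - A)               % the decomposition A = S*N, recovered to roundoff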
--- a/toolbox/skewpart.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,8 +0,0 @@
-function S = skewpart(A)
-%SKEWPART  Skew-symmetric (skew-Hermitian) part.
-%          SKEWPART(A) is the skew-symmetric (skew-Hermitian) part of A,
-%          (A - A')/2.
-%          It is the nearest skew-symmetric (skew-Hermitian) matrix to A in
-%          both the 2- and the Frobenius norms.
-
-S = (A - A')./2;
--- a/toolbox/smoke.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,22 +0,0 @@
-function A = smoke(n, k)
-%SMOKE     Smoke matrix - complex, with a `smoke ring' pseudospectrum.
-%          SMOKE(N) is an N-by-N matrix with 1s on the
-%          superdiagonal, 1 in the (N,1) position, and powers of
-%          roots of unity along the diagonal.
-%          SMOKE(N, 1) is the same except for a zero (N,1) element.
-%          The eigenvalues of SMOKE(N, 1) are the N'th roots of unity;
-%          those of SMOKE(N) are the N'th roots of unity times 2^(1/N).
-%
-%          Try PS(SMOKE(32)).  For SMOKE(N, 1) the pseudospectrum looks
-%          like a sausage folded back on itself.
-%          GERSH(SMOKE(N, 1)) is interesting.
-
-%          Reference:
-%          L. Reichel and L.N. Trefethen, Eigenvalues and pseudo-eigenvalues of
-%          Toeplitz matrices, Linear Algebra and Appl., 162-164:153-185, 1992.
-
-if nargin < 2, k = 0; end
-
-w = exp(2*pi*i/n);
-A = diag( [w.^(1:n-1) 1] ) + diag(ones(n-1,1),1);
-if k == 0, A(n,1) = 1; end
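
An illustrative check of the eigenvalue moduli described in the help (assuming smoke.m is available; N = 16 is arbitrary):

    n = 16;
    abs(eig(smoke(n)))'         % all moduli expected to be close to 2^(1/n)
    abs(eig(smoke(n, 1)))'      % all moduli expected to be close to 1 (roots of unity)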
--- a/toolbox/sparsify.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,24 +0,0 @@
-function A = sparsify(A, p)
-%SPARSIFY   Randomly sets matrix elements to zero.
-%           S = SPARSIFY(A, P) is A with elements randomly set to zero
-%           (S = S' if A is square and A = A', i.e. symmetry is preserved).
-%           Each element has probability P of being zeroed.
-%           Thus on average 100*P percent of the elements of A will be zeroed.
-%           Default: P = 0.25.
-
-if nargin < 2, p = 0.25; end
-if p<0 | p>1, error('Second parameter must be between 0 and 1 inclusive.'), end
-
-% Is A square and symmetric?
-symm = 0;
-if min(size(A)) == max(size(A))
-   if norm(A-A',1) == 0, symm = 1; end
-end
-
-if ~symm
-   A = A .* (rand(size(A)) > p);        % Unsymmetric case
-else
-   A = triu(A,1) .* (rand(size(A)) > p);  % Preserve symmetry
-   A = A + A';
-   A = A + diag( diag(A) .* (rand(size(diag(A))) > p) );
-end
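
A short sketch of the zeroing probability and the symmetry preservation (assuming sparsify.m is available; the 200-by-200 size is only to make the proportion visible):

    A = randn(200);
    S = sparsify(A, 0.25);
    nnz(S == 0)/numel(S)        % expected to be roughly 0.25
    B = A + A';                 % symmetric input
    T = sparsify(B, 0.25);
    norm(T - T', 1)             % symmetry is preserved, so this should be exactly 0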
--- a/toolbox/sub.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,17 +0,0 @@
-function S = sub(A, i, j)
-%SUB     Principal submatrix.
-%        SUB(A,i,j) is A(i:j,i:j).
-%        SUB(A,i)  is the leading principal submatrix of order i,
-%        A(1:i,1:i), if i>0, and the trailing principal submatrix
-%        of order ABS(i) if i<0.
-
-if nargin == 2
-   if i >= 0
-      S = A(1:i, 1:i);
-   else
-      n = min(size(A));
-      S = A(n+i+1:n, n+i+1:n);
-   end
-else
-   S = A(i:j, i:j);
-end
--- a/toolbox/symmpart.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,7 +0,0 @@
-function S = symmpart(A)
-%SYMMPART  Symmetric (Hermitian) part.
-%          SYMMPART(A) is the symmetric (Hermitian) part of A, (A + A')/2.
-%          It is the nearest symmetric (Hermitian) matrix to A in both the
-%          2- and the Frobenius norms.
-
-S = (A + A')./2;
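
Together with SKEWPART above, this gives the standard splitting of a matrix; a minimal check (assuming both routines are on the path):

    A = randn(5) + 1i*randn(5);
    norm(symmpart(A) + skewpart(A) - A)   % A = Hermitian part + skew-Hermitian part, to roundoff
    norm(symmpart(A) - symmpart(A)', 1)   % the Hermitian part equals its conjugate transpose; expected 0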
--- a/toolbox/tmtdemo.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,263 +0,0 @@
-clc
-format compact
-echo on
-
-%TMTDEMO       Demonstration of Test Matrix Toolbox.
-%              N. J. Higham.
-
-% The Test Matrix Toolbox contains test matrices, visualization routines,
-% and other miscellaneous routines.
-
-% The version of the toolbox is
-
-matrix(-1)
-echo on
-
-% For this demonstration you will need to view both the command window
-% and one figure window.
-% This demonstration emphasises graphics and shows only
-% some of the features of the toolbox.
-
-pause  % Press any key to continue after pauses.
-
-% A list of the available M-files is obtained by typing `matrix':
-
-matrix
-
-pause
-
-% The FV command plots the boundary of the field of values of a matrix
-% (the set of all Rayleigh quotients) and plots the eigenvalues as
-% crosses (`x').  Here are some examples:
-
-% The Grcar matrix is a Toeplitz matrix of the following form:
-
-grcar(5)
-
-% Here is the field of values of the 10-by-10 Grcar matrix:
-
-fv(grcar(10));
-title('fv(grcar(10))')
-
-pause
-
-% Next, we form a random orthogonal matrix and look at its field of values.
-% The boundary is the convex hull of the eigenvalues since A is normal.
-
-A = randsvd(10, 1);
-fv(A);
-title('randsvd(10, 1)')
-pause
-
-% The RANDSVD command generates random matrices with pre-assigned
-% condition number, and various singular value distributions:
-
-A = randsvd(6, 1e6, 3);  % Mode 3: geometrically distributed singular values.
-
-format short e
-svd(A)'
-cond(A)
-pause
-
-% The PS command plots an approximation to a pseudospectrum of A,
-% which is the set of complex numbers that are eigenvalues of some
-% perturbed matrix A + E, with the norm of E at most epsilon
-% (default: epsilon = 1E-3).
-% The eigenvalues of A are plotted as crosses (`x').
-% Here are some interesting PS plots.
-
-% First, we use the KAHAN matrix, a triangular matrix made up of sines and
-% cosines.  Here is an approximate pseudospectrum of the 10-by-10 matrix:
-
-ps(kahan(10),25);
-title('ps(kahan(10))')
-pause
-
-% Next, a different way of looking at pseudospectra, via norms of
-% the resolvent.  (The resolvent of A is INV(z*I-A), where z is a complex
-% variable).  PSCONT gives a color map with a superimposed contour
-% plot.  Here we specify a region of the complex plane in
-% which the 8-by-8 Kahan matrix is interesting to look at.
-
-pscont(kahan(8), 0, 20, [0.2 1.2 -0.5 0.5]);
-title('pscont(kahan(8))')
-pause
-
-% The TRIW matrix is upper triangular, made up of 1s and -1s:
-
-triw(4)
-
-% Here is a combined surface and contour plot of its resolvent.
-% Notice how the repeated eigenvalue 1 `sucks in' the resolvent.
-
-pscont(triw(11), 2, 15, [-2 2 -2 2]);
-title('pscont(triw(11))')
-pause
-
-% The next PSCONT plot is for the companion matrix of the characteristic
-% polynomial of the CHEBSPEC matrix:
-
-A = chebspec(8); C = compan(A);
-
-% The SHOW command shows the +/- pattern of the elements of a matrix, with
-% blanks for zero elements:
-
-show(C)
-
-pscont(C, 2, 20, [-.1 .1 -.1 .1]);
-title('pscont(compan(chebspec(8)))')
-pause
-
-% The following matrix has a pseudospectrum in the form of a limacon.
-
-n = 25; A = triw(n,1,2) - eye(n);
-sub(A, 6)               % Leading principal 6-by-6 submatrix of A.
-ps(A);
-pause
-
-% Here is the 8-by-8 Frank matrix.
-A = frank(8)
-
-% We can get a visual representation of the matrix using the SEE
-% command, which produces subplots with the following layout:
-%     /---------------------------------\
-%     | MESH(A)            MESH(INV(A)) |
-%     | SEMILOGY(SVD(A))   FV(A)        |
-%     \---------------------------------/
-% where FV is the field of values.
-
-see(A)
-
-pause
-
-% The Frank matrix is well-known for having ill-conditioned eigenvalues.
-% Here are the eigenvalues (in column 1) together with the corresponding
-% eigenvalue condition numbers (in column 2):
-
-format short e
-[V, D, c] = eigsens(A);
-[diag(D) c 1./diag(D)]
-
-% In the last column are shown the reciprocals of the eigenvalues.
-% Notice that if LAMBDA is an eigenvalue, so is 1/LAMBDA!
-
-pause
-
-% Matlab's MAGIC function produces magic squares:
-
-A = magic(5)
-
-% Using the toolbox routine PNORM we can estimate the matrix p-norm
-% for any value of p.
-
-[pnorm(A,1) pnorm(A,1.5) pnorm(A,2) pnorm(A,pi) pnorm(A,inf)]
-
-% As this example suggests, the p-norm of a magic square is
-% constant for all p!
-
-pause
-
-% GERSH plots Gershgorin disks.  Here are some interesting examples.
-
-gersh(lesp(12));
-title('gersh(lesp(12))')
-pause
-
-gersh(hanowa(10));
-title('gersh(hanowa(10))')
-pause
-
-gersh(ipjfact(6,1));
-title('gersh(ipjfact(6,1))')
-pause
-
-gersh(smoke(16,1));
-title('gersh(smoke(16,1))')
-pause
-
-% A Hadamard matrix has elements 1 or -1 and mutually orthogonal rows:
-
-show(hadamard(16))
-
-% A CONTOUR plot of this matrix is interesting:
-
-contour(hadamard(16))
-pause
-
-% There are a few sparse matrices in the toolbox.
-% WATHEN is a finite element matrix with random entries.
-
-spy(wathen(6,6));  % SPY plot of sparsity pattern.
-
-pause
-
-% GFPP generates matrices for which Gaussian elimination with partial
-% pivoting produces a large growth factor.
-
-gfpp(6)
-pause
-
-% Let's find the growth factor for partial pivoting and complete pivoting
-% for a bigger matrix:
-
-A = gfpp(20);
-[L, U] = lu(A);    % Partial pivoting.
-max(max(abs(U))) / max(max(abs(A)))
-
-[L, U, P, Q, rho] = gecp(A);  % Complete pivoting using toolbox routine GECP.
-rho
-% As expected, complete pivoting does not produce large growth here.
-pause
-
-% The toolbox function MATRIX allows the test matrices to be accessed
-% by number.  The following piece of code steps through all the
-% square matrices of arbitrary dimension, setting A to each 10-by-10
-% matrix in turn. It evaluate the 2-norm condition number and the
-% ratio of the largest to smallest eigenvalue (in absolute values).
-c = []; e = []; j = 1;
-for i=1:matrix(0)
-    A = full(matrix(i, 10));
-    if norm(skewpart(A),1)  % If not Hermitian...
-       c1 = cond(A);
-       eg = eig(A);
-       e1 = max(abs(eg)) / min(abs(eg));
-       % Filter out extremely ill-conditioned matrices.
-       if c1 <= 1e10, c(j) = c1; e(j) = e1; j = j + 1; end
-    end
-end
-
-% The following plots confirm that the condition number can be much
-% larger than the extremal eigenvalue ratio.
-echo off
-j = max(size(c));
-subplot(2,1,1)
-semilogy(1:j, c, 'x', 1:j, e, 'o'), hold on
-semilogy(1:j, c, '-', 1:j, e, '--'), hold off
-title('cond: x, eig_ratio: o')
-subplot(2,1,2)
-semilogy(1:j, c./e)
-title('cond/eig_ratio')
-echo on
-pause
-
-% Finally, here are three interesting pseudospectra based on pentadiagonal
-% Toeplitz matrices:
-
-A = full(pentoep(6,0,1/2,0,0,1))
-
-subplot(1,1,1)
-ps(full(pentoep(32,0,1/2,0,0,1)));            % Propeller
-title('ps(full(pentoep(32,0,1/2,0,0,1)))')
-pause
-
-ps(inv(full(pentoep(32,0,1,1,0,.25))));       % Man in the moon
-title('ps(inv(full(pentoep(32,0,1,1,0,.25))))')
-pause
-
-ps(full(pentoep(32,0,1/2,1,1,1)));            % Fish
-title('ps(full(pentoep(32,0,1/2,1,1,1)))')
-pause
-
-echo off
-clear A L U P Q V D
-format
--- a/toolbox/trap2tri.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,56 +0,0 @@
-function [Q, T] = trap2tri(L)
-%TRAP2TRI  Unitary reduction of trapezoidal matrix to triangular form.
-%          [Q, T] = TRAP2TRI(L), where L is an m-by-n lower trapezoidal
-%          matrix with m >= n, produces a unitary Q such that QL = [T; 0],
-%          where T is n-by-n and lower triangular.
-%          Q is a product of Householder transformations.
-
-%          Called by RANDSVD.
-%
-%          Reference:
-%          G.H. Golub and C.F. Van Loan, Matrix Computations, second edition,
-%          Johns Hopkins University Press, Baltimore, Maryland, 1989.
-%          P5.2.5, p. 220.
-
-[n, r] = size(L);
-
-if r > n  | norm(L-tril(L),1)
-   error('Matrix must be lower trapezoidal and m-by-n with m >= n.')
-end
-
-Q = eye(n);  % To hold product of H.T.s
-
-if r ~= n
-
-   % Reduce nxr L =   r  [L1]  to lower triangular form: QL = [T].
-   %                 n-r [L2]                                 [0]
-
-   for j=r:-1:1
-       % x is the vector to be reduced, which we overwrite with the H.T. vector.
-       x = L(j:n,j);
-       x(2:r-j+1) = zeros(r-j,1);  % These elements of the column are left unchanged.
-       s = norm(x)*(sign(x(1)) + (x(1)==0));    % Modification for sign(1)=1.
-
-       % Nothing to do if x is zero (or x=a*e_1, but we don't check for that).
-       if s ~= 0
-          x(1) = x(1) + s;
-          beta = s'*x(1);
-
-          %  Implicitly apply H.T. to pivot column.
-          % L(r+1:n,j) = zeros(n-r,1); % We throw these elts away at the end.
-          L(j,j) = -s;
-
-          % Apply H.T. to rest of matrix.
-          if j > 1
-             y = x'*L(j:n, 1:j-1);
-             L(j:n, 1:j-1) = L(j:n, 1:j-1) - x*(y/beta);
-          end
-
-          % Update H.T. product.
-          y = x'*Q(j:n,:);
-          Q(j:n,:) = Q(j:n,:) - x*(y/beta);
-       end
-   end
-end
-
-T = L(1:r,:);   % Rows r+1:n have been zeroed out.
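
An illustrative call (assuming trap2tri.m is available; the 6-by-4 size is arbitrary) verifying the two properties stated in the help:

    L = tril(randn(6, 4));              % lower trapezoidal with m = 6 >= n = 4
    [Q, T] = trap2tri(L);
    norm(Q*L - [T; zeros(2, 4)])        % Q*L = [T; 0], so this should be at roundoff level
    norm(Q'*Q - eye(6))                 % Q is unitary (here real orthogonal)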
--- a/toolbox/tridiag.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,41 +0,0 @@
-function T = tridiag(n, x, y, z)
-%TRIDIAG  Tridiagonal matrix (sparse).
-%         TRIDIAG(X, Y, Z) is the tridiagonal matrix with subdiagonal X,
-%         diagonal Y, and superdiagonal Z.
-%         X and Z must be vectors of dimension one less than Y.
-%         Alternatively TRIDIAG(N, C, D, E), where C, D, and E are all
-%         scalars, yields the Toeplitz tridiagonal matrix of order N
-%         with subdiagonal elements C, diagonal elements D, and superdiagonal
-%         elements E.   This matrix has eigenvalues (Todd 1977)
-%                  D + 2*SQRT(C*E)*COS(k*PI/(N+1)), k=1:N.
-%         TRIDIAG(N) is the same as TRIDIAG(N,-1,2,-1), which is
-%         a symmetric positive definite M-matrix (the negative of the
-%         second difference matrix).
-
-%         References:
-%         J. Todd, Basic Numerical Mathematics, Vol. 2: Numerical Algebra,
-%           Birkhauser, Basel, and Academic Press, New York, 1977, p. 155.
-%         D.E. Rutherford, Some continuant determinants arising in physics and
-%           chemistry---II, Proc. Royal Soc. Edin., 63, A (1952), pp. 232-241.
-
-if nargin == 1, x = -1; y = 2; z = -1; end
-if nargin == 3, z = y; y = x; x = n; end
-
-x = x(:); y = y(:); z = z(:);   % Force column vectors.
-
-if max( [ size(x) size(y) size(z) ] ) == 1
-   x = x*ones(n-1,1);
-   z = z*ones(n-1,1);
-   y = y*ones(n,1);
-else
-   [nx, m] = size(x);
-   [ny, m] = size(y);
-   [nz, m] = size(z);
-   if (ny - nx - 1) | (ny - nz -1)
-      error('Dimensions of vector arguments are incorrect.')
-   end
-end
-
-% T = diag(x, -1) + diag(y) + diag(z, 1);  % For non-sparse matrix.
-n = max(size(y));
-T = spdiags([ [x;0] y [0;z] ], -1:1, n, n);
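
A small check of the eigenvalue formula quoted above (assuming tridiag.m is on the path; the scalars are arbitrary):

    n = 8; c = 1; d = 4; e = 2;
    T = tridiag(n, c, d, e);            % Toeplitz tridiagonal, returned as a sparse matrix
    k = (1:n)';
    lambda = sort(eig(full(T)));
    norm(lambda - sort(d + 2*sqrt(c*e)*cos(k*pi/(n+1))))   % expected to be small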
--- a/toolbox/triw.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,41 +0,0 @@
-function t = triw(n, alpha, k)
-%TRIW   Upper triangular matrix discussed by Wilkinson and others.
-%       TRIW(N, ALPHA, K) is the upper triangular matrix with ones on
-%       the diagonal and ALPHAs on the first K >= 0 superdiagonals.
-%       N may be a 2-vector, in which case the matrix is N(1)-by-N(2) and
-%       upper trapezoidal.
-%       Defaults: ALPHA = -1,
-%                 K = N - 1     (full upper triangle).
-%       TRIW(N) is a matrix discussed by Kahan, Golub and Wilkinson.
-%
-%       Ostrowski (1954) shows that
-%         COND(TRIW(N,2)) = COT(PI/(4*N))^2,
-%       and for large ABS(ALPHA),
-%         COND(TRIW(N,ALPHA)) is approximately ABS(ALPHA)^N*SIN(PI/(4*N-2)).
-%
-%       Adding -2^(2-N) to the (N,1) element makes TRIW(N) singular,
-%       as does adding -2^(1-N) to all elements in the first column.
-
-%       References:
-%       G.H. Golub and J.H. Wilkinson, Ill-conditioned eigensystems and the
-%          computation of the Jordan canonical form, SIAM Review,
-%          18(4), 1976, pp. 578-619.
-%       W. Kahan, Numerical linear algebra, Canadian Math. Bulletin,
-%          9 (1966), pp. 757-801.
-%       A.M. Ostrowski, On the spectrum of a one-parametric family of
-%          matrices, J. Reine Angew. Math., 193 (3/4), 1954, pp. 143-160.
-%       J.H. Wilkinson, Singular-value decomposition---basic aspects,
-%          in D.A.H. Jacobs, ed., Numerical Software---Needs and Availability,
-%          Academic Press, London, 1978, pp. 109-135.
-
-m = n(1);              % Parameter n specifies dimension: m-by-n.
-n = n(max(size(n)));
-
-if nargin < 3, k = n-1; end
-if nargin < 2, alpha = -1; end
-
-if max(size(alpha)) ~= 1
-   error('Second argument must be a scalar.')
-end 
-
-t = tril( eye(m,n) + alpha*triu(ones(m,n), 1), k);
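
For illustration (assuming triw.m is available; N = 10 is arbitrary), the Ostrowski identity quoted above can be checked numerically:

    n = 10;
    cond(triw(n, 2))            % 2-norm condition number
    cot(pi/(4*n))^2             % should agree with the previous value to several digits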
--- a/toolbox/vand.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,29 +0,0 @@
-function V = vand(m, p)
-%VAND   Vandermonde matrix.
-%       V = VAND(P), where P is a vector, produces the (primal)
-%       Vandermonde matrix based on the points P, i.e. V(i,j) = P(j)^(i-1).
-%       VAND(M,P) is a rectangular version of VAND(P) with M rows.
-%       Special case: If P is a scalar then P equally spaced points on [0,1]
-%                     are used.
-
-%       Reference:
-%       N.J. Higham, Stability analysis of algorithms for solving
-%       confluent Vandermonde-like systems, SIAM J. Matrix Anal. Appl.,
-%       11 (1990), pp. 23-41.
-
-if nargin == 1, p = m; end
-n = max(size(p));
-
-%  Handle scalar p.
-if n == 1
-   n = p;
-   p = seqa(0,1,n);
-end
-
-if nargin == 1, m = n; end
-
-p = p(:).';                    % Ensure p is a row vector.
-V = ones(m,n);
-for i=2:m
-    V(i,:) = p.*V(i-1,:);
-end
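
A minimal sketch (assuming vand.m is on the path; the points are arbitrary small integers) relating this primal Vandermonde matrix to MATLAB's built-in VANDER, which uses the transposed, column-reversed convention:

    p = [2 3 5 7];
    V = vand(p);                        % V(i,j) = p(j)^(i-1)
    norm(V - fliplr(vander(p)).')       % expected to be zero for these exactly representable powers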
--- a/toolbox/wathen.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,54 +0,0 @@
-function A = wathen(nx, ny, k)
-%WATHEN  Wathen matrix - a finite element matrix (sparse, random entries).
-%        A = WATHEN(NX,NY) is a sparse random N-by-N finite element matrix
-%        where N = 3*NX*NY + 2*NX + 2*NY + 1.
-%        A is precisely the `consistent mass matrix' for a regular NX-by-NY
-%        grid of 8-node (serendipity) elements in 2 space dimensions.
-%        A is symmetric positive definite for any (positive) values of
-%        the `density', RHO(NX,NY), which is chosen randomly in this routine.
-%        In particular, if D = DIAG(DIAG(A)), then
-%              0.25 <= EIG(INV(D)*A) <= 4.5
-%        for any positive integers NX and NY and any densities RHO(NX,NY).
-%        This diagonally scaled matrix is returned by WATHEN(NX,NY,1).
-
-%        Reference:
-%        A.J. Wathen, Realistic eigenvalue bounds for the Galerkin
-%        mass matrix, IMA J. Numer. Anal., 7 (1987), pp. 449-457.
-
-if nargin < 2, error('Two dimensioning arguments must be specified.'), end
-if nargin < 3, k = 0; end
-
-e1 = [6 -6 2 -8;-6 32 -6 20;2 -6 6 -6;-8 20 -6 32];
-e2 = [3 -8 2 -6;-8 16 -8 20;2 -8 3 -8;-6 20 -8 16];
-e = [e1 e2; e2' e1]/45;
-n = 3*nx*ny+2*nx+2*ny+1;
-A = sparse(n,n);
-
-RHO = 100*rand(nx,ny);
-
- for j=1:ny
-     for i=1:nx
-
-      nn(1) = 3*j*nx+2*i+2*j+1;
-      nn(2) = nn(1)-1;
-      nn(3) = nn(2)-1;
-      nn(4) = (3*j-1)*nx+2*j+i-1;
-      nn(5) = 3*(j-1)*nx+2*i+2*j-3;
-      nn(6) = nn(5)+1;
-      nn(7) = nn(6)+1;
-      nn(8) = nn(4)+1;
-
-      em = e*RHO(i,j);
-
-         for krow=1:8
-             for kcol=1:8
-                 A(nn(krow),nn(kcol)) = A(nn(krow),nn(kcol))+em(krow,kcol);
-             end
-         end
-
-      end
-  end
-
-if k == 1
-   A = diag(diag(A)) \ A;
-end
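
An illustrative check of the diagonal-scaling eigenvalue bounds quoted above (assuming wathen.m is available; the 4-by-4 grid is arbitrary):

    A = wathen(4, 4);                   % sparse SPD matrix of order 3*16 + 8 + 8 + 1 = 65
    D = diag(diag(A));
    e = eig(full(D\A));
    [min(e) max(e)]                     % expected to lie within [0.25, 4.5]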
--- a/toolbox/wilk.m	Wed May 06 14:56:53 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,45 +0,0 @@
-function [A, b] = wilk(n)
-%WILK   Various specific matrices devised/discussed by Wilkinson.
-%       [A, b] = WILK(N) is the matrix or system of order N.
-%       N = 3: upper triangular system Ux=b illustrating inaccurate solution.
-%       N = 4: lower triangular system Lx=b, ill-conditioned.
-%       N = 5: HILB(6)(1:5,2:6)*1.8144.  Symmetric positive definite.
-%       N = 21: W21+, tridiagonal.   Eigenvalue problem.
-
-%       References:
-%       J.H. Wilkinson, Error analysis of direct methods of matrix inversion,
-%          J. Assoc. Comput. Mach., 8 (1961),  pp. 281-330.
-%       J.H. Wilkinson, Rounding Errors in Algebraic Processes, Notes on Applied
-%          Science No. 32, Her Majesty's Stationery Office, London, 1963.
-%       J.H. Wilkinson, The Algebraic Eigenvalue Problem, Oxford University
-%          Press, 1965.
-
-if n == 3
-   % Wilkinson (1961) p.323.
-   A = [ 1e-10   .9  -.4
-           0     .9  -.4
-           0      0  1e-10];
-   b = [   0      0    1]';
-
-elseif n == 4
-   % Wilkinson (1963) p.105.
-   A = [0.9143e-4  0          0          0
-        0.8762     0.7156e-4  0          0
-        0.7943     0.8143     0.9504e-4  0
-        0.8017     0.6123     0.7165     0.7123e-4];
-   b = [0.6524     0.3127     0.4186     0.7853]';
-
-elseif n == 5
-   % Wilkinson (1965), p.234.
-   A = hilb(6);
-   A = A(1:5, 2:6)*1.8144;
-
-elseif n == 21
-   % Taken from gallery.m.  Wilkinson (1965), p.308.
-   E = diag(ones(n-1,1),1);
-   m = (n-1)/2;
-   A = diag(abs(-m:m)) + E + E';
-
-else
-   error('Sorry, that value of N is not available.')
-end
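
A brief usage sketch (assuming wilk.m is on the path): the N = 3 case is the classic example of a badly conditioned triangular solve.

    [A, b] = wilk(3);
    cond(A)                     % very large condition number (roughly 1e10)
    x = A\b                     % computed solution; its accuracy is limited by the conditioning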