Mercurial > octave
view scripts/sparse/pcg.m @ 14363:f3d52523cde1
Use Octave coding conventions in all m-file %!test blocks
* wavread.m, acosd.m, acot.m, acotd.m, acoth.m, acsc.m, acscd.m, acsch.m,
asec.m, asecd.m, asech.m, asind.m, atand.m, cosd.m, cot.m, cotd.m, coth.m,
csc.m, cscd.m, csch.m, sec.m, secd.m, sech.m, sind.m, tand.m, accumarray.m,
accumdim.m, bitcmp.m, bitget.m, bitset.m, blkdiag.m, cart2pol.m, cart2sph.m,
celldisp.m, chop.m, circshift.m, colon.m, common_size.m, cplxpair.m,
cumtrapz.m, curl.m, dblquad.m, deal.m, divergence.m, flipdim.m, fliplr.m,
flipud.m, genvarname.m, gradient.m, idivide.m, int2str.m, interp1.m,
interp1q.m, interp2.m, interp3.m, interpft.m, interpn.m, isa.m, isdir.m,
isequal.m, isequalwithequalnans.m, issquare.m, logspace.m, nargchk.m,
narginchk.m, nargoutchk.m, nextpow2.m, nthargout.m, num2str.m, pol2cart.m,
polyarea.m, postpad.m, prepad.m, profile.m, profshow.m, quadgk.m, quadv.m,
randi.m, rat.m, repmat.m, rot90.m, rotdim.m, shift.m, shiftdim.m, sph2cart.m,
structfun.m, trapz.m, triplequad.m, convhull.m, dsearch.m, dsearchn.m,
griddata3.m, griddatan.m, rectint.m, tsearchn.m, __makeinfo__.m, doc.m,
get_first_help_sentence.m, help.m, type.m, unimplemented.m, which.m, imread.m,
imwrite.m, dlmwrite.m, fileread.m, is_valid_file_id.m, strread.m, textread.m,
textscan.m, commutation_matrix.m, cond.m, condest.m, cross.m,
duplication_matrix.m, expm.m, housh.m, isdefinite.m, ishermitian.m,
issymmetric.m, logm.m, normest.m, null.m, onenormest.m, orth.m, planerot.m,
qzhess.m, rank.m, rref.m, trace.m, vech.m, ans.m, bincoeff.m, bug_report.m,
bzip2.m, comma.m, compare_versions.m, computer.m, edit.m, fileparts.m,
fullfile.m, getfield.m, gzip.m, info.m, inputname.m, isappdata.m, isdeployed.m,
ismac.m, ispc.m, isunix.m, list_primes.m, ls.m, mexext.m, namelengthmax.m,
news.m, orderfields.m, paren.m, recycle.m, rmappdata.m, semicolon.m,
setappdata.m, setfield.m, substruct.m, symvar.m, ver.m, version.m,
warning_ids.m, xor.m, fminbnd.m, fsolve.m, fzero.m, lsqnonneg.m, optimset.m,
pqpnonneg.m, sqp.m, matlabroot.m, __gnuplot_drawnow__.m,
__plt_get_axis_arg__.m, ancestor.m, cla.m, clf.m, close.m, colorbar.m,
colstyle.m, comet3.m, contourc.m, figure.m, gca.m, gcbf.m, gcbo.m, gcf.m,
ginput.m, graphics_toolkit.m, gtext.m, hggroup.m, hist.m, hold.m, isfigure.m,
ishghandle.m, ishold.m, isocolors.m, isonormals.m, isosurface.m, isprop.m,
legend.m, line.m, loglog.m, loglogerr.m, meshgrid.m, ndgrid.m, newplot.m,
orient.m, patch.m, plot3.m, plotyy.m, __print_parse_opts__.m, quiver3.m,
refreshdata.m, ribbon.m, semilogx.m, semilogxerr.m, semilogy.m, stem.m,
stem3.m, subplot.m, title.m, uigetfile.m, view.m, whitebg.m, compan.m, conv.m,
deconv.m, mkpp.m, mpoles.m, pchip.m, poly.m, polyaffine.m, polyder.m,
polyfit.m, polygcd.m, polyint.m, polyout.m, polyval.m, polyvalm.m, ppder.m,
ppint.m, ppjumps.m, ppval.m, residue.m, roots.m, spline.m, intersect.m,
ismember.m, powerset.m, setdiff.m, setxor.m, union.m, unique.m,
autoreg_matrix.m, bartlett.m, blackman.m, detrend.m, fftconv.m, fftfilt.m,
fftshift.m, freqz.m, hamming.m, hanning.m, ifftshift.m, sinc.m, sinetone.m,
sinewave.m, unwrap.m, bicg.m, bicgstab.m, gmres.m, gplot.m, nonzeros.m, pcg.m,
pcr.m, spaugment.m, spconvert.m, spdiags.m, speye.m, spfun.m, spones.m,
sprand.m, sprandsym.m, spstats.m, spy.m, svds.m, treelayout.m, bessel.m,
beta.m, betaln.m, factor.m, factorial.m, isprime.m, lcm.m, legendre.m,
nchoosek.m, nthroot.m, perms.m, pow2.m, primes.m, reallog.m, realpow.m,
realsqrt.m, hadamard.m, hankel.m, hilb.m, invhilb.m, magic.m, rosser.m,
vander.m, __finish__.m, center.m, cloglog.m, corr.m, cov.m, gls.m, histc.m,
iqr.m, kendall.m, kurtosis.m, logit.m, mahalanobis.m, mean.m, meansq.m,
median.m, mode.m, moment.m, ols.m, ppplot.m, prctile.m, probit.m, quantile.m,
range.m, ranks.m, run_count.m, runlength.m, skewness.m, spearman.m,
statistics.m, std.m, table.m, var.m, zscore.m, betacdf.m, betainv.m, betapdf.m,
betarnd.m, binocdf.m, binoinv.m, binopdf.m, binornd.m, cauchy_cdf.m,
cauchy_inv.m, cauchy_pdf.m, cauchy_rnd.m, chi2cdf.m, chi2inv.m, chi2pdf.m,
chi2rnd.m, discrete_cdf.m, discrete_inv.m, discrete_pdf.m, discrete_rnd.m,
empirical_cdf.m, empirical_inv.m, empirical_pdf.m, empirical_rnd.m, expcdf.m,
expinv.m, exppdf.m, exprnd.m, fcdf.m, finv.m, fpdf.m, frnd.m, gamcdf.m,
gaminv.m, gampdf.m, gamrnd.m, geocdf.m, geoinv.m, geopdf.m, geornd.m,
hygecdf.m, hygeinv.m, hygepdf.m, hygernd.m, kolmogorov_smirnov_cdf.m,
laplace_cdf.m, laplace_inv.m, laplace_pdf.m, laplace_rnd.m, logistic_cdf.m,
logistic_inv.m, logistic_pdf.m, logistic_rnd.m, logncdf.m, logninv.m,
lognpdf.m, lognrnd.m, nbincdf.m, nbininv.m, nbinpdf.m, nbinrnd.m, normcdf.m,
norminv.m, normpdf.m, normrnd.m, poisscdf.m, poissinv.m, poisspdf.m,
poissrnd.m, stdnormal_cdf.m, stdnormal_inv.m, stdnormal_pdf.m, stdnormal_rnd.m,
tcdf.m, tinv.m, tpdf.m, trnd.m, unidcdf.m, unidinv.m, unidpdf.m, unidrnd.m,
unifcdf.m, unifinv.m, unifpdf.m, unifrnd.m, wblcdf.m, wblinv.m, wblpdf.m,
wblrnd.m, kolmogorov_smirnov_test.m, kruskal_wallis_test.m, base2dec.m,
bin2dec.m, blanks.m, cstrcat.m, deblank.m, dec2base.m, dec2bin.m, dec2hex.m,
findstr.m, hex2dec.m, index.m, isletter.m, mat2str.m, rindex.m, str2num.m,
strcat.m, strjust.m, strmatch.m, strsplit.m, strtok.m, strtrim.m, strtrunc.m,
substr.m, validatestring.m, demo.m, example.m, fail.m, speed.m, addtodate.m,
asctime.m, clock.m, ctime.m, date.m, datenum.m, datetick.m, datevec.m,
eomday.m, etime.m, is_leap_year.m, now.m:
Use Octave coding conventions in all m-file %!test blocks
author | Rik <octave@nomad.inbox5.com> |
---|---|
date | Mon, 13 Feb 2012 07:29:44 -0800 |
parents | ce2b59a6d0e5 |
children | 5d3a684236b0 |
line wrap: on
line source
## Copyright (C) 2004-2012 Piotr Krzyzanowski
##
## This file is part of Octave.
##
## Octave is free software; you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or (at
## your option) any later version.
##
## Octave is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Octave; see the file COPYING.  If not, see
## <http://www.gnu.org/licenses/>.

## -*- texinfo -*-
## @deftypefn  {Function File} {@var{x} =} pcg (@var{A}, @var{b}, @var{tol}, @var{maxit}, @var{m1}, @var{m2}, @var{x0}, @dots{})
## @deftypefnx {Function File} {[@var{x}, @var{flag}, @var{relres}, @var{iter}, @var{resvec}, @var{eigest}] =} pcg (@dots{})
##
## Solve the linear system of equations @code{@var{A} * @var{x} = @var{b}}
## by means of the Preconditioned Conjugate Gradient iterative
## method.  The input arguments are
##
## @itemize
## @item
## @var{A} can be either a square (preferably sparse) matrix or a
## function handle, inline function or string containing the name
## of a function which computes @code{@var{A} * @var{x}}.  In principle
## @var{A} should be symmetric and positive definite; if @code{pcg}
## finds @var{A} to not be positive definite, you will get a warning
## message and the @var{flag} output parameter will be set.
##
## @item
## @var{b} is the right hand side vector.
##
## @item
## @var{tol} is the required relative tolerance for the residual error,
## @code{@var{b} - @var{A} * @var{x}}.  The iteration stops if
## @code{norm (@var{b} - @var{A} * @var{x}) <=
## @var{tol} * norm (@var{b} - @var{A} * @var{x0})}.
## If @var{tol} is empty or is omitted, the function sets
## @code{@var{tol} = 1e-6} by default.
##
## @item
## @var{maxit} is the maximum allowable number of iterations; if
## @code{[]} is supplied for @code{maxit}, or @code{pcg} has less
## arguments, a default value equal to 20 is used.
##
## @item
## @var{m} = @var{m1} * @var{m2} is the (left) preconditioning matrix, so that
## the iteration is (theoretically) equivalent to solving by @code{pcg}
## @code{@var{P} *
## @var{x} = @var{m} \ @var{b}}, with @code{@var{P} = @var{m} \ @var{A}}.
## Note that a proper choice of the preconditioner may dramatically
## improve the overall performance of the method.  Instead of matrices
## @var{m1} and @var{m2}, the user may pass two functions which return
## the results of applying the inverse of @var{m1} and @var{m2} to
## a vector (usually this is the preferred way of using the preconditioner).
## If @code{[]} is supplied for @var{m1}, or @var{m1} is omitted, no
## preconditioning is applied.  If @var{m2} is omitted, @var{m} = @var{m1}
## will be used as preconditioner.
##
## @item
## @var{x0} is the initial guess.  If @var{x0} is empty or omitted, the
## function sets @var{x0} to a zero vector by default.
## @end itemize
##
## The arguments which follow @var{x0} are treated as parameters, and
## passed in a proper way to any of the functions (@var{A} or @var{m})
## which are passed to @code{pcg}.  See the examples below for further
## details.  The output arguments are
##
## @itemize
## @item
## @var{x} is the computed approximation to the solution of
## @code{@var{A} * @var{x} = @var{b}}.
##
## @item
## @var{flag} reports on the convergence.  @code{@var{flag} = 0} means
## the solution converged and the tolerance criterion given by @var{tol}
## is satisfied.  @code{@var{flag} = 1} means that the @var{maxit} limit
## for the iteration count was reached.  @code{@var{flag} = 3} reports that
## the (preconditioned) matrix was found not positive definite.
##
## @item
## @var{relres} is the ratio of the final residual to its initial value,
## measured in the Euclidean norm.
##
## @item
## @var{iter} is the actual number of iterations performed.
##
## @item
## @var{resvec} describes the convergence history of the method.
## @code{@var{resvec} (i,1)} is the Euclidean norm of the residual, and
## @code{@var{resvec} (i,2)} is the preconditioned residual norm,
## after the (@var{i}-1)-th iteration, @code{@var{i} =
## 1, 2, @dots{}, @var{iter}+1}.  The preconditioned residual norm
## is defined as
## @code{norm (@var{r}) ^ 2 = @var{r}' * (@var{m} \ @var{r})} where
## @code{@var{r} = @var{b} - @var{A} * @var{x}}, see also the
## description of @var{m}.  If @var{eigest} is not required, only
## @code{@var{resvec} (:,1)} is returned.
##
## @item
## @var{eigest} returns the estimate for the smallest @code{@var{eigest}
## (1)} and largest @code{@var{eigest} (2)} eigenvalues of the
## preconditioned matrix @code{@var{P} = @var{m} \ @var{A}}.  In
## particular, if no preconditioning is used, the estimates for the
## extreme eigenvalues of @var{A} are returned.  @code{@var{eigest} (1)}
## is an overestimate and @code{@var{eigest} (2)} is an underestimate,
## so that @code{@var{eigest} (2) / @var{eigest} (1)} is a lower bound
## for @code{cond (@var{P}, 2)}, which nevertheless in the limit should
## theoretically be equal to the actual value of the condition number.
## The method which computes @var{eigest} works only for symmetric positive
## definite @var{A} and @var{m}, and the user is responsible for
## verifying this assumption.
## @end itemize
##
## Let us consider a trivial problem with a diagonal matrix (we exploit the
## sparsity of A)
##
## @example
## @group
## n = 10;
## A = diag (sparse (1:n));
## b = rand (n, 1);
## [l, u, p, q] = luinc (A, 1.e-3);
## @end group
## @end example
##
## @sc{Example 1:} Simplest use of @code{pcg}
##
## @example
## x = pcg (A, b)
## @end example
##
## @sc{Example 2:} @code{pcg} with a function which computes
## @code{@var{A} * @var{x}}
##
## @example
## @group
## function y = apply_a (x)
##   y = [1:N]' .* x;
## endfunction
##
## x = pcg ("apply_a", b)
## @end group
## @end example
##
## @sc{Example 3:} @code{pcg} with a preconditioner: @var{l} * @var{u}
##
## @example
## x = pcg (A, b, 1.e-6, 500, l*u)
## @end example
##
## @sc{Example 4:} @code{pcg} with a preconditioner: @var{l} * @var{u}.
## Faster than @sc{Example 3} since lower and upper triangular matrices
## are easier to invert
##
## @example
## x = pcg (A, b, 1.e-6, 500, l, u)
## @end example
##
## @sc{Example 5:} Preconditioned iteration, with full diagnostics.  The
## preconditioner (quite strange, because even the original matrix
## @var{A} is trivial) is defined as a function
##
## @example
## @group
## function y = apply_m (x)
##   k = floor (length (x) - 2);
##   y = x;
##   y(1:k) = x(1:k) ./ [1:k]';
## endfunction
##
## [x, flag, relres, iter, resvec, eigest] = ...
##                    pcg (A, b, [], [], "apply_m");
## semilogy (1:iter+1, resvec);
## @end group
## @end example
##
## @sc{Example 6:} Finally, a preconditioner which depends on a
## parameter @var{k}.
##
## @example
## @group
## function y = apply_M (x, varargin)
##   K = varargin@{1@};
##   y = x;
##   y(1:K) = x(1:K) ./ [1:K]';
## endfunction
##
## [x, flag, relres, iter, resvec, eigest] = ...
##      pcg (A, b, [], [], "apply_m", [], [], 3)
## @end group
## @end example
##
## References:
##
## @enumerate
## @item
## C.T. Kelley, @cite{Iterative Methods for Linear and Nonlinear Equations},
## SIAM, 1995. (the base PCG algorithm)
##
## @item
## Y. Saad, @cite{Iterative Methods for Sparse Linear Systems}, PWS 1996.
## (condition number estimate from PCG) Revised version of this book is
## available online at @url{http://www-users.cs.umn.edu/~saad/books.html}
## @end enumerate
##
## @seealso{sparse, pcr}
## @end deftypefn

## Author: Piotr Krzyzanowski <piotr.krzyzanowski@mimuw.edu.pl>
## Modified by: Vittoria Rezzonico <vittoria.rezzonico@epfl.ch>
##  - Add the ability to provide the pre-conditioner as two separate matrices

function [x, flag, relres, iter, resvec, eigest] = pcg (A, b, tol, maxit, m1, m2, x0, varargin)

  ## The combined preconditioner is M = M1*M2.

  ## Default initial guess is the zero vector.
  if (nargin < 7 || isempty (x0))
    x = zeros (size (b));
  else
    x = x0;
  endif

  if (nargin < 5 || isempty (m1))
    exist_m1 = 0;
  else
    exist_m1 = 1;
  endif

  if (nargin < 6 || isempty (m2))
    exist_m2 = 0;
  else
    exist_m2 = 1;
  endif

  if (nargin < 4 || isempty (maxit))
    maxit = min (size (b, 1), 20);
  endif

  ## Two extra iterations account for the 1-based bookkeeping below
  ## (iter starts at 2); it is subtracted back before returning.
  maxit += 2;

  if (nargin < 3 || isempty (tol))
    tol = 1e-6;
  endif

  ## T accumulates the Lanczos tridiagonal matrix used for the
  ## eigenvalue estimates; only needed when eigest is requested.
  if (nargout > 5)
    T = zeros (maxit, maxit);
  endif

  ## Assume A is positive definite until an iteration proves otherwise.
  matrix_positive_definite = true;

  p = zeros (size (b));
  oldtau = 1;
  ## Initial residual r = b - A*x0.
  if (isnumeric (A))
    ## A is a matrix.
    r = b - A*x;
  else
    ## A should be a function.
    r = b - feval (A, x, varargin{:});
  endif

  resvec(1,1) = norm (r);
  alpha = 1;
  iter = 2;

  while (resvec(iter-1,1) > tol * resvec(1,1) && iter < maxit)
    ## Apply the preconditioner: z = M2 \ (M1 \ r).
    if (exist_m1)
      if (isnumeric (m1))
        y = m1 \ r;
      else
        y = feval (m1, r, varargin{:});
      endif
    else
      y = r;
    endif
    if (exist_m2)
      if (isnumeric (m2))
        z = m2 \ y;
      else
        z = feval (m2, y, varargin{:});
      endif
    else
      z = y;
    endif
    tau = z' * r;
    resvec(iter-1,2) = sqrt (tau);
    beta = tau / oldtau;
    oldtau = tau;
    p = z + beta * p;
    ## Matrix-vector product w = A*p.
    if (isnumeric (A))
      ## A is a matrix.
      w = A * p;
    else
      ## A should be a function.
      w = feval (A, p, varargin{:});
    endif
    ## Needed only for eigest.
    oldalpha = alpha;
    alpha = tau / (p'*w);
    if (alpha <= 0.0)
      ## Negative curvature direction found: A (or M\A) is not
      ## positive definite.
      matrix_positive_definite = false;
    endif
    x += alpha * p;
    r -= alpha * w;
    if (nargout > 5 && iter > 2)
      T(iter-1:iter, iter-1:iter) = T(iter-1:iter, iter-1:iter) + ...
          [1 sqrt(beta); sqrt(beta) beta]./oldalpha;
      ## EVS = eig(T(2:iter-1,2:iter-1));
      ## fprintf(stderr,"PCG condest: %g (iteration: %d)\n", max(EVS)/min(EVS),iter);
    endif
    resvec(iter,1) = norm (r);
    iter++;
  endwhile

  if (nargout > 5)
    if (matrix_positive_definite)
      if (iter > 3)
        T = T(2:iter-2,2:iter-2);
        l = eig (T);
        eigest = [min(l), max(l)];
        ## fprintf (stderr, "pcg condest: %g\n", eigest(2)/eigest(1));
      else
        eigest = [NaN, NaN];
        warning ("pcg: eigenvalue estimate failed: iteration converged too fast");
      endif
    else
      eigest = [NaN, NaN];
    endif

    ## Apply the preconditioner once more and finish with the precond
    ## residual.
    if (exist_m1)
      if (isnumeric (m1))
        y = m1 \ r;
      else
        y = feval (m1, r, varargin{:});
      endif
    else
      y = r;
    endif
    if (exist_m2)
      if (isnumeric (m2))
        z = m2 \ y;
      else
        z = feval (m2, y, varargin{:});
      endif
    else
      z = y;
    endif
    resvec(iter-1,2) = sqrt (r' * z);
  else
    ## Without eigest, only the Euclidean residual history is returned.
    resvec = resvec(:,1);
  endif

  flag = 0;
  relres = resvec(iter-1,1) ./ resvec(1,1);
  ## Undo the +2 bookkeeping offset applied to maxit/iter above.
  iter -= 2;
  if (iter >= maxit - 2)
    flag = 1;
    if (nargout < 2)
      warning ("pcg: maximum number of iterations (%d) reached\n", iter);
      warning ("the initial residual norm was reduced %g times.\n", ...
               1.0 / relres);
    endif
  elseif (nargout < 2)
    fprintf (stderr, "pcg: converged in %d iterations. ", iter);
    fprintf (stderr, "the initial residual norm was reduced %g times.\n",...
             1.0/relres);
  endif

  if (! matrix_positive_definite)
    flag = 3;
    if (nargout < 2)
      warning ("pcg: matrix not positive definite?\n");
    endif
  endif

endfunction

%!demo
%! # Simplest usage of pcg (see also 'help pcg')
%!
%! N = 10;
%! A = diag ([1:N]); b = rand (N, 1);
%! y = A \ b;  # y is the true solution
%! x = pcg (A, b);
%! printf ("The solution relative error is %g\n", norm (x - y) / norm (y));
%!
%! # You shouldn't be afraid if pcg issues some warning messages in this
%! # example: watch out in the second example, why it takes N iterations
%! # of pcg to converge to (a very accurate, by the way) solution

%!demo
%! # Full output from pcg, except for the eigenvalue estimates
%! # We use this output to plot the convergence history
%!
%! N = 10;
%! A = diag ([1:N]); b = rand (N, 1);
%! X = A \ b;  # X is the true solution
%! [x, flag, relres, iter, resvec] = pcg (A, b);
%! printf ("The solution relative error is %g\n", norm (x - X) / norm (X));
%! title ("Convergence history");
%! semilogy ([0:iter], resvec / resvec(1), "o-g");
%! xlabel ("Iteration"); ylabel ("log(||b-Ax||/||b||)");
%! legend ("relative residual");

%!demo
%! # Full output from pcg, including the eigenvalue estimates
%! # Hilbert matrix is extremely ill-conditioned, so pcg WILL have problems
%!
%! N = 10;
%! A = hilb (N); b = rand (N, 1);
%! X = A \ b;  # X is the true solution
%! [x, flag, relres, iter, resvec, eigest] = pcg (A, b, [], 200);
%! printf ("The solution relative error is %g\n", norm (x - X) / norm (X));
%! printf ("Condition number estimate is %g\n", eigest(2) / eigest(1));
%! printf ("Actual condition number is %g\n", cond (A));
%! title ("Convergence history");
%! semilogy ([0:iter], resvec, ["o-g";"+-r"]);
%! xlabel ("Iteration"); ylabel ("log(||b-Ax||)");
%! legend ("absolute residual", "absolute preconditioned residual");

%!demo
%! # Full output from pcg, including the eigenvalue estimates
%! # We use the 1-D Laplacian matrix for A, and cond(A) = O(N^2)
%! # and that's the reason we need some preconditioner; here we take
%! # a very simple and not powerful Jacobi preconditioner,
%! # which is the diagonal of A
%!
%! N = 100;
%! A = zeros (N, N);
%! for i = 1 : N - 1  # form 1-D Laplacian matrix
%!   A(i:i+1, i:i+1) = [2 -1; -1 2];
%! endfor
%! b = rand (N, 1);
%! X = A \ b;  # X is the true solution
%! maxit = 80;
%! printf ("System condition number is %g\n", cond (A));
%! # No preconditioner: the convergence is very slow!
%!
%! [x, flag, relres, iter, resvec, eigest] = pcg (A, b, [], maxit);
%! printf ("System condition number estimate is %g\n", eigest(2) / eigest(1));
%! title ("Convergence history");
%! semilogy ([0:iter], resvec(:,1), "o-g");
%! xlabel ("Iteration"); ylabel ("log(||b-Ax||)");
%! legend ("NO preconditioning: absolute residual");
%!
%! pause (1);
%! # Test Jacobi preconditioner: it will not help much!!!
%!
%! M = diag (diag (A));  # Jacobi preconditioner
%! [x, flag, relres, iter, resvec, eigest] = pcg (A, b, [], maxit, M);
%! printf ("JACOBI preconditioned system condition number estimate is %g\n", eigest(2) / eigest(1));
%! hold on;
%! semilogy ([0:iter], resvec(:,1), "o-r");
%! legend ("NO preconditioning: absolute residual", ...
%!         "JACOBI preconditioner: absolute residual");
%!
%! pause (1);
%! # Test nonoverlapping block Jacobi preconditioner: it will help much!
%!
%! M = zeros (N, N); k = 4;
%! for i = 1 : k : N  # form 1-D Laplacian matrix
%!   M(i:i+k-1, i:i+k-1) = A(i:i+k-1, i:i+k-1);
%! endfor
%! [x, flag, relres, iter, resvec, eigest] = pcg (A, b, [], maxit, M);
%! printf ("BLOCK JACOBI preconditioned system condition number estimate is %g\n", eigest(2) / eigest(1));
%! semilogy ([0:iter], resvec(:,1), "o-b");
%! legend ("NO preconditioning: absolute residual", ...
%!         "JACOBI preconditioner: absolute residual", ...
%!         "BLOCK JACOBI preconditioner: absolute residual");
%! hold off;

%!test
%! # solve small diagonal system
%!
%! N = 10;
%! A = diag ([1:N]); b = rand (N, 1);
%! X = A \ b;  # X is the true solution
%! [x, flag] = pcg (A, b, [], N+1);
%! assert (norm (x - X) / norm (X), 0, 1e-10);
%! assert (flag, 0);

%!test
%! # solve small indefinite diagonal system
%! # despite A is indefinite, the iteration continues and converges
%! # indefiniteness of A is detected
%!
%! N = 10;
%! A = diag([1:N] .* (-ones(1, N) .^ 2)); b = rand (N, 1);
%! X = A \ b;  # X is the true solution
%! [x, flag] = pcg (A, b, [], N+1);
%! assert (norm (x - X) / norm (X), 0, 1e-10);
%! assert (flag, 3);

%!test
%! # solve tridiagonal system, do not converge in default 20 iterations
%!
%! N = 100;
%! A = zeros (N, N);
%! for i = 1 : N - 1  # form 1-D Laplacian matrix
%!   A(i:i+1, i:i+1) = [2 -1; -1 2];
%! endfor
%! b = ones (N, 1);
%! X = A \ b;  # X is the true solution
%! [x, flag, relres, iter, resvec, eigest] = pcg (A, b, 1e-12);
%! assert (flag);
%! assert (relres > 1.0);
%! assert (iter, 20);  # should perform max allowable default number of iterations

%!test
%! # solve tridiagonal system with 'perfect' preconditioner
%! # which converges in one iteration, so the eigest does not
%! # work and issues a warning
%!
%! N = 100;
%! A = zeros (N, N);
%! for i = 1 : N - 1  # form 1-D Laplacian matrix
%!   A(i:i+1, i:i+1) = [2 -1; -1 2];
%! endfor
%! b = ones (N, 1);
%! X = A \ b;  # X is the true solution
%! [x, flag, relres, iter, resvec, eigest] = pcg (A, b, [], [], A, [], b);
%! assert (norm (x - X) / norm (X), 0, 1e-6);
%! assert (flag, 0);
%! assert (iter, 1);  # should converge in one iteration
%! assert (isnan (eigest), isnan ([NaN, NaN]));