Mercurial > octave
view liboctave/numeric/sparse-qr.cc @ 31605:e88a07dec498 stable
maint: Use macros to begin/end C++ namespaces.
* oct-conf-post-public.in.h: Define two macros (OCTAVE_BEGIN_NAMESPACE,
OCTAVE_END_NAMESPACE) that can be used to start/end a namespace.
* mk-opts.pl, build-env.h, build-env.in.cc, __betainc__.cc, __contourc__.cc,
__dsearchn__.cc, __eigs__.cc, __expint__.cc, __ftp__.cc, __gammainc__.cc,
__ichol__.cc, __ilu__.cc, __isprimelarge__.cc, __lin_interpn__.cc,
__magick_read__.cc, __pchip_deriv__.cc, __qp__.cc, amd.cc, auto-shlib.cc,
auto-shlib.h, balance.cc, base-text-renderer.cc, base-text-renderer.h,
besselj.cc, bitfcns.cc, bsxfun.cc, c-file-ptr-stream.cc, c-file-ptr-stream.h,
call-stack.cc, call-stack.h, ccolamd.cc, cellfun.cc, chol.cc, colamd.cc,
colloc.cc, conv2.cc, daspk.cc, dasrt.cc, dassl.cc, data.cc, data.h, debug.cc,
defaults.cc, defaults.h, defun-int.h, defun.cc, det.cc, dirfns.cc, display.cc,
display.h, dlmread.cc, dmperm.cc, dot.cc, dynamic-ld.cc, dynamic-ld.h, eig.cc,
ellipj.cc, environment.cc, environment.h, error.cc, error.h, errwarn.h,
event-manager.cc, event-manager.h, event-queue.cc, event-queue.h, fcn-info.cc,
fcn-info.h, fft.cc, fft2.cc, fftn.cc, file-io.cc, filter.cc, find.cc,
ft-text-renderer.cc, ft-text-renderer.h, gcd.cc, getgrent.cc, getpwent.cc,
getrusage.cc, givens.cc, gl-render.cc, gl-render.h, gl2ps-print.cc,
gl2ps-print.h, graphics-toolkit.cc, graphics-toolkit.h, graphics.cc,
graphics.in.h, gsvd.cc, gtk-manager.cc, gtk-manager.h, hash.cc, help.cc,
help.h, hess.cc, hex2num.cc, hook-fcn.cc, hook-fcn.h, input.cc, input.h,
interpreter-private.cc, interpreter-private.h, interpreter.cc, interpreter.h,
inv.cc, jsondecode.cc, jsonencode.cc, kron.cc, latex-text-renderer.cc,
latex-text-renderer.h, load-path.cc, load-path.h, load-save.cc, load-save.h,
lookup.cc, ls-ascii-helper.cc, ls-ascii-helper.h, ls-oct-text.cc, ls-utils.cc,
ls-utils.h, lsode.cc, lu.cc, mappers.cc, matrix_type.cc, max.cc, mex-private.h,
mex.cc, mgorth.cc, nproc.cc, oct-fstrm.cc, oct-fstrm.h, oct-hdf5-types.cc,
oct-hdf5-types.h, oct-hist.cc, oct-hist.h, oct-iostrm.cc, oct-iostrm.h,
oct-opengl.h, oct-prcstrm.cc, oct-prcstrm.h, oct-procbuf.cc, oct-procbuf.h,
oct-process.cc, oct-process.h, oct-stdstrm.h, oct-stream.cc, oct-stream.h,
oct-strstrm.cc, oct-strstrm.h, oct-tex-lexer.in.ll, oct-tex-parser.yy,
ordqz.cc, ordschur.cc, pager.cc, pager.h, pinv.cc, pow2.cc, pr-flt-fmt.cc,
pr-output.cc, procstream.cc, procstream.h, psi.cc, qr.cc, quad.cc, quadcc.cc,
qz.cc, rand.cc, rcond.cc, regexp.cc, schur.cc, settings.cc, settings.h,
sighandlers.cc, sighandlers.h, sparse-xdiv.cc, sparse-xdiv.h, sparse-xpow.cc,
sparse-xpow.h, sparse.cc, spparms.cc, sqrtm.cc, stack-frame.cc, stack-frame.h,
stream-euler.cc, strfind.cc, strfns.cc, sub2ind.cc, svd.cc, sylvester.cc,
symbfact.cc, syminfo.cc, syminfo.h, symrcm.cc, symrec.cc, symrec.h,
symscope.cc, symscope.h, symtab.cc, symtab.h, syscalls.cc, sysdep.cc, sysdep.h,
text-engine.cc, text-engine.h, text-renderer.cc, text-renderer.h, time.cc,
toplev.cc, tril.cc, tsearch.cc, typecast.cc, url-handle-manager.cc,
url-handle-manager.h, urlwrite.cc, utils.cc, utils.h, variables.cc,
variables.h, xdiv.cc, xdiv.h, xnorm.cc, xnorm.h, xpow.cc, xpow.h,
__delaunayn__.cc, __fltk_uigetfile__.cc, __glpk__.cc, __init_fltk__.cc,
__init_gnuplot__.cc, __ode15__.cc, __voronoi__.cc, audiodevinfo.cc,
audioread.cc, convhulln.cc, fftw.cc, gzip.cc, mk-build-env-features.sh,
mk-builtins.pl, cdef-class.cc, cdef-class.h, cdef-fwd.h, cdef-manager.cc,
cdef-manager.h, cdef-method.cc, cdef-method.h, cdef-object.cc, cdef-object.h,
cdef-package.cc, cdef-package.h, cdef-property.cc, cdef-property.h,
cdef-utils.cc, cdef-utils.h, ov-base.cc, ov-base.h, ov-bool-mat.cc,
ov-builtin.h, ov-cell.cc, ov-class.cc, ov-class.h, ov-classdef.cc,
ov-classdef.h, ov-complex.cc, ov-fcn-handle.cc, ov-fcn-handle.h, ov-fcn.h,
ov-java.cc, ov-java.h, ov-mex-fcn.h, ov-null-mat.cc, ov-oncleanup.cc,
ov-struct.cc, ov-typeinfo.cc, ov-typeinfo.h, ov-usr-fcn.cc, ov-usr-fcn.h,
ov.cc, ov.h, octave.cc, octave.h, mk-ops.sh, op-b-b.cc, op-b-bm.cc,
op-b-sbm.cc, op-bm-b.cc, op-bm-bm.cc, op-bm-sbm.cc, op-cdm-cdm.cc, op-cell.cc,
op-chm.cc, op-class.cc, op-cm-cm.cc, op-cm-cs.cc, op-cm-m.cc, op-cm-s.cc,
op-cm-scm.cc, op-cm-sm.cc, op-cs-cm.cc, op-cs-cs.cc, op-cs-m.cc, op-cs-s.cc,
op-cs-scm.cc, op-cs-sm.cc, op-dm-dm.cc, op-dm-scm.cc, op-dm-sm.cc,
op-dm-template.cc, op-dms-template.cc, op-fcdm-fcdm.cc, op-fcm-fcm.cc,
op-fcm-fcs.cc, op-fcm-fm.cc, op-fcm-fs.cc, op-fcn.cc, op-fcs-fcm.cc,
op-fcs-fcs.cc, op-fcs-fm.cc, op-fcs-fs.cc, op-fdm-fdm.cc, op-fm-fcm.cc,
op-fm-fcs.cc, op-fm-fm.cc, op-fm-fs.cc, op-fs-fcm.cc, op-fs-fcs.cc,
op-fs-fm.cc, op-fs-fs.cc, op-i16-i16.cc, op-i32-i32.cc, op-i64-i64.cc,
op-i8-i8.cc, op-int-concat.cc, op-m-cm.cc, op-m-cs.cc, op-m-m.cc, op-m-s.cc,
op-m-scm.cc, op-m-sm.cc, op-mi.cc, op-pm-pm.cc, op-pm-scm.cc, op-pm-sm.cc,
op-pm-template.cc, op-range.cc, op-s-cm.cc, op-s-cs.cc, op-s-m.cc, op-s-s.cc,
op-s-scm.cc, op-s-sm.cc, op-sbm-b.cc, op-sbm-bm.cc, op-sbm-sbm.cc,
op-scm-cm.cc, op-scm-cs.cc, op-scm-m.cc, op-scm-s.cc, op-scm-scm.cc,
op-scm-sm.cc, op-sm-cm.cc, op-sm-cs.cc, op-sm-m.cc, op-sm-s.cc, op-sm-scm.cc,
op-sm-sm.cc, op-str-m.cc, op-str-s.cc, op-str-str.cc, op-struct.cc,
op-ui16-ui16.cc, op-ui32-ui32.cc, op-ui64-ui64.cc, op-ui8-ui8.cc, ops.h,
anon-fcn-validator.cc, anon-fcn-validator.h, bp-table.cc, bp-table.h,
comment-list.cc, comment-list.h, filepos.h, lex.h, lex.ll, oct-lvalue.cc,
oct-lvalue.h, oct-parse.yy, parse.h, profiler.cc, profiler.h,
pt-anon-scopes.cc, pt-anon-scopes.h, pt-arg-list.cc, pt-arg-list.h,
pt-args-block.cc, pt-args-block.h, pt-array-list.cc, pt-array-list.h,
pt-assign.cc, pt-assign.h, pt-binop.cc, pt-binop.h, pt-bp.cc, pt-bp.h,
pt-cbinop.cc, pt-cbinop.h, pt-cell.cc, pt-cell.h, pt-check.cc, pt-check.h,
pt-classdef.cc, pt-classdef.h, pt-cmd.h, pt-colon.cc, pt-colon.h, pt-const.cc,
pt-const.h, pt-decl.cc, pt-decl.h, pt-eval.cc, pt-eval.h, pt-except.cc,
pt-except.h, pt-exp.cc, pt-exp.h, pt-fcn-handle.cc, pt-fcn-handle.h, pt-id.cc,
pt-id.h, pt-idx.cc, pt-idx.h, pt-jump.h, pt-loop.cc, pt-loop.h, pt-mat.cc,
pt-mat.h, pt-misc.cc, pt-misc.h, pt-pr-code.cc, pt-pr-code.h, pt-select.cc,
pt-select.h, pt-spmd.cc, pt-spmd.h, pt-stmt.cc, pt-stmt.h, pt-tm-const.cc,
pt-tm-const.h, pt-unop.cc, pt-unop.h, pt-vm-eval.cc, pt-walk.cc, pt-walk.h,
pt.cc, pt.h, token.cc, token.h, Range.cc, Range.h, idx-vector.cc, idx-vector.h,
range-fwd.h, CollocWt.cc, CollocWt.h, aepbalance.cc, aepbalance.h, chol.cc,
chol.h, gepbalance.cc, gepbalance.h, gsvd.cc, gsvd.h, hess.cc, hess.h,
lo-mappers.cc, lo-mappers.h, lo-specfun.cc, lo-specfun.h, lu.cc, lu.h,
oct-convn.cc, oct-convn.h, oct-fftw.cc, oct-fftw.h, oct-norm.cc, oct-norm.h,
oct-rand.cc, oct-rand.h, oct-spparms.cc, oct-spparms.h, qr.cc, qr.h, qrp.cc,
qrp.h, randgamma.cc, randgamma.h, randmtzig.cc, randmtzig.h, randpoisson.cc,
randpoisson.h, schur.cc, schur.h, sparse-chol.cc, sparse-chol.h, sparse-lu.cc,
sparse-lu.h, sparse-qr.cc, sparse-qr.h, svd.cc, svd.h, child-list.cc,
child-list.h, dir-ops.cc, dir-ops.h, file-ops.cc, file-ops.h, file-stat.cc,
file-stat.h, lo-sysdep.cc, lo-sysdep.h, lo-sysinfo.cc, lo-sysinfo.h,
mach-info.cc, mach-info.h, oct-env.cc, oct-env.h, oct-group.cc, oct-group.h,
oct-password.cc, oct-password.h, oct-syscalls.cc, oct-syscalls.h, oct-time.cc,
oct-time.h, oct-uname.cc, oct-uname.h, action-container.cc, action-container.h,
base-list.h, cmd-edit.cc, cmd-edit.h, cmd-hist.cc, cmd-hist.h, f77-fcn.h,
file-info.cc, file-info.h, lo-array-errwarn.cc, lo-array-errwarn.h, lo-hash.cc,
lo-hash.h, lo-ieee.h, lo-regexp.cc, lo-regexp.h, lo-utils.cc, lo-utils.h,
oct-base64.cc, oct-base64.h, oct-glob.cc, oct-glob.h, oct-inttypes.h,
oct-mutex.cc, oct-mutex.h, oct-refcount.h, oct-shlib.cc, oct-shlib.h,
oct-sparse.cc, oct-sparse.h, oct-string.h, octave-preserve-stream-state.h,
pathsearch.cc, pathsearch.h, quit.cc, quit.h, unwind-prot.cc, unwind-prot.h,
url-transfer.cc, url-transfer.h : Use new macros to begin/end C++ namespaces.
author | Rik <rik@octave.org> |
---|---|
date | Thu, 01 Dec 2022 14:23:45 -0800 |
parents | fa4bb329a51a |
children | aac27ad79be6 |
line wrap: on
line source
////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2005-2022 The Octave Project Developers
//
// See the file COPYRIGHT.md in the top-level directory of this
// distribution or <https://octave.org/copyright/>.
//
// This file is part of Octave.
//
// Octave is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Octave is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Octave; see the file COPYING.  If not, see
// <https://www.gnu.org/licenses/>.
//
////////////////////////////////////////////////////////////////////////

#if defined (HAVE_CONFIG_H)
#  include "config.h"
#endif

#include "CMatrix.h"
#include "CSparse.h"
#include "MArray.h"
#include "dColVector.h"
#include "dMatrix.h"
#include "dSparse.h"
#include "lo-error.h"
#include "oct-locbuf.h"
#include "oct-sparse.h"
#include "quit.h"
#include "sparse-qr.h"

OCTAVE_BEGIN_NAMESPACE(octave)

OCTAVE_BEGIN_NAMESPACE(math)

#if defined (HAVE_CXSPARSE)

// Traits mapping an Octave sparse matrix type to the corresponding
// CXSparse symbolic/numeric factorization types (real vs. complex).

template <typename SPARSE_T>
class
cxsparse_types
{ };

template <>
class
cxsparse_types<SparseMatrix>
{
public:
  typedef CXSPARSE_DNAME (s) symbolic_type;
  typedef CXSPARSE_DNAME (n) numeric_type;
};

template <>
class
cxsparse_types<SparseComplexMatrix>
{
public:
  typedef CXSPARSE_ZNAME (s) symbolic_type;
  typedef CXSPARSE_ZNAME (n) numeric_type;
};

#endif

// Internal representation of a sparse QR factorization.  Depending on
// which libraries were available at build time, the factorization is
// held either as SPQR/CHOLMOD objects (m_R, m_E, m_H, m_Htau, m_HPinv)
// or as CXSparse symbolic/numeric objects (S, N).

template <typename SPARSE_T>
class sparse_qr<SPARSE_T>::sparse_qr_rep
{
public:

  // Factor A with the requested column-ordering strategy (see the
  // per-specialization constructors for the meaning of ORDER).
  sparse_qr_rep (const SPARSE_T& a, int order);

  // No copying!

  sparse_qr_rep (const sparse_qr_rep&) = delete;

  sparse_qr_rep& operator = (const sparse_qr_rep&) = delete;

  ~sparse_qr_rep (void);

  // True if the factorization objects were successfully created.
  bool ok (void) const
  {
#if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
    return (m_H && m_Htau && m_HPinv && m_R && m_E);
#elif defined (HAVE_CXSPARSE)
    return (N && S);
#else
    return false;
#endif
  }

  // Householder vectors of the factorization.
  SPARSE_T V (void) const;

  // Inverse row permutation (CXSparse only).
  ColumnVector Pinv (void) const;

  // Row permutation.
  ColumnVector P (void) const;

  // Fill-reducing column permutation (1-based with SPQR).
  ColumnVector E (void) const;

  // Upper-triangular factor; ECON requests the economy-sized factor.
  SPARSE_T R (bool econ) const;

  // Q'*B for a dense right-hand side B.
  typename SPARSE_T::dense_matrix_type
  C (const typename SPARSE_T::dense_matrix_type& b, bool econ = false);

  // Explicit (dense) Q factor.
  typename SPARSE_T::dense_matrix_type Q (bool econ = false);

  octave_idx_type nrows;
  octave_idx_type ncols;

#if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
  template <typename RHS_T, typename RET_T>
  RET_T solve (const RHS_T& b, octave_idx_type& info) const;
#endif

#if defined (HAVE_CXSPARSE)
  typename cxsparse_types<SPARSE_T>::symbolic_type *S;
  typename cxsparse_types<SPARSE_T>::numeric_type *N;
#endif

  // Least-squares solve for overdetermined (tall) systems.
  template <typename RHS_T, typename RET_T>
  RET_T tall_solve (const RHS_T& b, octave_idx_type& info);

  // Minimum-norm solve for underdetermined (wide) systems; works on the
  // factorization of the transposed matrix (see callers).
  template <typename RHS_T, typename RET_T>
  RET_T wide_solve (const RHS_T& b, octave_idx_type& info) const;

#if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))

private:

  cholmod_common m_cc;
  cholmod_sparse *m_R;  // R factor
  // Column permutation for A.  Fill-reducing ordering.
  SuiteSparse_long *m_E;
  cholmod_sparse *m_H;  // Householder vectors
  cholmod_dense *m_Htau;  // beta scalars
  SuiteSparse_long *m_HPinv;
#endif
};

// Return the inverse row permutation (CXSparse only); empty otherwise.

template <typename SPARSE_T>
ColumnVector
sparse_qr<SPARSE_T>::sparse_qr_rep::Pinv (void) const
{
#if defined (HAVE_CXSPARSE)
  ColumnVector ret (N->L->m);

  for (octave_idx_type i = 0; i < N->L->m; i++)
    ret.xelem (i) = S->pinv[i];

  return ret;
#else
  return ColumnVector ();
#endif
}

// Return the row permutation.  Note: SPQR result is 1-based, CXSparse
// result is 0-based (both match the historical behavior of each branch).

template <typename SPARSE_T>
ColumnVector
sparse_qr<SPARSE_T>::sparse_qr_rep::P (void) const
{
#if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
  ColumnVector ret (nrows);

  // FIXME: Is ret.xelem (m_HPinv[i]) = i + 1 correct?
  for (octave_idx_type i = 0; i < nrows; i++)
    ret.xelem (from_suitesparse_long (m_HPinv[i])) = i + 1;

  return ret;
#elif defined (HAVE_CXSPARSE)
  ColumnVector ret (N->L->m);

  for (octave_idx_type i = 0; i < N->L->m; i++)
    ret.xelem (S->pinv[i]) = i;

  return ret;
#else
  return ColumnVector ();
#endif
}

// Return the fill-reducing column permutation as a 1-based vector.
// Falls back to the identity permutation when SPQR produced none.

template <typename SPARSE_T>
ColumnVector
sparse_qr<SPARSE_T>::sparse_qr_rep::E (void) const
{
#if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
  ColumnVector ret (ncols);

  if (m_E)
    for (octave_idx_type i = 0; i < ncols; i++)
      ret(i) = from_suitesparse_long (m_E[i]) + 1;
  else
    for (octave_idx_type i = 0; i < ncols; i++)
      ret(i) = i + 1;

  return ret;
#else
  return ColumnVector ();
#endif
}

#if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))

// Convert real sparse octave matrix to real sparse cholmod matrix.
// Returns a "shallow" copy of a.
static cholmod_sparse
ros2rcs (const SparseMatrix& a)
{
  cholmod_sparse chm;

  octave_idx_type num_cols = a.cols ();
  octave_idx_type num_nz = a.nnz ();

  // Describe the Octave matrix using CHOLMOD's header fields.
  chm.nrow = a.rows ();
  chm.ncol = num_cols;
  chm.nzmax = num_nz;
  chm.itype = CHOLMOD_LONG;
  chm.xtype = CHOLMOD_REAL;
  chm.dtype = CHOLMOD_DOUBLE;
  chm.sorted = 0;
  chm.packed = 1;
  chm.stype = 0;
  chm.nz = nullptr;
  chm.z = nullptr;

  if (sizeof (octave_idx_type) == sizeof (SuiteSparse_long))
    {
      // Index types match: alias Octave's index arrays directly.
      chm.p = reinterpret_cast<SuiteSparse_long *> (a.cidx ());
      chm.i = reinterpret_cast<SuiteSparse_long *> (a.ridx ());
    }
  else
    {
      // Index widths differ: allocate and widen the index arrays.
      SuiteSparse_long *col_ptr = new SuiteSparse_long[num_cols+1];
      for (octave_idx_type k = 0; k < num_cols+1; k++)
        col_ptr[k] = a.cidx (k);
      chm.p = col_ptr;

      SuiteSparse_long *row_idx = new SuiteSparse_long[num_nz];
      for (octave_idx_type k = 0; k < num_nz; k++)
        row_idx[k] = a.ridx (k);
      chm.i = row_idx;
    }

  // The numeric values are always aliased, never copied.
  chm.x = const_cast<double *> (a.data ());

  return chm;
}

// Convert complex sparse octave matrix to complex sparse cholmod matrix.
// Returns a "shallow" copy of a.
static cholmod_sparse
cos2ccs (const SparseComplexMatrix& a)
{
  cholmod_sparse chm;

  octave_idx_type num_cols = a.cols ();
  octave_idx_type num_nz = a.nnz ();

  // Describe the Octave matrix using CHOLMOD's header fields.
  chm.nrow = a.rows ();
  chm.ncol = num_cols;
  chm.nzmax = num_nz;
  chm.itype = CHOLMOD_LONG;
  chm.xtype = CHOLMOD_COMPLEX;
  chm.dtype = CHOLMOD_DOUBLE;
  chm.sorted = 0;
  chm.packed = 1;
  chm.stype = 0;
  chm.nz = nullptr;
  chm.z = nullptr;

  if (sizeof (octave_idx_type) == sizeof (SuiteSparse_long))
    {
      // Index types match: alias Octave's index arrays directly.
      chm.p = reinterpret_cast<SuiteSparse_long *> (a.cidx ());
      chm.i = reinterpret_cast<SuiteSparse_long *> (a.ridx ());
    }
  else
    {
      // Index widths differ: allocate and widen the index arrays.
      SuiteSparse_long *col_ptr = new SuiteSparse_long[num_cols+1];
      for (octave_idx_type k = 0; k < num_cols+1; k++)
        col_ptr[k] = a.cidx (k);
      chm.p = col_ptr;

      SuiteSparse_long *row_idx = new SuiteSparse_long[num_nz];
      for (octave_idx_type k = 0; k < num_nz; k++)
        row_idx[k] = a.ridx (k);
      chm.i = row_idx;
    }

  // The numeric values are always aliased, never copied.
  chm.x = const_cast<Complex *>
            (reinterpret_cast<const Complex *> (a.data ()));

  return chm;
}

// Convert real dense octave matrix to complex dense cholmod matrix.
// Returns a "deep" copy of a.
static cholmod_dense *
rod2ccd (const MArray<double>& a, cholmod_common *cc1)
{
  // Allocate a complex CHOLMOD matrix and widen each real entry to a
  // Complex with zero imaginary part.
  cholmod_dense *A
    = cholmod_l_allocate_dense (a.rows (), a.cols (), a.rows(),
                                CHOLMOD_COMPLEX, cc1);

  const double *a_x = a.data ();
  Complex *A_x = reinterpret_cast<Complex *> (A->x);
  for (octave_idx_type j = 0; j < a.cols() * a.rows() ; j++)
    A_x[j] = Complex (a_x[j], 0.0);

  return A;
}

// Convert real dense octave matrix to real dense cholmod matrix.
// Returns a "shallow" copy of a.
static cholmod_dense
rod2rcd (const MArray<double>& a)
{
  cholmod_dense A;

  A.ncol = a.cols ();
  A.nrow = a.rows ();
  A.nzmax = a.cols () * a.rows ();
  A.xtype = CHOLMOD_REAL;
  A.dtype = CHOLMOD_DOUBLE;
  A.z = nullptr;
  A.d = a.rows ();  // leading dimension equals row count (no padding)
  A.x = const_cast<double *> (a.data ());  // alias Octave's data

  return A;
}

// Convert complex dense octave matrix to complex dense cholmod matrix.
// Returns a "shallow" copy of a.
static cholmod_dense
cod2ccd (const ComplexMatrix& a)
{
  cholmod_dense A;

  A.ncol = a.cols ();
  A.nrow = a.rows ();
  A.nzmax = a.cols () * a.rows ();
  A.xtype = CHOLMOD_COMPLEX;
  A.dtype = CHOLMOD_DOUBLE;
  A.z = nullptr;
  A.d = a.rows ();  // leading dimension equals row count (no padding)
  A.x = const_cast<Complex *> (reinterpret_cast<const Complex *> (a.data ()));

  return A;
}

// Convert real sparse cholmod matrix to real sparse octave matrix.
// Returns a "deep" copy of y (indices and values are copied into newly
// allocated Octave storage; nothing is aliased).
static SparseMatrix
rcs2ros (const cholmod_sparse* y)
{
  octave_idx_type nrow = from_size_t (y->nrow);
  octave_idx_type ncol = from_size_t (y->ncol);
  octave_idx_type nz = from_size_t (y->nzmax);
  SparseMatrix ret (nrow, ncol, nz);

  // Copy the column pointers...
  SuiteSparse_long *y_p = reinterpret_cast<SuiteSparse_long *> (y->p);
  for (octave_idx_type j = 0; j < ncol + 1; j++)
    ret.xcidx (j) = from_suitesparse_long (y_p[j]);

  // ...and the row indices and values.
  SuiteSparse_long *y_i = reinterpret_cast<SuiteSparse_long *> (y->i);
  double *y_x = reinterpret_cast<double *> (y->x);
  for (octave_idx_type j = 0; j < nz; j++)
    {
      ret.xridx (j) = from_suitesparse_long (y_i[j]);
      ret.xdata (j) = y_x[j];
    }

  return ret;
}

// Convert complex sparse cholmod matrix to complex sparse octave matrix.
// Returns a "deep" copy of a. static SparseComplexMatrix ccs2cos (const cholmod_sparse *a) { octave_idx_type nrow = from_size_t (a->nrow); octave_idx_type ncol = from_size_t (a->ncol); octave_idx_type nz = from_size_t (a->nzmax); SparseComplexMatrix ret (nrow, ncol, nz); SuiteSparse_long *a_p = reinterpret_cast<SuiteSparse_long *> (a->p); for (octave_idx_type j = 0; j < ncol + 1; j++) ret.xcidx(j) = from_suitesparse_long (a_p[j]); SuiteSparse_long *a_i = reinterpret_cast<SuiteSparse_long *> (a->i); Complex *a_x = reinterpret_cast<Complex *> (a->x); for (octave_idx_type j = 0; j < nz; j++) { ret.xridx(j) = from_suitesparse_long (a_i[j]); ret.xdata(j) = a_x[j]; } return ret; } // Convert real sparse octave matrix to complex sparse cholmod matrix. // Returns a "deep" copy of a. static cholmod_sparse * ros2ccs (const SparseMatrix& a, cholmod_common *cc) { cholmod_sparse *A = cholmod_l_allocate_sparse (a.rows (), a.cols (), a.nnz (), 0, 1, 0, CHOLMOD_COMPLEX, cc); octave_idx_type ncol = a.cols (); SuiteSparse_long *A_p = reinterpret_cast<SuiteSparse_long *> (A->p); for (octave_idx_type j = 0; j < ncol + 1; j++) A_p[j] = a.cidx(j); const double *a_x = a.data (); Complex *A_x = reinterpret_cast<Complex *> (A->x); SuiteSparse_long *A_i = reinterpret_cast<SuiteSparse_long *> (A->i); for (octave_idx_type j = 0; j < a.nnz (); j++) { A_x[j] = Complex (a_x[j], 0.0); A_i[j] = a.ridx(j); } return A; } static suitesparse_integer suitesparse_long_to_suitesparse_integer (SuiteSparse_long x) { if (x < std::numeric_limits<suitesparse_integer>::min () || x > std::numeric_limits<suitesparse_integer>::max ()) (*current_liboctave_error_handler) ("integer dimension or index out of range for SuiteSparse's indexing type"); return static_cast<suitesparse_integer> (x); } static void spqr_error_handler (const cholmod_common *cc) { if (cc->status >= 0) return; switch (cc->status) { case CHOLMOD_OUT_OF_MEMORY: (*current_liboctave_error_handler) ("sparse_qr: sparse matrix QR factorization failed" 
" - out of memory"); case CHOLMOD_TOO_LARGE: (*current_liboctave_error_handler) ("sparse_qr: sparse matrix QR factorization failed" " - integer overflow occurred"); default: (*current_liboctave_error_handler) ("sparse_qr: sparse matrix QR factorization failed" " - error %d", cc->status); } // FIXME: Free memory? // FIXME: Can cc-status > 0 (CHOLMOD_NOT_POSDEF, CHOLMOD_DSMALL) occur? } #endif // Specializations. // Real-valued matrices. // Arguments for parameter order (taken from SuiteSparseQR documentation). // 0: fixed ordering 0 (no permutation of columns) // 1: natural ordering 1 (only singleton columns are permuted to the left of // the matrix) // 2: colamd // 3: // 4: CHOLMOD best-effort (COLAMD, METIS,...) // 5: AMD(a'*a) // 6: metis(a'*a) // 7: SuiteSparseQR default ordering // 8: try COLAMD, AMD, and METIS; pick best // 9: try COLAMD and AMD; pick best //FIXME: What is order = 3? template <> sparse_qr<SparseMatrix>::sparse_qr_rep::sparse_qr_rep (const SparseMatrix& a, int order) : nrows (a.rows ()), ncols (a.columns ()) #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD)) , m_cc (), m_R (nullptr), m_E (nullptr), m_H (nullptr), m_Htau (nullptr), m_HPinv (nullptr) { octave_idx_type nr = a.rows (); octave_idx_type nc = a.cols (); if (nr < 0 || nc < 0) (*current_liboctave_error_handler) ("matrix dimension with negative size"); if (order < 0 || order > 9) (*current_liboctave_error_handler) ("ordering %d is not supported by SPQR", order); cholmod_l_start (&m_cc); cholmod_sparse A = ros2rcs (a); SuiteSparseQR<double> (order, static_cast<double> (SPQR_DEFAULT_TOL), static_cast<SuiteSparse_long> (A.nrow), &A, &m_R, &m_E, &m_H, &m_HPinv, &m_Htau, &m_cc); spqr_error_handler (&m_cc); if (sizeof (octave_idx_type) != sizeof (SuiteSparse_long)) { delete [] reinterpret_cast<SuiteSparse_long *> (A.p); delete [] reinterpret_cast<SuiteSparse_long *> (A.i); } } #elif defined (HAVE_CXSPARSE) , S (nullptr), N (nullptr) { CXSPARSE_DNAME () A; A.nzmax = a.nnz (); A.m = nrows; A.n 
= ncols; // Cast away const on A, with full knowledge that CSparse won't touch it // Prevents the methods below making a copy of the data. A.p = const_cast<suitesparse_integer *> (to_suitesparse_intptr (a.cidx ())); A.i = const_cast<suitesparse_integer *> (to_suitesparse_intptr (a.ridx ())); A.x = const_cast<double *> (a.data ()); A.nz = -1; S = CXSPARSE_DNAME (_sqr) (order, &A, 1); N = CXSPARSE_DNAME (_qr) (&A, S); if (! N) (*current_liboctave_error_handler) ("sparse_qr: sparse matrix QR factorization filled"); } #else { octave_unused_parameter (order); (*current_liboctave_error_handler) ("sparse_qr: support for SPQR or CXSparse was unavailable or disabled when liboctave was built"); } #endif template <> sparse_qr<SparseMatrix>::sparse_qr_rep::~sparse_qr_rep (void) { #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD)) cholmod_l_free_sparse (&m_R, &m_cc); cholmod_l_free_sparse (&m_H, &m_cc); cholmod_l_free_dense (&m_Htau, &m_cc); free (m_E); // FIXME: use cholmod_l_free free (m_HPinv); cholmod_l_finish (&m_cc); #elif defined (HAVE_CXSPARSE) CXSPARSE_DNAME (_sfree) (S); CXSPARSE_DNAME (_nfree) (N); #endif } template <> SparseMatrix sparse_qr<SparseMatrix>::sparse_qr_rep::V (void) const { #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD)) return rcs2ros (m_H); #elif defined (HAVE_CXSPARSE) // Drop zeros from V and sort // FIXME: Is the double transpose to sort necessary? 
CXSPARSE_DNAME (_dropzeros) (N->L); CXSPARSE_DNAME () *D = CXSPARSE_DNAME (_transpose) (N->L, 1); CXSPARSE_DNAME (_spfree) (N->L); N->L = CXSPARSE_DNAME (_transpose) (D, 1); CXSPARSE_DNAME (_spfree) (D); octave_idx_type nc = N->L->n; octave_idx_type nz = N->L->nzmax; SparseMatrix ret (N->L->m, nc, nz); for (octave_idx_type j = 0; j < nc+1; j++) ret.xcidx (j) = N->L->p[j]; for (octave_idx_type j = 0; j < nz; j++) { ret.xridx (j) = N->L->i[j]; ret.xdata (j) = N->L->x[j]; } return ret; #else return SparseMatrix (); #endif } template <> SparseMatrix sparse_qr<SparseMatrix>::sparse_qr_rep::R (bool econ) const { #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD)) octave_idx_type nr = static_cast<octave_idx_type> (m_R->nrow); octave_idx_type nc = static_cast<octave_idx_type> (m_R->ncol); octave_idx_type nz = static_cast<octave_idx_type> (m_R->nzmax); // FIXME: Does this work if econ = true? SparseMatrix ret ((econ ? (nc > nr ? nr : nc) : nr), nc, nz); SuiteSparse_long *Rp = reinterpret_cast<SuiteSparse_long *> (m_R->p); SuiteSparse_long *Ri = reinterpret_cast<SuiteSparse_long *> (m_R->i); for (octave_idx_type j = 0; j < nc + 1; j++) ret.xcidx (j) = from_suitesparse_long (Rp[j]); for (octave_idx_type j = 0; j < nz; j++) { ret.xridx (j) = from_suitesparse_long (Ri[j]); ret.xdata (j) = (reinterpret_cast<double *> (m_R->x))[j]; } return ret; #elif defined (HAVE_CXSPARSE) // Drop zeros from R and sort // FIXME: Is the double transpose to sort necessary? CXSPARSE_DNAME (_dropzeros) (N->U); CXSPARSE_DNAME () *D = CXSPARSE_DNAME (_transpose) (N->U, 1); CXSPARSE_DNAME (_spfree) (N->U); N->U = CXSPARSE_DNAME (_transpose) (D, 1); CXSPARSE_DNAME (_spfree) (D); octave_idx_type nc = N->U->n; octave_idx_type nz = N->U->nzmax; SparseMatrix ret ((econ ? (nc > nrows ? 
nrows : nc) : nrows), nc, nz); for (octave_idx_type j = 0; j < nc+1; j++) ret.xcidx (j) = N->U->p[j]; for (octave_idx_type j = 0; j < nz; j++) { ret.xridx (j) = N->U->i[j]; ret.xdata (j) = N->U->x[j]; } return ret; #else octave_unused_parameter (econ); return SparseMatrix (); #endif } template <> Matrix sparse_qr<SparseMatrix>::sparse_qr_rep::C (const Matrix& b, bool econ) { #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD)) octave_idx_type nr = (econ ? (ncols > nrows ? nrows : ncols) : nrows); octave_idx_type b_nr = b.rows (); octave_idx_type b_nc = b.cols (); Matrix ret (nr, b_nc); if (nrows != b_nr) (*current_liboctave_error_handler) ("sparse_qr: matrix dimension mismatch"); else if (b_nc < 0 || b_nr < 0) (*current_liboctave_error_handler) ("sparse_qr: matrix dimension with negative size"); cholmod_dense *QTB; // Q' * B cholmod_dense B = rod2rcd (b); QTB = SuiteSparseQR_qmult<double> (SPQR_QTX, m_H, m_Htau, m_HPinv, &B, &m_cc); spqr_error_handler (&m_cc); // copy QTB into ret double *QTB_x = reinterpret_cast<double *> (QTB->x); double *ret_vec = reinterpret_cast<double *> (ret.fortran_vec ()); for (octave_idx_type j = 0; j < b_nc; j++) for (octave_idx_type i = 0; i < nr; i++) ret_vec[j * nr + i] = QTB_x[j * b_nr + i]; cholmod_l_free_dense (&QTB, &m_cc); return ret; #elif defined (HAVE_CXSPARSE) if (econ) (*current_liboctave_error_handler) ("sparse-qr: economy mode with CXSparse not supported"); octave_idx_type b_nr = b.rows (); octave_idx_type b_nc = b.cols (); octave_idx_type nc = N->L->n; octave_idx_type nr = nrows; const double *bvec = b.data (); Matrix ret (b_nr, b_nc); double *vec = ret.fortran_vec (); if (nr < 0 || nc < 0 || nr != b_nr) (*current_liboctave_error_handler) ("matrix dimension mismatch"); if (nr == 0 || nc == 0 || b_nc == 0) ret = Matrix (nc, b_nc, 0.0); else { OCTAVE_LOCAL_BUFFER (double, buf, S->m2); for (volatile octave_idx_type j = 0, idx = 0; j < b_nc; j++, idx += b_nr) { octave_quit (); for (octave_idx_type i = nr; i < S->m2; i++) 
buf[i] = 0.; volatile octave_idx_type nm = (nr < nc ? nr : nc); CXSPARSE_DNAME (_ipvec) (S->pinv, bvec + idx, buf, b_nr); for (volatile octave_idx_type i = 0; i < nm; i++) { octave_quit (); CXSPARSE_DNAME (_happly) (N->L, i, N->B[i], buf); } for (octave_idx_type i = 0; i < b_nr; i++) vec[i+idx] = buf[i]; } } return ret; #else octave_unused_parameter (b); octave_unused_parameter (econ); return Matrix (); #endif } template <> Matrix sparse_qr<SparseMatrix>::sparse_qr_rep::Q (bool econ) { #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD)) octave_idx_type nc = (econ ? (ncols > nrows ? nrows : ncols) : nrows); Matrix ret (nrows, nc); cholmod_dense *q; // I is nrows x nrows identity matrix cholmod_dense *I = cholmod_l_allocate_dense (nrows, nrows, nrows, CHOLMOD_REAL, &m_cc); for (octave_idx_type i = 0; i < nrows * nrows; i++) (reinterpret_cast<double *> (I->x))[i] = 0.0; for (octave_idx_type i = 0; i < nrows; i++) (reinterpret_cast<double *> (I->x))[i * nrows + i] = 1.0; q = SuiteSparseQR_qmult<double> (SPQR_QX, m_H, m_Htau, m_HPinv, I, &m_cc); spqr_error_handler (&m_cc); double *q_x = reinterpret_cast<double *> (q->x); double *ret_vec = const_cast<double *> (ret.fortran_vec ()); for (octave_idx_type j = 0; j < nc; j++) for (octave_idx_type i = 0; i < nrows; i++) ret_vec[j * nrows + i] = q_x[j * nrows + i]; cholmod_l_free_dense (&q, &m_cc); cholmod_l_free_dense (&I, &m_cc); return ret; #elif defined (HAVE_CXSPARSE) if (econ) (*current_liboctave_error_handler) ("sparse-qr: economy mode with CXSparse not supported"); octave_idx_type nc = N->L->n; octave_idx_type nr = nrows; Matrix ret (nr, nr); double *ret_vec = ret.fortran_vec (); if (nr < 0 || nc < 0) (*current_liboctave_error_handler) ("matrix dimension mismatch"); if (nr == 0 || nc == 0) ret = Matrix (nc, nr, 0.0); else { OCTAVE_LOCAL_BUFFER (double, bvec, nr + 1); for (octave_idx_type i = 0; i < nr; i++) bvec[i] = 0.; OCTAVE_LOCAL_BUFFER (double, buf, S->m2); for (volatile octave_idx_type j = 0, idx = 0; j < nr; 
j++, idx += nr) { octave_quit (); bvec[j] = 1.0; for (octave_idx_type i = nr; i < S->m2; i++) buf[i] = 0.; volatile octave_idx_type nm = (nr < nc ? nr : nc); CXSPARSE_DNAME (_ipvec) (S->pinv, bvec, buf, nr); for (volatile octave_idx_type i = 0; i < nm; i++) { octave_quit (); CXSPARSE_DNAME (_happly) (N->L, i, N->B[i], buf); } for (octave_idx_type i = 0; i < nr; i++) ret_vec[i+idx] = buf[i]; bvec[j] = 0.0; } } return ret.transpose (); #else octave_unused_parameter (econ); return Matrix (); #endif } template <> template <> Matrix sparse_qr<SparseMatrix>::sparse_qr_rep::tall_solve<MArray<double>, Matrix> (const MArray<double>& b, octave_idx_type& info) { info = -1; #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD) && defined (HAVE_CXSPARSE)) octave_idx_type b_nr = b.rows (); octave_idx_type b_nc = b.cols (); Matrix x (ncols, b_nc); // X = m_E'*(m_R\(Q'*B)) if (nrows < 0 || ncols < 0 || b_nc < 0 || b_nr < 0) (*current_liboctave_error_handler) ("matrix dimension with negative size"); if (nrows < 0 || ncols < 0 || nrows != b_nr) (*current_liboctave_error_handler) ("matrix dimension mismatch"); cholmod_dense *QTB; // Q' * B cholmod_dense B = rod2rcd (b); // FIXME: Process b column by column as in the CXSPARSE version below. // This avoids a large dense matrix Q' * B in memory. 
// (Continuation of sparse_qr<SparseMatrix>::sparse_qr_rep::tall_solve
// <MArray<double>, Matrix> — SPQR/CHOLMOD branch.  The function header is on
// an earlier source line.)

// Compute Q'*B with the stored Householder vectors.
QTB = SuiteSparseQR_qmult<double> (SPQR_QTX, m_H, m_Htau, m_HPinv, &B, &m_cc);
spqr_error_handler (&m_cc);

// convert m_R into CXSPARSE matrix R2
CXSPARSE_DNAME (_sparse) R2;
R2.n = ncols;
R2.m = ncols;
R2.nzmax = m_R->nzmax;
R2.x = reinterpret_cast<double *> (m_R->x);
suitesparse_integer *R2_p;
suitesparse_integer *R2_i;
if (sizeof (suitesparse_integer) == sizeof (SuiteSparse_long))
  {
    // Same integer width: the SPQR index arrays can be aliased directly.
    R2.p = reinterpret_cast<suitesparse_integer *> (m_R->p);
    R2.i = reinterpret_cast<suitesparse_integer *> (m_R->i);
  }
else
  {
    // Different widths: copy/convert the column pointers and row indices.
    R2_p = new suitesparse_integer[ncols+1];
    SuiteSparse_long *R_p = reinterpret_cast<SuiteSparse_long *> (m_R->p);
    for (octave_idx_type i = 0; i < ncols+1; i++)
      R2_p[i] = suitesparse_long_to_suitesparse_integer (R_p[i]);
    R2.p = R2_p;
    octave_idx_type nnz = m_R->nzmax;
    R2_i = new suitesparse_integer[nnz];
    SuiteSparse_long *R_i = reinterpret_cast<SuiteSparse_long *> (m_R->i);
    for (octave_idx_type i = 0; i < nnz; i++)
      R2_i[i] = suitesparse_long_to_suitesparse_integer (R_i[i]);
    R2.i = R2_i;
  }
R2.nz = -1;  // compressed-column form
double *x_vec = const_cast<double *> (x.fortran_vec ());
suitesparse_integer *E;
if (sizeof (suitesparse_integer) != sizeof (SuiteSparse_long))
  {
    // Convert the column permutation as well.
    E = new suitesparse_integer [ncols];
    for (octave_idx_type i = 0; i < ncols; i++)
      E[i] = suitesparse_long_to_suitesparse_integer (m_E[i]);
  }
else
  E = reinterpret_cast<suitesparse_integer *> (m_E);
for (volatile octave_idx_type j = 0; j < b_nc; j++)
  {
    // fill x(:,j)
    // solve (m_R\(Q'*B(:,j)) and store result in QTB(:,j)
    CXSPARSE_DNAME (_usolve)
      (&R2, &(reinterpret_cast<double *> (QTB->x)[j * b_nr]));
    // x(:,j) = m_E' * (m_R\(Q'*B(:,j))
    CXSPARSE_DNAME (_ipvec)
      (E, &(reinterpret_cast<double *> (QTB->x)[j * b_nr]),
       &x_vec[j * ncols], ncols);
  }
if (sizeof (suitesparse_integer) != sizeof (SuiteSparse_long))
  {
    // Free the converted copies made above.
    delete [] R2_p;
    delete [] R2_i;
    delete [] E;
  }
cholmod_l_free_dense (&QTB, &m_cc);
info = 0;
return x;
#elif defined (HAVE_CXSPARSE)
// CXSparse branch: x = q' * (U \ (happly (V, tau, pinv*b))) per column.
octave_idx_type nr = nrows;
octave_idx_type nc = ncols;
octave_idx_type b_nc = b.cols ();
octave_idx_type b_nr = b.rows ();
const double *bvec = b.data ();
Matrix x (nc, b_nc);
double *vec = x.fortran_vec ();
// Workspace sized by the symbolic factorization (S->m2 >= nr).
OCTAVE_LOCAL_BUFFER (double, buf, S->m2);
// NOTE: loop indices are volatile, presumably so their values survive a
// non-local exit from octave_quit () — TODO confirm.
for (volatile octave_idx_type i = 0, idx = 0, bidx = 0; i < b_nc;
     i++, idx+=nc, bidx+=b_nr)
  {
    octave_quit ();
    for (octave_idx_type j = nr; j < S->m2; j++)
      buf[j] = 0.;  // zero the padding rows of the workspace
    CXSPARSE_DNAME (_ipvec) (S->pinv, bvec + bidx, buf, nr);
    for (volatile octave_idx_type j = 0; j < nc; j++)
      {
        octave_quit ();
        CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf);
      }
    CXSPARSE_DNAME (_usolve) (N->U, buf);
    CXSPARSE_DNAME (_ipvec) (S->q, buf, vec + idx, nc);
  }
info = 0;
return x;
#else
octave_unused_parameter (b);

return Matrix ();
#endif
}

// Least-squares solve of an underdetermined (wide) system for a real dense
// right-hand side.  The rep holds the factorization of the TRANSPOSED
// matrix; the solve applies the factors in reverse order.
template <>
template <>
Matrix
sparse_qr<SparseMatrix>::sparse_qr_rep::wide_solve<MArray<double>, Matrix>
  (const MArray<double>& b, octave_idx_type& info) const
{
  info = -1;
#if defined (HAVE_CXSPARSE)
  // These are swapped because the original matrix was transposed in
  // sparse_qr<SparseMatrix>::solve.
  octave_idx_type nr = ncols;
  octave_idx_type nc = nrows;
  octave_idx_type b_nc = b.cols ();
  octave_idx_type b_nr = b.rows ();
  const double *bvec = b.data ();
  Matrix x (nc, b_nc);
  double *vec = x.fortran_vec ();
  // The workspace must hold both nc entries and the factorization's m2 rows.
  volatile octave_idx_type nbuf = (nc > S->m2 ? nc : S->m2);
  OCTAVE_LOCAL_BUFFER (double, buf, nbuf);
  for (volatile octave_idx_type i = 0, idx = 0, bidx = 0; i < b_nc;
       i++, idx+=nc, bidx+=b_nr)
    {
      octave_quit ();
      for (octave_idx_type j = nr; j < nbuf; j++)
        buf[j] = 0.;
      // Transposed application: q-permute, solve U', apply Householder
      // reflectors in reverse, then inverse pinv-permute.
      CXSPARSE_DNAME (_pvec) (S->q, bvec + bidx, buf, nr);
      CXSPARSE_DNAME (_utsolve) (N->U, buf);
      for (volatile octave_idx_type j = nr-1; j >= 0; j--)
        {
          octave_quit ();
          CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf);
        }
      CXSPARSE_DNAME (_pvec) (S->pinv, buf, vec + idx, nc);
    }
  info = 0;
  return x;
#else
  octave_unused_parameter (b);

  return Matrix ();
#endif
}

// Least-squares solve (tall system) for a sparse real right-hand side;
// result is accumulated sparsely, growing capacity on demand.
template <>
template <>
SparseMatrix
sparse_qr<SparseMatrix>::sparse_qr_rep::tall_solve<SparseMatrix, SparseMatrix>
  (const SparseMatrix& b, octave_idx_type& info)
{
  info = -1;
#if defined (HAVE_CXSPARSE)
  octave_idx_type nr = nrows;
  octave_idx_type nc = ncols;
  octave_idx_type b_nr = b.rows ();
  octave_idx_type b_nc = b.cols ();
  SparseMatrix x (nc, b_nc, b.nnz ());
  x.xcidx (0) = 0;
  volatile octave_idx_type x_nz = b.nnz ();  // current capacity of x
  volatile octave_idx_type ii = 0;           // running nonzero count
  OCTAVE_LOCAL_BUFFER (double, Xx, (b_nr > nc ? b_nr : nc));
  OCTAVE_LOCAL_BUFFER (double, buf, S->m2);
  for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc)
    {
      octave_quit ();
      // Densify column i of b.
      for (octave_idx_type j = 0; j < b_nr; j++)
        Xx[j] = b.xelem (j, i);
      for (octave_idx_type j = nr; j < S->m2; j++)
        buf[j] = 0.;
      CXSPARSE_DNAME (_ipvec) (S->pinv, Xx, buf, nr);
      for (volatile octave_idx_type j = 0; j < nc; j++)
        {
          octave_quit ();
          CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf);
        }
      CXSPARSE_DNAME (_usolve) (N->U, buf);
      CXSPARSE_DNAME (_ipvec) (S->q, buf, Xx, nc);
      // Copy the nonzeros of the solved column into x.
      for (octave_idx_type j = 0; j < nc; j++)
        {
          double tmp = Xx[j];
          if (tmp != 0.0)
            {
              if (ii == x_nz)
                {
                  // Resize the sparse matrix
                  octave_idx_type sz = x_nz * (b_nc - i) / b_nc;
                  sz = (sz > 10 ? sz : 10) + x_nz;
                  x.change_capacity (sz);
                  x_nz = sz;
                }
              x.xdata (ii) = tmp;
              x.xridx (ii++) = j;
            }
        }
      x.xcidx (i+1) = ii;
    }
  info = 0;
  return x;
#else
  octave_unused_parameter (b);

  return SparseMatrix ();
#endif
}

// Least-squares solve (wide system, factorization of the transpose) for a
// sparse real right-hand side.
template <>
template <>
SparseMatrix
sparse_qr<SparseMatrix>::sparse_qr_rep::wide_solve<SparseMatrix, SparseMatrix>
  (const SparseMatrix& b, octave_idx_type& info) const
{
  info = -1;
#if defined (HAVE_CXSPARSE)
  // These are swapped because the original matrix was transposed in
  // sparse_qr<SparseMatrix>::solve.
  octave_idx_type nr = ncols;
  octave_idx_type nc = nrows;
  octave_idx_type b_nr = b.rows ();
  octave_idx_type b_nc = b.cols ();
  SparseMatrix x (nc, b_nc, b.nnz ());
  x.xcidx (0) = 0;
  volatile octave_idx_type x_nz = b.nnz ();
  volatile octave_idx_type ii = 0;
  volatile octave_idx_type nbuf = (nc > S->m2 ? nc : S->m2);
  OCTAVE_LOCAL_BUFFER (double, Xx, (b_nr > nc ? b_nr : nc));
  OCTAVE_LOCAL_BUFFER (double, buf, nbuf);
  for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc)
    {
      octave_quit ();
      for (octave_idx_type j = 0; j < b_nr; j++)
        Xx[j] = b.xelem (j, i);
      for (octave_idx_type j = nr; j < nbuf; j++)
        buf[j] = 0.;
      CXSPARSE_DNAME (_pvec) (S->q, Xx, buf, nr);
      CXSPARSE_DNAME (_utsolve) (N->U, buf);
      for (volatile octave_idx_type j = nr-1; j >= 0; j--)
        {
          octave_quit ();
          CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf);
        }
      CXSPARSE_DNAME (_pvec) (S->pinv, buf, Xx, nc);
      for (octave_idx_type j = 0; j < nc; j++)
        {
          double tmp = Xx[j];
          if (tmp != 0.0)
            {
              if (ii == x_nz)
                {
                  // Resize the sparse matrix
                  octave_idx_type sz = x_nz * (b_nc - i) / b_nc;
                  sz = (sz > 10 ? sz : 10) + x_nz;
                  x.change_capacity (sz);
                  x_nz = sz;
                }
              x.xdata (ii) = tmp;
              x.xridx (ii++) = j;
            }
        }
      x.xcidx (i+1) = ii;
    }
  info = 0;
  x.maybe_compress ();
  return x;
#else
  octave_unused_parameter (b);

  return SparseMatrix ();
#endif
}

// Tall solve of a real factorization against a complex dense RHS: the real
// and imaginary parts are pushed through the (real) factors separately.
template <>
template <>
ComplexMatrix
sparse_qr<SparseMatrix>::sparse_qr_rep::tall_solve<MArray<Complex>, ComplexMatrix>
  (const MArray<Complex>& b, octave_idx_type& info)
{
  info = -1;
#if defined (HAVE_CXSPARSE)
  octave_idx_type nr = nrows;
  octave_idx_type nc = ncols;
  octave_idx_type b_nc = b.cols ();
  octave_idx_type b_nr = b.rows ();
  ComplexMatrix x (nc, b_nc);
  Complex *vec = x.fortran_vec ();
  OCTAVE_LOCAL_BUFFER (double, Xx, (b_nr > nc ? b_nr : nc));
  OCTAVE_LOCAL_BUFFER (double, Xz, (b_nr > nc ? b_nr : nc));
  OCTAVE_LOCAL_BUFFER (double, buf, S->m2);
  for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc)
    {
      octave_quit ();
      // Split column i into real (Xx) and imaginary (Xz) parts.
      for (octave_idx_type j = 0; j < b_nr; j++)
        {
          Complex c = b.xelem (j, i);
          Xx[j] = c.real ();
          Xz[j] = c.imag ();
        }
      // Solve for the real part ...
      for (octave_idx_type j = nr; j < S->m2; j++)
        buf[j] = 0.;
      CXSPARSE_DNAME (_ipvec) (S->pinv, Xx, buf, nr);
      for (volatile octave_idx_type j = 0; j < nc; j++)
        {
          octave_quit ();
          CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf);
        }
      CXSPARSE_DNAME (_usolve) (N->U, buf);
      CXSPARSE_DNAME (_ipvec) (S->q, buf, Xx, nc);
      // ... and again for the imaginary part.
      for (octave_idx_type j = nr; j < S->m2; j++)
        buf[j] = 0.;
      CXSPARSE_DNAME (_ipvec) (S->pinv, Xz, buf, nr);
      for (volatile octave_idx_type j = 0; j < nc; j++)
        {
          octave_quit ();
          CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf);
        }
      CXSPARSE_DNAME (_usolve) (N->U, buf);
      CXSPARSE_DNAME (_ipvec) (S->q, buf, Xz, nc);
      // Recombine into the complex result column.
      for (octave_idx_type j = 0; j < nc; j++)
        vec[j+idx] = Complex (Xx[j], Xz[j]);
    }
  info = 0;
  return x;
#else
  octave_unused_parameter (b);

  return ComplexMatrix ();
#endif
}

// Wide solve (factorization of the transpose) of a real factorization
// against a complex dense RHS; real/imaginary parts handled separately.
template <>
template <>
ComplexMatrix
sparse_qr<SparseMatrix>::sparse_qr_rep::wide_solve<MArray<Complex>, ComplexMatrix>
  (const MArray<Complex>& b, octave_idx_type& info) const
{
  info = -1;
#if defined (HAVE_CXSPARSE)
  // These are swapped because the original matrix was transposed in
  // sparse_qr<SparseMatrix>::solve.
  octave_idx_type nr = ncols;
  octave_idx_type nc = nrows;
  octave_idx_type b_nc = b.cols ();
  octave_idx_type b_nr = b.rows ();
  ComplexMatrix x (nc, b_nc);
  Complex *vec = x.fortran_vec ();
  volatile octave_idx_type nbuf = (nc > S->m2 ? nc : S->m2);
  OCTAVE_LOCAL_BUFFER (double, Xx, (b_nr > nc ? b_nr : nc));
  OCTAVE_LOCAL_BUFFER (double, Xz, (b_nr > nc ? b_nr : nc));
  OCTAVE_LOCAL_BUFFER (double, buf, nbuf);
  for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc)
    {
      octave_quit ();
      for (octave_idx_type j = 0; j < b_nr; j++)
        {
          Complex c = b.xelem (j, i);
          Xx[j] = c.real ();
          Xz[j] = c.imag ();
        }
      // Real part.
      for (octave_idx_type j = nr; j < nbuf; j++)
        buf[j] = 0.;
      CXSPARSE_DNAME (_pvec) (S->q, Xx, buf, nr);
      CXSPARSE_DNAME (_utsolve) (N->U, buf);
      for (volatile octave_idx_type j = nr-1; j >= 0; j--)
        {
          octave_quit ();
          CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf);
        }
      CXSPARSE_DNAME (_pvec) (S->pinv, buf, Xx, nc);
      // Imaginary part.
      for (octave_idx_type j = nr; j < nbuf; j++)
        buf[j] = 0.;
      CXSPARSE_DNAME (_pvec) (S->q, Xz, buf, nr);
      CXSPARSE_DNAME (_utsolve) (N->U, buf);
      for (volatile octave_idx_type j = nr-1; j >= 0; j--)
        {
          octave_quit ();
          CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf);
        }
      CXSPARSE_DNAME (_pvec) (S->pinv, buf, Xz, nc);
      for (octave_idx_type j = 0; j < nc; j++)
        vec[j+idx] = Complex (Xx[j], Xz[j]);
    }
  info = 0;
  return x;
#else
  octave_unused_parameter (b);

  return ComplexMatrix ();
#endif
}

// Complex-valued matrices.
template <> sparse_qr<SparseComplexMatrix>::sparse_qr_rep::sparse_qr_rep (const SparseComplexMatrix& a, int order) : nrows (a.rows ()), ncols (a.columns ()) #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD)) , m_cc (), m_R (nullptr), m_E (nullptr), m_H (nullptr), m_Htau (nullptr), m_HPinv (nullptr) { octave_idx_type nr = a.rows (); octave_idx_type nc = a.cols (); if (nr < 0 || nc < 0) (*current_liboctave_error_handler) ("matrix dimension with negative size"); if (order < 0 || order > 9) (*current_liboctave_error_handler) ("ordering %d is not supported by SPQR", order); cholmod_l_start (&m_cc); cholmod_sparse A = cos2ccs (a); SuiteSparseQR<Complex> (order, static_cast<double> (SPQR_DEFAULT_TOL), static_cast<SuiteSparse_long> (A.nrow), &A, &m_R, &m_E, &m_H, &m_HPinv, &m_Htau, &m_cc); spqr_error_handler (&m_cc); if (sizeof (octave_idx_type) != sizeof (SuiteSparse_long)) { delete [] reinterpret_cast<SuiteSparse_long *> (A.p); delete [] reinterpret_cast<SuiteSparse_long *> (A.i); } } #elif defined (HAVE_CXSPARSE) , S (nullptr), N (nullptr) { CXSPARSE_ZNAME () A; A.nzmax = a.nnz (); A.m = nrows; A.n = ncols; // Cast away const on A, with full knowledge that CSparse won't touch it // Prevents the methods below making a copy of the data. A.p = const_cast<suitesparse_integer *> (to_suitesparse_intptr (a.cidx ())); A.i = const_cast<suitesparse_integer *> (to_suitesparse_intptr (a.ridx ())); A.x = const_cast<cs_complex_t *> (reinterpret_cast<const cs_complex_t *> (a.data ())); A.nz = -1; S = CXSPARSE_ZNAME (_sqr) (order, &A, 1); N = CXSPARSE_ZNAME (_qr) (&A, S); if (! 
N) (*current_liboctave_error_handler) ("sparse_qr: sparse matrix QR factorization filled"); } #else { octave_unused_parameter (order); (*current_liboctave_error_handler) ("sparse_qr: support for CXSparse was unavailable or disabled when liboctave was built"); } #endif template <> sparse_qr<SparseComplexMatrix>::sparse_qr_rep::~sparse_qr_rep (void) { #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD)) cholmod_l_free_sparse (&m_R, &m_cc); cholmod_l_free_sparse (&m_H, &m_cc); cholmod_l_free_dense (&m_Htau, &m_cc); free (m_E); // FIXME: use cholmod_l_free free (m_HPinv); cholmod_l_finish (&m_cc); #elif defined (HAVE_CXSPARSE) CXSPARSE_ZNAME (_sfree) (S); CXSPARSE_ZNAME (_nfree) (N); #endif } template <> SparseComplexMatrix sparse_qr<SparseComplexMatrix>::sparse_qr_rep::V (void) const { #if defined (HAVE_CXSPARSE) // Drop zeros from V and sort // FIXME: Is the double transpose to sort necessary? CXSPARSE_ZNAME (_dropzeros) (N->L); CXSPARSE_ZNAME () *D = CXSPARSE_ZNAME (_transpose) (N->L, 1); CXSPARSE_ZNAME (_spfree) (N->L); N->L = CXSPARSE_ZNAME (_transpose) (D, 1); CXSPARSE_ZNAME (_spfree) (D); octave_idx_type nc = N->L->n; octave_idx_type nz = N->L->nzmax; SparseComplexMatrix ret (N->L->m, nc, nz); for (octave_idx_type j = 0; j < nc+1; j++) ret.xcidx (j) = N->L->p[j]; for (octave_idx_type j = 0; j < nz; j++) { ret.xridx (j) = N->L->i[j]; ret.xdata (j) = reinterpret_cast<Complex *> (N->L->x)[j]; } return ret; #else return SparseComplexMatrix (); #endif } template <> SparseComplexMatrix sparse_qr<SparseComplexMatrix>::sparse_qr_rep::R (bool econ) const { #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD)) octave_idx_type nr = from_size_t (m_R->nrow); octave_idx_type nc = from_size_t (m_R->ncol); octave_idx_type nz = from_size_t (m_R->nzmax); // FIXME: Does this work if econ = true? SparseComplexMatrix ret ((econ ? (nc > nr ? 
nr : nc) : nr), nc, nz); SuiteSparse_long *Rp = reinterpret_cast<SuiteSparse_long *> (m_R->p); SuiteSparse_long *Ri = reinterpret_cast<SuiteSparse_long *> (m_R->i); for (octave_idx_type j = 0; j < nc + 1; j++) ret.xcidx (j) = from_suitesparse_long (Rp[j]); for (octave_idx_type j = 0; j < nz; j++) { ret.xridx (j) = from_suitesparse_long (Ri[j]); ret.xdata (j) = (reinterpret_cast<Complex *> (m_R->x))[j]; } return ret; #elif defined (HAVE_CXSPARSE) // Drop zeros from R and sort // FIXME: Is the double transpose to sort necessary? CXSPARSE_ZNAME (_dropzeros) (N->U); CXSPARSE_ZNAME () *D = CXSPARSE_ZNAME (_transpose) (N->U, 1); CXSPARSE_ZNAME (_spfree) (N->U); N->U = CXSPARSE_ZNAME (_transpose) (D, 1); CXSPARSE_ZNAME (_spfree) (D); octave_idx_type nc = N->U->n; octave_idx_type nz = N->U->nzmax; SparseComplexMatrix ret ((econ ? (nc > nrows ? nrows : nc) : nrows), nc, nz); for (octave_idx_type j = 0; j < nc+1; j++) ret.xcidx (j) = N->U->p[j]; for (octave_idx_type j = 0; j < nz; j++) { ret.xridx (j) = N->U->i[j]; ret.xdata (j) = reinterpret_cast<Complex *>(N->U->x)[j]; } return ret; #else octave_unused_parameter (econ); return SparseComplexMatrix (); #endif } template <> ComplexMatrix sparse_qr<SparseComplexMatrix>::sparse_qr_rep::C (const ComplexMatrix& b, bool econ) { #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD)) // FIXME: not tested octave_idx_type nr = (econ ? (ncols > nrows ? 
nrows : ncols) : nrows); octave_idx_type b_nr = b.rows (); octave_idx_type b_nc = b.cols (); ComplexMatrix ret (nr, b_nc); if (nrows != b_nr) (*current_liboctave_error_handler) ("matrix dimension mismatch"); if (b_nc < 0 || b_nr < 0) (*current_liboctave_error_handler) ("matrix dimension with negative size"); cholmod_dense *QTB; // Q' * B cholmod_dense B = cod2ccd (b); QTB = SuiteSparseQR_qmult<Complex> (SPQR_QTX, m_H, m_Htau, m_HPinv, &B, &m_cc); spqr_error_handler (&m_cc); // copy QTB into ret Complex *QTB_x = reinterpret_cast<Complex *> (QTB->x); Complex *ret_vec = reinterpret_cast<Complex *> (ret.fortran_vec ()); for (octave_idx_type j = 0; j < b_nc; j++) for (octave_idx_type i = 0; i < nr; i++) ret_vec[j * nr + i] = QTB_x[j * b_nr + i]; cholmod_l_free_dense (&QTB, &m_cc); return ret; #elif defined (HAVE_CXSPARSE) if (econ) (*current_liboctave_error_handler) ("sparse-qr: economy mode with CXSparse not supported"); octave_idx_type b_nr = b.rows (); octave_idx_type b_nc = b.cols (); octave_idx_type nc = N->L->n; octave_idx_type nr = nrows; const cs_complex_t *bvec = reinterpret_cast<const cs_complex_t *> (b.data ()); ComplexMatrix ret (b_nr, b_nc); Complex *vec = ret.fortran_vec (); if (nr < 0 || nc < 0 || nr != b_nr) (*current_liboctave_error_handler) ("matrix dimension mismatch"); if (nr == 0 || nc == 0 || b_nc == 0) ret = ComplexMatrix (nc, b_nc, Complex (0.0, 0.0)); else { OCTAVE_LOCAL_BUFFER (Complex, buf, S->m2); for (volatile octave_idx_type j = 0, idx = 0; j < b_nc; j++, idx += b_nr) { octave_quit (); volatile octave_idx_type nm = (nr < nc ? 
nr : nc); CXSPARSE_ZNAME (_ipvec) (S->pinv, bvec + idx, reinterpret_cast<cs_complex_t *> (buf), b_nr); for (volatile octave_idx_type i = 0; i < nm; i++) { octave_quit (); CXSPARSE_ZNAME (_happly) (N->L, i, N->B[i], reinterpret_cast<cs_complex_t *> (buf)); } for (octave_idx_type i = 0; i < b_nr; i++) vec[i+idx] = buf[i]; } } return ret; #else octave_unused_parameter (b); octave_unused_parameter (econ); return ComplexMatrix (); #endif } template <> ComplexMatrix sparse_qr<SparseComplexMatrix>::sparse_qr_rep::Q (bool econ) { #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD)) octave_idx_type nc = (econ ? (ncols > nrows ? nrows : ncols) : nrows); ComplexMatrix ret (nrows, nc); cholmod_dense *q; // I is nrows x nrows identity matrix cholmod_dense *I = reinterpret_cast<cholmod_dense *> (cholmod_l_allocate_dense (nrows, nrows, nrows, CHOLMOD_COMPLEX, &m_cc)); for (octave_idx_type i = 0; i < nrows * nrows; i++) (reinterpret_cast<Complex *> (I->x))[i] = 0.0; for (octave_idx_type i = 0; i < nrows; i++) (reinterpret_cast<Complex *> (I->x))[i * nrows + i] = 1.0; q = SuiteSparseQR_qmult<Complex> (SPQR_QX, m_H, m_Htau, m_HPinv, I, &m_cc); spqr_error_handler (&m_cc); Complex *q_x = reinterpret_cast<Complex *> (q->x); Complex *ret_vec = const_cast<Complex *> (ret.fortran_vec ()); for (octave_idx_type j = 0; j < nc; j++) for (octave_idx_type i = 0; i < nrows; i++) ret_vec[j * nrows + i] = q_x[j * nrows + i]; cholmod_l_free_dense (&q, &m_cc); cholmod_l_free_dense (&I, &m_cc); return ret; #elif defined (HAVE_CXSPARSE) if (econ) (*current_liboctave_error_handler) ("sparse-qr: economy mode with CXSparse not supported"); octave_idx_type nc = N->L->n; octave_idx_type nr = nrows; ComplexMatrix ret (nr, nr); Complex *vec = ret.fortran_vec (); if (nr < 0 || nc < 0) (*current_liboctave_error_handler) ("matrix dimension mismatch"); if (nr == 0 || nc == 0) ret = ComplexMatrix (nc, nr, Complex (0.0, 0.0)); else { OCTAVE_LOCAL_BUFFER (cs_complex_t, bvec, nr); for (octave_idx_type i = 0; i < nr; 
i++) bvec[i] = cs_complex_t (0.0, 0.0); OCTAVE_LOCAL_BUFFER (Complex, buf, S->m2); for (volatile octave_idx_type j = 0, idx = 0; j < nr; j++, idx+=nr) { octave_quit (); bvec[j] = cs_complex_t (1.0, 0.0); volatile octave_idx_type nm = (nr < nc ? nr : nc); CXSPARSE_ZNAME (_ipvec) (S->pinv, bvec, reinterpret_cast<cs_complex_t *> (buf), nr); for (volatile octave_idx_type i = 0; i < nm; i++) { octave_quit (); CXSPARSE_ZNAME (_happly) (N->L, i, N->B[i], reinterpret_cast<cs_complex_t *> (buf)); } for (octave_idx_type i = 0; i < nr; i++) vec[i+idx] = buf[i]; bvec[j] = cs_complex_t (0.0, 0.0); } } return ret.hermitian (); #else octave_unused_parameter (econ); return ComplexMatrix (); #endif } template <> template <> SparseComplexMatrix sparse_qr<SparseMatrix>::sparse_qr_rep::tall_solve<SparseComplexMatrix, SparseComplexMatrix> (const SparseComplexMatrix& b, octave_idx_type& info) { info = -1; #if defined (HAVE_CXSPARSE) octave_idx_type nr = nrows; octave_idx_type nc = ncols; octave_idx_type b_nr = b.rows (); octave_idx_type b_nc = b.cols (); SparseComplexMatrix x (nc, b_nc, b.nnz ()); x.xcidx (0) = 0; volatile octave_idx_type x_nz = b.nnz (); volatile octave_idx_type ii = 0; OCTAVE_LOCAL_BUFFER (double, Xx, (b_nr > nc ? b_nr : nc)); OCTAVE_LOCAL_BUFFER (double, Xz, (b_nr > nc ? 
b_nr : nc)); OCTAVE_LOCAL_BUFFER (double, buf, S->m2); for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc) { octave_quit (); for (octave_idx_type j = 0; j < b_nr; j++) { Complex c = b.xelem (j, i); Xx[j] = c.real (); Xz[j] = c.imag (); } for (octave_idx_type j = nr; j < S->m2; j++) buf[j] = 0.; CXSPARSE_DNAME (_ipvec) (S->pinv, Xx, buf, nr); for (volatile octave_idx_type j = 0; j < nc; j++) { octave_quit (); CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf); } CXSPARSE_DNAME (_usolve) (N->U, buf); CXSPARSE_DNAME (_ipvec) (S->q, buf, Xx, nc); for (octave_idx_type j = nr; j < S->m2; j++) buf[j] = 0.; CXSPARSE_DNAME (_ipvec) (S->pinv, Xz, buf, nr); for (volatile octave_idx_type j = 0; j < nc; j++) { octave_quit (); CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf); } CXSPARSE_DNAME (_usolve) (N->U, buf); CXSPARSE_DNAME (_ipvec) (S->q, buf, Xz, nc); for (octave_idx_type j = 0; j < nc; j++) { Complex tmp = Complex (Xx[j], Xz[j]); if (tmp != 0.0) { if (ii == x_nz) { // Resize the sparse matrix octave_idx_type sz = x_nz * (b_nc - i) / b_nc; sz = (sz > 10 ? sz : 10) + x_nz; x.change_capacity (sz); x_nz = sz; } x.xdata (ii) = tmp; x.xridx (ii++) = j; } } x.xcidx (i+1) = ii; } info = 0; return x; #else octave_unused_parameter (b); return SparseComplexMatrix (); #endif } template <> template <> SparseComplexMatrix sparse_qr<SparseMatrix>::sparse_qr_rep::wide_solve<SparseComplexMatrix, SparseComplexMatrix> (const SparseComplexMatrix& b, octave_idx_type& info) const { info = -1; #if defined (HAVE_CXSPARSE) // These are swapped because the original matrix was transposed in // sparse_qr<SparseMatrix>::solve. octave_idx_type nr = ncols; octave_idx_type nc = nrows; octave_idx_type b_nr = b.rows (); octave_idx_type b_nc = b.cols (); SparseComplexMatrix x (nc, b_nc, b.nnz ()); x.xcidx (0) = 0; volatile octave_idx_type x_nz = b.nnz (); volatile octave_idx_type ii = 0; volatile octave_idx_type nbuf = (nc > S->m2 ? 
nc : S->m2); OCTAVE_LOCAL_BUFFER (double, Xx, (b_nr > nc ? b_nr : nc)); OCTAVE_LOCAL_BUFFER (double, Xz, (b_nr > nc ? b_nr : nc)); OCTAVE_LOCAL_BUFFER (double, buf, nbuf); for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc) { octave_quit (); for (octave_idx_type j = 0; j < b_nr; j++) { Complex c = b.xelem (j, i); Xx[j] = c.real (); Xz[j] = c.imag (); } for (octave_idx_type j = nr; j < nbuf; j++) buf[j] = 0.; CXSPARSE_DNAME (_pvec) (S->q, Xx, buf, nr); CXSPARSE_DNAME (_utsolve) (N->U, buf); for (volatile octave_idx_type j = nr-1; j >= 0; j--) { octave_quit (); CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf); } CXSPARSE_DNAME (_pvec) (S->pinv, buf, Xx, nc); for (octave_idx_type j = nr; j < nbuf; j++) buf[j] = 0.; CXSPARSE_DNAME (_pvec) (S->q, Xz, buf, nr); CXSPARSE_DNAME (_utsolve) (N->U, buf); for (volatile octave_idx_type j = nr-1; j >= 0; j--) { octave_quit (); CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf); } CXSPARSE_DNAME (_pvec) (S->pinv, buf, Xz, nc); for (octave_idx_type j = 0; j < nc; j++) { Complex tmp = Complex (Xx[j], Xz[j]); if (tmp != 0.0) { if (ii == x_nz) { // Resize the sparse matrix octave_idx_type sz = x_nz * (b_nc - i) / b_nc; sz = (sz > 10 ? 
sz : 10) + x_nz; x.change_capacity (sz); x_nz = sz; } x.xdata (ii) = tmp; x.xridx (ii++) = j; } } x.xcidx (i+1) = ii; } info = 0; x.maybe_compress (); return x; #else octave_unused_parameter (b); return SparseComplexMatrix (); #endif } template <> template <> ComplexMatrix sparse_qr<SparseComplexMatrix>::sparse_qr_rep::tall_solve<MArray<double>, ComplexMatrix> (const MArray<double>& b, octave_idx_type& info) { info = -1; #if defined (HAVE_CXSPARSE) octave_idx_type nr = nrows; octave_idx_type nc = ncols; octave_idx_type b_nc = b.cols (); octave_idx_type b_nr = b.rows (); ComplexMatrix x (nc, b_nc); cs_complex_t *vec = reinterpret_cast<cs_complex_t *> (x.fortran_vec ()); OCTAVE_LOCAL_BUFFER (cs_complex_t, buf, S->m2); OCTAVE_LOCAL_BUFFER (Complex, Xx, b_nr); for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc) { octave_quit (); for (octave_idx_type j = 0; j < b_nr; j++) Xx[j] = b.xelem (j, i); for (octave_idx_type j = nr; j < S->m2; j++) buf[j] = cs_complex_t (0.0, 0.0); CXSPARSE_ZNAME (_ipvec) (S->pinv, reinterpret_cast<cs_complex_t *>(Xx), buf, nr); for (volatile octave_idx_type j = 0; j < nc; j++) { octave_quit (); CXSPARSE_ZNAME (_happly) (N->L, j, N->B[j], buf); } CXSPARSE_ZNAME (_usolve) (N->U, buf); CXSPARSE_ZNAME (_ipvec) (S->q, buf, vec + idx, nc); } info = 0; return x; #else octave_unused_parameter (b); return ComplexMatrix (); #endif } template <> template <> ComplexMatrix sparse_qr<SparseComplexMatrix>::sparse_qr_rep::wide_solve<MArray<double>, ComplexMatrix> (const MArray<double>& b, octave_idx_type& info) const { info = -1; #if defined (HAVE_CXSPARSE) // These are swapped because the original matrix was transposed in // sparse_qr<SparseComplexMatrix>::solve. octave_idx_type nr = ncols; octave_idx_type nc = nrows; octave_idx_type b_nc = b.cols (); octave_idx_type b_nr = b.rows (); ComplexMatrix x (nc, b_nc); cs_complex_t *vec = reinterpret_cast<cs_complex_t *> (x.fortran_vec ()); volatile octave_idx_type nbuf = (nc > S->m2 ? 
nc : S->m2); OCTAVE_LOCAL_BUFFER (cs_complex_t, buf, nbuf); OCTAVE_LOCAL_BUFFER (Complex, Xx, b_nr); OCTAVE_LOCAL_BUFFER (double, B, nr); for (octave_idx_type i = 0; i < nr; i++) B[i] = N->B[i]; for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc) { octave_quit (); for (octave_idx_type j = 0; j < b_nr; j++) Xx[j] = b.xelem (j, i); for (octave_idx_type j = nr; j < nbuf; j++) buf[j] = cs_complex_t (0.0, 0.0); CXSPARSE_ZNAME (_pvec) (S->q, reinterpret_cast<cs_complex_t *> (Xx), buf, nr); CXSPARSE_ZNAME (_utsolve) (N->U, buf); for (volatile octave_idx_type j = nr-1; j >= 0; j--) { octave_quit (); CXSPARSE_ZNAME (_happly) (N->L, j, B[j], buf); } CXSPARSE_ZNAME (_pvec) (S->pinv, buf, vec + idx, nc); } info = 0; return x; #else octave_unused_parameter (b); return ComplexMatrix (); #endif } template <> template <> SparseComplexMatrix sparse_qr<SparseComplexMatrix>::sparse_qr_rep::tall_solve<SparseMatrix, SparseComplexMatrix> (const SparseMatrix& b, octave_idx_type& info) { info = -1; #if defined (HAVE_CXSPARSE) octave_idx_type nr = nrows; octave_idx_type nc = ncols; octave_idx_type b_nc = b.cols (); octave_idx_type b_nr = b.rows (); SparseComplexMatrix x (nc, b_nc, b.nnz ()); x.xcidx (0) = 0; volatile octave_idx_type x_nz = b.nnz (); volatile octave_idx_type ii = 0; OCTAVE_LOCAL_BUFFER (Complex, Xx, (b_nr > nc ? 
b_nr : nc)); OCTAVE_LOCAL_BUFFER (cs_complex_t, buf, S->m2); for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc) { octave_quit (); for (octave_idx_type j = 0; j < b_nr; j++) Xx[j] = b.xelem (j, i); for (octave_idx_type j = nr; j < S->m2; j++) buf[j] = cs_complex_t (0.0, 0.0); CXSPARSE_ZNAME (_ipvec) (S->pinv, reinterpret_cast<cs_complex_t *> (Xx), buf, nr); for (volatile octave_idx_type j = 0; j < nc; j++) { octave_quit (); CXSPARSE_ZNAME (_happly) (N->L, j, N->B[j], buf); } CXSPARSE_ZNAME (_usolve) (N->U, buf); CXSPARSE_ZNAME (_ipvec) (S->q, buf, reinterpret_cast<cs_complex_t *> (Xx), nc); for (octave_idx_type j = 0; j < nc; j++) { Complex tmp = Xx[j]; if (tmp != 0.0) { if (ii == x_nz) { // Resize the sparse matrix octave_idx_type sz = x_nz * (b_nc - i) / b_nc; sz = (sz > 10 ? sz : 10) + x_nz; x.change_capacity (sz); x_nz = sz; } x.xdata (ii) = tmp; x.xridx (ii++) = j; } } x.xcidx (i+1) = ii; } info = 0; x.maybe_compress (); return x; #else octave_unused_parameter (b); return SparseComplexMatrix (); #endif } template <> template <> SparseComplexMatrix sparse_qr<SparseComplexMatrix>::sparse_qr_rep::wide_solve<SparseMatrix, SparseComplexMatrix> (const SparseMatrix& b, octave_idx_type& info) const { info = -1; #if defined (HAVE_CXSPARSE) // These are swapped because the original matrix was transposed in // sparse_qr<SparseComplexMatrix>::solve. octave_idx_type nr = ncols; octave_idx_type nc = nrows; octave_idx_type b_nc = b.cols (); octave_idx_type b_nr = b.rows (); SparseComplexMatrix x (nc, b_nc, b.nnz ()); x.xcidx (0) = 0; volatile octave_idx_type x_nz = b.nnz (); volatile octave_idx_type ii = 0; volatile octave_idx_type nbuf = (nc > S->m2 ? nc : S->m2); OCTAVE_LOCAL_BUFFER (Complex, Xx, (b_nr > nc ? 
b_nr : nc)); OCTAVE_LOCAL_BUFFER (cs_complex_t, buf, nbuf); OCTAVE_LOCAL_BUFFER (double, B, nr); for (octave_idx_type i = 0; i < nr; i++) B[i] = N->B[i]; for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc) { octave_quit (); for (octave_idx_type j = 0; j < b_nr; j++) Xx[j] = b.xelem (j, i); for (octave_idx_type j = nr; j < nbuf; j++) buf[j] = cs_complex_t (0.0, 0.0); CXSPARSE_ZNAME (_pvec) (S->q, reinterpret_cast<cs_complex_t *> (Xx), buf, nr); CXSPARSE_ZNAME (_utsolve) (N->U, buf); for (volatile octave_idx_type j = nr-1; j >= 0; j--) { octave_quit (); CXSPARSE_ZNAME (_happly) (N->L, j, B[j], buf); } CXSPARSE_ZNAME (_pvec) (S->pinv, buf, reinterpret_cast<cs_complex_t *> (Xx), nc); for (octave_idx_type j = 0; j < nc; j++) { Complex tmp = Xx[j]; if (tmp != 0.0) { if (ii == x_nz) { // Resize the sparse matrix octave_idx_type sz = x_nz * (b_nc - i) / b_nc; sz = (sz > 10 ? sz : 10) + x_nz; x.change_capacity (sz); x_nz = sz; } x.xdata (ii) = tmp; x.xridx (ii++) = j; } } x.xcidx (i+1) = ii; } info = 0; x.maybe_compress (); return x; #else octave_unused_parameter (b); return SparseComplexMatrix (); #endif } template <> template <> ComplexMatrix sparse_qr<SparseComplexMatrix>::sparse_qr_rep::tall_solve<MArray<Complex>, ComplexMatrix> (const MArray<Complex>& b, octave_idx_type& info) { info = -1; #if defined (HAVE_CXSPARSE) octave_idx_type nr = nrows; octave_idx_type nc = ncols; octave_idx_type b_nc = b.cols (); octave_idx_type b_nr = b.rows (); const cs_complex_t *bvec = reinterpret_cast<const cs_complex_t *> (b.data ()); ComplexMatrix x (nc, b_nc); cs_complex_t *vec = reinterpret_cast<cs_complex_t *> (x.fortran_vec ()); OCTAVE_LOCAL_BUFFER (cs_complex_t, buf, S->m2); for (volatile octave_idx_type i = 0, idx = 0, bidx = 0; i < b_nc; i++, idx+=nc, bidx+=b_nr) { octave_quit (); for (octave_idx_type j = nr; j < S->m2; j++) buf[j] = cs_complex_t (0.0, 0.0); CXSPARSE_ZNAME (_ipvec) (S->pinv, bvec + bidx, buf, nr); for (volatile octave_idx_type j = 0; j < nc; 
j++) { octave_quit (); CXSPARSE_ZNAME (_happly) (N->L, j, N->B[j], buf); } CXSPARSE_ZNAME (_usolve) (N->U, buf); CXSPARSE_ZNAME (_ipvec) (S->q, buf, vec + idx, nc); } info = 0; return x; #else octave_unused_parameter (b); return ComplexMatrix (); #endif } template <> template <> ComplexMatrix sparse_qr<SparseComplexMatrix>::sparse_qr_rep::wide_solve<MArray<Complex>, ComplexMatrix> (const MArray<Complex>& b, octave_idx_type& info) const { info = -1; #if defined (HAVE_CXSPARSE) // These are swapped because the original matrix was transposed in // sparse_qr<SparseComplexMatrix>::solve. octave_idx_type nr = ncols; octave_idx_type nc = nrows; octave_idx_type b_nc = b.cols (); octave_idx_type b_nr = b.rows (); const cs_complex_t *bvec = reinterpret_cast<const cs_complex_t *> (b.data ()); ComplexMatrix x (nc, b_nc); cs_complex_t *vec = reinterpret_cast<cs_complex_t *> (x.fortran_vec ()); volatile octave_idx_type nbuf = (nc > S->m2 ? nc : S->m2); OCTAVE_LOCAL_BUFFER (cs_complex_t, buf, nbuf); OCTAVE_LOCAL_BUFFER (double, B, nr); for (octave_idx_type i = 0; i < nr; i++) B[i] = N->B[i]; for (volatile octave_idx_type i = 0, idx = 0, bidx = 0; i < b_nc; i++, idx+=nc, bidx+=b_nr) { octave_quit (); for (octave_idx_type j = nr; j < nbuf; j++) buf[j] = cs_complex_t (0.0, 0.0); CXSPARSE_ZNAME (_pvec) (S->q, bvec + bidx, buf, nr); CXSPARSE_ZNAME (_utsolve) (N->U, buf); for (volatile octave_idx_type j = nr-1; j >= 0; j--) { octave_quit (); CXSPARSE_ZNAME (_happly) (N->L, j, B[j], buf); } CXSPARSE_ZNAME (_pvec) (S->pinv, buf, vec + idx, nc); } info = 0; return x; #else octave_unused_parameter (b); return ComplexMatrix (); #endif } template <> template <> SparseComplexMatrix sparse_qr<SparseComplexMatrix>::sparse_qr_rep::tall_solve<SparseComplexMatrix, SparseComplexMatrix> (const SparseComplexMatrix& b, octave_idx_type& info) { info = -1; #if defined (HAVE_CXSPARSE) octave_idx_type nr = nrows; octave_idx_type nc = ncols; octave_idx_type b_nc = b.cols (); octave_idx_type b_nr = b.rows 
(); SparseComplexMatrix x (nc, b_nc, b.nnz ()); x.xcidx (0) = 0; volatile octave_idx_type x_nz = b.nnz (); volatile octave_idx_type ii = 0; OCTAVE_LOCAL_BUFFER (Complex, Xx, (b_nr > nc ? b_nr : nc)); OCTAVE_LOCAL_BUFFER (cs_complex_t, buf, S->m2); for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc) { octave_quit (); for (octave_idx_type j = 0; j < b_nr; j++) Xx[j] = b.xelem (j, i); for (octave_idx_type j = nr; j < S->m2; j++) buf[j] = cs_complex_t (0.0, 0.0); CXSPARSE_ZNAME (_ipvec) (S->pinv, reinterpret_cast<cs_complex_t *> (Xx), buf, nr); for (volatile octave_idx_type j = 0; j < nc; j++) { octave_quit (); CXSPARSE_ZNAME (_happly) (N->L, j, N->B[j], buf); } CXSPARSE_ZNAME (_usolve) (N->U, buf); CXSPARSE_ZNAME (_ipvec) (S->q, buf, reinterpret_cast<cs_complex_t *> (Xx), nc); for (octave_idx_type j = 0; j < nc; j++) { Complex tmp = Xx[j]; if (tmp != 0.0) { if (ii == x_nz) { // Resize the sparse matrix octave_idx_type sz = x_nz * (b_nc - i) / b_nc; sz = (sz > 10 ? sz : 10) + x_nz; x.change_capacity (sz); x_nz = sz; } x.xdata (ii) = tmp; x.xridx (ii++) = j; } } x.xcidx (i+1) = ii; } info = 0; x.maybe_compress (); return x; #else octave_unused_parameter (b); return SparseComplexMatrix (); #endif } template <> template <> SparseComplexMatrix sparse_qr<SparseComplexMatrix>::sparse_qr_rep::wide_solve<SparseComplexMatrix, SparseComplexMatrix> (const SparseComplexMatrix& b, octave_idx_type& info) const { info = -1; #if defined (HAVE_CXSPARSE) // These are swapped because the original matrix was transposed in // sparse_qr<SparseComplexMatrix>::solve. octave_idx_type nr = ncols; octave_idx_type nc = nrows; octave_idx_type b_nc = b.cols (); octave_idx_type b_nr = b.rows (); SparseComplexMatrix x (nc, b_nc, b.nnz ()); x.xcidx (0) = 0; volatile octave_idx_type x_nz = b.nnz (); volatile octave_idx_type ii = 0; volatile octave_idx_type nbuf = (nc > S->m2 ? nc : S->m2); OCTAVE_LOCAL_BUFFER (Complex, Xx, (b_nr > nc ? 
b_nr : nc)); OCTAVE_LOCAL_BUFFER (cs_complex_t, buf, nbuf); OCTAVE_LOCAL_BUFFER (double, B, nr); for (octave_idx_type i = 0; i < nr; i++) B[i] = N->B[i]; for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc) { octave_quit (); for (octave_idx_type j = 0; j < b_nr; j++) Xx[j] = b.xelem (j, i); for (octave_idx_type j = nr; j < nbuf; j++) buf[j] = cs_complex_t (0.0, 0.0); CXSPARSE_ZNAME (_pvec) (S->q, reinterpret_cast<cs_complex_t *>(Xx), buf, nr); CXSPARSE_ZNAME (_utsolve) (N->U, buf); for (volatile octave_idx_type j = nr-1; j >= 0; j--) { octave_quit (); CXSPARSE_ZNAME (_happly) (N->L, j, B[j], buf); } CXSPARSE_ZNAME (_pvec) (S->pinv, buf, reinterpret_cast<cs_complex_t *>(Xx), nc); for (octave_idx_type j = 0; j < nc; j++) { Complex tmp = Xx[j]; if (tmp != 0.0) { if (ii == x_nz) { // Resize the sparse matrix octave_idx_type sz = x_nz * (b_nc - i) / b_nc; sz = (sz > 10 ? sz : 10) + x_nz; x.change_capacity (sz); x_nz = sz; } x.xdata (ii) = tmp; x.xridx (ii++) = j; } } x.xcidx (i+1) = ii; } info = 0; x.maybe_compress (); return x; #else octave_unused_parameter (b); return SparseComplexMatrix (); #endif } template <typename SPARSE_T> sparse_qr<SPARSE_T>::sparse_qr (void) : m_rep (new sparse_qr_rep (SPARSE_T (), 0)) { } template <typename SPARSE_T> sparse_qr<SPARSE_T>::sparse_qr (const SPARSE_T& a, int order) : m_rep (new sparse_qr_rep (a, order)) { } template <typename SPARSE_T> bool sparse_qr<SPARSE_T>::ok (void) const { return m_rep->ok (); } template <typename SPARSE_T> SPARSE_T sparse_qr<SPARSE_T>::V (void) const { return m_rep->V (); } template <typename SPARSE_T> ColumnVector sparse_qr<SPARSE_T>::Pinv (void) const { return m_rep->P (); } template <typename SPARSE_T> ColumnVector sparse_qr<SPARSE_T>::P (void) const { return m_rep->P (); } template <typename SPARSE_T> ColumnVector sparse_qr<SPARSE_T>::E (void) const { return m_rep->E(); } template <typename SPARSE_T> SparseMatrix sparse_qr<SPARSE_T>::E_MAT (void) const { ColumnVector perm = m_rep->E 
();
  octave_idx_type nrows = perm.rows ();
  SparseMatrix ret (nrows, nrows, nrows);

  // One 1.0 entry per column at the permuted row position.
  for (octave_idx_type i = 0; i < nrows; i++)
    ret(perm(i) - 1, i) = 1.0;

  return ret;
}

template <typename SPARSE_T>
SPARSE_T
sparse_qr<SPARSE_T>::R (bool econ) const
{
  return m_rep->R (econ);
}

template <typename SPARSE_T>
typename SPARSE_T::dense_matrix_type
sparse_qr<SPARSE_T>::C (const typename SPARSE_T::dense_matrix_type& b,
                        bool econ) const
{
  return m_rep->C (b, econ);
}

template <typename SPARSE_T>
typename SPARSE_T::dense_matrix_type
sparse_qr<SPARSE_T>::Q (bool econ) const
{
  return m_rep->Q (econ);
}

#if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))

//specializations of function min2norm_solve

// Minimum 2-norm solution of a real system with dense real rhs via SPQR.
// The cholmod_sparse/cholmod_dense views alias Octave's data; only the
// index arrays possibly copied by ros2rcs must be deleted afterwards.
template <>
template <>
OCTAVE_API Matrix
sparse_qr<SparseMatrix>::min2norm_solve<MArray<double>, Matrix>
  (const SparseMatrix& a, const MArray<double>& b,
   octave_idx_type& info, int order)
{
  info = -1;
  octave_idx_type b_nc = b.cols ();
  octave_idx_type nc = a.cols ();
  Matrix x (nc, b_nc);

  cholmod_common cc;
  cholmod_l_start (&cc);
  cholmod_sparse A = ros2rcs (a);
  cholmod_dense B = rod2rcd (b);
  cholmod_dense *X;

  X = SuiteSparseQR_min2norm<double> (order, SPQR_DEFAULT_TOL, &A, &B, &cc);
  spqr_error_handler (&cc);

  // Copy the SPQR result into the Octave matrix.
  // NOTE(review): X does not appear to be released with
  // cholmod_l_free_dense before cholmod_l_finish -- possible leak; verify.
  double *vec = x.fortran_vec ();
  for (volatile octave_idx_type i = 0; i < nc * b_nc; i++)
    vec[i] = reinterpret_cast<double *> (X->x)[i];

  info = 0;

  // ros2rcs allocates converted index arrays only when octave_idx_type
  // and SuiteSparse_long differ in size.
  if (sizeof (octave_idx_type) != sizeof (SuiteSparse_long))
    {
      delete [] reinterpret_cast<SuiteSparse_long *> (A.p);
      delete [] reinterpret_cast<SuiteSparse_long *> (A.i);
    }

  cholmod_l_finish (&cc);

  return x;
}

// Minimum 2-norm solution of a real system with sparse real rhs via SPQR.
template <>
template <>
OCTAVE_API SparseMatrix
sparse_qr<SparseMatrix>::min2norm_solve<SparseMatrix, SparseMatrix>
  (const SparseMatrix& a, const SparseMatrix& b,
   octave_idx_type& info, int order)
{
  info = -1;
  SparseMatrix x;

  cholmod_common cc;
  cholmod_l_start (&cc);
  cholmod_sparse A = ros2rcs (a);
  cholmod_sparse B = ros2rcs (b);
  cholmod_sparse *X;

  X = SuiteSparseQR_min2norm<double> (order, SPQR_DEFAULT_TOL, &A, &B, &cc);

spqr_error_handler (&cc);

  // Free the converted index arrays (allocated only on size mismatch).
  if (sizeof (octave_idx_type) != sizeof (SuiteSparse_long))
    {
      delete [] reinterpret_cast<SuiteSparse_long *> (A.p);
      delete [] reinterpret_cast<SuiteSparse_long *> (A.i);
      delete [] reinterpret_cast<SuiteSparse_long *> (B.p);
      delete [] reinterpret_cast<SuiteSparse_long *> (B.i);
    }

  // Convert the SPQR result back to an Octave sparse matrix.
  // NOTE(review): X itself is not freed with cholmod_l_free_sparse --
  // possible leak; verify against rcs2ros ownership semantics.
  x = rcs2ros (X);
  cholmod_l_finish (&cc);
  info = 0;

  return x;
}

// Minimum 2-norm solution of a real system with dense complex rhs:
// promote A to complex (ros2ccs allocates via CHOLMOD) and solve.
template <>
template <>
OCTAVE_API ComplexMatrix
sparse_qr<SparseMatrix>::min2norm_solve<MArray<Complex>, ComplexMatrix>
  (const SparseMatrix& a, const MArray<Complex>& b,
   octave_idx_type& info, int order)
{
  info = -1;
  octave_idx_type b_nc = b.cols ();
  octave_idx_type nc = a.cols ();
  ComplexMatrix x (nc, b_nc);

  cholmod_common cc;
  cholmod_l_start (&cc);
  cholmod_sparse *A = ros2ccs (a, &cc);
  cholmod_dense B = cod2ccd (b);
  cholmod_dense *X;

  X = SuiteSparseQR_min2norm<Complex> (order, SPQR_DEFAULT_TOL, A, &B, &cc);
  spqr_error_handler (&cc);

  Complex *vec = x.fortran_vec ();
  for (volatile octave_idx_type i = 0; i < nc * b_nc; i++)
    vec[i] = reinterpret_cast<Complex *> (X->x)[i];

  // A was allocated by ros2ccs through CHOLMOD, so free it there.
  cholmod_l_free_sparse (&A, &cc);
  cholmod_l_finish (&cc);
  info = 0;

  return x;
}

// Minimum 2-norm solution of a real system with sparse complex rhs.
template <>
template <>
OCTAVE_API SparseComplexMatrix
sparse_qr<SparseMatrix>::min2norm_solve<SparseComplexMatrix,
                                        SparseComplexMatrix>
  (const SparseMatrix& a, const SparseComplexMatrix& b,
   octave_idx_type& info, int order)
{
  info = -1;

  cholmod_common cc;
  cholmod_l_start (&cc);
  cholmod_sparse *A = ros2ccs (a, &cc);
  cholmod_sparse B = cos2ccs (b);
  cholmod_sparse *X;

  X = SuiteSparseQR_min2norm<Complex> (order, SPQR_DEFAULT_TOL, A, &B, &cc);
  spqr_error_handler (&cc);

  cholmod_l_free_sparse (&A, &cc);

  // B's index arrays were converted (and heap-allocated) only when the
  // integer sizes differ.
  if (sizeof (octave_idx_type) != sizeof (SuiteSparse_long))
    {
      delete [] reinterpret_cast<SuiteSparse_long *> (B.p);
      delete [] reinterpret_cast<SuiteSparse_long *> (B.i);
    }

  cholmod_l_finish (&cc);
  SparseComplexMatrix ret = ccs2cos(X);
  info = 0;

  return ret;
}

// Minimum 2-norm solution of a complex system with dense complex rhs.
template <>
template <>
OCTAVE_API ComplexMatrix
sparse_qr<SparseComplexMatrix>::min2norm_solve<MArray<Complex>,
ComplexMatrix>
  (const SparseComplexMatrix& a, const MArray<Complex>& b,
   octave_idx_type& info,int order)
{
  info = -1;
  octave_idx_type b_nc = b.cols ();
  octave_idx_type nc = a.cols ();
  ComplexMatrix x (nc, b_nc);

  cholmod_common cc;
  cholmod_l_start (&cc);
  cholmod_sparse A = cos2ccs (a);
  cholmod_dense B = cod2ccd (b);
  cholmod_dense *X;

  X = SuiteSparseQR_min2norm<Complex> (order, SPQR_DEFAULT_TOL, &A, &B, &cc);
  spqr_error_handler (&cc);

  // Copy the SPQR result into the Octave matrix.
  // NOTE(review): X is not released with cholmod_l_free_dense here --
  // possible leak; verify.
  Complex *vec = x.fortran_vec ();
  for (volatile octave_idx_type i = 0; i < nc * b_nc; i++)
    vec[i] = reinterpret_cast<Complex *> (X->x)[i];

  // Index arrays were converted (heap-allocated) only on size mismatch.
  if (sizeof (octave_idx_type) != sizeof (SuiteSparse_long))
    {
      delete [] reinterpret_cast<SuiteSparse_long *> (A.p);
      delete [] reinterpret_cast<SuiteSparse_long *> (A.i);
    }

  cholmod_l_finish (&cc);
  info = 0;

  return x;
}

// Minimum 2-norm solution of a complex system with dense real rhs:
// promote b to complex (rod2ccd allocates via CHOLMOD).
template <>
template <>
OCTAVE_API ComplexMatrix
sparse_qr<SparseComplexMatrix>::min2norm_solve<MArray<double>, ComplexMatrix>
  (const SparseComplexMatrix& a, const MArray<double>& b,
   octave_idx_type& info, int order)
{
  info = -1;
  octave_idx_type b_nc = b.cols ();
  octave_idx_type nc = a.cols ();
  ComplexMatrix x (nc, b_nc);

  cholmod_common cc;
  cholmod_l_start (&cc);
  cholmod_sparse A = cos2ccs (a);
  cholmod_dense *B = rod2ccd (b, &cc);
  cholmod_dense *X;

  X = SuiteSparseQR_min2norm<Complex> (order, SPQR_DEFAULT_TOL, &A, B, &cc);
  spqr_error_handler (&cc);

  Complex *vec = x.fortran_vec ();
  for (volatile octave_idx_type i = 0; i < nc * b_nc; i++)
    vec[i] = reinterpret_cast<Complex *> (X->x)[i];

  if (sizeof (octave_idx_type) != sizeof (SuiteSparse_long))
    {
      delete [] reinterpret_cast<SuiteSparse_long *> (A.p);
      delete [] reinterpret_cast<SuiteSparse_long *> (A.i);
    }

  // B was allocated by rod2ccd through CHOLMOD, so free it there.
  cholmod_l_free_dense (&B, &cc);
  cholmod_l_finish (&cc);
  info = 0;

  return x;
}

// Minimum 2-norm solution of a complex system with sparse complex rhs.
template <>
template <>
OCTAVE_API SparseComplexMatrix
sparse_qr<SparseComplexMatrix>::min2norm_solve<SparseComplexMatrix,
                                               SparseComplexMatrix>
  (const SparseComplexMatrix& a, const SparseComplexMatrix& b,
   octave_idx_type& info, int order)
{
  info = -1;

  cholmod_common
cc;
  cholmod_l_start (&cc);
  cholmod_sparse A = cos2ccs (a);
  cholmod_sparse B = cos2ccs (b);
  cholmod_sparse *X;

  X = SuiteSparseQR_min2norm<Complex> (order, SPQR_DEFAULT_TOL, &A, &B, &cc);
  spqr_error_handler (&cc);

  // Converted index arrays are heap-allocated only on integer-size
  // mismatch between octave_idx_type and SuiteSparse_long.
  if (sizeof (octave_idx_type) != sizeof (SuiteSparse_long))
    {
      delete [] reinterpret_cast<SuiteSparse_long *> (A.p);
      delete [] reinterpret_cast<SuiteSparse_long *> (A.i);
      delete [] reinterpret_cast<SuiteSparse_long *> (B.p);
      delete [] reinterpret_cast<SuiteSparse_long *> (B.i);
    }

  cholmod_l_finish (&cc);
  info = 0;

  return ccs2cos (X);
}

// Minimum 2-norm solution of a complex system with sparse real rhs.
template <>
template <>
OCTAVE_API SparseComplexMatrix
sparse_qr<SparseComplexMatrix>::min2norm_solve<SparseMatrix,
                                               SparseComplexMatrix>
  (const SparseComplexMatrix& a, const SparseMatrix& b,
   octave_idx_type& info,int order)
{
  info = -1;

  cholmod_common cc;
  cholmod_l_start (&cc);
  cholmod_sparse A = cos2ccs (a);
  cholmod_sparse *B = ros2ccs (b, &cc);
  cholmod_sparse *X;

  X = SuiteSparseQR_min2norm<Complex> (order, SPQR_DEFAULT_TOL, &A, B, &cc);
  spqr_error_handler (&cc);

  SparseComplexMatrix ret = ccs2cos(X);

  if (sizeof (octave_idx_type) != sizeof (SuiteSparse_long))
    {
      delete [] reinterpret_cast<SuiteSparse_long *> (A.p);
      delete [] reinterpret_cast<SuiteSparse_long *> (A.i);
    }

  // B was allocated by ros2ccs through CHOLMOD, so free it there.
  cholmod_l_free_sparse (&B, &cc);
  cholmod_l_finish (&cc);
  info = 0;

  return ret;
}

#endif

// FIXME: Why is the "order" of the QR calculation as used in the
// CXSparse function sqr 3 for real matrices and 2 for complex?  These
// values seem to be required but there was no explanation in David
// Bateman's original code.
// Per-matrix-type default ordering code, chosen by which QR backend is
// compiled in (SPQR+CHOLMOD preferred over CXSparse).
template <typename SPARSE_T>
class cxsparse_defaults
{
public:
  enum { order = -1 };
};

template <>
class cxsparse_defaults<SparseMatrix>
{
public:
#if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
  enum { order = SPQR_ORDERING_DEFAULT };
#elif defined (HAVE_CXSPARSE)
  enum { order = 3 };
#endif
};

template <>
class cxsparse_defaults<SparseComplexMatrix>
{
public:
#if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
  enum { order = SPQR_ORDERING_DEFAULT };
#elif defined (HAVE_CXSPARSE)
  enum { order = 2 };
#endif
};

// Static entry point: dispatch a least-squares / minimum-norm solve of
// a*x = b to the available backend.  With SPQR, min2norm_solve handles
// every shape; with CXSparse only, use tall_solve for nr >= nc and
// wide_solve on the transposed (hermitian) matrix otherwise.  info is
// set to 0 on success, -1 otherwise.
template <typename SPARSE_T>
template <typename RHS_T, typename RET_T>
RET_T
sparse_qr<SPARSE_T>::solve (const SPARSE_T& a, const RHS_T& b,
                            octave_idx_type& info)
{
#if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
  info = -1;

  octave_idx_type nr = a.rows ();
  octave_idx_type nc = a.cols ();
  octave_idx_type b_nc = b.cols ();
  octave_idx_type b_nr = b.rows ();
  int order = cxsparse_defaults<SPARSE_T>::order;

  if (nr < 0 || nc < 0 || b_nc < 0 || b_nr < 0)
    (*current_liboctave_error_handler)
      ("matrix dimension with negative size");

  if ( nr != b_nr)
    (*current_liboctave_error_handler)
      ("matrix dimension mismatch in solution of minimum norm problem");

  info = 0;

  return min2norm_solve<RHS_T, RET_T> (a, b, info, order);

#elif defined (HAVE_CXSPARSE)
  info = -1;

  octave_idx_type nr = a.rows ();
  octave_idx_type nc = a.cols ();
  octave_idx_type b_nc = b.cols ();
  octave_idx_type b_nr = b.rows ();
  int order = cxsparse_defaults<SPARSE_T>::order;

  if (nr < 0 || nc < 0 || nr != b_nr)
    (*current_liboctave_error_handler)
      ("matrix dimension mismatch in solution of minimum norm problem");

  if (nr == 0 || nc == 0 || b_nc == 0)
    {
      // Empty system: the solution is an all-zero nc x b_nc matrix.
      info = 0;

      return RET_T (nc, b_nc, 0.0);
    }
  else if (nr >= nc)
    {
      // Overdetermined or square: least-squares via QR of a.
      sparse_qr<SPARSE_T> q (a, order);

      return q.ok () ? q.tall_solve<RHS_T, RET_T> (b, info) : RET_T ();
    }
  else
    {
      // Underdetermined: minimum-norm solution via QR of a'.
      sparse_qr<SPARSE_T> q (a.hermitian (), order);

      return q.ok () ?
q.wide_solve<RHS_T, RET_T> (b, info) : RET_T ();
    }

#else
  octave_unused_parameter (a);
  octave_unused_parameter (b);
  octave_unused_parameter (info);

  return RET_T ();
#endif
}

//explicit instantiations of static member function solve
template OCTAVE_API Matrix
sparse_qr<SparseMatrix>::solve<MArray<double>, Matrix>
  (const SparseMatrix& a, const MArray<double>& b, octave_idx_type& info);

template OCTAVE_API SparseMatrix
sparse_qr<SparseMatrix>::solve<SparseMatrix, SparseMatrix>
  (const SparseMatrix& a, const SparseMatrix& b, octave_idx_type& info);

template OCTAVE_API ComplexMatrix
sparse_qr<SparseMatrix>::solve<MArray<Complex>, ComplexMatrix>
  (const SparseMatrix& a, const MArray<Complex>& b, octave_idx_type& info);

template OCTAVE_API SparseComplexMatrix
sparse_qr<SparseMatrix>::solve<SparseComplexMatrix, SparseComplexMatrix>
  (const SparseMatrix& a, const SparseComplexMatrix& b,
   octave_idx_type& info);

template OCTAVE_API ComplexMatrix
sparse_qr<SparseComplexMatrix>::solve<MArray<Complex>, ComplexMatrix>
  (const SparseComplexMatrix& a, const MArray<Complex>& b,
   octave_idx_type& info);

template OCTAVE_API SparseComplexMatrix
sparse_qr<SparseComplexMatrix>::solve<SparseComplexMatrix,
                                      SparseComplexMatrix>
  (const SparseComplexMatrix& a, const SparseComplexMatrix& b,
   octave_idx_type& info);

template OCTAVE_API ComplexMatrix
sparse_qr<SparseComplexMatrix>::solve<MArray<double>, ComplexMatrix>
  (const SparseComplexMatrix& a, const MArray<double>& b,
   octave_idx_type& info);

template OCTAVE_API SparseComplexMatrix
sparse_qr<SparseComplexMatrix>::solve<SparseMatrix, SparseComplexMatrix>
  (const SparseComplexMatrix& a, const SparseMatrix& b,
   octave_idx_type& info);

//explicit instantiations of member function E_MAT
template OCTAVE_API SparseMatrix
sparse_qr<SparseMatrix>::E_MAT (void) const;

template OCTAVE_API SparseMatrix
sparse_qr<SparseComplexMatrix>::E_MAT (void) const;

// Forward tall_solve to the reference-counted rep object.
template <typename SPARSE_T>
template <typename RHS_T, typename RET_T>
RET_T
sparse_qr<SPARSE_T>::tall_solve
(const RHS_T& b, octave_idx_type& info) const
{
  return m_rep->template tall_solve<RHS_T, RET_T> (b, info);
}

// Forward wide_solve to the reference-counted rep object.
template <typename SPARSE_T>
template <typename RHS_T, typename RET_T>
RET_T
sparse_qr<SPARSE_T>::wide_solve (const RHS_T& b, octave_idx_type& info) const
{
  return m_rep->template wide_solve<RHS_T, RET_T> (b, info);
}

// Explicitly instantiate all member functions

template OCTAVE_API sparse_qr<SparseMatrix>::sparse_qr (void);

template OCTAVE_API
sparse_qr<SparseMatrix>::sparse_qr (const SparseMatrix& a, int order);

template OCTAVE_API bool sparse_qr<SparseMatrix>::ok (void) const;

template OCTAVE_API ColumnVector sparse_qr<SparseMatrix>::E (void) const;

template OCTAVE_API SparseMatrix sparse_qr<SparseMatrix>::V (void) const;

template OCTAVE_API ColumnVector sparse_qr<SparseMatrix>::Pinv (void) const;

template OCTAVE_API ColumnVector sparse_qr<SparseMatrix>::P (void) const;

template OCTAVE_API SparseMatrix sparse_qr<SparseMatrix>::R (bool econ) const;

template OCTAVE_API Matrix
sparse_qr<SparseMatrix>::C (const Matrix& b, bool econ) const;

template OCTAVE_API Matrix sparse_qr<SparseMatrix>::Q (bool econ) const;

template OCTAVE_API sparse_qr<SparseComplexMatrix>::sparse_qr (void);

template OCTAVE_API
sparse_qr<SparseComplexMatrix>::sparse_qr (const SparseComplexMatrix& a,
                                           int order);

template OCTAVE_API bool sparse_qr<SparseComplexMatrix>::ok (void) const;

template OCTAVE_API ColumnVector
sparse_qr<SparseComplexMatrix>::E (void) const;

template OCTAVE_API SparseComplexMatrix
sparse_qr<SparseComplexMatrix>::V (void) const;

template OCTAVE_API ColumnVector
sparse_qr<SparseComplexMatrix>::Pinv (void) const;

template OCTAVE_API ColumnVector
sparse_qr<SparseComplexMatrix>::P (void) const;

template OCTAVE_API SparseComplexMatrix
sparse_qr<SparseComplexMatrix>::R (bool econ) const;

template OCTAVE_API ComplexMatrix
sparse_qr<SparseComplexMatrix>::C (const ComplexMatrix& b, bool econ) const;

template OCTAVE_API ComplexMatrix
sparse_qr<SparseComplexMatrix>::Q (bool
econ) const;

// Public qrsolve overloads: thin wrappers over the static
// sparse_qr<...>::solve dispatcher for each real/complex and
// dense/sparse rhs combination.

Matrix
qrsolve (const SparseMatrix& a, const MArray<double>& b,
         octave_idx_type& info)
{
  return sparse_qr<SparseMatrix>::solve<MArray<double>, Matrix> (a, b, info);
}

SparseMatrix
qrsolve (const SparseMatrix& a, const SparseMatrix& b,
         octave_idx_type& info)
{
  return sparse_qr<SparseMatrix>::solve<SparseMatrix, SparseMatrix> (a, b,
                                                                     info);
}

ComplexMatrix
qrsolve (const SparseMatrix& a, const MArray<Complex>& b,
         octave_idx_type& info)
{
  return sparse_qr<SparseMatrix>::solve<MArray<Complex>, ComplexMatrix>
           (a, b, info);
}

SparseComplexMatrix
qrsolve (const SparseMatrix& a, const SparseComplexMatrix& b,
         octave_idx_type& info)
{
  return sparse_qr<SparseMatrix>::solve<SparseComplexMatrix,
                                        SparseComplexMatrix> (a, b, info);
}

ComplexMatrix
qrsolve (const SparseComplexMatrix& a, const MArray<double>& b,
         octave_idx_type& info)
{
  return sparse_qr<SparseComplexMatrix>::solve<MArray<double>, ComplexMatrix>
           (a, b, info);
}

SparseComplexMatrix
qrsolve (const SparseComplexMatrix& a, const SparseMatrix& b,
         octave_idx_type& info)
{
  return sparse_qr<SparseComplexMatrix>::solve<SparseMatrix,
                                               SparseComplexMatrix> (a, b,
                                                                     info);
}

ComplexMatrix
qrsolve (const SparseComplexMatrix& a, const MArray<Complex>& b,
         octave_idx_type& info)
{
  return sparse_qr<SparseComplexMatrix>::solve<MArray<Complex>,
                                               ComplexMatrix> (a, b, info);
}

SparseComplexMatrix
qrsolve (const SparseComplexMatrix& a, const SparseComplexMatrix& b,
         octave_idx_type& info)
{
  return sparse_qr<SparseComplexMatrix>::solve<SparseComplexMatrix,
                                               SparseComplexMatrix> (a, b,
                                                                     info);
}

OCTAVE_END_NAMESPACE(math)
OCTAVE_END_NAMESPACE(octave)