# HG changeset patch
# User John W. Eaton
# Date 1470069618 14400
# Node ID e43d83253e28ee13133c931bc043ec1cf36748aa
# Parent dd992fd74fce76612b95d37618a035a8abbe6ba6
refill multi-line macro definitions

Use the Emacs C++ mode style for line continuation markers in
multi-line macro definitions.

* make_int.cc, __dsearchn__.cc, __magick_read__.cc, besselj.cc,
bitfcns.cc, bsxfun.cc, cellfun.cc, data.cc, defun-dld.h, defun-int.h,
defun.h, det.cc, error.h, find.cc, gcd.cc, graphics.cc, interpreter.h,
jit-ir.h, jit-typeinfo.h, lookup.cc, ls-mat5.cc, max.cc, mexproto.h,
mxarray.in.h, oct-stream.cc, ordschur.cc, pr-output.cc, profiler.h,
psi.cc, regexp.cc, sparse-xdiv.cc, sparse-xpow.cc, tril.cc, txt-eng.h,
utils.cc, variables.cc, variables.h, xdiv.cc, xpow.cc, __glpk__.cc,
ov-base.cc, ov-base.h, ov-cell.cc, ov-ch-mat.cc, ov-classdef.cc,
ov-complex.cc, ov-cx-mat.cc, ov-cx-sparse.cc, ov-float.cc, ov-float.h,
ov-flt-complex.cc, ov-flt-cx-mat.cc, ov-flt-re-mat.cc, ov-int-traits.h,
ov-lazy-idx.h, ov-perm.cc, ov-re-mat.cc, ov-re-sparse.cc, ov-scalar.cc,
ov-scalar.h, ov-str-mat.cc, ov-type-conv.h, ov.cc, ov.h, op-class.cc,
op-int-conv.cc, op-int.h, op-str-str.cc, ops.h, lex.ll, Array.cc,
CMatrix.cc, CSparse.cc, MArray.cc, MArray.h, MDiagArray2.cc,
MDiagArray2.h, MSparse.h, Sparse.cc, dMatrix.cc, dSparse.cc,
fCMatrix.cc, fMatrix.cc, idx-vector.cc, f77-fcn.h, quit.h,
bsxfun-decl.h, bsxfun-defs.cc, lo-specfun.cc, oct-convn.cc,
oct-convn.h, oct-norm.cc, oct-norm.h, oct-rand.cc, Sparse-op-decls.h,
Sparse-op-defs.h, mx-inlines.cc, mx-op-decl.h, mx-op-defs.h,
mach-info.cc, oct-group.cc, oct-passwd.cc, oct-syscalls.cc,
oct-time.cc, data-conv.cc, kpse.cc, lo-ieee.h, lo-macros.h,
oct-cmplx.h, oct-glob.cc, oct-inttypes.cc, oct-inttypes.h,
oct-locbuf.h, oct-sparse.h, url-transfer.cc, oct-conf-post.in.h,
shared-fcns.h: Refill macro definitions.

diff -r dd992fd74fce -r e43d83253e28 examples/code/make_int.cc
--- a/examples/code/make_int.cc	Tue Jul 12 14:28:07 2016 -0400
+++ b/examples/code/make_int.cc	Mon Aug 01 12:40:18 2016 -0400
@@ -123,12 +123,12 @@
 #undef DEFUNOP_OP
 #endif
 
-#define DEFUNOP_OP(name, t, op) \
-  static octave_value \
-  CONCAT2(oct_unop_, name) (const octave_base_value& a) \
-  { \
-    const octave_ ## t& v = dynamic_cast (a); \
-    return octave_value (new octave_integer (op v.t ## _value ())); \
+#define DEFUNOP_OP(name, t, op)                                         \
+  static octave_value                                                   \
+  CONCAT2(oct_unop_, name) (const octave_base_value& a)                 \
+  {                                                                     \
+    const octave_ ## t& v = dynamic_cast (a);                           \
+    return octave_value (new octave_integer (op v.t ## _value ()));     \
   }
 
 DEFUNOP_OP (gnot, integer, !)
@@ -143,14 +143,15 @@
 #undef DEFBINOP_OP
 #endif
 
-#define DEFBINOP_OP(name, t1, t2, op) \
-  static octave_value \
-  CONCAT2(oct_binop_, name) (const octave_base_value& a1, const octave_base_value& a2) \
-  { \
-    const octave_ ## t1& v1 = dynamic_cast (a1); \
-    const octave_ ## t2& v2 = dynamic_cast (a2); \
-    return octave_value \
-      (new octave_integer (v1.t1 ## _value () op v2.t2 ## _value ())); \
+#define DEFBINOP_OP(name, t1, t2, op)                                    \
+  static octave_value                                                    \
+  CONCAT2(oct_binop_, name) (const octave_base_value& a1,                \
+                             const octave_base_value& a2)                \
+  {                                                                      \
+    const octave_ ## t1& v1 = dynamic_cast (a1);                         \
+    const octave_ ## t2& v2 = dynamic_cast (a2);                         \
+    return octave_value                                                  \
+      (new octave_integer (v1.t1 ## _value () op v2.t2 ## _value ()));   \
   }
 
 // integer by integer ops.
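Every hunk in this changeset makes the same kind of change, so a small
sketch may help readers who do not use Emacs. In the Emacs C++ mode
style, each continuation line of a multi-line macro is padded with
spaces so that the trailing backslashes line up in a single column
(roughly what the c-backslash-region command produces). The macro
below is hypothetical and is not part of this changeset; it only
illustrates the before/after form applied throughout the hunks that
follow:

    /* Before refilling: each backslash sits directly after the code.  */
    #define SQUARE_AND_STORE(dst, x) \
      do \
        { \
          dst = (x) * (x); \
        } \
      while (0)

    /* After refilling: the backslashes are padded to a common column,  */
    /* so adding or lengthening one line no longer leaves that line's   */
    /* continuation marker visually out of place.                       */
    #define SQUARE_AND_STORE(dst, x)            \
      do                                        \
        {                                       \
          dst = (x) * (x);                      \
        }                                       \
      while (0)
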
diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/__dsearchn__.cc --- a/libinterp/corefcn/__dsearchn__.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/__dsearchn__.cc Mon Aug 01 12:40:18 2016 -0400 @@ -58,13 +58,13 @@ ColumnVector dist (nxi); double *pdist = dist.fortran_vec (); -#define DIST(dd, y, yi, m) \ - dd = 0.0; \ - for (octave_idx_type k = 0; k < m; k++) \ - { \ - double yd = y[k] - yi[k]; \ - dd += yd * yd; \ - } \ +#define DIST(dd, y, yi, m) \ + dd = 0.0; \ + for (octave_idx_type k = 0; k < m; k++) \ + { \ + double yd = y[k] - yi[k]; \ + dd += yd * yd; \ + } \ dd = sqrt (dd) const double *pxi = xi.fortran_vec (); diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/__magick_read__.cc --- a/libinterp/corefcn/__magick_read__.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/__magick_read__.cc Mon Aug 01 12:40:18 2016 -0400 @@ -1527,19 +1527,27 @@ imvec[0].animationIterations (options.getfield ("loopcount").uint_value ()); const std::string compression = options.getfield ("compression").string_value (); -#define COMPRESS_MAGICK_IMAGE_VECTOR(COMPRESSION_STRING, GM_TYPE) \ - if (compression == COMPRESSION_STRING) \ - for (std::vector::size_type i = 0; i < imvec.size (); i++) \ - imvec[i].compressType (GM_TYPE); + +#define COMPRESS_MAGICK_IMAGE_VECTOR(GM_TYPE) \ + for (std::vector::size_type i = 0; i < imvec.size (); i++) \ + imvec[i].compressType (GM_TYPE) - COMPRESS_MAGICK_IMAGE_VECTOR("none", Magick::NoCompression) - else COMPRESS_MAGICK_IMAGE_VECTOR("bzip", Magick::BZipCompression) - else COMPRESS_MAGICK_IMAGE_VECTOR("fax3", Magick::FaxCompression) - else COMPRESS_MAGICK_IMAGE_VECTOR("fax4", Magick::Group4Compression) - else COMPRESS_MAGICK_IMAGE_VECTOR("jpeg", Magick::JPEGCompression) - else COMPRESS_MAGICK_IMAGE_VECTOR("lzw", Magick::LZWCompression) - else COMPRESS_MAGICK_IMAGE_VECTOR("rle", Magick::RLECompression) - else COMPRESS_MAGICK_IMAGE_VECTOR("deflate", Magick::ZipCompression) + if (compression == "none") + COMPRESS_MAGICK_IMAGE_VECTOR (Magick::NoCompression); + else if (compression == "bzip") + COMPRESS_MAGICK_IMAGE_VECTOR (Magick::BZipCompression); + else if (compression == "fax3") + COMPRESS_MAGICK_IMAGE_VECTOR (Magick::FaxCompression); + else if (compression == "fax4") + COMPRESS_MAGICK_IMAGE_VECTOR (Magick::Group4Compression); + else if (compression == "jpeg") + COMPRESS_MAGICK_IMAGE_VECTOR (Magick::JPEGCompression); + else if (compression == "lzw") + COMPRESS_MAGICK_IMAGE_VECTOR (Magick::LZWCompression); + else if (compression == "rle") + COMPRESS_MAGICK_IMAGE_VECTOR (Magick::RLECompression); + else if (compression == "deflate") + COMPRESS_MAGICK_IMAGE_VECTOR (Magick::ZipCompression); #undef COMPRESS_MAGICK_IMAGE_VECTOR diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/besselj.cc --- a/libinterp/corefcn/besselj.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/besselj.cc Mon Aug 01 12:40:18 2016 -0400 @@ -42,39 +42,39 @@ BESSEL_H2 }; -#define DO_BESSEL(type, alpha, x, scaled, ierr, result) \ - do \ - { \ - switch (type) \ - { \ - case BESSEL_J: \ - result = octave::math::besselj (alpha, x, scaled, ierr); \ - break; \ - \ - case BESSEL_Y: \ - result = octave::math::bessely (alpha, x, scaled, ierr); \ - break; \ - \ - case BESSEL_I: \ - result = octave::math::besseli (alpha, x, scaled, ierr); \ - break; \ - \ - case BESSEL_K: \ - result = octave::math::besselk (alpha, x, scaled, ierr); \ - break; \ - \ - case BESSEL_H1: \ - result = octave::math::besselh1 (alpha, x, scaled, ierr); \ - break; \ - \ - case BESSEL_H2: \ - result = 
octave::math::besselh2 (alpha, x, scaled, ierr); \ - break; \ - \ - default: \ - break; \ - } \ - } \ +#define DO_BESSEL(type, alpha, x, scaled, ierr, result) \ + do \ + { \ + switch (type) \ + { \ + case BESSEL_J: \ + result = octave::math::besselj (alpha, x, scaled, ierr); \ + break; \ + \ + case BESSEL_Y: \ + result = octave::math::bessely (alpha, x, scaled, ierr); \ + break; \ + \ + case BESSEL_I: \ + result = octave::math::besseli (alpha, x, scaled, ierr); \ + break; \ + \ + case BESSEL_K: \ + result = octave::math::besselk (alpha, x, scaled, ierr); \ + break; \ + \ + case BESSEL_H1: \ + result = octave::math::besselh1 (alpha, x, scaled, ierr); \ + break; \ + \ + case BESSEL_H2: \ + result = octave::math::besselh2 (alpha, x, scaled, ierr); \ + break; \ + \ + default: \ + break; \ + } \ + } \ while (0) octave_value_list diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/bitfcns.cc --- a/libinterp/corefcn/bitfcns.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/bitfcns.cc Mon Aug 01 12:40:18 2016 -0400 @@ -458,74 +458,75 @@ // bits than in the type, so we need to test for the size of the // shift. -#define DO_BITSHIFT(T) \ - double d1, d2; \ - \ - if (! n.all_integers (d1, d2)) \ - error ("bitshift: K must be a scalar or array of integers"); \ - \ - int m_nel = m.numel (); \ - int n_nel = n.numel (); \ - \ - bool is_scalar_op = (m_nel == 1 || n_nel == 1); \ - \ - dim_vector m_dv = m.dims (); \ - dim_vector n_dv = n.dims (); \ - \ - bool is_array_op = (m_dv == n_dv); \ - \ - if (! is_array_op && ! is_scalar_op) \ +#define DO_BITSHIFT(T) \ + double d1, d2; \ + \ + if (! n.all_integers (d1, d2)) \ + error ("bitshift: K must be a scalar or array of integers"); \ + \ + int m_nel = m.numel (); \ + int n_nel = n.numel (); \ + \ + bool is_scalar_op = (m_nel == 1 || n_nel == 1); \ + \ + dim_vector m_dv = m.dims (); \ + dim_vector n_dv = n.dims (); \ + \ + bool is_array_op = (m_dv == n_dv); \ + \ + if (! is_array_op && ! 
is_scalar_op) \ error ("bitshift: size of A and N must match, or one operand must be a scalar"); \ - \ - T ## NDArray result; \ - \ - if (m_nel != 1) \ - result.resize (m_dv); \ - else \ - result.resize (n_dv); \ - \ - for (int i = 0; i < m_nel; i++) \ - if (is_scalar_op) \ - for (int k = 0; k < n_nel; k++) \ - if (static_cast (n(k)) >= bits_in_type) \ - result(i+k) = 0; \ - else \ + \ + T ## NDArray result; \ + \ + if (m_nel != 1) \ + result.resize (m_dv); \ + else \ + result.resize (n_dv); \ + \ + for (int i = 0; i < m_nel; i++) \ + if (is_scalar_op) \ + for (int k = 0; k < n_nel; k++) \ + if (static_cast (n(k)) >= bits_in_type) \ + result(i+k) = 0; \ + else \ result(i+k) = bitshift (m(i), static_cast (n(k)), mask); \ - else \ - if (static_cast (n(i)) >= bits_in_type) \ - result(i) = 0; \ - else \ - result(i) = bitshift (m(i), static_cast (n(i)), mask); \ - \ + else \ + if (static_cast (n(i)) >= bits_in_type) \ + result(i) = 0; \ + else \ + result(i) = bitshift (m(i), static_cast (n(i)), mask); \ + \ retval = result; -#define DO_UBITSHIFT(T, N) \ - do \ - { \ - int bits_in_type = octave_ ## T :: nbits (); \ - T ## NDArray m = m_arg.T ## _array_value (); \ - octave_ ## T mask = octave_ ## T::max (); \ - if ((N) < bits_in_type) \ - mask = bitshift (mask, (N) - bits_in_type); \ - else if ((N) < 1) \ - mask = 0; \ - DO_BITSHIFT (T); \ - } \ +#define DO_UBITSHIFT(T, N) \ + do \ + { \ + int bits_in_type = octave_ ## T :: nbits (); \ + T ## NDArray m = m_arg.T ## _array_value (); \ + octave_ ## T mask = octave_ ## T::max (); \ + if ((N) < bits_in_type) \ + mask = bitshift (mask, (N) - bits_in_type); \ + else if ((N) < 1) \ + mask = 0; \ + DO_BITSHIFT (T); \ + } \ while (0) -#define DO_SBITSHIFT(T, N) \ - do \ - { \ - int bits_in_type = octave_ ## T :: nbits (); \ - T ## NDArray m = m_arg.T ## _array_value (); \ - octave_ ## T mask = octave_ ## T::max (); \ - if ((N) < bits_in_type) \ - mask = bitshift (mask, (N) - bits_in_type); \ - else if ((N) < 1) \ - mask = 0; \ - mask = mask | octave_ ## T :: min (); /* FIXME: 2's complement only? */ \ - DO_BITSHIFT (T); \ - } \ +#define DO_SBITSHIFT(T, N) \ + do \ + { \ + int bits_in_type = octave_ ## T :: nbits (); \ + T ## NDArray m = m_arg.T ## _array_value (); \ + octave_ ## T mask = octave_ ## T::max (); \ + if ((N) < bits_in_type) \ + mask = bitshift (mask, (N) - bits_in_type); \ + else if ((N) < 1) \ + mask = 0; \ + /* FIXME: 2's complement only? 
*/ \ + mask = mask | octave_ ## T :: min (); \ + DO_BITSHIFT (T); \ + } \ while (0) DEFUN (bitshift, args, , diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/bsxfun.cc --- a/libinterp/corefcn/bsxfun.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/bsxfun.cc Mon Aug 01 12:40:18 2016 -0400 @@ -133,24 +133,24 @@ if (filled) return; -#define REGISTER_OP_HANDLER(OP, BTYP, NDA, FUNOP) \ +#define REGISTER_OP_HANDLER(OP, BTYP, NDA, FUNOP) \ bsxfun_handler_table[OP][BTYP] = bsxfun_forward_op -#define REGISTER_REL_HANDLER(REL, BTYP, NDA, FUNREL) \ +#define REGISTER_REL_HANDLER(REL, BTYP, NDA, FUNREL) \ bsxfun_handler_table[REL][BTYP] = bsxfun_forward_rel -#define REGISTER_STD_HANDLERS(BTYP, NDA) \ - REGISTER_OP_HANDLER (bsxfun_builtin_plus, BTYP, NDA, bsxfun_add); \ - REGISTER_OP_HANDLER (bsxfun_builtin_minus, BTYP, NDA, bsxfun_sub); \ - REGISTER_OP_HANDLER (bsxfun_builtin_times, BTYP, NDA, bsxfun_mul); \ - REGISTER_OP_HANDLER (bsxfun_builtin_divide, BTYP, NDA, bsxfun_div); \ - REGISTER_OP_HANDLER (bsxfun_builtin_max, BTYP, NDA, bsxfun_max); \ - REGISTER_OP_HANDLER (bsxfun_builtin_min, BTYP, NDA, bsxfun_min); \ - REGISTER_REL_HANDLER (bsxfun_builtin_eq, BTYP, NDA, bsxfun_eq); \ - REGISTER_REL_HANDLER (bsxfun_builtin_ne, BTYP, NDA, bsxfun_ne); \ - REGISTER_REL_HANDLER (bsxfun_builtin_lt, BTYP, NDA, bsxfun_lt); \ - REGISTER_REL_HANDLER (bsxfun_builtin_le, BTYP, NDA, bsxfun_le); \ - REGISTER_REL_HANDLER (bsxfun_builtin_gt, BTYP, NDA, bsxfun_gt); \ +#define REGISTER_STD_HANDLERS(BTYP, NDA) \ + REGISTER_OP_HANDLER (bsxfun_builtin_plus, BTYP, NDA, bsxfun_add); \ + REGISTER_OP_HANDLER (bsxfun_builtin_minus, BTYP, NDA, bsxfun_sub); \ + REGISTER_OP_HANDLER (bsxfun_builtin_times, BTYP, NDA, bsxfun_mul); \ + REGISTER_OP_HANDLER (bsxfun_builtin_divide, BTYP, NDA, bsxfun_div); \ + REGISTER_OP_HANDLER (bsxfun_builtin_max, BTYP, NDA, bsxfun_max); \ + REGISTER_OP_HANDLER (bsxfun_builtin_min, BTYP, NDA, bsxfun_min); \ + REGISTER_REL_HANDLER (bsxfun_builtin_eq, BTYP, NDA, bsxfun_eq); \ + REGISTER_REL_HANDLER (bsxfun_builtin_ne, BTYP, NDA, bsxfun_ne); \ + REGISTER_REL_HANDLER (bsxfun_builtin_lt, BTYP, NDA, bsxfun_lt); \ + REGISTER_REL_HANDLER (bsxfun_builtin_le, BTYP, NDA, bsxfun_le); \ + REGISTER_REL_HANDLER (bsxfun_builtin_gt, BTYP, NDA, bsxfun_gt); \ REGISTER_REL_HANDLER (bsxfun_builtin_ge, BTYP, NDA, bsxfun_ge) REGISTER_STD_HANDLERS (btyp_double, NDArray); @@ -420,9 +420,9 @@ for (octave_idx_type i = 1; i < nd; i++) ncount *= dvc(i); -#define BSXDEF(T) \ - T result_ ## T; \ - bool have_ ## T = false; +#define BSXDEF(T) \ + T result_ ## T; \ + bool have_ ## T = false; BSXDEF(NDArray); BSXDEF(ComplexNDArray); @@ -456,13 +456,13 @@ octave_value_list tmp = func.do_multi_index_op (1, inputs); -#define BSXINIT(T, CLS, EXTRACTOR) \ - (result_type == CLS) \ - { \ - have_ ## T = true; \ - result_ ## T = tmp(0). EXTRACTOR ## _array_value (); \ - result_ ## T .resize (dvc); \ - } +#define BSXINIT(T, CLS, EXTRACTOR) \ + (result_type == CLS) \ + { \ + have_ ## T = true; \ + result_ ## T = tmp(0). EXTRACTOR ## _array_value (); \ + result_ ## T .resize (dvc); \ + } if (i == 0) { @@ -602,18 +602,18 @@ } } -#define BSXLOOP(T, CLS, EXTRACTOR) \ - (have_ ## T) \ - { \ - if (tmp(0).class_name () != CLS) \ - { \ - have_ ## T = false; \ - C = result_ ## T; \ - C = do_cat_op (C, tmp(0), ra_idx); \ - } \ - else \ - result_ ## T .insert (tmp(0). 
EXTRACTOR ## _array_value (), ra_idx); \ - } +#define BSXLOOP(T, CLS, EXTRACTOR) \ + (have_ ## T) \ + { \ + if (tmp(0).class_name () != CLS) \ + { \ + have_ ## T = false; \ + C = result_ ## T; \ + C = do_cat_op (C, tmp(0), ra_idx); \ + } \ + else \ + result_ ## T .insert (tmp(0). EXTRACTOR ## _array_value (), ra_idx); \ + } else if BSXLOOP(ComplexNDArray, "double", complex) else if BSXLOOP(boolNDArray, "logical", bool) @@ -630,9 +630,9 @@ } } -#define BSXEND(T) \ - (have_ ## T) \ - retval(0) = result_ ## T; +#define BSXEND(T) \ + (have_ ## T) \ + retval(0) = result_ ## T; if BSXEND(NDArray) else if BSXEND(ComplexNDArray) diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/cellfun.cc --- a/libinterp/corefcn/cellfun.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/cellfun.cc Mon Aug 01 12:40:18 2016 -0400 @@ -2139,10 +2139,10 @@ } break; -#define BTYP_BRANCH(X, Y) \ - case btyp_ ## X: \ - retval = do_mat2cell (a.Y ## _value (), d, nargin - 1); \ - break +#define BTYP_BRANCH(X, Y) \ + case btyp_ ## X: \ + retval = do_mat2cell (a.Y ## _value (), d, nargin - 1); \ + break BTYP_BRANCH (float, float_array); BTYP_BRANCH (float_complex, float_complex_array); diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/data.cc --- a/libinterp/corefcn/data.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/data.cc Mon Aug 01 12:40:18 2016 -0400 @@ -627,14 +627,14 @@ switch (btyp0) { -#define MAKE_INT_BRANCH(X) \ - case btyp_ ## X: \ - { \ - X##NDArray a0 = args(0).X##_array_value (); \ - X##NDArray a1 = args(1).X##_array_value (); \ - retval = binmap (a0, a1, rem, "rem"); \ - } \ - break +#define MAKE_INT_BRANCH(X) \ + case btyp_ ## X: \ + { \ + X##NDArray a0 = args(0).X##_array_value (); \ + X##NDArray a1 = args(1).X##_array_value (); \ + retval = binmap (a0, a1, rem, "rem"); \ + } \ + break MAKE_INT_BRANCH (int8); MAKE_INT_BRANCH (int16); @@ -808,14 +808,14 @@ switch (btyp0) { -#define MAKE_INT_BRANCH(X) \ - case btyp_ ## X: \ - { \ - X##NDArray a0 = args(0).X##_array_value (); \ - X##NDArray a1 = args(1).X##_array_value (); \ - retval = binmap (a0, a1, mod, "mod"); \ - } \ - break +#define MAKE_INT_BRANCH(X) \ + case btyp_ ## X: \ + { \ + X##NDArray a0 = args(0).X##_array_value (); \ + X##NDArray a1 = args(1).X##_array_value (); \ + retval = binmap (a0, a1, mod, "mod"); \ + } \ + break MAKE_INT_BRANCH (int8); MAKE_INT_BRANCH (int16); @@ -918,218 +918,218 @@ // Checked 1/23/2016. They should probably be removed for clarity. // FIXME: Need to convert reduction functions of this file for single precision -#define NATIVE_REDUCTION_1(FCN, TYPE, DIM) \ - (arg.is_ ## TYPE ## _type ()) \ - { \ - TYPE ## NDArray tmp = arg. TYPE ##_array_value (); \ - \ - retval = tmp.FCN (DIM); \ - } - -#define NATIVE_REDUCTION(FCN, BOOL_FCN) \ - \ - int nargin = args.length (); \ - \ - bool isnative = false; \ - bool isdouble = false; \ - \ - if (nargin > 1 && args(nargin - 1).is_string ()) \ - { \ - std::string str = args(nargin - 1).string_value (); \ - \ - if (str == "native") \ - isnative = true; \ - else if (str == "double") \ - isdouble = true; \ - else \ - error ("sum: unrecognized string argument"); \ - \ - nargin--; \ - } \ - \ - if (nargin < 1 || nargin > 2) \ - print_usage (); \ - \ - octave_value retval; \ - \ - octave_value arg = args(0); \ - \ - int dim = (nargin == 1 ? 
-1 : args(1).int_value (true) - 1); \ - \ - if (dim < -1) \ - error (#FCN ": invalid dimension argument = %d", dim + 1); \ - \ - if (arg.is_sparse_type ()) \ - { \ - if (arg.is_real_type ()) \ - { \ - SparseMatrix tmp = arg.sparse_matrix_value (); \ -\ - retval = tmp.FCN (dim); \ - } \ - else \ - { \ - SparseComplexMatrix tmp \ - = arg.sparse_complex_matrix_value (); \ -\ - retval = tmp.FCN (dim); \ - } \ - } \ - else \ - { \ - if (isnative) \ - { \ - if NATIVE_REDUCTION_1 (FCN, uint8, dim) \ - else if NATIVE_REDUCTION_1 (FCN, uint16, dim) \ - else if NATIVE_REDUCTION_1 (FCN, uint32, dim) \ - else if NATIVE_REDUCTION_1 (FCN, uint64, dim) \ - else if NATIVE_REDUCTION_1 (FCN, int8, dim) \ - else if NATIVE_REDUCTION_1 (FCN, int16, dim) \ - else if NATIVE_REDUCTION_1 (FCN, int32, dim) \ - else if NATIVE_REDUCTION_1 (FCN, int64, dim) \ - else if (arg.is_bool_type ()) \ - { \ - boolNDArray tmp = arg.bool_array_value (); \ -\ - retval = boolNDArray (tmp.BOOL_FCN (dim)); \ - } \ - else if (arg.is_char_matrix ()) \ - { \ - error (#FCN, ": invalid char type"); \ - } \ - else if (! isdouble && arg.is_single_type ()) \ - { \ - if (arg.is_complex_type ()) \ - { \ - FloatComplexNDArray tmp = \ - arg.float_complex_array_value (); \ -\ - retval = tmp.FCN (dim); \ - } \ - else if (arg.is_real_type ()) \ - { \ - FloatNDArray tmp = arg.float_array_value (); \ -\ - retval = tmp.FCN (dim); \ - } \ - } \ - else if (arg.is_complex_type ()) \ - { \ - ComplexNDArray tmp = arg.complex_array_value (); \ -\ - retval = tmp.FCN (dim); \ - } \ - else if (arg.is_real_type ()) \ - { \ - NDArray tmp = arg.array_value (); \ -\ - retval = tmp.FCN (dim); \ - } \ - else \ - err_wrong_type_arg (#FCN, arg); \ - } \ - else if (arg.is_bool_type ()) \ - { \ - boolNDArray tmp = arg.bool_array_value (); \ -\ - retval = tmp.FCN (dim); \ - } \ - else if (! isdouble && arg.is_single_type ()) \ - { \ - if (arg.is_real_type ()) \ - { \ - FloatNDArray tmp = arg.float_array_value (); \ -\ - retval = tmp.FCN (dim); \ - } \ - else if (arg.is_complex_type ()) \ - { \ - FloatComplexNDArray tmp = \ - arg.float_complex_array_value (); \ -\ - retval = tmp.FCN (dim); \ - } \ - } \ - else if (arg.is_real_type ()) \ - { \ - NDArray tmp = arg.array_value (); \ -\ - retval = tmp.FCN (dim); \ - } \ - else if (arg.is_complex_type ()) \ - { \ - ComplexNDArray tmp = arg.complex_array_value (); \ -\ - retval = tmp.FCN (dim); \ - } \ - else \ - err_wrong_type_arg (#FCN, arg); \ - } \ - \ +#define NATIVE_REDUCTION_1(FCN, TYPE, DIM) \ + (arg.is_ ## TYPE ## _type ()) \ + { \ + TYPE ## NDArray tmp = arg. TYPE ##_array_value (); \ + \ + retval = tmp.FCN (DIM); \ + } + +#define NATIVE_REDUCTION(FCN, BOOL_FCN) \ + \ + int nargin = args.length (); \ + \ + bool isnative = false; \ + bool isdouble = false; \ + \ + if (nargin > 1 && args(nargin - 1).is_string ()) \ + { \ + std::string str = args(nargin - 1).string_value (); \ + \ + if (str == "native") \ + isnative = true; \ + else if (str == "double") \ + isdouble = true; \ + else \ + error ("sum: unrecognized string argument"); \ + \ + nargin--; \ + } \ + \ + if (nargin < 1 || nargin > 2) \ + print_usage (); \ + \ + octave_value retval; \ + \ + octave_value arg = args(0); \ + \ + int dim = (nargin == 1 ? 
-1 : args(1).int_value (true) - 1); \ + \ + if (dim < -1) \ + error (#FCN ": invalid dimension argument = %d", dim + 1); \ + \ + if (arg.is_sparse_type ()) \ + { \ + if (arg.is_real_type ()) \ + { \ + SparseMatrix tmp = arg.sparse_matrix_value (); \ + \ + retval = tmp.FCN (dim); \ + } \ + else \ + { \ + SparseComplexMatrix tmp \ + = arg.sparse_complex_matrix_value (); \ + \ + retval = tmp.FCN (dim); \ + } \ + } \ + else \ + { \ + if (isnative) \ + { \ + if NATIVE_REDUCTION_1 (FCN, uint8, dim) \ + else if NATIVE_REDUCTION_1 (FCN, uint16, dim) \ + else if NATIVE_REDUCTION_1 (FCN, uint32, dim) \ + else if NATIVE_REDUCTION_1 (FCN, uint64, dim) \ + else if NATIVE_REDUCTION_1 (FCN, int8, dim) \ + else if NATIVE_REDUCTION_1 (FCN, int16, dim) \ + else if NATIVE_REDUCTION_1 (FCN, int32, dim) \ + else if NATIVE_REDUCTION_1 (FCN, int64, dim) \ + else if (arg.is_bool_type ()) \ + { \ + boolNDArray tmp = arg.bool_array_value (); \ + \ + retval = boolNDArray (tmp.BOOL_FCN (dim)); \ + } \ + else if (arg.is_char_matrix ()) \ + { \ + error (#FCN, ": invalid char type"); \ + } \ + else if (! isdouble && arg.is_single_type ()) \ + { \ + if (arg.is_complex_type ()) \ + { \ + FloatComplexNDArray tmp = \ + arg.float_complex_array_value (); \ + \ + retval = tmp.FCN (dim); \ + } \ + else if (arg.is_real_type ()) \ + { \ + FloatNDArray tmp = arg.float_array_value (); \ + \ + retval = tmp.FCN (dim); \ + } \ + } \ + else if (arg.is_complex_type ()) \ + { \ + ComplexNDArray tmp = arg.complex_array_value (); \ + \ + retval = tmp.FCN (dim); \ + } \ + else if (arg.is_real_type ()) \ + { \ + NDArray tmp = arg.array_value (); \ + \ + retval = tmp.FCN (dim); \ + } \ + else \ + err_wrong_type_arg (#FCN, arg); \ + } \ + else if (arg.is_bool_type ()) \ + { \ + boolNDArray tmp = arg.bool_array_value (); \ + \ + retval = tmp.FCN (dim); \ + } \ + else if (! isdouble && arg.is_single_type ()) \ + { \ + if (arg.is_real_type ()) \ + { \ + FloatNDArray tmp = arg.float_array_value (); \ + \ + retval = tmp.FCN (dim); \ + } \ + else if (arg.is_complex_type ()) \ + { \ + FloatComplexNDArray tmp = \ + arg.float_complex_array_value (); \ + \ + retval = tmp.FCN (dim); \ + } \ + } \ + else if (arg.is_real_type ()) \ + { \ + NDArray tmp = arg.array_value (); \ + \ + retval = tmp.FCN (dim); \ + } \ + else if (arg.is_complex_type ()) \ + { \ + ComplexNDArray tmp = arg.complex_array_value (); \ + \ + retval = tmp.FCN (dim); \ + } \ + else \ + err_wrong_type_arg (#FCN, arg); \ + } \ + \ return retval -#define DATA_REDUCTION(FCN) \ - \ - int nargin = args.length (); \ - \ - if (nargin < 1 || nargin > 2) \ - print_usage (); \ - \ - octave_value retval; \ - \ - octave_value arg = args(0); \ - \ - int dim = (nargin == 1 ? -1 : args(1).int_value (true) - 1); \ - \ - if (dim < -1) \ - error (#FCN ": invalid dimension argument = %d", dim + 1); \ - \ - if (arg.is_real_type ()) \ - { \ - if (arg.is_sparse_type ()) \ - { \ - SparseMatrix tmp = arg.sparse_matrix_value (); \ -\ - retval = tmp.FCN (dim); \ - } \ - else if (arg.is_single_type ()) \ - { \ - FloatNDArray tmp = arg.float_array_value (); \ -\ - retval = tmp.FCN (dim); \ - } \ - else \ - { \ - NDArray tmp = arg.array_value (); \ -\ - retval = tmp.FCN (dim); \ - } \ - } \ - else if (arg.is_complex_type ()) \ - { \ - if (arg.is_sparse_type ()) \ - { \ +#define DATA_REDUCTION(FCN) \ + \ + int nargin = args.length (); \ + \ + if (nargin < 1 || nargin > 2) \ + print_usage (); \ + \ + octave_value retval; \ + \ + octave_value arg = args(0); \ + \ + int dim = (nargin == 1 ? 
-1 : args(1).int_value (true) - 1); \ + \ + if (dim < -1) \ + error (#FCN ": invalid dimension argument = %d", dim + 1); \ + \ + if (arg.is_real_type ()) \ + { \ + if (arg.is_sparse_type ()) \ + { \ + SparseMatrix tmp = arg.sparse_matrix_value (); \ + \ + retval = tmp.FCN (dim); \ + } \ + else if (arg.is_single_type ()) \ + { \ + FloatNDArray tmp = arg.float_array_value (); \ + \ + retval = tmp.FCN (dim); \ + } \ + else \ + { \ + NDArray tmp = arg.array_value (); \ + \ + retval = tmp.FCN (dim); \ + } \ + } \ + else if (arg.is_complex_type ()) \ + { \ + if (arg.is_sparse_type ()) \ + { \ SparseComplexMatrix tmp = arg.sparse_complex_matrix_value (); \ -\ - retval = tmp.FCN (dim); \ - } \ - else if (arg.is_single_type ()) \ - { \ - FloatComplexNDArray tmp \ - = arg.float_complex_array_value (); \ -\ - retval = tmp.FCN (dim); \ - } \ - else \ - { \ - ComplexNDArray tmp = arg.complex_array_value (); \ -\ - retval = tmp.FCN (dim); \ - } \ - } \ - else \ - err_wrong_type_arg (#FCN, arg); \ - \ + \ + retval = tmp.FCN (dim); \ + } \ + else if (arg.is_single_type ()) \ + { \ + FloatComplexNDArray tmp \ + = arg.float_complex_array_value (); \ + \ + retval = tmp.FCN (dim); \ + } \ + else \ + { \ + ComplexNDArray tmp = arg.complex_array_value (); \ + \ + retval = tmp.FCN (dim); \ + } \ + } \ + else \ + err_wrong_type_arg (#FCN, arg); \ + \ return retval DEFUN (cumprod, args, , @@ -1261,13 +1261,13 @@ retval = arg.float_complex_array_value ().cumsum (dim); break; -#define MAKE_INT_BRANCH(X) \ - case btyp_ ## X: \ - if (isnative) \ - retval = arg.X ## _array_value ().cumsum (dim); \ - else \ - retval = arg.array_value ().cumsum (dim); \ - break; +#define MAKE_INT_BRANCH(X) \ + case btyp_ ## X: \ + if (isnative) \ + retval = arg.X ## _array_value ().cumsum (dim); \ + else \ + retval = arg.array_value ().cumsum (dim); \ + break; MAKE_INT_BRANCH (int8); MAKE_INT_BRANCH (int16); @@ -1548,13 +1548,13 @@ retval = arg.float_complex_array_value ().prod (dim); break; -#define MAKE_INT_BRANCH(X) \ - case btyp_ ## X: \ - if (isnative) \ - retval = arg.X ## _array_value ().prod (dim); \ - else \ - retval = arg.array_value ().prod (dim); \ - break; +#define MAKE_INT_BRANCH(X) \ + case btyp_ ## X: \ + if (isnative) \ + retval = arg.X ## _array_value ().prod (dim); \ + else \ + retval = arg.array_value ().prod (dim); \ + break; MAKE_INT_BRANCH (int8); MAKE_INT_BRANCH (int16); @@ -3061,13 +3061,13 @@ retval = arg.float_complex_array_value ().sum (dim); break; -#define MAKE_INT_BRANCH(X) \ - case btyp_ ## X: \ - if (isnative) \ - retval = arg.X ## _array_value ().sum (dim); \ - else \ - retval = arg.X ## _array_value ().dsum (dim); \ - break; +#define MAKE_INT_BRANCH(X) \ + case btyp_ ## X: \ + if (isnative) \ + retval = arg.X ## _array_value ().sum (dim); \ + else \ + retval = arg.X ## _array_value ().dsum (dim); \ + break; MAKE_INT_BRANCH (int8); MAKE_INT_BRANCH (int16); @@ -4878,7 +4878,7 @@ return retval; } -#define INSTANTIATE_EYE(T) \ +#define INSTANTIATE_EYE(T) \ template octave_value identity_matrix (int, int) INSTANTIATE_EYE (int8NDArray); @@ -6897,10 +6897,11 @@ case btyp_float_complex: retval = argx.float_complex_array_value ().nth_element (n, dim); break; -#define MAKE_INT_BRANCH(X) \ - case btyp_ ## X: \ - retval = argx.X ## _array_value ().nth_element (n, dim); \ - break; + +#define MAKE_INT_BRANCH(X) \ + case btyp_ ## X: \ + retval = argx.X ## _array_value ().nth_element (n, dim); \ + break; MAKE_INT_BRANCH (int8); MAKE_INT_BRANCH (int16); @@ -6911,7 +6912,9 @@ MAKE_INT_BRANCH (uint32); MAKE_INT_BRANCH 
(uint64); MAKE_INT_BRANCH (bool); + #undef MAKE_INT_BRANCH + default: if (argx.is_cellstr ()) retval = argx.cellstr_value ().nth_element (n, dim); @@ -7090,10 +7093,10 @@ zero.float_complex_value ()); break; -#define MAKE_INT_BRANCH(X) \ - case btyp_ ## X: \ - retval = do_accumarray_minmax (idx, vals.X ## _array_value (), \ - n, ismin, zero.X ## _scalar_value ()); \ +#define MAKE_INT_BRANCH(X) \ + case btyp_ ## X: \ + retval = do_accumarray_minmax (idx, vals.X ## _array_value (), \ + n, ismin, zero.X ## _scalar_value ()); \ break; MAKE_INT_BRANCH (int8); @@ -7289,12 +7292,12 @@ return retval; } -#define MAKE_INT_BRANCH(INTX) \ +#define MAKE_INT_BRANCH(INTX) \ else if (tval.is_ ## INTX ## _type () && fval.is_ ## INTX ## _type ()) \ - { \ - retval = do_merge (mask, \ - tval.INTX ## _array_value (), \ - fval.INTX ## _array_value ()); \ + { \ + retval = do_merge (mask, \ + tval.INTX ## _array_value (), \ + fval.INTX ## _array_value ()); \ } DEFUN (merge, args, , @@ -7688,10 +7691,10 @@ switch (x.builtin_type ()) { -#define BTYP_BRANCH(X, EX) \ - case btyp_ ## X: \ - retval = do_repelems (x.EX ## _value (), r); \ - break; +#define BTYP_BRANCH(X, EX) \ + case btyp_ ## X: \ + retval = do_repelems (x.EX ## _value (), r); \ + break; BTYP_BRANCH (double, array); BTYP_BRANCH (float, float_array); @@ -7711,6 +7714,7 @@ BTYP_BRANCH (cell, cell); //BTYP_BRANCH (struct, map);//FIXME + #undef BTYP_BRANCH default: @@ -7742,18 +7746,18 @@ if (args(0).is_integer_type ()) { -#define MAKE_INT_BRANCH(X) \ - if (args(0).is_ ## X ## _type ()) \ - { \ - const X##NDArray in = args(0). X## _array_value (); \ +#define MAKE_INT_BRANCH(X) \ + if (args(0).is_ ## X ## _type ()) \ + { \ + const X##NDArray in = args(0). X## _array_value (); \ size_t inlen = in.numel () * sizeof (X## _t) / sizeof (char); \ const char* inc = reinterpret_cast (in.data ()); \ - char* out; \ - if (octave_base64_encode (inc, inlen, &out)) \ - { \ - retval(0) = octave_value (out); \ - ::free (out); \ - } \ + char* out; \ + if (octave_base64_encode (inc, inlen, &out)) \ + { \ + retval(0) = octave_value (out); \ + ::free (out); \ + } \ } MAKE_INT_BRANCH(int8) @@ -7764,6 +7768,7 @@ else MAKE_INT_BRANCH(uint16) else MAKE_INT_BRANCH(uint32) else MAKE_INT_BRANCH(uint64) + #undef MAKE_INT_BRANCH else diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/defun-dld.h --- a/libinterp/corefcn/defun-dld.h Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/defun-dld.h Mon Aug 01 12:40:18 2016 -0400 @@ -42,14 +42,14 @@ // // The DECLARE_FUN is for the definition of the function. 
-#define DEFUN_DLD(name, args_name, nargout_name, doc) \ - FORWARD_DECLARE_FUN (name); \ - DEFINE_FUN_INSTALLER_FUN (name, doc) \ +#define DEFUN_DLD(name, args_name, nargout_name, doc) \ + FORWARD_DECLARE_FUN (name); \ + DEFINE_FUN_INSTALLER_FUN (name, doc) \ DECLARE_FUN (name, args_name, nargout_name) -#define DEFUNX_DLD(name, fname, gname, args_name, nargout_name, doc) \ - FORWARD_DECLARE_FUNX (fname); \ - DEFINE_FUNX_INSTALLER_FUN (name, fname, gname, doc) \ +#define DEFUNX_DLD(name, fname, gname, args_name, nargout_name, doc) \ + FORWARD_DECLARE_FUNX (fname); \ + DEFINE_FUNX_INSTALLER_FUN (name, fname, gname, doc) \ DECLARE_FUNX (fname, args_name, nargout_name) #endif diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/defun-int.h --- a/libinterp/corefcn/defun-int.h Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/defun-int.h Mon Aug 01 12:40:18 2016 -0400 @@ -80,18 +80,18 @@ extern OCTINTERP_API void defun_isargout (int, int, bool *); -#define FORWARD_DECLARE_FUNX(name) \ - extern OCTAVE_EXPORT octave_value_list \ +#define FORWARD_DECLARE_FUNX(name) \ + extern OCTAVE_EXPORT octave_value_list \ name (const octave_value_list&, int) -#define FORWARD_DECLARE_FUN(name) \ +#define FORWARD_DECLARE_FUN(name) \ FORWARD_DECLARE_FUNX (F ## name) -#define DECLARE_FUNX(name, args_name, nargout_name) \ - OCTAVE_EXPORT octave_value_list \ +#define DECLARE_FUNX(name, args_name, nargout_name) \ + OCTAVE_EXPORT octave_value_list \ name (const octave_value_list& args_name, int nargout_name) -#define DECLARE_FUN(name, args_name, nargout_name) \ +#define DECLARE_FUN(name, args_name, nargout_name) \ DECLARE_FUNX (F ## name, args_name, nargout_name) // Define the code that will be used to insert the new function into @@ -104,27 +104,28 @@ (*octave_dld_fcn_getter) (const octave::dynamic_library&, bool relative); #if defined (OCTAVE_SOURCE) -# define DEFINE_FUN_INSTALLER_FUN(name, doc) \ - DEFINE_FUNX_INSTALLER_FUN(#name, F ## name, G ## name, "external-doc") +# define DEFINE_FUN_INSTALLER_FUN(name, doc) \ + DEFINE_FUNX_INSTALLER_FUN(#name, F ## name, G ## name, "external-doc") #else -# define DEFINE_FUN_INSTALLER_FUN(name, doc) \ - DEFINE_FUNX_INSTALLER_FUN(#name, F ## name, G ## name, doc) +# define DEFINE_FUN_INSTALLER_FUN(name, doc) \ + DEFINE_FUNX_INSTALLER_FUN(#name, F ## name, G ## name, doc) #endif -#define DEFINE_FUNX_INSTALLER_FUN(name, fname, gname, doc) \ - extern "C" \ - OCTAVE_EXPORT \ - octave_function * \ - gname (const octave::dynamic_library& shl, bool relative) \ - { \ - check_version (OCTAVE_API_VERSION, name); \ - \ - octave_dld_function *fcn = octave_dld_function::create (fname, shl, name, doc); \ - \ - if (relative) \ - fcn->mark_relative (); \ - \ - return fcn; \ +#define DEFINE_FUNX_INSTALLER_FUN(name, fname, gname, doc) \ + extern "C" \ + OCTAVE_EXPORT \ + octave_function * \ + gname (const octave::dynamic_library& shl, bool relative) \ + { \ + check_version (OCTAVE_API_VERSION, name); \ + \ + octave_dld_function *fcn \ + = octave_dld_function::create (fname, shl, name, doc); \ + \ + if (relative) \ + fcn->mark_relative (); \ + \ + return fcn; \ } #endif diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/defun.h --- a/libinterp/corefcn/defun.h Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/defun.h Mon Aug 01 12:40:18 2016 -0400 @@ -43,7 +43,7 @@ // // doc is the simple help text for the function. 
-#define DEFUN(name, args_name, nargout_name, doc) \ +#define DEFUN(name, args_name, nargout_name, doc) \ DECLARE_FUN (name, args_name, nargout_name) // This one can be used when 'name' cannot be used directly (if it is @@ -51,7 +51,7 @@ // quoted string, and the internal name of the function must be passed // too (the convention is to use a prefix of "F", so "foo" becomes "Ffoo"). -#define DEFUNX(name, fname, args_name, nargout_name, doc) \ +#define DEFUNX(name, fname, args_name, nargout_name, doc) \ DECLARE_FUNX (fname, args_name, nargout_name) // This is a function with a name that can't be hidden by a variable. diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/det.cc --- a/libinterp/corefcn/det.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/det.cc Mon Aug 01 12:40:18 2016 -0400 @@ -43,9 +43,10 @@ #include "ov-flt-cx-diag.h" #include "ov-perm.h" -#define MAYBE_CAST(VAR, CLASS) \ - const CLASS *VAR = arg.type_id () == CLASS::static_type_id () ? \ - dynamic_cast (&arg.get_rep ()) : 0 +#define MAYBE_CAST(VAR, CLASS) \ + const CLASS *VAR = (arg.type_id () == CLASS::static_type_id () \ + ? dynamic_cast (&arg.get_rep ()) \ + : 0) DEFUN (det, args, nargout, doc: /* -*- texinfo -*- diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/error.h --- a/libinterp/corefcn/error.h Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/error.h Mon Aug 01 12:40:18 2016 -0400 @@ -34,7 +34,7 @@ class octave_value_list; class octave_execution_exception; -#define panic_impossible() \ +#define panic_impossible() \ panic ("impossible state reached in file '%s' at line %d", __FILE__, __LINE__) extern OCTINTERP_API void reset_error_handler (void); diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/find.cc --- a/libinterp/corefcn/find.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/find.cc Mon Aug 01 12:40:18 2016 -0400 @@ -446,11 +446,11 @@ } else if (arg.is_integer_type ()) { -#define DO_INT_BRANCH(INTT) \ - else if (arg.is_ ## INTT ## _type ()) \ - { \ - INTT ## NDArray v = arg.INTT ## _array_value (); \ - \ +#define DO_INT_BRANCH(INTT) \ + else if (arg.is_ ## INTT ## _type ()) \ + { \ + INTT ## NDArray v = arg.INTT ## _array_value (); \ + \ retval = find_nonzero_elem_idx (v, nargout, n_to_find, direction); \ } diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/gcd.cc --- a/libinterp/corefcn/gcd.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/gcd.cc Mon Aug 01 12:40:18 2016 -0400 @@ -274,10 +274,10 @@ retval = do_simple_gcd (a, b); break; -#define MAKE_INT_BRANCH(X) \ - case btyp_ ## X: \ - retval = do_simple_gcd (a, b); \ - break +#define MAKE_INT_BRANCH(X) \ + case btyp_ ## X: \ + retval = do_simple_gcd (a, b); \ + break MAKE_INT_BRANCH (int8); MAKE_INT_BRANCH (int16); @@ -386,10 +386,10 @@ retval = do_extended_gcd (a, b, x, y); break; -#define MAKE_INT_BRANCH(X) \ - case btyp_ ## X: \ - retval = do_extended_gcd (a, b, x, y); \ - break +#define MAKE_INT_BRANCH(X) \ + case btyp_ ## X: \ + retval = do_extended_gcd (a, b, x, y); \ + break MAKE_INT_BRANCH (int8); MAKE_INT_BRANCH (int16); diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/graphics.cc --- a/libinterp/corefcn/graphics.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/graphics.cc Mon Aug 01 12:40:18 2016 -0400 @@ -945,14 +945,14 @@ // is supported anyways so there is another double for loop across // height and width to convert all of the input data to GLfloat. -#define CONVERT_CDATA_1(ARRAY_T, VAL_FN, IS_REAL) \ - do \ - { \ - ARRAY_T tmp = cdata. 
VAL_FN ## array_value (); \ - \ - convert_cdata_1 (is_scaled, IS_REAL, clim_0, clim_1, cmapv, \ - tmp.data (), lda, nc, av); \ - } \ +#define CONVERT_CDATA_1(ARRAY_T, VAL_FN, IS_REAL) \ + do \ + { \ + ARRAY_T tmp = cdata. VAL_FN ## array_value (); \ + \ + convert_cdata_1 (is_scaled, IS_REAL, clim_0, clim_1, cmapv, \ + tmp.data (), lda, nc, av); \ + } \ while (0) if (cdata.is_uint8_type ()) @@ -1461,29 +1461,30 @@ if (data.dims () == v.dims ()) { -#define CHECK_ARRAY_EQUAL(T,F,A) \ - { \ - if (data.numel () == 1) \ - return data.F ## scalar_value () == \ - v.F ## scalar_value (); \ - else \ - { \ - /* Keep copy of array_value to allow sparse/bool arrays */ \ - /* that are converted, to not be deallocated early */ \ - const A m1 = data.F ## array_value (); \ - const T* d1 = m1.data (); \ - const A m2 = v.F ## array_value (); \ - const T* d2 = m2.data ();\ - \ - bool flag = true; \ - \ - for (int i = 0; flag && i < data.numel (); i++) \ - if (d1[i] != d2[i]) \ - flag = false; \ - \ - return flag; \ - } \ - } +#define CHECK_ARRAY_EQUAL(T, F, A) \ + { \ + if (data.numel () == 1) \ + return data.F ## scalar_value () == \ + v.F ## scalar_value (); \ + else \ + { \ + /* Keep copy of array_value to allow */ \ + /* sparse/bool arrays that are converted, to */ \ + /* not be deallocated early */ \ + const A m1 = data.F ## array_value (); \ + const T* d1 = m1.data (); \ + const A m2 = v.F ## array_value (); \ + const T* d2 = m2.data (); \ + \ + bool flag = true; \ + \ + for (int i = 0; flag && i < data.numel (); i++) \ + if (d1[i] != d2[i]) \ + flag = false; \ + \ + return flag; \ + } \ + } if (data.is_double_type () || data.is_bool_type ()) CHECK_ARRAY_EQUAL (double, , NDArray) @@ -7138,29 +7139,29 @@ Matrix limits; double val; -#define FIX_LIMITS \ - if (limits.numel () == 4) \ - { \ - val = limits(0); \ - if (octave::math::finite (val)) \ - min_val = val; \ - val = limits(1); \ - if (octave::math::finite (val)) \ - max_val = val; \ - val = limits(2); \ - if (octave::math::finite (val)) \ - min_pos = val; \ - val = limits(3); \ - if (octave::math::finite (val)) \ - max_neg = val; \ - } \ - else \ - { \ - limits.resize (4, 1); \ - limits(0) = min_val; \ - limits(1) = max_val; \ - limits(2) = min_pos; \ - limits(3) = max_neg; \ +#define FIX_LIMITS \ + if (limits.numel () == 4) \ + { \ + val = limits(0); \ + if (octave::math::finite (val)) \ + min_val = val; \ + val = limits(1); \ + if (octave::math::finite (val)) \ + max_val = val; \ + val = limits(2); \ + if (octave::math::finite (val)) \ + min_pos = val; \ + val = limits(3); \ + if (octave::math::finite (val)) \ + max_neg = val; \ + } \ + else \ + { \ + limits.resize (4, 1); \ + limits(0) = min_val; \ + limits(1) = max_val; \ + limits(2) = min_pos; \ + limits(3) = max_neg; \ } if (axis_type == "xdata" || axis_type == "xscale" @@ -10446,13 +10447,13 @@ return retval; } -#define GO_BODY(TYPE) \ - gh_manager::auto_lock guard; \ - \ - if (args.length () == 0) \ - print_usage (); \ - \ - return octave_value (make_graphics_object (#TYPE, false, args)); \ +#define GO_BODY(TYPE) \ + gh_manager::auto_lock guard; \ + \ + if (args.length () == 0) \ + print_usage (); \ + \ + return octave_value (make_graphics_object (#TYPE, false, args)); \ int calc_dimensions (const graphics_object& go) diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/interpreter.h --- a/libinterp/corefcn/interpreter.h Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/interpreter.h Mon Aug 01 12:40:18 2016 -0400 @@ -57,32 +57,32 @@ // Call a function with exceptions handled to avoid 
problems with // errors while shutting down. -#define OCTAVE_IGNORE_EXCEPTION(E) \ - catch (E) \ - { \ +#define OCTAVE_IGNORE_EXCEPTION(E) \ + catch (E) \ + { \ std::cerr << "error: ignoring " #E " while preparing to exit" << std::endl; \ - recover_from_exception (); \ + recover_from_exception (); \ } -#define OCTAVE_SAFE_CALL(F, ARGS) \ - do \ - { \ - try \ - { \ - octave::unwind_protect frame; \ - \ - frame.protect_var (Vdebug_on_error); \ - frame.protect_var (Vdebug_on_warning); \ - \ - Vdebug_on_error = false; \ - Vdebug_on_warning = false; \ - \ - F ARGS; \ - } \ - OCTAVE_IGNORE_EXCEPTION (const octave_interrupt_exception&) \ - OCTAVE_IGNORE_EXCEPTION (const octave_execution_exception&) \ - OCTAVE_IGNORE_EXCEPTION (const std::bad_alloc&) \ - } \ +#define OCTAVE_SAFE_CALL(F, ARGS) \ + do \ + { \ + try \ + { \ + octave::unwind_protect frame; \ + \ + frame.protect_var (Vdebug_on_error); \ + frame.protect_var (Vdebug_on_warning); \ + \ + Vdebug_on_error = false; \ + Vdebug_on_warning = false; \ + \ + F ARGS; \ + } \ + OCTAVE_IGNORE_EXCEPTION (const octave_interrupt_exception&) \ + OCTAVE_IGNORE_EXCEPTION (const octave_execution_exception&) \ + OCTAVE_IGNORE_EXCEPTION (const std::bad_alloc&) \ + } \ while (0) namespace octave diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/jit-ir.h --- a/libinterp/corefcn/jit-ir.h Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/jit-ir.h Mon Aug 01 12:40:18 2016 -0400 @@ -102,7 +102,9 @@ jit_factory { typedef std::list value_list; + public: + ~jit_factory (void); const value_list& constants (void) const { return mconstants; } @@ -116,6 +118,7 @@ } #define DECL_ARG(n) const ARG ## n& arg ## n + #define JIT_CREATE(N) \ template \ T *create (OCT_MAKE_LIST (DECL_ARG, N)) \ @@ -132,7 +135,9 @@ #undef JIT_CREATE #undef DECL_ARG + private: + void track_value (jit_value *v); value_list all_values; @@ -340,6 +345,7 @@ } #define STASH_ARG(i) stash_argument (i, arg ## i); + #define JIT_INSTRUCTION_CTOR(N) \ jit_instruction (OCT_MAKE_DECL_LIST (jit_value *, arg, N)) \ : already_infered (N), marguments (N), mid (next_id ()), mparent (0) \ @@ -1022,10 +1028,11 @@ jit_terminator : public jit_instruction { public: -#define JIT_TERMINATOR_CONST(N) \ - jit_terminator (size_t asuccessor_count, \ - OCT_MAKE_DECL_LIST (jit_value *, arg, N)) \ - : jit_instruction (OCT_MAKE_ARG_LIST (arg, N)), \ + +#define JIT_TERMINATOR_CONST(N) \ + jit_terminator (size_t asuccessor_count, \ + OCT_MAKE_DECL_LIST (jit_value *, arg, N)) \ + : jit_instruction (OCT_MAKE_ARG_LIST (arg, N)), \ malive (asuccessor_count, false) { } JIT_TERMINATOR_CONST (1) @@ -1416,7 +1423,7 @@ public: virtual ~jit_ir_walker () { } -#define JIT_METH(clname) \ +#define JIT_METH(clname) \ virtual void visit (jit_ ## clname&) = 0; JIT_VISIT_IR_CLASSES; diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/jit-typeinfo.h --- a/libinterp/corefcn/jit-typeinfo.h Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/jit-typeinfo.h Mon Aug 01 12:40:18 2016 -0400 @@ -693,7 +693,7 @@ return retval; } -#define JIT_PARAM_ARGS llvm::ExecutionEngine *ee, T fn, \ +#define JIT_PARAM_ARGS llvm::ExecutionEngine *ee, T fn, \ const llvm::Twine& name, jit_type *ret, #define JIT_PARAMS ee, fn, name, ret, #define CREATE_FUNCTION(N) JIT_EXPAND(template jit_function, \ diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/lookup.cc --- a/libinterp/corefcn/lookup.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/lookup.cc Mon Aug 01 12:40:18 2016 -0400 @@ -100,11 +100,11 @@ // The question is, how should it behave w.r.t. 
the second argument's type. // We'd need a dispatch on two arguments. Hmmm... -#define INT_ARRAY_LOOKUP(TYPE) \ - (table.is_ ## TYPE ## _type () && y.is_ ## TYPE ## _type ()) \ - retval = do_numeric_lookup (table.TYPE ## _array_value (), \ - y.TYPE ## _array_value (), \ - left_inf, right_inf, \ +#define INT_ARRAY_LOOKUP(TYPE) \ + (table.is_ ## TYPE ## _type () && y.is_ ## TYPE ## _type ()) \ + retval = do_numeric_lookup (table.TYPE ## _array_value (), \ + y.TYPE ## _array_value (), \ + left_inf, right_inf, \ match_idx, match_bool); template static octave_value diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/ls-mat5.cc --- a/libinterp/corefcn/ls-mat5.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/ls-mat5.cc Mon Aug 01 12:40:18 2016 -0400 @@ -259,20 +259,20 @@ bool swap, mat5_data_type type) { -#define READ_INTEGER_DATA(TYPE, swap, data, size, len, stream) \ - do \ - { \ - if (len > 0) \ - { \ - OCTAVE_LOCAL_BUFFER (TYPE, ptr, len); \ +#define READ_INTEGER_DATA(TYPE, swap, data, size, len, stream) \ + do \ + { \ + if (len > 0) \ + { \ + OCTAVE_LOCAL_BUFFER (TYPE, ptr, len); \ std::streamsize n_bytes = size * static_cast (len); \ - stream.read (reinterpret_cast (ptr), n_bytes); \ - if (swap) \ - swap_bytes< size > (ptr, len); \ - for (octave_idx_type i = 0; i < len; i++) \ - data[i] = ptr[i]; \ - } \ - } \ + stream.read (reinterpret_cast (ptr), n_bytes); \ + if (swap) \ + swap_bytes< size > (ptr, len); \ + for (octave_idx_type i = 0; i < len; i++) \ + data[i] = ptr[i]; \ + } \ + } \ while (0) switch (type) @@ -370,52 +370,52 @@ octave_idx_type count, bool swap, mat5_data_type type); -#define OCTAVE_MAT5_INTEGER_READ(TYP) \ - { \ - TYP re (dims); \ - \ - std::streampos tmp_pos; \ - \ +#define OCTAVE_MAT5_INTEGER_READ(TYP) \ + { \ + TYP re (dims); \ + \ + std::streampos tmp_pos; \ + \ + if (read_mat5_tag (is, swap, type, len, is_small_data_element)) \ + error ("load: reading matrix data for '%s'", retval.c_str ()); \ + \ + octave_idx_type n = re.numel (); \ + tmp_pos = is.tellg (); \ + read_mat5_integer_data (is, re.fortran_vec (), n, swap, \ + static_cast (type)); \ + \ + if (! is) \ + error ("load: reading matrix data for '%s'", retval.c_str ()); \ + \ + is.seekg (tmp_pos + static_cast \ + (READ_PAD (is_small_data_element, len))); \ + \ + if (imag) \ + { \ + /* We don't handle imag integer types, convert to an array */ \ + NDArray im (dims); \ + \ if (read_mat5_tag (is, swap, type, len, is_small_data_element)) \ - error ("load: reading matrix data for '%s'", retval.c_str ()); \ - \ - octave_idx_type n = re.numel (); \ - tmp_pos = is.tellg (); \ - read_mat5_integer_data (is, re.fortran_vec (), n, swap, \ - static_cast (type)); \ - \ - if (! is) \ - error ("load: reading matrix data for '%s'", retval.c_str ()); \ - \ - is.seekg (tmp_pos + static_cast\ - (READ_PAD (is_small_data_element, len))); \ - \ - if (imag) \ - { \ - /* We don't handle imag integer types, convert to an array */ \ - NDArray im (dims); \ - \ - if (read_mat5_tag (is, swap, type, len, is_small_data_element)) \ - error ("load: reading matrix data for '%s'", \ - retval.c_str ()); \ - \ - n = im.numel (); \ - read_mat5_binary_data (is, im.fortran_vec (), n, swap, \ - static_cast (type), flt_fmt); \ - \ - if (! 
is) \ - error ("load: reading imaginary matrix data for '%s'", \ - retval.c_str ()); \ - \ - ComplexNDArray ctmp (dims); \ - \ - for (octave_idx_type i = 0; i < n; i++) \ - ctmp(i) = Complex (re(i).double_value (), im(i)); \ - \ - tc = ctmp; \ - } \ - else \ - tc = re; \ + error ("load: reading matrix data for '%s'", \ + retval.c_str ()); \ + \ + n = im.numel (); \ + read_mat5_binary_data (is, im.fortran_vec (), n, swap, \ + static_cast (type), flt_fmt); \ + \ + if (! is) \ + error ("load: reading imaginary matrix data for '%s'", \ + retval.c_str ()); \ + \ + ComplexNDArray ctmp (dims); \ + \ + for (octave_idx_type i = 0; i < n; i++) \ + ctmp(i) = Complex (re(i).double_value (), im(i)); \ + \ + tc = ctmp; \ + } \ + else \ + tc = re; \ } // Read one element tag from stream IS, @@ -1572,15 +1572,15 @@ // Have to use copy here to avoid writing over data accessed via // Matrix::data(). -#define MAT5_DO_WRITE(TYPE, data, count, stream) \ - do \ - { \ - OCTAVE_LOCAL_BUFFER (TYPE, ptr, count); \ - for (octave_idx_type i = 0; i < count; i++) \ - ptr[i] = static_cast (data[i]); \ +#define MAT5_DO_WRITE(TYPE, data, count, stream) \ + do \ + { \ + OCTAVE_LOCAL_BUFFER (TYPE, ptr, count); \ + for (octave_idx_type i = 0; i < count; i++) \ + ptr[i] = static_cast (data[i]); \ std::streamsize n_bytes = sizeof (TYPE) * static_cast (count); \ - stream.write (reinterpret_cast (ptr), n_bytes); \ - } \ + stream.write (reinterpret_cast (ptr), n_bytes); \ + } \ while (0) // write out the numeric values in M to OS, @@ -2097,12 +2097,12 @@ } } -#define INT_LEN(nel, size) \ - { \ - ret += 8; \ - octave_idx_type sz = nel * size; \ - if (sz > 4) \ - ret += PAD (sz); \ +#define INT_LEN(nel, size) \ + { \ + ret += 8; \ + octave_idx_type sz = nel * size; \ + if (sz > 4) \ + ret += PAD (sz); \ } else if (cname == "int8") diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/max.cc --- a/libinterp/corefcn/max.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/max.cc Mon Aug 01 12:40:18 2016 -0400 @@ -321,10 +321,11 @@ retval = do_minmax_red_op (arg, nargout, dim, ismin); break; -#define MAKE_INT_BRANCH(X) \ - case btyp_ ## X: \ - retval = do_minmax_red_op (arg, nargout, dim, ismin); \ - break; +#define MAKE_INT_BRANCH(X) \ + case btyp_ ## X: \ + retval = do_minmax_red_op (arg, nargout, dim, \ + ismin); \ + break; MAKE_INT_BRANCH (int8); MAKE_INT_BRANCH (int16); @@ -397,10 +398,10 @@ retval = do_minmax_bin_op (argx, argy, ismin); break; -#define MAKE_INT_BRANCH(X) \ - case btyp_ ## X: \ - retval = do_minmax_bin_op (argx, argy, ismin); \ - break; +#define MAKE_INT_BRANCH(X) \ + case btyp_ ## X: \ + retval = do_minmax_bin_op (argx, argy, ismin); \ + break; MAKE_INT_BRANCH (int8); MAKE_INT_BRANCH (int16); @@ -950,10 +951,10 @@ ismin); break; -#define MAKE_INT_BRANCH(X) \ - case btyp_ ## X: \ - retval = do_cumminmax_red_op (arg, nargout, dim, \ - ismin); \ +#define MAKE_INT_BRANCH(X) \ + case btyp_ ## X: \ + retval = do_cumminmax_red_op (arg, nargout, dim, \ + ismin); \ break; MAKE_INT_BRANCH (int8); diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/mexproto.h --- a/libinterp/corefcn/mexproto.h Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/mexproto.h Mon Aug 01 12:40:18 2016 -0400 @@ -276,35 +276,35 @@ /* Miscellaneous. */ #if defined (NDEBUG) -# define mxAssert(expr, msg) \ - do \ - { \ - if (! expr) \ - { \ - mexPrintf ("Assertion failed: %s, at line %d of file \"%s\".\n%s\n", \ - #expr, __LINE__, __FILE__, msg); \ - } \ - } \ - while (0) +# define mxAssert(expr, msg) \ + do \ + { \ + if (! 
expr) \ + { \ + mexPrintf ("Assertion failed: %s, at line %d of file \"%s\".\n%s\n", \ + #expr, __LINE__, __FILE__, msg); \ + } \ + } \ + while (0) -# define mxAssertS(expr, msg) \ - do \ - { \ - if (! expr) \ - { \ - mexPrintf ("Assertion failed at line %d of file \"%s\".\n%s\n", \ - __LINE__, __FILE__, msg); \ - abort (); \ - } \ - } \ - while (0) +# define mxAssertS(expr, msg) \ + do \ + { \ + if (! expr) \ + { \ + mexPrintf ("Assertion failed at line %d of file \"%s\".\n%s\n", \ + __LINE__, __FILE__, msg); \ + abort (); \ + } \ + } \ + while (0) #else # define mxAssert(expr, msg) # define mxAssertS(expr, msg) #endif -extern OCTINTERP_API mwIndex mxCalcSingleSubscript (const mxArray *ptr, - mwSize nsubs, mwIndex *subs); +extern OCTINTERP_API mwIndex +mxCalcSingleSubscript (const mxArray *ptr, mwSize nsubs, mwIndex *subs); extern OCTINTERP_API size_t mxGetElementSize (const mxArray *ptr); diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/mxarray.in.h --- a/libinterp/corefcn/mxarray.in.h Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/mxarray.in.h Mon Aug 01 12:40:18 2016 -0400 @@ -99,24 +99,24 @@ class octave_value; -#define DO_MUTABLE_METHOD(RET_T, METHOD_CALL) \ - RET_T retval = rep->METHOD_CALL; \ - \ - if (rep->mutation_needed ()) \ - { \ - maybe_mutate (); \ - retval = rep->METHOD_CALL; \ - } \ - \ +#define DO_MUTABLE_METHOD(RET_T, METHOD_CALL) \ + RET_T retval = rep->METHOD_CALL; \ + \ + if (rep->mutation_needed ()) \ + { \ + maybe_mutate (); \ + retval = rep->METHOD_CALL; \ + } \ + \ return retval -#define DO_VOID_MUTABLE_METHOD(METHOD_CALL) \ - rep->METHOD_CALL; \ - \ - if (rep->mutation_needed ()) \ - { \ - maybe_mutate (); \ - rep->METHOD_CALL; \ +#define DO_VOID_MUTABLE_METHOD(METHOD_CALL) \ + rep->METHOD_CALL; \ + \ + if (rep->mutation_needed ()) \ + { \ + maybe_mutate (); \ + rep->METHOD_CALL; \ } class mxArray; diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/oct-stream.cc --- a/libinterp/corefcn/oct-stream.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/oct-stream.cc Mon Aug 01 12:40:18 2016 -0400 @@ -4344,222 +4344,223 @@ Matrix&, double*, octave_idx_type&, octave_idx_type&, octave_idx_type, octave_idx_type, bool); -#define DO_WHITESPACE_CONVERSION() \ - do \ - { \ - int c = std::istream::traits_type::eof (); \ - \ +#define DO_WHITESPACE_CONVERSION() \ + do \ + { \ + int c = std::istream::traits_type::eof (); \ + \ while (is && (c = is.get ()) != std::istream::traits_type::eof () \ - && isspace (c)) \ - { /* skip whitespace */ } \ - \ - if (c != std::istream::traits_type::eof ()) \ - is.putback (c); \ - } \ + && isspace (c)) \ + { /* skip whitespace */ } \ + \ + if (c != std::istream::traits_type::eof ()) \ + is.putback (c); \ + } \ while (0) -#define DO_LITERAL_CONVERSION() \ - do \ - { \ - int c = std::istream::traits_type::eof (); \ - \ - int n = strlen (fmt); \ - int i = 0; \ - \ - while (i < n && is && (c = is.get ()) != std::istream::traits_type::eof ()) \ - { \ - if (c == static_cast (fmt[i])) \ - { \ - i++; \ - continue; \ - } \ - else \ - { \ - is.putback (c); \ - break; \ - } \ - } \ - \ - if (i != n) \ - is.setstate (std::ios::failbit); \ - } \ +#define DO_LITERAL_CONVERSION() \ + do \ + { \ + int c = std::istream::traits_type::eof (); \ + \ + int n = strlen (fmt); \ + int i = 0; \ + \ + while (i < n && is && (c = is.get ()) != std::istream::traits_type::eof ()) \ + { \ + if (c == static_cast (fmt[i])) \ + { \ + i++; \ + continue; \ + } \ + else \ + { \ + is.putback (c); \ + break; \ + } \ + } \ + \ + if (i != n) \ + is.setstate 
(std::ios::failbit); \ + } \ while (0) -#define DO_PCT_CONVERSION() \ - do \ - { \ - int c = is.get (); \ - \ - if (c != std::istream::traits_type::eof ()) \ - { \ - if (c != '%') \ - { \ - is.putback (c); \ - is.setstate (std::ios::failbit); \ - } \ - } \ - else \ - is.setstate (std::ios::failbit); \ - } \ +#define DO_PCT_CONVERSION() \ + do \ + { \ + int c = is.get (); \ + \ + if (c != std::istream::traits_type::eof ()) \ + { \ + if (c != '%') \ + { \ + is.putback (c); \ + is.setstate (std::ios::failbit); \ + } \ + } \ + else \ + is.setstate (std::ios::failbit); \ + } \ while (0) -#define BEGIN_C_CONVERSION() \ - is.unsetf (std::ios::skipws); \ - \ - int width = elt->width ? elt->width : 1; \ - \ - std::string tmp (width, '\0'); \ - \ - int c = std::istream::traits_type::eof (); \ - int n = 0; \ - \ - while (is && n < width && (c = is.get ()) != std::istream::traits_type::eof ()) \ - tmp[n++] = static_cast (c); \ - \ - if (n > 0 && c == std::istream::traits_type::eof ()) \ - is.clear (); \ - \ +#define BEGIN_C_CONVERSION() \ + is.unsetf (std::ios::skipws); \ + \ + int width = elt->width ? elt->width : 1; \ + \ + std::string tmp (width, '\0'); \ + \ + int c = std::istream::traits_type::eof (); \ + int n = 0; \ + \ + while (is && n < width \ + && (c = is.get ()) != std::istream::traits_type::eof ()) \ + tmp[n++] = static_cast (c); \ + \ + if (n > 0 && c == std::istream::traits_type::eof ()) \ + is.clear (); \ + \ tmp.resize (n) // For a '%s' format, skip initial whitespace and then read until the // next whitespace character or until WIDTH characters have been read. -#define BEGIN_S_CONVERSION() \ - int width = elt->width; \ - \ - std::string tmp; \ - \ - do \ - { \ - if (width) \ - { \ - tmp = std::string (width, '\0'); \ - \ - int c = std::istream::traits_type::eof (); \ - \ - int n = 0; \ - \ +#define BEGIN_S_CONVERSION() \ + int width = elt->width; \ + \ + std::string tmp; \ + \ + do \ + { \ + if (width) \ + { \ + tmp = std::string (width, '\0'); \ + \ + int c = std::istream::traits_type::eof (); \ + \ + int n = 0; \ + \ while (is && (c = is.get ()) != std::istream::traits_type::eof ()) \ - { \ - if (! isspace (c)) \ - { \ - tmp[n++] = static_cast (c); \ - break; \ - } \ - } \ - \ - while (is && n < width \ + { \ + if (! isspace (c)) \ + { \ + tmp[n++] = static_cast (c); \ + break; \ + } \ + } \ + \ + while (is && n < width \ && (c = is.get ()) != std::istream::traits_type::eof ()) \ - { \ - if (isspace (c)) \ - { \ - is.putback (c); \ - break; \ - } \ - else \ - tmp[n++] = static_cast (c); \ - } \ - \ - if (n > 0 && c == std::istream::traits_type::eof ()) \ - is.clear (); \ - \ - tmp.resize (n); \ - } \ - else \ - { \ - is >> std::ws >> tmp; \ - } \ - } \ + { \ + if (isspace (c)) \ + { \ + is.putback (c); \ + break; \ + } \ + else \ + tmp[n++] = static_cast (c); \ + } \ + \ + if (n > 0 && c == std::istream::traits_type::eof ()) \ + is.clear (); \ + \ + tmp.resize (n); \ + } \ + else \ + { \ + is >> std::ws >> tmp; \ + } \ + } \ while (0) // This format must match a nonempty sequence of characters. -#define BEGIN_CHAR_CLASS_CONVERSION() \ - int width = elt->width; \ - \ - std::string tmp; \ - \ - do \ - { \ - if (! 
width) \ - width = std::numeric_limits::max (); \ - \ - std::ostringstream buf; \ - \ - std::string char_class = elt->char_class; \ - \ - int c = std::istream::traits_type::eof (); \ - \ - if (elt->type == '[') \ - { \ - int chars_read = 0; \ - while (is && chars_read++ < width \ - && (c = is.get ()) != std::istream::traits_type::eof () \ - && char_class.find (c) != std::string::npos) \ - buf << static_cast (c); \ - } \ - else \ - { \ - int chars_read = 0; \ - while (is && chars_read++ < width \ - && (c = is.get ()) != std::istream::traits_type::eof () \ - && char_class.find (c) == std::string::npos) \ - buf << static_cast (c); \ - } \ - \ - if (width == std::numeric_limits::max () \ - && c != std::istream::traits_type::eof ()) \ - is.putback (c); \ - \ - tmp = buf.str (); \ - \ - if (tmp.empty ()) \ - is.setstate (std::ios::failbit); \ - else if (c == std::istream::traits_type::eof ()) \ - is.clear (); \ - \ - } \ +#define BEGIN_CHAR_CLASS_CONVERSION() \ + int width = elt->width; \ + \ + std::string tmp; \ + \ + do \ + { \ + if (! width) \ + width = std::numeric_limits::max (); \ + \ + std::ostringstream buf; \ + \ + std::string char_class = elt->char_class; \ + \ + int c = std::istream::traits_type::eof (); \ + \ + if (elt->type == '[') \ + { \ + int chars_read = 0; \ + while (is && chars_read++ < width \ + && (c = is.get ()) != std::istream::traits_type::eof () \ + && char_class.find (c) != std::string::npos) \ + buf << static_cast (c); \ + } \ + else \ + { \ + int chars_read = 0; \ + while (is && chars_read++ < width \ + && (c = is.get ()) != std::istream::traits_type::eof () \ + && char_class.find (c) == std::string::npos) \ + buf << static_cast (c); \ + } \ + \ + if (width == std::numeric_limits::max () \ + && c != std::istream::traits_type::eof ()) \ + is.putback (c); \ + \ + tmp = buf.str (); \ + \ + if (tmp.empty ()) \ + is.setstate (std::ios::failbit); \ + else if (c == std::istream::traits_type::eof ()) \ + is.clear (); \ + \ + } \ while (0) -#define FINISH_CHARACTER_CONVERSION() \ - do \ - { \ - width = tmp.length (); \ - \ - if (is) \ - { \ - int i = 0; \ - \ - if (! discard) \ - { \ - conversion_count++; \ - \ - while (i < width) \ - { \ - if (data_index == max_size) \ - { \ - max_size *= 2; \ - \ - if (all_char_conv) \ - { \ - if (one_elt_size_spec) \ - mval.resize (1, max_size, 0.0); \ - else if (nr > 0) \ - mval.resize (nr, max_size / nr, 0.0); \ - else \ - panic_impossible (); \ - } \ - else if (nr > 0) \ - mval.resize (nr, max_size / nr, 0.0); \ - else \ - mval.resize (max_size, 1, 0.0); \ - \ - data = mval.fortran_vec (); \ - } \ - \ - data[data_index++] = tmp[i++]; \ - } \ - } \ - } \ - } \ +#define FINISH_CHARACTER_CONVERSION() \ + do \ + { \ + width = tmp.length (); \ + \ + if (is) \ + { \ + int i = 0; \ + \ + if (! 
discard) \ + { \ + conversion_count++; \ + \ + while (i < width) \ + { \ + if (data_index == max_size) \ + { \ + max_size *= 2; \ + \ + if (all_char_conv) \ + { \ + if (one_elt_size_spec) \ + mval.resize (1, max_size, 0.0); \ + else if (nr > 0) \ + mval.resize (nr, max_size / nr, 0.0); \ + else \ + panic_impossible (); \ + } \ + else if (nr > 0) \ + mval.resize (nr, max_size / nr, 0.0); \ + else \ + mval.resize (max_size, 1, 0.0); \ + \ + data = mval.fortran_vec (); \ + } \ + \ + data[data_index++] = tmp[i++]; \ + } \ + } \ + } \ + } \ while (0) octave_value @@ -6321,23 +6322,23 @@ bool swap, bool do_float_fmt_conv, bool do_NA_conv, octave::mach_info::float_format from_flt_fmt); -#define TABLE_ELT(T, U, V, W) \ +#define TABLE_ELT(T, U, V, W) \ conv_fptr_table[oct_data_conv::T][oct_data_conv::U] = convert_and_copy -#define FILL_TABLE_ROW(T, V) \ - TABLE_ELT (T, dt_int8, V, int8NDArray); \ - TABLE_ELT (T, dt_uint8, V, uint8NDArray); \ - TABLE_ELT (T, dt_int16, V, int16NDArray); \ - TABLE_ELT (T, dt_uint16, V, uint16NDArray); \ - TABLE_ELT (T, dt_int32, V, int32NDArray); \ - TABLE_ELT (T, dt_uint32, V, uint32NDArray); \ - TABLE_ELT (T, dt_int64, V, int64NDArray); \ - TABLE_ELT (T, dt_uint64, V, uint64NDArray); \ - TABLE_ELT (T, dt_single, V, FloatNDArray); \ - TABLE_ELT (T, dt_double, V, NDArray); \ - TABLE_ELT (T, dt_char, V, charNDArray); \ - TABLE_ELT (T, dt_schar, V, charNDArray); \ - TABLE_ELT (T, dt_uchar, V, charNDArray); \ +#define FILL_TABLE_ROW(T, V) \ + TABLE_ELT (T, dt_int8, V, int8NDArray); \ + TABLE_ELT (T, dt_uint8, V, uint8NDArray); \ + TABLE_ELT (T, dt_int16, V, int16NDArray); \ + TABLE_ELT (T, dt_uint16, V, uint16NDArray); \ + TABLE_ELT (T, dt_int32, V, int32NDArray); \ + TABLE_ELT (T, dt_uint32, V, uint32NDArray); \ + TABLE_ELT (T, dt_int64, V, int64NDArray); \ + TABLE_ELT (T, dt_uint64, V, uint64NDArray); \ + TABLE_ELT (T, dt_single, V, FloatNDArray); \ + TABLE_ELT (T, dt_double, V, NDArray); \ + TABLE_ELT (T, dt_char, V, charNDArray); \ + TABLE_ELT (T, dt_schar, V, charNDArray); \ + TABLE_ELT (T, dt_uchar, V, charNDArray); \ TABLE_ELT (T, dt_logical, V, boolNDArray); octave_value @@ -6930,12 +6931,12 @@ return nel; } -#define INSTANTIATE_WRITE(T) \ - template \ - octave_idx_type \ +#define INSTANTIATE_WRITE(T) \ + template \ + octave_idx_type \ octave_stream::write (const Array& data, octave_idx_type block_size, \ - oct_data_conv::data_type output_type, \ - octave_idx_type skip, \ + oct_data_conv::data_type output_type, \ + octave_idx_type skip, \ octave::mach_info::float_format flt_fmt) INSTANTIATE_WRITE (octave_int8); diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/ordschur.cc --- a/libinterp/corefcn/ordschur.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/ordschur.cc Mon Aug 01 12:40:18 2016 -0400 @@ -129,20 +129,20 @@ const bool complex_type = args(0).is_complex_type () || args(1).is_complex_type (); -#define PREPARE_ARGS(TYPE, TYPE_M, TYPE_COND) \ - TYPE ## Matrix U = args(0).x ## TYPE_M ## _value ("ordschur: U and S must be real or complex floating point matrices"); \ - TYPE ## Matrix S = args(1).x ## TYPE_M ## _value ("ordschur: U and S must be real or complex floating point matrices"); \ - TYPE ## Matrix w (dim_vector (n, 1)); \ - TYPE ## Matrix work (dim_vector (n, 1)); \ - octave_idx_type m; \ - octave_idx_type info; \ - TYPE_COND cond1, cond2; +#define PREPARE_ARGS(TYPE, TYPE_M, TYPE_COND) \ + TYPE ## Matrix U = args(0).x ## TYPE_M ## _value ("ordschur: U and S must be real or complex floating point matrices"); \ + TYPE ## Matrix S = args(1).x 
## TYPE_M ## _value ("ordschur: U and S must be real or complex floating point matrices"); \ + TYPE ## Matrix w (dim_vector (n, 1)); \ + TYPE ## Matrix work (dim_vector (n, 1)); \ + octave_idx_type m; \ + octave_idx_type info; \ + TYPE_COND cond1, cond2; -#define PREPARE_OUTPUT()\ - if (info != 0) \ - error ("ordschur: trsen failed"); \ - \ - retval = ovl (U, S); +#define PREPARE_OUTPUT() \ + if (info != 0) \ + error ("ordschur: trsen failed"); \ + \ + retval = ovl (U, S); if (double_type) { diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/pr-output.cc --- a/libinterp/corefcn/pr-output.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/pr-output.cc Mon Aug 01 12:40:18 2016 -0400 @@ -1417,40 +1417,40 @@ unsigned char i[sizeof (double)]; }; -#define PRINT_CHAR_BITS(os, c) \ - do \ - { \ - unsigned char ctmp = c; \ - char stmp[9]; \ - stmp[0] = (ctmp & 0x80) ? '1' : '0'; \ - stmp[1] = (ctmp & 0x40) ? '1' : '0'; \ - stmp[2] = (ctmp & 0x20) ? '1' : '0'; \ - stmp[3] = (ctmp & 0x10) ? '1' : '0'; \ - stmp[4] = (ctmp & 0x08) ? '1' : '0'; \ - stmp[5] = (ctmp & 0x04) ? '1' : '0'; \ - stmp[6] = (ctmp & 0x02) ? '1' : '0'; \ - stmp[7] = (ctmp & 0x01) ? '1' : '0'; \ - stmp[8] = '\0'; \ - os << stmp; \ - } \ +#define PRINT_CHAR_BITS(os, c) \ + do \ + { \ + unsigned char ctmp = c; \ + char stmp[9]; \ + stmp[0] = (ctmp & 0x80) ? '1' : '0'; \ + stmp[1] = (ctmp & 0x40) ? '1' : '0'; \ + stmp[2] = (ctmp & 0x20) ? '1' : '0'; \ + stmp[3] = (ctmp & 0x10) ? '1' : '0'; \ + stmp[4] = (ctmp & 0x08) ? '1' : '0'; \ + stmp[5] = (ctmp & 0x04) ? '1' : '0'; \ + stmp[6] = (ctmp & 0x02) ? '1' : '0'; \ + stmp[7] = (ctmp & 0x01) ? '1' : '0'; \ + stmp[8] = '\0'; \ + os << stmp; \ + } \ while (0) -#define PRINT_CHAR_BITS_SWAPPED(os, c) \ - do \ - { \ - unsigned char ctmp = c; \ - char stmp[9]; \ - stmp[0] = (ctmp & 0x01) ? '1' : '0'; \ - stmp[1] = (ctmp & 0x02) ? '1' : '0'; \ - stmp[2] = (ctmp & 0x04) ? '1' : '0'; \ - stmp[3] = (ctmp & 0x08) ? '1' : '0'; \ - stmp[4] = (ctmp & 0x10) ? '1' : '0'; \ - stmp[5] = (ctmp & 0x20) ? '1' : '0'; \ - stmp[6] = (ctmp & 0x40) ? '1' : '0'; \ - stmp[7] = (ctmp & 0x80) ? '1' : '0'; \ - stmp[8] = '\0'; \ - os << stmp; \ - } \ +#define PRINT_CHAR_BITS_SWAPPED(os, c) \ + do \ + { \ + unsigned char ctmp = c; \ + char stmp[9]; \ + stmp[0] = (ctmp & 0x01) ? '1' : '0'; \ + stmp[1] = (ctmp & 0x02) ? '1' : '0'; \ + stmp[2] = (ctmp & 0x04) ? '1' : '0'; \ + stmp[3] = (ctmp & 0x08) ? '1' : '0'; \ + stmp[4] = (ctmp & 0x10) ? '1' : '0'; \ + stmp[5] = (ctmp & 0x20) ? '1' : '0'; \ + stmp[6] = (ctmp & 0x40) ? '1' : '0'; \ + stmp[7] = (ctmp & 0x80) ? '1' : '0'; \ + stmp[8] = '\0'; \ + os << stmp; \ + } \ while (0) static void @@ -2978,13 +2978,13 @@ typedef T print_conv_type; }; -#define PRINT_CONV(T1, T2) \ - template <> \ - class \ - octave_print_conv \ - { \ - public: \ - typedef T2 print_conv_type; \ +#define PRINT_CONV(T1, T2) \ + template <> \ + class \ + octave_print_conv \ + { \ + public: \ + typedef T2 print_conv_type; \ } PRINT_CONV (octave_int8, octave_int16); @@ -3068,7 +3068,7 @@ return x < 0 ? 
-x : x; } -#define INSTANTIATE_ABS(T) \ +#define INSTANTIATE_ABS(T) \ template /* static */ T abs (T) INSTANTIATE_ABS(signed char); @@ -3077,12 +3077,12 @@ INSTANTIATE_ABS(long); INSTANTIATE_ABS(long long); -#define SPECIALIZE_UABS(T) \ - template <> \ - /* static */ inline unsigned T \ - abs (unsigned T x) \ - { \ - return x; \ +#define SPECIALIZE_UABS(T) \ + template <> \ + /* static */ inline unsigned T \ + abs (unsigned T x) \ + { \ + return x; \ } SPECIALIZE_UABS(char) @@ -3133,11 +3133,11 @@ } } -#define PRINT_INT_SCALAR_INTERNAL(TYPE) \ - OCTINTERP_API void \ +#define PRINT_INT_SCALAR_INTERNAL(TYPE) \ + OCTINTERP_API void \ octave_print_internal (std::ostream& os, const octave_int& val, bool dummy) \ - { \ - octave_print_internal_template (os, val, dummy); \ + { \ + octave_print_internal_template (os, val, dummy); \ } PRINT_INT_SCALAR_INTERNAL (int8_t) @@ -3378,11 +3378,11 @@ } } -#define PRINT_INT_ARRAY_INTERNAL(TYPE) \ - OCTINTERP_API void \ +#define PRINT_INT_ARRAY_INTERNAL(TYPE) \ + OCTINTERP_API void \ octave_print_internal (std::ostream& os, const intNDArray& nda, \ - bool pr_as_read_syntax, int extra_indent) \ - { \ + bool pr_as_read_syntax, int extra_indent) \ + { \ octave_print_internal_template (os, nda, pr_as_read_syntax, extra_indent); \ } diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/profiler.h --- a/libinterp/corefcn/profiler.h Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/profiler.h Mon Aug 01 12:40:18 2016 -0400 @@ -208,11 +208,11 @@ // Helper macro to profile a block of code. -#define BEGIN_PROFILER_BLOCK(classname) \ - { \ +#define BEGIN_PROFILER_BLOCK(classname) \ + { \ profile_data_accumulator::enter pe (profiler, *this); -#define END_PROFILER_BLOCK \ - } // end of block => call pe's destructor +#define END_PROFILER_BLOCK \ + } // end of block => call pe's destructor #endif diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/psi.cc --- a/libinterp/corefcn/psi.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/psi.cc Mon Aug 01 12:40:18 2016 -0400 @@ -81,19 +81,19 @@ if (k == 0) { -#define FLOAT_BRANCH(T, A, M, E) \ - if (oct_z.is_ ## T ##_type ()) \ - { \ - const A ## NDArray z = oct_z.M ## array_value (); \ - A ## NDArray psi_z (z.dims ()); \ - \ - const E* zv = z.data (); \ - E* psi_zv = psi_z.fortran_vec (); \ - const octave_idx_type n = z.numel (); \ - for (octave_idx_type i = 0; i < n; i++) \ - *psi_zv++ = octave::math::psi (*zv++); \ - \ - retval = psi_z; \ +#define FLOAT_BRANCH(T, A, M, E) \ + if (oct_z.is_ ## T ##_type ()) \ + { \ + const A ## NDArray z = oct_z.M ## array_value (); \ + A ## NDArray psi_z (z.dims ()); \ + \ + const E* zv = z.data (); \ + E* psi_zv = psi_z.fortran_vec (); \ + const octave_idx_type n = z.numel (); \ + for (octave_idx_type i = 0; i < n; i++) \ + *psi_zv++ = octave::math::psi (*zv++); \ + \ + retval = psi_z; \ } if (oct_z.is_complex_type ()) @@ -118,23 +118,23 @@ if (! 
oct_z.is_real_type ()) error ("psi: Z must be real value for polygamma (K > 0)"); -#define FLOAT_BRANCH(T, A, M, E) \ - if (oct_z.is_ ## T ##_type ()) \ - { \ - const A ## NDArray z = oct_z.M ## array_value (); \ - A ## NDArray psi_z (z.dims ()); \ - \ - const E* zv = z.data (); \ - E* psi_zv = psi_z.fortran_vec (); \ - const octave_idx_type n = z.numel (); \ - for (octave_idx_type i = 0; i < n; i++) \ - { \ - if (*zv < 0) \ +#define FLOAT_BRANCH(T, A, M, E) \ + if (oct_z.is_ ## T ##_type ()) \ + { \ + const A ## NDArray z = oct_z.M ## array_value (); \ + A ## NDArray psi_z (z.dims ()); \ + \ + const E* zv = z.data (); \ + E* psi_zv = psi_z.fortran_vec (); \ + const octave_idx_type n = z.numel (); \ + for (octave_idx_type i = 0; i < n; i++) \ + { \ + if (*zv < 0) \ error ("psi: Z must be non-negative for polygamma (K > 0)"); \ - \ - *psi_zv++ = octave::math::psi (k, *zv++); \ - } \ - retval = psi_z; \ + \ + *psi_zv++ = octave::math::psi (k, *zv++); \ + } \ + retval = psi_z; \ } FLOAT_BRANCH(double, , , double) diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/regexp.cc --- a/libinterp/corefcn/regexp.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/regexp.cc Mon Aug 01 12:40:18 2016 -0400 @@ -102,8 +102,7 @@ { bad_esc_seq = true; tmpi = 0; - warning ("malformed octal escape sequence '\\o' --\ - converting to '\\0'"); + warning ("malformed octal escape sequence '\\o' -- converting to '\\0'"); } retval[i] = tmpi; j = k - 1; @@ -198,8 +197,7 @@ } if (bad_esc_seq || (brace && s[k++] != '}')) { - warning ("malformed octal escape sequence '\\o' --\ - converting to '\\0'"); + warning ("malformed octal escape sequence '\\o' -- converting to '\\0'"); tmpi = 0; } retval[i] = tmpi; @@ -236,8 +234,7 @@ } if (bad_esc_seq || (brace && s[k++] != '}')) { - warning ("malformed hex escape sequence '\\x' --\ - converting to '\\0'"); + warning ("malformed hex escape sequence '\\x' -- converting to '\\0'"); tmpi = 0; } retval[i] = tmpi; diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/sparse-xdiv.cc --- a/libinterp/corefcn/sparse-xdiv.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/sparse-xdiv.cc Mon Aug 01 12:40:18 2016 -0400 @@ -65,7 +65,7 @@ return true; } -#define INSTANTIATE_MX_LEFTDIV_CONFORM(T1, T2) \ +#define INSTANTIATE_MX_LEFTDIV_CONFORM(T1, T2) \ template bool mx_leftdiv_conform (const T1&, const T2&) INSTANTIATE_MX_LEFTDIV_CONFORM (SparseMatrix, SparseMatrix); @@ -99,7 +99,7 @@ return true; } -#define INSTANTIATE_MX_DIV_CONFORM(T1, T2) \ +#define INSTANTIATE_MX_DIV_CONFORM(T1, T2) \ template bool mx_div_conform (const T1&, const T2&) INSTANTIATE_MX_DIV_CONFORM (SparseMatrix, SparseMatrix); diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/sparse-xpow.cc --- a/libinterp/corefcn/sparse-xpow.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/sparse-xpow.cc Mon Aug 01 12:40:18 2016 -0400 @@ -96,8 +96,7 @@ atmp = a.inverse (mattyp, info, rcond, 1); if (info == -1) - warning ("inverse: matrix singular to machine\ - precision, rcond = %g", rcond); + warning ("inverse: matrix singular to machine precision, rcond = %g", rcond); } else atmp = a; @@ -165,8 +164,7 @@ atmp = a.inverse (mattyp, info, rcond, 1); if (info == -1) - warning ("inverse: matrix singular to machine\ - precision, rcond = %g", rcond); + warning ("inverse: matrix singular to machine precision, rcond = %g", rcond); } else atmp = a; diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/tril.cc --- a/libinterp/corefcn/tril.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/tril.cc Mon Aug 01 
12:40:18 2016 -0400 @@ -237,8 +237,8 @@ retval = do_trilu (arg.bool_array_value (), k, lower, pack); break; -#define ARRAYCASE(TYP) \ - case btyp_ ## TYP: \ +#define ARRAYCASE(TYP) \ + case btyp_ ## TYP: \ retval = do_trilu (arg.TYP ## _array_value (), k, lower, pack); \ break diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/txt-eng.h --- a/libinterp/corefcn/txt-eng.h Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/txt-eng.h Mon Aug 01 12:40:18 2016 -0400 @@ -352,10 +352,12 @@ virtual ~text_processor (void) { } }; -#define TEXT_ELEMENT_ACCEPT(cls) \ -inline void \ -cls::accept (text_processor& p) \ -{ p.visit (*this); } +#define TEXT_ELEMENT_ACCEPT(cls) \ + inline void \ + cls::accept (text_processor& p) \ + { \ + p.visit (*this); \ + } TEXT_ELEMENT_ACCEPT(text_element_string) TEXT_ELEMENT_ACCEPT(text_element_symbol) diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/utils.cc --- a/libinterp/corefcn/utils.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/utils.cc Mon Aug 01 12:40:18 2016 -0400 @@ -685,8 +685,7 @@ } if (k == j+1) - warning ("malformed hex escape sequence '\\x' --\ - converting to '\\0'"); + warning ("malformed hex escape sequence '\\x' -- converting to '\\0'"); retval[i] = tmpi; j = k - 1; @@ -694,8 +693,7 @@ } default: - warning ("unrecognized escape sequence '\\%c' --\ - converting to '%c'", s[j], s[j]); + warning ("unrecognized escape sequence '\\%c' -- converting to '%c'", s[j], s[j]); retval[i] = s[j]; break; } diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/variables.cc --- a/libinterp/corefcn/variables.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/variables.cc Mon Aug 01 12:40:18 2016 -0400 @@ -483,7 +483,7 @@ return 0; } -#define GET_IDX(LEN) \ +#define GET_IDX(LEN) \ static_cast ((LEN-1) * static_cast (rand ()) / RAND_MAX) std::string @@ -2316,12 +2316,12 @@ } } -#define CLEAR_OPTION_ERROR(cond) \ - do \ - { \ - if (cond) \ - print_usage (); \ - } \ +#define CLEAR_OPTION_ERROR(cond) \ + do \ + { \ + if (cond) \ + print_usage (); \ + } \ while (0) DEFUN (clear, args, , diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/variables.h --- a/libinterp/corefcn/variables.h Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/variables.h Mon Aug 01 12:40:18 2016 -0400 @@ -123,17 +123,17 @@ set_internal_variable (int& var, const octave_value_list& args, int nargout, const char *nm, const char **choices); -#define SET_INTERNAL_VARIABLE(NM) \ +#define SET_INTERNAL_VARIABLE(NM) \ set_internal_variable (V ## NM, args, nargout, #NM) -#define SET_NONEMPTY_INTERNAL_STRING_VARIABLE(NM) \ +#define SET_NONEMPTY_INTERNAL_STRING_VARIABLE(NM) \ set_internal_variable (V ## NM, args, nargout, #NM, false) -#define SET_INTERNAL_VARIABLE_WITH_LIMITS(NM, MINVAL, MAXVAL) \ +#define SET_INTERNAL_VARIABLE_WITH_LIMITS(NM, MINVAL, MAXVAL) \ set_internal_variable (V ## NM, args, nargout, #NM, MINVAL, MAXVAL) // in the following, CHOICES must be a C string array terminated by null. 
-#define SET_INTERNAL_VARIABLE_CHOICES(NM, CHOICES) \ +#define SET_INTERNAL_VARIABLE_CHOICES(NM, CHOICES) \ set_internal_variable (V ## NM, args, nargout, #NM, CHOICES) extern OCTINTERP_API std::string builtin_string_variable (const std::string&); diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/xdiv.cc --- a/libinterp/corefcn/xdiv.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/xdiv.cc Mon Aug 01 12:40:18 2016 -0400 @@ -81,7 +81,7 @@ return true; } -#define INSTANTIATE_MX_LEFTDIV_CONFORM(T1, T2) \ +#define INSTANTIATE_MX_LEFTDIV_CONFORM(T1, T2) \ template bool mx_leftdiv_conform (const T1&, const T2&, blas_trans_type) INSTANTIATE_MX_LEFTDIV_CONFORM (Matrix, Matrix); @@ -107,7 +107,7 @@ return true; } -#define INSTANTIATE_MX_DIV_CONFORM(T1, T2) \ +#define INSTANTIATE_MX_DIV_CONFORM(T1, T2) \ template bool mx_div_conform (const T1&, const T2&) INSTANTIATE_MX_DIV_CONFORM (Matrix, Matrix); diff -r dd992fd74fce -r e43d83253e28 libinterp/corefcn/xpow.cc --- a/libinterp/corefcn/xpow.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/corefcn/xpow.cc Mon Aug 01 12:40:18 2016 -0400 @@ -232,8 +232,7 @@ atmp = a.inverse (mattype, info, rcond, 1); if (info == -1) - warning ("inverse: matrix singular to machine\ -precision, rcond = %g", rcond); + warning ("inverse: matrix singular to machine precision, rcond = %g", rcond); } else atmp = a; @@ -493,8 +492,7 @@ atmp = a.inverse (mattype, info, rcond, 1); if (info == -1) - warning ("inverse: matrix singular to machine\ -precision, rcond = %g", rcond); + warning ("inverse: matrix singular to machine precision, rcond = %g", rcond); } else atmp = a; @@ -1639,8 +1637,7 @@ atmp = a.inverse (mattype, info, rcond, 1); if (info == -1) - warning ("inverse: matrix singular to machine\ -precision, rcond = %g", rcond); + warning ("inverse: matrix singular to machine precision, rcond = %g", rcond); } else atmp = a; @@ -1890,8 +1887,7 @@ atmp = a.inverse (mattype, info, rcond, 1); if (info == -1) - warning ("inverse: matrix singular to machine\ -precision, rcond = %g", rcond); + warning ("inverse: matrix singular to machine precision, rcond = %g", rcond); } else atmp = a; diff -r dd992fd74fce -r e43d83253e28 libinterp/dldfcn/__glpk__.cc --- a/libinterp/dldfcn/__glpk__.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/dldfcn/__glpk__.cc Mon Aug 01 12:40:18 2016 -0400 @@ -296,34 +296,34 @@ #endif -#define OCTAVE_GLPK_GET_REAL_PARAM(NAME, VAL) \ - do \ - { \ - octave_value tmp = PARAM.getfield (NAME); \ - \ - if (tmp.is_defined ()) \ - { \ - if (! tmp.is_empty ()) \ +#define OCTAVE_GLPK_GET_REAL_PARAM(NAME, VAL) \ + do \ + { \ + octave_value tmp = PARAM.getfield (NAME); \ + \ + if (tmp.is_defined ()) \ + { \ + if (! tmp.is_empty ()) \ VAL = tmp.xscalar_value ("glpk: invalid value in PARAM" NAME); \ - else \ - error ("glpk: invalid value in PARAM" NAME); \ - } \ - } \ + else \ + error ("glpk: invalid value in PARAM" NAME); \ + } \ + } \ while (0) -#define OCTAVE_GLPK_GET_INT_PARAM(NAME, VAL) \ - do \ - { \ - octave_value tmp = PARAM.getfield (NAME); \ - \ - if (tmp.is_defined ()) \ - { \ - if (! tmp.is_empty ()) \ +#define OCTAVE_GLPK_GET_INT_PARAM(NAME, VAL) \ + do \ + { \ + octave_value tmp = PARAM.getfield (NAME); \ + \ + if (tmp.is_defined ()) \ + { \ + if (! 
tmp.is_empty ()) \ VAL = tmp.xint_value ("glpk: invalid value in PARAM" NAME); \ - else \ - error ("glpk: invalid value in PARAM" NAME); \ - } \ - } \ + else \ + error ("glpk: invalid value in PARAM" NAME); \ + } \ + } \ while (0) DEFUN_DLD (__glpk__, args, , diff -r dd992fd74fce -r e43d83253e28 libinterp/octave-value/ov-base.cc --- a/libinterp/octave-value/ov-base.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/octave-value/ov-base.cc Mon Aug 01 12:40:18 2016 -0400 @@ -435,33 +435,33 @@ os << "no info for type: " << type_name () << "\n"; } -#define INT_CONV_METHOD(T, F) \ - T \ +#define INT_CONV_METHOD(T, F) \ + T \ octave_base_value::F ## _value (bool require_int, bool frc_str_conv) const \ - { \ - T retval = 0; \ - \ - double d = 0.0; \ - \ - try \ - { \ - d = double_value (frc_str_conv); \ - } \ - catch (octave_execution_exception& e) \ - { \ + { \ + T retval = 0; \ + \ + double d = 0.0; \ + \ + try \ + { \ + d = double_value (frc_str_conv); \ + } \ + catch (octave_execution_exception& e) \ + { \ err_wrong_type_arg (e, "octave_base_value::" #F "_value ()", type_name ()); \ - } \ - \ - if (require_int && octave::math::x_nint (d) != d) \ - error_with_cfn ("conversion of %g to " #T " value failed", d); \ - else if (d < std::numeric_limits::min ()) \ - retval = std::numeric_limits::min (); \ - else if (d > std::numeric_limits::max ()) \ - retval = std::numeric_limits::max (); \ - else \ - retval = static_cast (octave::math::fix (d)); \ - \ - return retval; \ + } \ + \ + if (require_int && octave::math::x_nint (d) != d) \ + error_with_cfn ("conversion of %g to " #T " value failed", d); \ + else if (d < std::numeric_limits::min ()) \ + retval = std::numeric_limits::min (); \ + else if (d > std::numeric_limits::max ()) \ + retval = std::numeric_limits::max (); \ + else \ + retval = static_cast (octave::math::fix (d)); \ + \ + return retval; \ } INT_CONV_METHOD (short int, short) diff -r dd992fd74fce -r e43d83253e28 libinterp/octave-value/ov-base.h --- a/libinterp/octave-value/ov-base.h Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/octave-value/ov-base.h Mon Aug 01 12:40:18 2016 -0400 @@ -118,10 +118,12 @@ static const builtin_type_t btyp = btyp_unknown; }; -#define DEF_CLASS_TO_BTYP(CLASS,BTYP) \ -template <> \ -struct class_to_btyp \ -{ static const builtin_type_t btyp = BTYP; } +#define DEF_CLASS_TO_BTYP(CLASS,BTYP) \ + template <> \ + struct class_to_btyp \ + { \ + static const builtin_type_t btyp = BTYP; \ + } DEF_CLASS_TO_BTYP (double, btyp_double); DEF_CLASS_TO_BTYP (float, btyp_float); @@ -143,37 +145,37 @@ #define OCTAVE_EMPTY_CPP_ARG /* empty */ -#define DECLARE_OV_TYPEID_FUNCTIONS_AND_DATA \ +#define DECLARE_OV_TYPEID_FUNCTIONS_AND_DATA \ DECLARE_OV_TYPEID_FUNCTIONS_AND_DATA2 (OCTAVE_EMPTY_CPP_ARG) -#define DECLARE_OV_BASE_TYPEID_FUNCTIONS_AND_DATA \ +#define DECLARE_OV_BASE_TYPEID_FUNCTIONS_AND_DATA \ DECLARE_OV_TYPEID_FUNCTIONS_AND_DATA2(virtual) -#define DECLARE_OV_TYPEID_FUNCTIONS_AND_DATA2(VIRTUAL) \ - public: \ - VIRTUAL int type_id (void) const { return t_id; } \ - VIRTUAL std::string type_name (void) const { return t_name; } \ - VIRTUAL std::string class_name (void) const { return c_name; } \ - static int static_type_id (void) { return t_id; } \ - static std::string static_type_name (void) { return t_name; } \ - static std::string static_class_name (void) { return c_name; } \ - static void register_type (void); \ - \ - private: \ - static int t_id; \ - static const std::string t_name; \ +#define DECLARE_OV_TYPEID_FUNCTIONS_AND_DATA2(VIRTUAL) \ + public: \ + VIRTUAL int 
type_id (void) const { return t_id; } \ + VIRTUAL std::string type_name (void) const { return t_name; } \ + VIRTUAL std::string class_name (void) const { return c_name; } \ + static int static_type_id (void) { return t_id; } \ + static std::string static_type_name (void) { return t_name; } \ + static std::string static_class_name (void) { return c_name; } \ + static void register_type (void); \ + \ + private: \ + static int t_id; \ + static const std::string t_name; \ static const std::string c_name; -#define DEFINE_OV_TYPEID_FUNCTIONS_AND_DATA(t, n, c) \ - int t::t_id (-1); \ - const std::string t::t_name (n); \ - const std::string t::c_name (c); \ - void t::register_type (void) \ - { \ - static t exemplar; \ - octave_value v (&exemplar, true); \ - t_id = octave_value_typeinfo::register_type (t::t_name, t::c_name, v); \ - } +#define DEFINE_OV_TYPEID_FUNCTIONS_AND_DATA(t, n, c) \ + int t::t_id (-1); \ + const std::string t::t_name (n); \ + const std::string t::c_name (c); \ + void t::register_type (void) \ + { \ + static t exemplar; \ + octave_value v (&exemplar, true); \ + t_id = octave_value_typeinfo::register_type (t::t_name, t::c_name, v); \ + } // A base value type, so that derived types only have to redefine what // they need (if they are derived from octave_base_value instead of diff -r dd992fd74fce -r e43d83253e28 libinterp/octave-value/ov-cell.cc --- a/libinterp/octave-value/ov-cell.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/octave-value/ov-cell.cc Mon Aug 01 12:40:18 2016 -0400 @@ -1388,9 +1388,10 @@ { switch (umap) { -#define FORWARD_MAPPER(UMAP) \ - case umap_ ## UMAP: \ - return matrix.UMAP () +#define FORWARD_MAPPER(UMAP) \ + case umap_ ## UMAP: \ + return matrix.UMAP () + FORWARD_MAPPER (xisalnum); FORWARD_MAPPER (xisalpha); FORWARD_MAPPER (xisascii); diff -r dd992fd74fce -r e43d83253e28 libinterp/octave-value/ov-ch-mat.cc --- a/libinterp/octave-value/ov-ch-mat.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/octave-value/ov-ch-mat.cc Mon Aug 01 12:40:18 2016 -0400 @@ -206,9 +206,9 @@ switch (umap) { -#define STRING_MAPPER(UMAP,FCN,TYPE) \ - case umap_ ## UMAP: \ - return octave_value (matrix.map (FCN)) +#define STRING_MAPPER(UMAP,FCN,TYPE) \ + case umap_ ## UMAP: \ + return octave_value (matrix.map (FCN)) STRING_MAPPER (xisalnum, std::isalnum, bool); STRING_MAPPER (xisalpha, std::isalpha, bool); diff -r dd992fd74fce -r e43d83253e28 libinterp/octave-value/ov-classdef.cc --- a/libinterp/octave-value/ov-classdef.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/octave-value/ov-classdef.cc Mon Aug 01 12:40:18 2016 -0400 @@ -580,27 +580,27 @@ return retval; } -#define META_CLASS_CMP(OP, CLSA, CLSB, FUN) \ -static octave_value_list \ -class_ ## OP (const octave_value_list& args, int /* nargout */) \ -{ \ - octave_value_list retval; \ - \ - if (args.length () != 2 \ - || args(0).type_name () != "object" \ - || args(1).type_name () != "object" \ - || args(0).class_name () != "meta.class" \ - || args(1).class_name () != "meta.class") \ - error (#OP ": invalid arguments"); \ - \ - cdef_class clsa = to_cdef (args(0)); \ - \ - cdef_class clsb = to_cdef (args(1)); \ - \ - retval(0) = FUN (CLSA, CLSB); \ -\ - return retval; \ -} +#define META_CLASS_CMP(OP, CLSA, CLSB, FUN) \ + static octave_value_list \ + class_ ## OP (const octave_value_list& args, int /* nargout */) \ + { \ + octave_value_list retval; \ + \ + if (args.length () != 2 \ + || args(0).type_name () != "object" \ + || args(1).type_name () != "object" \ + || args(0).class_name () != "meta.class" \ + || 
args(1).class_name () != "meta.class") \ + error (#OP ": invalid arguments"); \ + \ + cdef_class clsa = to_cdef (args(0)); \ + \ + cdef_class clsb = to_cdef (args(1)); \ + \ + retval(0) = FUN (CLSA, CLSB); \ + \ + return retval; \ + } META_CLASS_CMP (lt, clsb, clsa, is_strict_superclass) META_CLASS_CMP (le, clsb, clsa, is_superclass) diff -r dd992fd74fce -r e43d83253e28 libinterp/octave-value/ov-complex.cc --- a/libinterp/octave-value/ov-complex.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/octave-value/ov-complex.cc Mon Aug 01 12:40:18 2016 -0400 @@ -474,9 +474,9 @@ { switch (umap) { -#define SCALAR_MAPPER(UMAP, FCN) \ - case umap_ ## UMAP: \ - return octave_value (FCN (scalar)) +#define SCALAR_MAPPER(UMAP, FCN) \ + case umap_ ## UMAP: \ + return octave_value (FCN (scalar)) SCALAR_MAPPER (abs, std::abs); SCALAR_MAPPER (acos, octave::math::acos); diff -r dd992fd74fce -r e43d83253e28 libinterp/octave-value/ov-cx-mat.cc --- a/libinterp/octave-value/ov-cx-mat.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/octave-value/ov-cx-mat.cc Mon Aug 01 12:40:18 2016 -0400 @@ -751,18 +751,18 @@ case umap_conj: return ::conj (matrix); -#define ARRAY_METHOD_MAPPER(UMAP, FCN) \ - case umap_ ## UMAP: \ - return octave_value (matrix.FCN ()) +#define ARRAY_METHOD_MAPPER(UMAP, FCN) \ + case umap_ ## UMAP: \ + return octave_value (matrix.FCN ()) ARRAY_METHOD_MAPPER (abs, abs); ARRAY_METHOD_MAPPER (isnan, isnan); ARRAY_METHOD_MAPPER (isinf, isinf); ARRAY_METHOD_MAPPER (isfinite, isfinite); -#define ARRAY_MAPPER(UMAP, TYPE, FCN) \ - case umap_ ## UMAP: \ - return octave_value (matrix.map (FCN)) +#define ARRAY_MAPPER(UMAP, TYPE, FCN) \ + case umap_ ## UMAP: \ + return octave_value (matrix.map (FCN)) ARRAY_MAPPER (acos, Complex, octave::math::acos); ARRAY_MAPPER (acosh, Complex, octave::math::acosh); diff -r dd992fd74fce -r e43d83253e28 libinterp/octave-value/ov-cx-sparse.cc --- a/libinterp/octave-value/ov-cx-sparse.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/octave-value/ov-cx-sparse.cc Mon Aug 01 12:40:18 2016 -0400 @@ -908,15 +908,15 @@ case umap_imag: return ::imag (matrix); -#define ARRAY_METHOD_MAPPER(UMAP, FCN) \ - case umap_ ## UMAP: \ - return octave_value (matrix.FCN ()) +#define ARRAY_METHOD_MAPPER(UMAP, FCN) \ + case umap_ ## UMAP: \ + return octave_value (matrix.FCN ()) ARRAY_METHOD_MAPPER (abs, abs); -#define ARRAY_MAPPER(UMAP, TYPE, FCN) \ - case umap_ ## UMAP: \ - return octave_value (matrix.map (FCN)) +#define ARRAY_MAPPER(UMAP, TYPE, FCN) \ + case umap_ ## UMAP: \ + return octave_value (matrix.map (FCN)) ARRAY_MAPPER (acos, Complex, octave::math::acos); ARRAY_MAPPER (acosh, Complex, octave::math::acosh); diff -r dd992fd74fce -r e43d83253e28 libinterp/octave-value/ov-float.cc --- a/libinterp/octave-value/ov-float.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/octave-value/ov-float.cc Mon Aug 01 12:40:18 2016 -0400 @@ -292,9 +292,9 @@ case umap_conj: return scalar; -#define SCALAR_MAPPER(UMAP, FCN) \ - case umap_ ## UMAP: \ - return octave_value (FCN (scalar)) +#define SCALAR_MAPPER(UMAP, FCN) \ + case umap_ ## UMAP: \ + return octave_value (FCN (scalar)) SCALAR_MAPPER (abs, ::fabsf); SCALAR_MAPPER (acos, octave::math::rc_acos); diff -r dd992fd74fce -r e43d83253e28 libinterp/octave-value/ov-float.h --- a/libinterp/octave-value/ov-float.h Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/octave-value/ov-float.h Mon Aug 01 12:40:18 2016 -0400 @@ -125,10 +125,12 @@ uint64_array_value (void) const { return uint64NDArray (dim_vector (1, 1), scalar); } -#define DEFINE_INT_SCALAR_VALUE(TYPE) \ 
- octave_ ## TYPE \ - TYPE ## _scalar_value (void) const \ - { return octave_ ## TYPE (scalar); } +#define DEFINE_INT_SCALAR_VALUE(TYPE) \ + octave_ ## TYPE \ + TYPE ## _scalar_value (void) const \ + { \ + return octave_ ## TYPE (scalar); \ + } DEFINE_INT_SCALAR_VALUE (int8) DEFINE_INT_SCALAR_VALUE (int16) diff -r dd992fd74fce -r e43d83253e28 libinterp/octave-value/ov-flt-complex.cc --- a/libinterp/octave-value/ov-flt-complex.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/octave-value/ov-flt-complex.cc Mon Aug 01 12:40:18 2016 -0400 @@ -424,9 +424,9 @@ { switch (umap) { -#define SCALAR_MAPPER(UMAP, FCN) \ - case umap_ ## UMAP: \ - return octave_value (FCN (scalar)) +#define SCALAR_MAPPER(UMAP, FCN) \ + case umap_ ## UMAP: \ + return octave_value (FCN (scalar)) SCALAR_MAPPER (abs, std::abs); SCALAR_MAPPER (acos, octave::math::acos); diff -r dd992fd74fce -r e43d83253e28 libinterp/octave-value/ov-flt-cx-mat.cc --- a/libinterp/octave-value/ov-flt-cx-mat.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/octave-value/ov-flt-cx-mat.cc Mon Aug 01 12:40:18 2016 -0400 @@ -702,18 +702,18 @@ case umap_conj: return ::conj (matrix); -#define ARRAY_METHOD_MAPPER(UMAP, FCN) \ - case umap_ ## UMAP: \ - return octave_value (matrix.FCN ()) +#define ARRAY_METHOD_MAPPER(UMAP, FCN) \ + case umap_ ## UMAP: \ + return octave_value (matrix.FCN ()) ARRAY_METHOD_MAPPER (abs, abs); ARRAY_METHOD_MAPPER (isnan, isnan); ARRAY_METHOD_MAPPER (isinf, isinf); ARRAY_METHOD_MAPPER (isfinite, isfinite); -#define ARRAY_MAPPER(UMAP, TYPE, FCN) \ - case umap_ ## UMAP: \ - return octave_value (matrix.map (FCN)) +#define ARRAY_MAPPER(UMAP, TYPE, FCN) \ + case umap_ ## UMAP: \ + return octave_value (matrix.map (FCN)) ARRAY_MAPPER (acos, FloatComplex, octave::math::acos); ARRAY_MAPPER (acosh, FloatComplex, octave::math::acosh); diff -r dd992fd74fce -r e43d83253e28 libinterp/octave-value/ov-flt-re-mat.cc --- a/libinterp/octave-value/ov-flt-re-mat.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/octave-value/ov-flt-re-mat.cc Mon Aug 01 12:40:18 2016 -0400 @@ -726,22 +726,22 @@ return matrix; // Mappers handled specially. 
-#define ARRAY_METHOD_MAPPER(UMAP, FCN) \ - case umap_ ## UMAP: \ - return octave_value (matrix.FCN ()) +#define ARRAY_METHOD_MAPPER(UMAP, FCN) \ + case umap_ ## UMAP: \ + return octave_value (matrix.FCN ()) ARRAY_METHOD_MAPPER (abs, abs); ARRAY_METHOD_MAPPER (isnan, isnan); ARRAY_METHOD_MAPPER (isinf, isinf); ARRAY_METHOD_MAPPER (isfinite, isfinite); -#define ARRAY_MAPPER(UMAP, TYPE, FCN) \ - case umap_ ## UMAP: \ - return octave_value (matrix.map (FCN)) +#define ARRAY_MAPPER(UMAP, TYPE, FCN) \ + case umap_ ## UMAP: \ + return octave_value (matrix.map (FCN)) -#define RC_ARRAY_MAPPER(UMAP, TYPE, FCN) \ - case umap_ ## UMAP: \ - return do_rc_map (matrix, FCN) +#define RC_ARRAY_MAPPER(UMAP, TYPE, FCN) \ + case umap_ ## UMAP: \ + return do_rc_map (matrix, FCN) RC_ARRAY_MAPPER (acos, FloatComplex, octave::math::rc_acos); RC_ARRAY_MAPPER (acosh, FloatComplex, octave::math::rc_acosh); diff -r dd992fd74fce -r e43d83253e28 libinterp/octave-value/ov-int-traits.h --- a/libinterp/octave-value/ov-int-traits.h Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/octave-value/ov-int-traits.h Mon Aug 01 12:40:18 2016 -0400 @@ -43,13 +43,13 @@ typedef T scalar_type; }; -#define OCTAVE_VALUE_INT_TRAITS(MT, ST) \ - template <> \ - class \ - octave_value_int_traits \ - { \ - public: \ - typedef ST scalar_type; \ +#define OCTAVE_VALUE_INT_TRAITS(MT, ST) \ + template <> \ + class \ + octave_value_int_traits \ + { \ + public: \ + typedef ST scalar_type; \ } OCTAVE_VALUE_INT_TRAITS(int8NDArray, octave_int8_scalar); diff -r dd992fd74fce -r e43d83253e28 libinterp/octave-value/ov-lazy-idx.h --- a/libinterp/octave-value/ov-lazy-idx.h Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/octave-value/ov-lazy-idx.h Mon Aug 01 12:40:18 2016 -0400 @@ -138,9 +138,11 @@ void print_info (std::ostream& os, const std::string& prefix) const { make_value ().print_info (os, prefix); } -#define FORWARD_VALUE_QUERY(TYPE,NAME) \ - TYPE \ - NAME (void) const { return make_value ().NAME (); } +#define FORWARD_VALUE_QUERY(TYPE, NAME) \ + TYPE NAME (void) const \ + { \ + return make_value ().NAME (); \ + } FORWARD_VALUE_QUERY (int8NDArray, int8_array_value) FORWARD_VALUE_QUERY (int16NDArray, int16_array_value) @@ -151,52 +153,44 @@ FORWARD_VALUE_QUERY (uint32NDArray, uint32_array_value) FORWARD_VALUE_QUERY (uint64NDArray, uint64_array_value) -#define FORWARD_VALUE_QUERY1(TYPE,NAME) \ - TYPE \ - NAME (bool flag = false) const { return make_value ().NAME (flag); } +#define FORWARD_VALUE_QUERY1(TYPE, NAME) \ + TYPE NAME (bool flag = false) const \ + { \ + return make_value ().NAME (flag); \ + } FORWARD_VALUE_QUERY1 (double, double_value) - FORWARD_VALUE_QUERY1 (float, float_value) - FORWARD_VALUE_QUERY1 (double, scalar_value) - FORWARD_VALUE_QUERY1 (Matrix, matrix_value) - FORWARD_VALUE_QUERY1 (FloatMatrix, float_matrix_value) - FORWARD_VALUE_QUERY1 (Complex, complex_value) - FORWARD_VALUE_QUERY1 (FloatComplex, float_complex_value) - FORWARD_VALUE_QUERY1 (ComplexMatrix, complex_matrix_value) - FORWARD_VALUE_QUERY1 (FloatComplexMatrix, float_complex_matrix_value) - FORWARD_VALUE_QUERY1 (ComplexNDArray, complex_array_value) - FORWARD_VALUE_QUERY1 (FloatComplexNDArray, float_complex_array_value) - FORWARD_VALUE_QUERY1 (boolNDArray, bool_array_value) - FORWARD_VALUE_QUERY1 (charNDArray, char_array_value) - FORWARD_VALUE_QUERY1 (NDArray, array_value) - FORWARD_VALUE_QUERY1 (FloatNDArray, float_array_value) - FORWARD_VALUE_QUERY1 (SparseMatrix, sparse_matrix_value) - FORWARD_VALUE_QUERY1 (SparseComplexMatrix, sparse_complex_matrix_value) octave_value diag 
(octave_idx_type k = 0) const - { return make_value ().diag (k); } + { + return make_value ().diag (k); + } octave_value convert_to_str_internal (bool pad, bool force, char type) const - { return make_value ().convert_to_str_internal (pad, force, type); } + { + return make_value ().convert_to_str_internal (pad, force, type); + } void print_raw (std::ostream& os, bool pr_as_read_syntax = false) const - { return make_value ().print_raw (os, pr_as_read_syntax); } + { + return make_value ().print_raw (os, pr_as_read_syntax); + } bool save_ascii (std::ostream& os); @@ -210,20 +204,29 @@ int write (octave_stream& os, int block_size, oct_data_conv::data_type output_type, int skip, octave::mach_info::float_format flt_fmt) const - { return make_value ().write (os, block_size, output_type, skip, flt_fmt); } + { + return make_value ().write (os, block_size, output_type, skip, flt_fmt); + } // Unsafe. This function exists to support the MEX interface. // You should not use it anywhere else. void *mex_get_data (void) const - { return make_value ().mex_get_data (); } + { + return make_value ().mex_get_data (); + } mxArray *as_mxArray (void) const - { return make_value ().as_mxArray (); } + { + return make_value ().as_mxArray (); + } octave_value map (unary_mapper_t umap) const - { return make_value ().map (umap); } + { + return make_value ().map (umap); + } private: + const octave_value& make_value (void) const { if (value.is_undefined ()) diff -r dd992fd74fce -r e43d83253e28 libinterp/octave-value/ov-perm.cc --- a/libinterp/octave-value/ov-perm.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/octave-value/ov-perm.cc Mon Aug 01 12:40:18 2016 -0400 @@ -213,12 +213,12 @@ return retval; } -#define FORWARD_MATRIX_VALUE(TYPE, PREFIX) \ -TYPE \ -octave_perm_matrix::PREFIX ## _value (bool frc_str_conv) const \ -{ \ - return to_dense ().PREFIX ## _value (frc_str_conv); \ -} +#define FORWARD_MATRIX_VALUE(TYPE, PREFIX) \ + TYPE \ + octave_perm_matrix::PREFIX ## _value (bool frc_str_conv) const \ + { \ + return to_dense ().PREFIX ## _value (frc_str_conv); \ + } SparseMatrix octave_perm_matrix::sparse_matrix_value (bool) const diff -r dd992fd74fce -r e43d83253e28 libinterp/octave-value/ov-re-mat.cc --- a/libinterp/octave-value/ov-re-mat.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/octave-value/ov-re-mat.cc Mon Aug 01 12:40:18 2016 -0400 @@ -851,22 +851,22 @@ return matrix; // Mappers handled specially. 
-#define ARRAY_METHOD_MAPPER(UMAP, FCN) \ - case umap_ ## UMAP: \ - return octave_value (matrix.FCN ()) +#define ARRAY_METHOD_MAPPER(UMAP, FCN) \ + case umap_ ## UMAP: \ + return octave_value (matrix.FCN ()) ARRAY_METHOD_MAPPER (abs, abs); ARRAY_METHOD_MAPPER (isnan, isnan); ARRAY_METHOD_MAPPER (isinf, isinf); ARRAY_METHOD_MAPPER (isfinite, isfinite); -#define ARRAY_MAPPER(UMAP, TYPE, FCN) \ - case umap_ ## UMAP: \ - return octave_value (matrix.map (FCN)) +#define ARRAY_MAPPER(UMAP, TYPE, FCN) \ + case umap_ ## UMAP: \ + return octave_value (matrix.map (FCN)) -#define RC_ARRAY_MAPPER(UMAP, TYPE, FCN) \ - case umap_ ## UMAP: \ - return do_rc_map (matrix, FCN) +#define RC_ARRAY_MAPPER(UMAP, TYPE, FCN) \ + case umap_ ## UMAP: \ + return do_rc_map (matrix, FCN) RC_ARRAY_MAPPER (acos, Complex, octave::math::rc_acos); RC_ARRAY_MAPPER (acosh, Complex, octave::math::rc_acosh); diff -r dd992fd74fce -r e43d83253e28 libinterp/octave-value/ov-re-sparse.cc --- a/libinterp/octave-value/ov-re-sparse.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/octave-value/ov-re-sparse.cc Mon Aug 01 12:40:18 2016 -0400 @@ -897,15 +897,15 @@ return matrix; // Mappers handled specially. -#define ARRAY_METHOD_MAPPER(UMAP, FCN) \ - case umap_ ## UMAP: \ - return octave_value (matrix.FCN ()) +#define ARRAY_METHOD_MAPPER(UMAP, FCN) \ + case umap_ ## UMAP: \ + return octave_value (matrix.FCN ()) ARRAY_METHOD_MAPPER (abs, abs); -#define ARRAY_MAPPER(UMAP, TYPE, FCN) \ - case umap_ ## UMAP: \ - return octave_value (matrix.map (FCN)) +#define ARRAY_MAPPER(UMAP, TYPE, FCN) \ + case umap_ ## UMAP: \ + return octave_value (matrix.map (FCN)) ARRAY_MAPPER (acos, Complex, octave::math::rc_acos); ARRAY_MAPPER (acosh, Complex, octave::math::rc_acosh); diff -r dd992fd74fce -r e43d83253e28 libinterp/octave-value/ov-scalar.cc --- a/libinterp/octave-value/ov-scalar.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/octave-value/ov-scalar.cc Mon Aug 01 12:40:18 2016 -0400 @@ -313,9 +313,9 @@ case umap_conj: return scalar; -#define SCALAR_MAPPER(UMAP, FCN) \ - case umap_ ## UMAP: \ - return octave_value (FCN (scalar)) +#define SCALAR_MAPPER(UMAP, FCN) \ + case umap_ ## UMAP: \ + return octave_value (FCN (scalar)) SCALAR_MAPPER (abs, ::fabs); SCALAR_MAPPER (acos, octave::math::rc_acos); diff -r dd992fd74fce -r e43d83253e28 libinterp/octave-value/ov-scalar.h --- a/libinterp/octave-value/ov-scalar.h Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/octave-value/ov-scalar.h Mon Aug 01 12:40:18 2016 -0400 @@ -124,10 +124,12 @@ uint64_array_value (void) const { return uint64NDArray (dim_vector (1, 1), scalar); } -#define DEFINE_INT_SCALAR_VALUE(TYPE) \ - octave_ ## TYPE \ - TYPE ## _scalar_value (void) const \ - { return octave_ ## TYPE (scalar); } +#define DEFINE_INT_SCALAR_VALUE(TYPE) \ + octave_ ## TYPE \ + TYPE ## _scalar_value (void) const \ + { \ + return octave_ ## TYPE (scalar); \ + } DEFINE_INT_SCALAR_VALUE (int8) DEFINE_INT_SCALAR_VALUE (int16) diff -r dd992fd74fce -r e43d83253e28 libinterp/octave-value/ov-str-mat.cc --- a/libinterp/octave-value/ov-str-mat.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/octave-value/ov-str-mat.cc Mon Aug 01 12:40:18 2016 -0400 @@ -160,16 +160,16 @@ return octave_value (retval, is_sq_string () ? '\'' : '"'); } -#define CHAR_MATRIX_CONV(T, INIT, TNAME, FCN) \ - T retval INIT; \ - \ - if (! 
force_string_conv) \ - err_invalid_conversion ("string", TNAME); \ - \ - warn_implicit_conversion ("Octave:str-to-num", "string", TNAME); \ - \ - retval = octave_char_matrix::FCN (); \ - \ +#define CHAR_MATRIX_CONV(T, INIT, TNAME, FCN) \ + T retval INIT; \ + \ + if (! force_string_conv) \ + err_invalid_conversion ("string", TNAME); \ + \ + warn_implicit_conversion ("Octave:str-to-num", "string", TNAME); \ + \ + retval = octave_char_matrix::FCN (); \ + \ return retval double diff -r dd992fd74fce -r e43d83253e28 libinterp/octave-value/ov-type-conv.h --- a/libinterp/octave-value/ov-type-conv.h Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/octave-value/ov-type-conv.h Mon Aug 01 12:40:18 2016 -0400 @@ -75,33 +75,33 @@ return retval; } -#define OCTAVE_TYPE_CONV_BODY3(NAME, MATRIX_RESULT_T, SCALAR_RESULT_T) \ - \ - if (args.length () != 1) \ - print_usage (); \ - \ - octave_value retval; \ - \ - const octave_value arg = args(0); \ - \ - int t_result = MATRIX_RESULT_T::static_type_id (); \ - \ - retval = octave_type_conv_body (arg, #NAME, t_result); \ - if (retval.is_undefined ()) \ - { \ - std::string arg_tname = arg.type_name (); \ - \ - std::string result_tname = arg.numel () == 1 \ - ? SCALAR_RESULT_T::static_type_name () \ - : MATRIX_RESULT_T::static_type_name (); \ - \ - err_invalid_conversion (arg_tname, result_tname); \ - } \ - \ +#define OCTAVE_TYPE_CONV_BODY3(NAME, MATRIX_RESULT_T, SCALAR_RESULT_T) \ + \ + if (args.length () != 1) \ + print_usage (); \ + \ + octave_value retval; \ + \ + const octave_value arg = args(0); \ + \ + int t_result = MATRIX_RESULT_T::static_type_id (); \ + \ + retval = octave_type_conv_body (arg, #NAME, t_result); \ + if (retval.is_undefined ()) \ + { \ + std::string arg_tname = arg.type_name (); \ + \ + std::string result_tname = arg.numel () == 1 \ + ? SCALAR_RESULT_T::static_type_name () \ + : MATRIX_RESULT_T::static_type_name (); \ + \ + err_invalid_conversion (arg_tname, result_tname); \ + } \ + \ return retval; -#define OCTAVE_TYPE_CONV_BODY(NAME) \ - OCTAVE_TYPE_CONV_BODY3 (NAME, octave_ ## NAME ## _matrix, \ +#define OCTAVE_TYPE_CONV_BODY(NAME) \ + OCTAVE_TYPE_CONV_BODY3 (NAME, octave_ ## NAME ## _matrix, \ octave_ ## NAME ## _scalar) #endif diff -r dd992fd74fce -r e43d83253e28 libinterp/octave-value/ov.cc --- a/libinterp/octave-value/ov.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/octave-value/ov.cc Mon Aug 01 12:40:18 2016 -0400 @@ -1771,30 +1771,30 @@ // value extraction functions perform implicit type conversions that we // wish to avoid for these functions. -#define XVALUE_EXTRACTOR(TYPE, NAME, FCN) \ - TYPE \ - octave_value::NAME (const char *fmt, ...) const \ - { \ - TYPE retval; \ - \ - try \ - { \ - retval = FCN (); \ - } \ - catch (octave_execution_exception& e) \ - { \ - if (fmt) \ - { \ - va_list args; \ - va_start (args, fmt); \ - verror (e, fmt, args); \ - va_end (args); \ - } \ - \ - throw e; \ - } \ - \ - return retval; \ +#define XVALUE_EXTRACTOR(TYPE, NAME, FCN) \ + TYPE \ + octave_value::NAME (const char *fmt, ...) 
const \ + { \ + TYPE retval; \ + \ + try \ + { \ + retval = FCN (); \ + } \ + catch (octave_execution_exception& e) \ + { \ + if (fmt) \ + { \ + va_list args; \ + va_start (args, fmt); \ + verror (e, fmt, args); \ + va_end (args); \ + } \ + \ + throw e; \ + } \ + \ + return retval; \ } XVALUE_EXTRACTOR (short int, xshort_value, short_value) diff -r dd992fd74fce -r e43d83253e28 libinterp/octave-value/ov.h --- a/libinterp/octave-value/ov.h Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/octave-value/ov.h Mon Aug 01 12:40:18 2016 -0400 @@ -1323,7 +1323,10 @@ void dump (std::ostream& os) const { rep->dump (os); } #define MAPPER_FORWARD(F) \ - octave_value F (void) const { return rep->map (octave_base_value::umap_ ## F); } + octave_value F (void) const \ + { \ + return rep->map (octave_base_value::umap_ ## F); \ + } MAPPER_FORWARD (abs) MAPPER_FORWARD (acos) @@ -1445,22 +1448,22 @@ do_binary_op (octave_value::compound_binary_op op, const octave_value& a, const octave_value& b); -#define OV_UNOP_FN(name) \ - inline octave_value \ - name (const octave_value& a) \ - { \ +#define OV_UNOP_FN(name) \ + inline octave_value \ + name (const octave_value& a) \ + { \ return do_unary_op (octave_value::name, a); \ } -#define OV_UNOP_OP(name, op) \ - inline octave_value \ - operator op (const octave_value& a) \ - { \ - return name (a); \ +#define OV_UNOP_OP(name, op) \ + inline octave_value \ + operator op (const octave_value& a) \ + { \ + return name (a); \ } -#define OV_UNOP_FN_OP(name, op) \ - OV_UNOP_FN (name) \ +#define OV_UNOP_FN_OP(name, op) \ + OV_UNOP_FN (name) \ OV_UNOP_OP (name, op) OV_UNOP_FN_OP (op_not, !) @@ -1475,22 +1478,22 @@ // incr // decr -#define OV_BINOP_FN(name) \ - inline octave_value \ +#define OV_BINOP_FN(name) \ + inline octave_value \ name (const octave_value& a1, const octave_value& a2) \ - { \ - return do_binary_op (octave_value::name, a1, a2); \ + { \ + return do_binary_op (octave_value::name, a1, a2); \ } -#define OV_BINOP_OP(name, op) \ - inline octave_value \ - operator op (const octave_value& a1, const octave_value& a2) \ - { \ - return name (a1, a2); \ +#define OV_BINOP_OP(name, op) \ + inline octave_value \ + operator op (const octave_value& a1, const octave_value& a2) \ + { \ + return name (a1, a2); \ } -#define OV_BINOP_FN_OP(name, op) \ - OV_BINOP_FN (name) \ +#define OV_BINOP_FN_OP(name, op) \ + OV_BINOP_FN (name) \ OV_BINOP_OP (name, op) OV_BINOP_FN_OP (op_add, +) @@ -1517,11 +1520,11 @@ OV_BINOP_FN (op_struct_ref) -#define OV_COMP_BINOP_FN(name) \ - inline octave_value \ +#define OV_COMP_BINOP_FN(name) \ + inline octave_value \ name (const octave_value& a1, const octave_value& a2) \ - { \ - return do_binary_op (octave_value::name, a1, a2); \ + { \ + return do_binary_op (octave_value::name, a1, a2); \ } OV_COMP_BINOP_FN (op_trans_mul) @@ -1536,10 +1539,12 @@ inline Value octave_value_extract (const octave_value&) { assert (false); } -#define DEF_VALUE_EXTRACTOR(VALUE,MPREFIX) \ -template <> \ -inline VALUE octave_value_extract (const octave_value& v) \ - { return v.MPREFIX ## _value (); } +#define DEF_VALUE_EXTRACTOR(VALUE,MPREFIX) \ + template <> \ + inline VALUE octave_value_extract (const octave_value& v) \ + { \ + return v.MPREFIX ## _value (); \ + } DEF_VALUE_EXTRACTOR (double, scalar) DEF_VALUE_EXTRACTOR (float, float_scalar) @@ -1599,10 +1604,13 @@ DEF_VALUE_EXTRACTOR (SparseBoolMatrix, sparse_bool_matrix) #undef DEF_VALUE_EXTRACTOR -#define DEF_DUMMY_VALUE_EXTRACTOR(VALUE,DEFVAL) \ -template <> \ -inline VALUE octave_value_extract (const octave_value&) \ - { 
assert (false); return DEFVAL; } +#define DEF_DUMMY_VALUE_EXTRACTOR(VALUE,DEFVAL) \ + template <> \ + inline VALUE octave_value_extract (const octave_value&) \ + { \ + assert (false); \ + return DEFVAL; \ + } DEF_DUMMY_VALUE_EXTRACTOR (char, 0) DEF_DUMMY_VALUE_EXTRACTOR (octave_value, octave_value ()) diff -r dd992fd74fce -r e43d83253e28 libinterp/operators/op-class.cc --- a/libinterp/operators/op-class.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/operators/op-class.cc Mon Aug 01 12:40:18 2016 -0400 @@ -38,30 +38,30 @@ // class ops. -#define DEF_CLASS_UNOP(name) \ - static octave_value \ - oct_unop_ ## name (const octave_value& a) \ - { \ - octave_value retval; \ - \ - std::string class_name = a.class_name (); \ - \ - octave_value meth = symbol_table::find_method (#name, class_name); \ - \ - if (meth.is_undefined ()) \ - error ("%s method not defined for %s class", #name, \ - class_name.c_str ()); \ - \ - octave_value_list args; \ - \ - args(0) = a; \ - \ - octave_value_list tmp = feval (meth.function_value (), args, 1); \ - \ - if (tmp.length () > 0) \ - retval = tmp(0); \ - \ - return retval; \ +#define DEF_CLASS_UNOP(name) \ + static octave_value \ + oct_unop_ ## name (const octave_value& a) \ + { \ + octave_value retval; \ + \ + std::string class_name = a.class_name (); \ + \ + octave_value meth = symbol_table::find_method (#name, class_name); \ + \ + if (meth.is_undefined ()) \ + error ("%s method not defined for %s class", #name, \ + class_name.c_str ()); \ + \ + octave_value_list args; \ + \ + args(0) = a; \ + \ + octave_value_list tmp = feval (meth.function_value (), args, 1); \ + \ + if (tmp.length () > 0) \ + retval = tmp(0); \ + \ + return retval; \ } DEF_CLASS_UNOP (not) @@ -73,31 +73,31 @@ // FIXME: we need to handle precedence in the binop function. #define DEF_CLASS_BINOP(name) \ - static octave_value \ - oct_binop_ ## name (const octave_value& a1, const octave_value& a2) \ - { \ - octave_value retval; \ - \ - std::string dispatch_type \ - = a1.is_object () ? a1.class_name () : a2.class_name (); \ - \ + static octave_value \ + oct_binop_ ## name (const octave_value& a1, const octave_value& a2) \ + { \ + octave_value retval; \ + \ + std::string dispatch_type \ + = a1.is_object () ? 
a1.class_name () : a2.class_name (); \ + \ octave_value meth = symbol_table::find_method (#name, dispatch_type); \ - \ - if (meth.is_undefined ()) \ - error ("%s method not defined for %s class", #name, \ - dispatch_type.c_str ()); \ - \ - octave_value_list args; \ - \ - args(1) = a2; \ - args(0) = a1; \ - \ - octave_value_list tmp = feval (meth.function_value (), args, 1); \ - \ - if (tmp.length () > 0) \ - retval = tmp(0); \ - \ - return retval; \ + \ + if (meth.is_undefined ()) \ + error ("%s method not defined for %s class", #name, \ + dispatch_type.c_str ()); \ + \ + octave_value_list args; \ + \ + args(1) = a2; \ + args(0) = a1; \ + \ + octave_value_list tmp = feval (meth.function_value (), args, 1); \ + \ + if (tmp.length () > 0) \ + retval = tmp(0); \ + \ + return retval; \ } DEF_CLASS_BINOP (plus) @@ -119,12 +119,12 @@ DEF_CLASS_BINOP (and) DEF_CLASS_BINOP (or) -#define INSTALL_CLASS_UNOP(op, f) \ - octave_value_typeinfo::register_unary_class_op \ +#define INSTALL_CLASS_UNOP(op, f) \ + octave_value_typeinfo::register_unary_class_op \ (octave_value::op, oct_unop_ ## f) -#define INSTALL_CLASS_BINOP(op, f) \ - octave_value_typeinfo::register_binary_class_op \ +#define INSTALL_CLASS_BINOP(op, f) \ + octave_value_typeinfo::register_binary_class_op \ (octave_value::op, oct_binop_ ## f) void diff -r dd992fd74fce -r e43d83253e28 libinterp/operators/op-int-conv.cc --- a/libinterp/operators/op-int-conv.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/operators/op-int-conv.cc Mon Aug 01 12:40:18 2016 -0400 @@ -47,12 +47,12 @@ #include "ops.h" #define DEFINTCONVFN(name, tfrom, tto) \ - CONVDECL (name) \ - { \ + CONVDECL (name) \ + { \ const octave_ ## tfrom& v = dynamic_cast (a); \ - \ - octave_ ## tto ## _matrix v2 = v.tto ## _array_value (); \ - return new octave_ ## tto ## _matrix (v2); \ + \ + octave_ ## tto ## _matrix v2 = v.tto ## _array_value (); \ + return new octave_ ## tto ## _matrix (v2); \ } // conversion ops @@ -147,25 +147,25 @@ DEFINTCONVFN (range_to_uint32, range, uint32) DEFINTCONVFN (range_to_uint64, range, uint64) -#define INT_CONV_FUNCTIONS(tfrom) \ - DEFCONVFN2 (tfrom ## _scalar_to_int8, tfrom, scalar, int8) \ - DEFCONVFN2 (tfrom ## _scalar_to_int16, tfrom, scalar, int16) \ - DEFCONVFN2 (tfrom ## _scalar_to_int32, tfrom, scalar, int32) \ - DEFCONVFN2 (tfrom ## _scalar_to_int64, tfrom, scalar, int64) \ - \ - DEFCONVFN2 (tfrom ## _scalar_to_uint8, tfrom, scalar, uint8) \ - DEFCONVFN2 (tfrom ## _scalar_to_uint16, tfrom, scalar, uint16) \ - DEFCONVFN2 (tfrom ## _scalar_to_uint32, tfrom, scalar, uint32) \ - DEFCONVFN2 (tfrom ## _scalar_to_uint64, tfrom, scalar, uint64) \ - \ - DEFCONVFN2 (tfrom ## _matrix_to_int8, tfrom, matrix, int8) \ - DEFCONVFN2 (tfrom ## _matrix_to_int16, tfrom, matrix, int16) \ - DEFCONVFN2 (tfrom ## _matrix_to_int32, tfrom, matrix, int32) \ - DEFCONVFN2 (tfrom ## _matrix_to_int64, tfrom, matrix, int64) \ - \ - DEFCONVFN2 (tfrom ## _matrix_to_uint8, tfrom, matrix, uint8) \ - DEFCONVFN2 (tfrom ## _matrix_to_uint16, tfrom, matrix, uint16) \ - DEFCONVFN2 (tfrom ## _matrix_to_uint32, tfrom, matrix, uint32) \ +#define INT_CONV_FUNCTIONS(tfrom) \ + DEFCONVFN2 (tfrom ## _scalar_to_int8, tfrom, scalar, int8) \ + DEFCONVFN2 (tfrom ## _scalar_to_int16, tfrom, scalar, int16) \ + DEFCONVFN2 (tfrom ## _scalar_to_int32, tfrom, scalar, int32) \ + DEFCONVFN2 (tfrom ## _scalar_to_int64, tfrom, scalar, int64) \ + \ + DEFCONVFN2 (tfrom ## _scalar_to_uint8, tfrom, scalar, uint8) \ + DEFCONVFN2 (tfrom ## _scalar_to_uint16, tfrom, scalar, uint16) \ + DEFCONVFN2 (tfrom ## 
_scalar_to_uint32, tfrom, scalar, uint32) \ + DEFCONVFN2 (tfrom ## _scalar_to_uint64, tfrom, scalar, uint64) \ + \ + DEFCONVFN2 (tfrom ## _matrix_to_int8, tfrom, matrix, int8) \ + DEFCONVFN2 (tfrom ## _matrix_to_int16, tfrom, matrix, int16) \ + DEFCONVFN2 (tfrom ## _matrix_to_int32, tfrom, matrix, int32) \ + DEFCONVFN2 (tfrom ## _matrix_to_int64, tfrom, matrix, int64) \ + \ + DEFCONVFN2 (tfrom ## _matrix_to_uint8, tfrom, matrix, uint8) \ + DEFCONVFN2 (tfrom ## _matrix_to_uint16, tfrom, matrix, uint16) \ + DEFCONVFN2 (tfrom ## _matrix_to_uint32, tfrom, matrix, uint32) \ DEFCONVFN2 (tfrom ## _matrix_to_uint64, tfrom, matrix, uint64) INT_CONV_FUNCTIONS (int8) @@ -178,37 +178,61 @@ INT_CONV_FUNCTIONS (uint32) INT_CONV_FUNCTIONS (uint64) -#define INSTALL_INT_CONV_FUNCTIONS(tfrom) \ - INSTALL_CONVOP (octave_ ## tfrom ## _scalar, octave_int8_matrix, tfrom ## _scalar_to_int8) \ - INSTALL_CONVOP (octave_ ## tfrom ## _scalar, octave_int16_matrix, tfrom ## _scalar_to_int16) \ - INSTALL_CONVOP (octave_ ## tfrom ## _scalar, octave_int32_matrix, tfrom ## _scalar_to_int32) \ - INSTALL_CONVOP (octave_ ## tfrom ## _scalar, octave_int64_matrix, tfrom ## _scalar_to_int64) \ - \ - INSTALL_CONVOP (octave_ ## tfrom ## _scalar, octave_uint8_matrix, tfrom ## _scalar_to_uint8) \ - INSTALL_CONVOP (octave_ ## tfrom ## _scalar, octave_uint16_matrix, tfrom ## _scalar_to_uint16) \ - INSTALL_CONVOP (octave_ ## tfrom ## _scalar, octave_uint32_matrix, tfrom ## _scalar_to_uint32) \ - INSTALL_CONVOP (octave_ ## tfrom ## _scalar, octave_uint64_matrix, tfrom ## _scalar_to_uint64) \ - \ - INSTALL_CONVOP (octave_ ## tfrom ## _matrix, octave_int8_matrix, tfrom ## _matrix_to_int8) \ - INSTALL_CONVOP (octave_ ## tfrom ## _matrix, octave_int16_matrix, tfrom ## _matrix_to_int16) \ - INSTALL_CONVOP (octave_ ## tfrom ## _matrix, octave_int32_matrix, tfrom ## _matrix_to_int32) \ - INSTALL_CONVOP (octave_ ## tfrom ## _matrix, octave_int64_matrix, tfrom ## _matrix_to_int64) \ - \ - INSTALL_CONVOP (octave_ ## tfrom ## _matrix, octave_uint8_matrix, tfrom ## _matrix_to_uint8) \ - INSTALL_CONVOP (octave_ ## tfrom ## _matrix, octave_uint16_matrix, tfrom ## _matrix_to_uint16) \ - INSTALL_CONVOP (octave_ ## tfrom ## _matrix, octave_uint32_matrix, tfrom ## _matrix_to_uint32) \ - INSTALL_CONVOP (octave_ ## tfrom ## _matrix, octave_uint64_matrix, tfrom ## _matrix_to_uint64) +#define INSTALL_INT_CONV_FUNCTIONS(tfrom) \ + INSTALL_CONVOP (octave_ ## tfrom ## _scalar, octave_int8_matrix, \ + tfrom ## _scalar_to_int8) \ + INSTALL_CONVOP (octave_ ## tfrom ## _scalar, octave_int16_matrix, \ + tfrom ## _scalar_to_int16) \ + INSTALL_CONVOP (octave_ ## tfrom ## _scalar, octave_int32_matrix, \ + tfrom ## _scalar_to_int32) \ + INSTALL_CONVOP (octave_ ## tfrom ## _scalar, octave_int64_matrix, \ + tfrom ## _scalar_to_int64) \ + \ + INSTALL_CONVOP (octave_ ## tfrom ## _scalar, octave_uint8_matrix, \ + tfrom ## _scalar_to_uint8) \ + INSTALL_CONVOP (octave_ ## tfrom ## _scalar, octave_uint16_matrix, \ + tfrom ## _scalar_to_uint16) \ + INSTALL_CONVOP (octave_ ## tfrom ## _scalar, octave_uint32_matrix, \ + tfrom ## _scalar_to_uint32) \ + INSTALL_CONVOP (octave_ ## tfrom ## _scalar, octave_uint64_matrix, \ + tfrom ## _scalar_to_uint64) \ + \ + INSTALL_CONVOP (octave_ ## tfrom ## _matrix, octave_int8_matrix, \ + tfrom ## _matrix_to_int8) \ + INSTALL_CONVOP (octave_ ## tfrom ## _matrix, octave_int16_matrix, \ + tfrom ## _matrix_to_int16) \ + INSTALL_CONVOP (octave_ ## tfrom ## _matrix, octave_int32_matrix, \ + tfrom ## _matrix_to_int32) \ + INSTALL_CONVOP (octave_ ## 
tfrom ## _matrix, octave_int64_matrix, \ + tfrom ## _matrix_to_int64) \ + \ + INSTALL_CONVOP (octave_ ## tfrom ## _matrix, octave_uint8_matrix, \ + tfrom ## _matrix_to_uint8) \ + INSTALL_CONVOP (octave_ ## tfrom ## _matrix, octave_uint16_matrix, \ + tfrom ## _matrix_to_uint16) \ + INSTALL_CONVOP (octave_ ## tfrom ## _matrix, octave_uint32_matrix, \ + tfrom ## _matrix_to_uint32) \ + INSTALL_CONVOP (octave_ ## tfrom ## _matrix, octave_uint64_matrix, \ + tfrom ## _matrix_to_uint64) -#define INSTALL_CONVOPS(tfrom) \ - INSTALL_CONVOP (octave_ ## tfrom, octave_int8_matrix, tfrom ## _to_int8) \ - INSTALL_CONVOP (octave_ ## tfrom, octave_int16_matrix, tfrom ## _to_int16) \ - INSTALL_CONVOP (octave_ ## tfrom, octave_int32_matrix, tfrom ## _to_int32) \ - INSTALL_CONVOP (octave_ ## tfrom, octave_int64_matrix, tfrom ## _to_int64) \ - \ - INSTALL_CONVOP (octave_ ## tfrom, octave_uint8_matrix, tfrom ## _to_uint8) \ - INSTALL_CONVOP (octave_ ## tfrom, octave_uint16_matrix, tfrom ## _to_uint16) \ - INSTALL_CONVOP (octave_ ## tfrom, octave_uint32_matrix, tfrom ## _to_uint32) \ - INSTALL_CONVOP (octave_ ## tfrom, octave_uint64_matrix, tfrom ## _to_uint64) +#define INSTALL_CONVOPS(tfrom) \ + INSTALL_CONVOP (octave_ ## tfrom, octave_int8_matrix, \ + tfrom ## _to_int8) \ + INSTALL_CONVOP (octave_ ## tfrom, octave_int16_matrix, \ + tfrom ## _to_int16) \ + INSTALL_CONVOP (octave_ ## tfrom, octave_int32_matrix, \ + tfrom ## _to_int32) \ + INSTALL_CONVOP (octave_ ## tfrom, octave_int64_matrix, \ + tfrom ## _to_int64) \ + \ + INSTALL_CONVOP (octave_ ## tfrom, octave_uint8_matrix, \ + tfrom ## _to_uint8) \ + INSTALL_CONVOP (octave_ ## tfrom, octave_uint16_matrix, \ + tfrom ## _to_uint16) \ + INSTALL_CONVOP (octave_ ## tfrom, octave_uint32_matrix, \ + tfrom ## _to_uint32) \ + INSTALL_CONVOP (octave_ ## tfrom, octave_uint64_matrix, \ + tfrom ## _to_uint64) void install_int_conv_ops (void) diff -r dd992fd74fce -r e43d83253e28 libinterp/operators/op-int.h --- a/libinterp/operators/op-int.h Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/operators/op-int.h Mon Aug 01 12:40:18 2016 -0400 @@ -28,107 +28,107 @@ #include "quit.h" #include "bsxfun.h" -#define DEFINTBINOP_OP(name, t1, t2, op, t3) \ - static octave_value \ - CONCAT2 (oct_binop_, name) (const octave_base_value& a1, \ - const octave_base_value& a2) \ - { \ - const octave_ ## t1& v1 = dynamic_cast (a1); \ - const octave_ ## t2& v2 = dynamic_cast (a2); \ - octave_value retval = octave_value \ - (v1.t1 ## _value () op v2.t2 ## _value ()); \ - return retval; \ +#define DEFINTBINOP_OP(name, t1, t2, op, t3) \ + static octave_value \ + CONCAT2 (oct_binop_, name) (const octave_base_value& a1, \ + const octave_base_value& a2) \ + { \ + const octave_ ## t1& v1 = dynamic_cast (a1); \ + const octave_ ## t2& v2 = dynamic_cast (a2); \ + octave_value retval = octave_value \ + (v1.t1 ## _value () op v2.t2 ## _value ()); \ + return retval; \ } -#define DEFINTNDBINOP_OP(name, t1, t2, e1, e2, op, t3) \ - static octave_value \ - CONCAT2 (oct_binop_, name) (const octave_base_value& a1, \ - const octave_base_value& a2) \ - { \ - const octave_ ## t1& v1 = dynamic_cast (a1); \ - const octave_ ## t2& v2 = dynamic_cast (a2); \ - octave_value retval = octave_value \ - (v1.e1 ## _value () op v2.e2 ## _value ()); \ - return retval; \ +#define DEFINTNDBINOP_OP(name, t1, t2, e1, e2, op, t3) \ + static octave_value \ + CONCAT2 (oct_binop_, name) (const octave_base_value& a1, \ + const octave_base_value& a2) \ + { \ + const octave_ ## t1& v1 = dynamic_cast (a1); \ + const octave_ ## t2& v2 = 
dynamic_cast (a2); \ + octave_value retval = octave_value \ + (v1.e1 ## _value () op v2.e2 ## _value ()); \ + return retval; \ } -#define DEFINTBINOP_FN(name, t1, t2, f, t3, op) \ - static octave_value \ - CONCAT2 (oct_binop_, name) (const octave_base_value& a1, \ - const octave_base_value& a2) \ - { \ - const octave_ ## t1& v1 = dynamic_cast (a1); \ - const octave_ ## t2& v2 = dynamic_cast (a2); \ +#define DEFINTBINOP_FN(name, t1, t2, f, t3, op) \ + static octave_value \ + CONCAT2 (oct_binop_, name) (const octave_base_value& a1, \ + const octave_base_value& a2) \ + { \ + const octave_ ## t1& v1 = dynamic_cast (a1); \ + const octave_ ## t2& v2 = dynamic_cast (a2); \ octave_value retval = octave_value (f (v1.t1 ## _value (), v2.t2 ## _value ())); \ - return retval; \ + return retval; \ } -#define DEFINTNDBINOP_FN(name, t1, t2, e1, e2, f, t3, op) \ - static octave_value \ - CONCAT2 (oct_binop_, name) (const octave_base_value& a1, \ - const octave_base_value& a2) \ - { \ - const octave_ ## t1& v1 = dynamic_cast (a1); \ - const octave_ ## t2& v2 = dynamic_cast (a2); \ +#define DEFINTNDBINOP_FN(name, t1, t2, e1, e2, f, t3, op) \ + static octave_value \ + CONCAT2 (oct_binop_, name) (const octave_base_value& a1, \ + const octave_base_value& a2) \ + { \ + const octave_ ## t1& v1 = dynamic_cast (a1); \ + const octave_ ## t2& v2 = dynamic_cast (a2); \ octave_value retval = octave_value (f (v1.e1 ## _value (), v2.e2 ## _value ())); \ - return retval; \ + return retval; \ } -#define OCTAVE_CONCAT_FN2(T1, T2) \ +#define OCTAVE_CONCAT_FN2(T1, T2) \ DEFNDCATOP_FN2 (T1 ## _ ## T2 ## _s_s, T1 ## _scalar, T2 ## _scalar, , T1 ## NDArray, T1 ## _array, T2 ## _array, concat) \ DEFNDCATOP_FN2 (T1 ## _ ## T2 ## _s_m, T1 ## _scalar, T2 ## _matrix, , T1 ## NDArray, T1 ## _array, T2 ## _array, concat) \ DEFNDCATOP_FN2 (T1 ## _ ## T2 ## _m_s, T1 ## _matrix, T2 ## _scalar, , T1 ## NDArray, T1 ## _array, T2 ## _array, concat) \ DEFNDCATOP_FN2 (T1 ## _ ## T2 ## _m_m, T1 ## _matrix, T2 ## _matrix, , T1 ## NDArray, T1 ## _array, T2 ## _array, concat) -#define OCTAVE_INSTALL_CONCAT_FN2(T1, T2) \ +#define OCTAVE_INSTALL_CONCAT_FN2(T1, T2) \ INSTALL_CATOP (octave_ ## T1 ## _scalar, octave_ ## T2 ## _scalar, T1 ## _ ## T2 ## _s_s) \ INSTALL_CATOP (octave_ ## T1 ## _scalar, octave_ ## T2 ## _matrix, T1 ## _ ## T2 ## _s_m) \ INSTALL_CATOP (octave_ ## T1 ## _matrix, octave_ ## T2 ## _scalar, T1 ## _ ## T2 ## _m_s) \ INSTALL_CATOP (octave_ ## T1 ## _matrix, octave_ ## T2 ## _matrix, T1 ## _ ## T2 ## _m_m) -#define OCTAVE_DOUBLE_INT_CONCAT_FN(TYPE) \ +#define OCTAVE_DOUBLE_INT_CONCAT_FN(TYPE) \ DEFNDCATOP_FN2 (double ## _ ## TYPE ## _s_s, scalar, TYPE ## _scalar, TYPE ## NDArray, , array, TYPE ## _array, concat) \ DEFNDCATOP_FN2 (double ## _ ## TYPE ## _s_m, scalar, TYPE ## _matrix, TYPE ## NDArray, , array, TYPE ## _array, concat) \ DEFNDCATOP_FN2 (double ## _ ## TYPE ## _m_s, matrix, TYPE ## _scalar, TYPE ## NDArray, , array, TYPE ## _array, concat) \ DEFNDCATOP_FN2 (double ## _ ## TYPE ## _m_m, matrix, TYPE ## _matrix, TYPE ## NDArray, , array, TYPE ## _array, concat) -#define OCTAVE_INSTALL_DOUBLE_INT_CONCAT_FN(TYPE) \ +#define OCTAVE_INSTALL_DOUBLE_INT_CONCAT_FN(TYPE) \ INSTALL_CATOP (octave_scalar, octave_ ## TYPE ## _scalar, double ## _ ## TYPE ## _s_s) \ INSTALL_CATOP (octave_scalar, octave_ ## TYPE ## _matrix, double ## _ ## TYPE ## _s_m) \ INSTALL_CATOP (octave_matrix, octave_ ## TYPE ## _scalar, double ## _ ## TYPE ## _m_s) \ INSTALL_CATOP (octave_matrix, octave_ ## TYPE ## _matrix, double ## _ ## TYPE ## _m_m) 
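// A minimal sketch of the refill applied throughout these hunks: the "+"
// lines push each trailing backslash out to a common fill column so the
// continuation markers line up vertically.  The macro below is hypothetical
// (not taken from the Octave sources) and is shown only to illustrate the
// before/after shape of the change:
//
//   #define SUM_OF_SQUARES(dst, src, n) \
//     do \
//       { \
//         (dst) = 0; \
//         for (int i = 0; i < (n); i++) \
//           (dst) += (src)[i] * (src)[i]; \
//       } \
//     while (0)
//
//   #define SUM_OF_SQUARES(dst, src, n)           \
//     do                                          \
//       {                                         \
//         (dst) = 0;                              \
//         for (int i = 0; i < (n); i++)           \
//           (dst) += (src)[i] * (src)[i];         \
//       }                                         \
//     while (0)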
-#define OCTAVE_INT_DOUBLE_CONCAT_FN(TYPE) \ +#define OCTAVE_INT_DOUBLE_CONCAT_FN(TYPE) \ DEFNDCATOP_FN2 (TYPE ## _ ## double ## _s_s, TYPE ## _scalar, scalar, , TYPE ## NDArray, TYPE ## _array, array, concat) \ DEFNDCATOP_FN2 (TYPE ## _ ## double ## _s_m, TYPE ## _scalar, matrix, , TYPE ## NDArray, TYPE ## _array, array, concat) \ DEFNDCATOP_FN2 (TYPE ## _ ## double ## _m_s, TYPE ## _matrix, scalar, , TYPE ## NDArray, TYPE ## _array, array, concat) \ DEFNDCATOP_FN2 (TYPE ## _ ## double ## _m_m, TYPE ## _matrix, matrix, , TYPE ## NDArray, TYPE ## _array, array, concat) -#define OCTAVE_INSTALL_INT_DOUBLE_CONCAT_FN(TYPE) \ +#define OCTAVE_INSTALL_INT_DOUBLE_CONCAT_FN(TYPE) \ INSTALL_CATOP (octave_ ## TYPE ## _scalar, octave_scalar, TYPE ## _ ## double ## _s_s) \ INSTALL_CATOP (octave_ ## TYPE ## _scalar, octave_matrix, TYPE ## _ ## double ## _s_m) \ INSTALL_CATOP (octave_ ## TYPE ## _matrix, octave_scalar, TYPE ## _ ## double ## _m_s) \ INSTALL_CATOP (octave_ ## TYPE ## _matrix, octave_matrix, TYPE ## _ ## double ## _m_m) -#define OCTAVE_FLOAT_INT_CONCAT_FN(TYPE) \ +#define OCTAVE_FLOAT_INT_CONCAT_FN(TYPE) \ DEFNDCATOP_FN2 (float ## _ ## TYPE ## _s_s, float_scalar, TYPE ## _scalar, TYPE ## NDArray, , float_array, TYPE ## _array, concat) \ DEFNDCATOP_FN2 (float ## _ ## TYPE ## _s_m, float_scalar, TYPE ## _matrix, TYPE ## NDArray, , float_array, TYPE ## _array, concat) \ DEFNDCATOP_FN2 (float ## _ ## TYPE ## _m_s, float_matrix, TYPE ## _scalar, TYPE ## NDArray, , float_array, TYPE ## _array, concat) \ DEFNDCATOP_FN2 (float ## _ ## TYPE ## _m_m, float_matrix, TYPE ## _matrix, TYPE ## NDArray, , float_array, TYPE ## _array, concat) -#define OCTAVE_INSTALL_FLOAT_INT_CONCAT_FN(TYPE) \ +#define OCTAVE_INSTALL_FLOAT_INT_CONCAT_FN(TYPE) \ INSTALL_CATOP (octave_float_scalar, octave_ ## TYPE ## _scalar, float ## _ ## TYPE ## _s_s) \ INSTALL_CATOP (octave_float_scalar, octave_ ## TYPE ## _matrix, float ## _ ## TYPE ## _s_m) \ INSTALL_CATOP (octave_float_matrix, octave_ ## TYPE ## _scalar, float ## _ ## TYPE ## _m_s) \ INSTALL_CATOP (octave_float_matrix, octave_ ## TYPE ## _matrix, float ## _ ## TYPE ## _m_m) -#define OCTAVE_INT_FLOAT_CONCAT_FN(TYPE) \ +#define OCTAVE_INT_FLOAT_CONCAT_FN(TYPE) \ DEFNDCATOP_FN2 (TYPE ## _ ## float ## _s_s, TYPE ## _scalar, float_scalar, , TYPE ## NDArray, TYPE ## _array, float_array, concat) \ DEFNDCATOP_FN2 (TYPE ## _ ## float ## _s_m, TYPE ## _scalar, float_matrix, , TYPE ## NDArray, TYPE ## _array, float_array, concat) \ DEFNDCATOP_FN2 (TYPE ## _ ## float ## _m_s, TYPE ## _matrix, float_scalar, , TYPE ## NDArray, TYPE ## _array, float_array, concat) \ DEFNDCATOP_FN2 (TYPE ## _ ## float ## _m_m, TYPE ## _matrix, float_matrix, , TYPE ## NDArray, TYPE ## _array, float_array, concat) -#define OCTAVE_INSTALL_INT_FLOAT_CONCAT_FN(TYPE) \ +#define OCTAVE_INSTALL_INT_FLOAT_CONCAT_FN(TYPE) \ INSTALL_CATOP (octave_ ## TYPE ## _scalar, octave_float_scalar, TYPE ## _ ## float ## _s_s) \ INSTALL_CATOP (octave_ ## TYPE ## _scalar, octave_float_matrix, TYPE ## _ ## float ## _s_m) \ INSTALL_CATOP (octave_ ## TYPE ## _matrix, octave_float_scalar, TYPE ## _ ## float ## _m_s) \ @@ -137,248 +137,248 @@ // For compatibility, concatenation with a character always returns a // character. 
-#define OCTAVE_CHAR_INT_CONCAT_FN(TYPE) \ +#define OCTAVE_CHAR_INT_CONCAT_FN(TYPE) \ DEFNDCHARCATOP_FN (char ## _ ## TYPE ## _m_s, char_matrix, TYPE ## _scalar, concat) \ DEFNDCHARCATOP_FN (char ## _ ## TYPE ## _m_m, char_matrix, TYPE ## _matrix, concat) -#define OCTAVE_INSTALL_CHAR_INT_CONCAT_FN(TYPE) \ +#define OCTAVE_INSTALL_CHAR_INT_CONCAT_FN(TYPE) \ INSTALL_CATOP (octave_char_matrix_str, octave_ ## TYPE ## _scalar, char ## _ ## TYPE ## _m_s) \ INSTALL_CATOP (octave_char_matrix_str, octave_ ## TYPE ## _matrix, char ## _ ## TYPE ## _m_m) \ INSTALL_CATOP (octave_char_matrix_sq_str, octave_ ## TYPE ## _scalar, char ## _ ## TYPE ## _m_s) \ INSTALL_CATOP (octave_char_matrix_sq_str, octave_ ## TYPE ## _matrix, char ## _ ## TYPE ## _m_m) -#define OCTAVE_INT_CHAR_CONCAT_FN(TYPE) \ +#define OCTAVE_INT_CHAR_CONCAT_FN(TYPE) \ DEFNDCHARCATOP_FN (TYPE ## _ ## char ## _s_m, TYPE ## _scalar, char_matrix, concat) \ DEFNDCHARCATOP_FN (TYPE ## _ ## char ## _m_m, TYPE ## _matrix, char_matrix, concat) -#define OCTAVE_INSTALL_INT_CHAR_CONCAT_FN(TYPE) \ +#define OCTAVE_INSTALL_INT_CHAR_CONCAT_FN(TYPE) \ INSTALL_CATOP (octave_ ## TYPE ## _scalar, octave_char_matrix_str, TYPE ## _ ## char ## _s_m) \ INSTALL_CATOP (octave_ ## TYPE ## _matrix, octave_char_matrix_str, TYPE ## _ ## char ## _m_m) \ INSTALL_CATOP (octave_ ## TYPE ## _scalar, octave_char_matrix_sq_str, TYPE ## _ ## char ## _s_m) \ INSTALL_CATOP (octave_ ## TYPE ## _matrix, octave_char_matrix_sq_str, TYPE ## _ ## char ## _m_m) -#define OCTAVE_CONCAT_FN(TYPE) \ +#define OCTAVE_CONCAT_FN(TYPE) \ DEFNDCATOP_FN (TYPE ## _s_s, TYPE ## _scalar, TYPE ## _scalar, TYPE ## _array, TYPE ## _array, concat) \ DEFNDCATOP_FN (TYPE ## _s_m, TYPE ## _scalar, TYPE ## _matrix, TYPE ## _array, TYPE ## _array, concat) \ DEFNDCATOP_FN (TYPE ## _m_s, TYPE ## _matrix, TYPE ## _scalar, TYPE ## _array, TYPE ## _array, concat) \ DEFNDCATOP_FN (TYPE ## _m_m, TYPE ## _matrix, TYPE ## _matrix, TYPE ## _array, TYPE ## _array, concat) -#define OCTAVE_INSTALL_CONCAT_FN(TYPE) \ +#define OCTAVE_INSTALL_CONCAT_FN(TYPE) \ INSTALL_CATOP (octave_ ## TYPE ## _scalar, octave_ ## TYPE ## _scalar, TYPE ## _s_s) \ INSTALL_CATOP (octave_ ## TYPE ## _scalar, octave_ ## TYPE ## _matrix, TYPE ## _s_m) \ INSTALL_CATOP (octave_ ## TYPE ## _matrix, octave_ ## TYPE ## _scalar, TYPE ## _m_s) \ INSTALL_CATOP (octave_ ## TYPE ## _matrix, octave_ ## TYPE ## _matrix, TYPE ## _m_m) // scalar unary ops. -#define OCTAVE_S_INT_UNOPS(TYPE) \ - \ - DEFUNOP_OP (s_not, TYPE ## _scalar, !) \ - DEFUNOP_OP (s_uplus, TYPE ## _scalar, /* no-op */) \ - DEFUNOP (s_uminus, TYPE ## _scalar) \ - { \ +#define OCTAVE_S_INT_UNOPS(TYPE) \ + \ + DEFUNOP_OP (s_not, TYPE ## _scalar, !) \ + DEFUNOP_OP (s_uplus, TYPE ## _scalar, /* no-op */) \ + DEFUNOP (s_uminus, TYPE ## _scalar) \ + { \ const octave_ ## TYPE ## _scalar & v = dynamic_cast (a); \ octave_value retval = octave_value (- v. TYPE ## _scalar_value ()); \ - return retval; \ - } \ - DEFUNOP_OP (s_transpose, TYPE ## _scalar, /* no-op */) \ - DEFUNOP_OP (s_hermitian, TYPE ## _scalar, /* no-op */) \ - \ - DEFNCUNOP_METHOD (s_incr, TYPE ## _scalar, increment) \ + return retval; \ + } \ + DEFUNOP_OP (s_transpose, TYPE ## _scalar, /* no-op */) \ + DEFUNOP_OP (s_hermitian, TYPE ## _scalar, /* no-op */) \ + \ + DEFNCUNOP_METHOD (s_incr, TYPE ## _scalar, increment) \ DEFNCUNOP_METHOD (s_decr, TYPE ## _scalar, decrement) // scalar by scalar ops. 
-#define OCTAVE_SS_INT_ARITH_OPS(PFX, T1, T2, T3) \ - \ - DEFINTBINOP_OP (PFX ## _add, T1 ## scalar, T2 ## scalar, +, T3) \ - DEFINTBINOP_OP (PFX ## _sub, T1 ## scalar, T2 ## scalar, -, T3) \ - DEFINTBINOP_OP (PFX ## _mul, T1 ## scalar, T2 ## scalar, *, T3) \ - \ - DEFBINOP (PFX ## _div, T1 ## scalar, T2 ## scalar) \ - { \ +#define OCTAVE_SS_INT_ARITH_OPS(PFX, T1, T2, T3) \ + \ + DEFINTBINOP_OP (PFX ## _add, T1 ## scalar, T2 ## scalar, +, T3) \ + DEFINTBINOP_OP (PFX ## _sub, T1 ## scalar, T2 ## scalar, -, T3) \ + DEFINTBINOP_OP (PFX ## _mul, T1 ## scalar, T2 ## scalar, *, T3) \ + \ + DEFBINOP (PFX ## _div, T1 ## scalar, T2 ## scalar) \ + { \ const octave_ ## T1 ## scalar& v1 = dynamic_cast (a1); \ const octave_ ## T2 ## scalar& v2 = dynamic_cast (a2); \ - \ - if (! v2.T2 ## scalar_value ()) \ - warn_divide_by_zero (); \ - \ + \ + if (! v2.T2 ## scalar_value ()) \ + warn_divide_by_zero (); \ + \ octave_value retval = octave_value (v1.T1 ## scalar_value () / v2.T2 ## scalar_value ()); \ - return retval; \ - } \ - \ + return retval; \ + } \ + \ DEFINTBINOP_FN (PFX ## _pow, T1 ## scalar, T2 ## scalar, xpow, T3, ^) \ - \ - DEFBINOP (PFX ## _ldiv, T1 ## scalar, T2 ## scalar) \ - { \ + \ + DEFBINOP (PFX ## _ldiv, T1 ## scalar, T2 ## scalar) \ + { \ const octave_ ## T1 ## scalar& v1 = dynamic_cast (a1); \ const octave_ ## T2 ## scalar& v2 = dynamic_cast (a2); \ - \ - if (! v1.T1 ## scalar_value ()) \ - warn_divide_by_zero (); \ - \ + \ + if (! v1.T1 ## scalar_value ()) \ + warn_divide_by_zero (); \ + \ octave_value retval = octave_value (v2.T2 ## scalar_value () / v1.T1 ## scalar_value ()); \ - return retval; \ - } \ - \ - DEFINTBINOP_OP (PFX ## _el_mul, T1 ## scalar, T2 ## scalar, *, T3) \ - \ - DEFBINOP (PFX ## _el_div, T1 ## scalar, T2 ## scalar) \ - { \ + return retval; \ + } \ + \ + DEFINTBINOP_OP (PFX ## _el_mul, T1 ## scalar, T2 ## scalar, *, T3) \ + \ + DEFBINOP (PFX ## _el_div, T1 ## scalar, T2 ## scalar) \ + { \ const octave_ ## T1 ## scalar& v1 = dynamic_cast (a1); \ const octave_ ## T2 ## scalar& v2 = dynamic_cast (a2); \ - \ - if (! v2.T2 ## scalar_value ()) \ - warn_divide_by_zero (); \ - \ + \ + if (! v2.T2 ## scalar_value ()) \ + warn_divide_by_zero (); \ + \ octave_value retval = octave_value (v1.T1 ## scalar_value () / v2.T2 ## scalar_value ()); \ - return retval; \ - } \ - \ + return retval; \ + } \ + \ DEFINTBINOP_FN (PFX ## _el_pow, T1 ## scalar, T2 ## scalar, xpow, T3, .^) \ - \ - DEFBINOP (PFX ## _el_ldiv, T1 ## scalar, T2 ## scalar) \ - { \ + \ + DEFBINOP (PFX ## _el_ldiv, T1 ## scalar, T2 ## scalar) \ + { \ const octave_ ## T1 ## scalar& v1 = dynamic_cast (a1); \ const octave_ ## T2 ## scalar& v2 = dynamic_cast (a2); \ - \ - if (! v1.T1 ## scalar_value ()) \ - warn_divide_by_zero (); \ - \ + \ + if (! 
v1.T1 ## scalar_value ()) \ + warn_divide_by_zero (); \ + \ octave_value retval = octave_value (v2.T2 ## scalar_value () / v1.T1 ## scalar_value ()); \ - return retval; \ + return retval; \ } -#define OCTAVE_SS_INT_BOOL_OPS(PFX, T1, T2, Z1, Z2) \ - DEFBINOP (PFX ## _el_and, T2, T2) \ - { \ +#define OCTAVE_SS_INT_BOOL_OPS(PFX, T1, T2, Z1, Z2) \ + DEFBINOP (PFX ## _el_and, T2, T2) \ + { \ const octave_ ## T1 ## scalar& v1 = dynamic_cast (a1); \ const octave_ ## T2 ## scalar& v2 = dynamic_cast (a2); \ - \ + \ return v1.T1 ## scalar_value () != Z1 && v2.T2 ## scalar_value () != Z2; \ - } \ - \ - DEFBINOP (PFX ## _el_or, T1, T2) \ - { \ + } \ + \ + DEFBINOP (PFX ## _el_or, T1, T2) \ + { \ const octave_ ## T1 ## scalar& v1 = dynamic_cast (a1); \ const octave_ ## T2 ## scalar& v2 = dynamic_cast (a2); \ - \ + \ return v1.T1 ## scalar_value () != Z1 || v2.T2 ## scalar_value () != Z2; \ } -#define OCTAVE_SS_INT_CMP_OPS(PFX, T1, T2) \ - DEFBINOP_OP (PFX ## _lt, T1 ## scalar, T2 ## scalar, <) \ - DEFBINOP_OP (PFX ## _le, T1 ## scalar, T2 ## scalar, <=) \ - DEFBINOP_OP (PFX ## _eq, T1 ## scalar, T2 ## scalar, ==) \ - DEFBINOP_OP (PFX ## _ge, T1 ## scalar, T2 ## scalar, >=) \ - DEFBINOP_OP (PFX ## _gt, T1 ## scalar, T2 ## scalar, >) \ +#define OCTAVE_SS_INT_CMP_OPS(PFX, T1, T2) \ + DEFBINOP_OP (PFX ## _lt, T1 ## scalar, T2 ## scalar, <) \ + DEFBINOP_OP (PFX ## _le, T1 ## scalar, T2 ## scalar, <=) \ + DEFBINOP_OP (PFX ## _eq, T1 ## scalar, T2 ## scalar, ==) \ + DEFBINOP_OP (PFX ## _ge, T1 ## scalar, T2 ## scalar, >=) \ + DEFBINOP_OP (PFX ## _gt, T1 ## scalar, T2 ## scalar, >) \ DEFBINOP_OP (PFX ## _ne, T1 ## scalar, T2 ## scalar, !=) -#define OCTAVE_SS_POW_OPS(T1, T2) \ - octave_value \ +#define OCTAVE_SS_POW_OPS(T1, T2) \ + octave_value \ xpow (const octave_ ## T1& a, const octave_ ## T2& b) \ - { \ - return pow (a, b); \ - } \ - \ - octave_value \ - xpow (const octave_ ## T1& a, double b) \ - { \ - return pow (a, b); \ - } \ - \ - octave_value \ - xpow (double a, const octave_ ## T1& b) \ - { \ - return pow (a, b); \ - } \ - \ - octave_value \ - xpow (const octave_ ## T1& a, float b) \ - { \ - return powf (a, b); \ - } \ - \ - octave_value \ - xpow (float a, const octave_ ## T1& b) \ - { \ - return powf (a, b); \ + { \ + return pow (a, b); \ + } \ + \ + octave_value \ + xpow (const octave_ ## T1& a, double b) \ + { \ + return pow (a, b); \ + } \ + \ + octave_value \ + xpow (double a, const octave_ ## T1& b) \ + { \ + return pow (a, b); \ + } \ + \ + octave_value \ + xpow (const octave_ ## T1& a, float b) \ + { \ + return powf (a, b); \ + } \ + \ + octave_value \ + xpow (float a, const octave_ ## T1& b) \ + { \ + return powf (a, b); \ } -#define OCTAVE_SS_INT_OPS(TYPE) \ - OCTAVE_S_INT_UNOPS (TYPE) \ - OCTAVE_SS_POW_OPS (TYPE, TYPE) \ - OCTAVE_SS_INT_ARITH_OPS (ss, TYPE ## _, TYPE ## _, TYPE) \ - OCTAVE_SS_INT_ARITH_OPS (ssx, TYPE ## _, , TYPE) \ - OCTAVE_SS_INT_ARITH_OPS (sxs, , TYPE ## _, TYPE) \ - OCTAVE_SS_INT_ARITH_OPS (ssfx, TYPE ## _, float_, TYPE) \ - OCTAVE_SS_INT_ARITH_OPS (sfxs, float_, TYPE ## _, TYPE) \ - OCTAVE_SS_INT_CMP_OPS (ss, TYPE ## _, TYPE ## _) \ - OCTAVE_SS_INT_CMP_OPS (sx, TYPE ## _, ) \ - OCTAVE_SS_INT_CMP_OPS (xs, , TYPE ## _) \ - OCTAVE_SS_INT_CMP_OPS (sfx, TYPE ## _, float_) \ - OCTAVE_SS_INT_CMP_OPS (fxs, float_, TYPE ## _) \ +#define OCTAVE_SS_INT_OPS(TYPE) \ + OCTAVE_S_INT_UNOPS (TYPE) \ + OCTAVE_SS_POW_OPS (TYPE, TYPE) \ + OCTAVE_SS_INT_ARITH_OPS (ss, TYPE ## _, TYPE ## _, TYPE) \ + OCTAVE_SS_INT_ARITH_OPS (ssx, TYPE ## _, , TYPE) \ + OCTAVE_SS_INT_ARITH_OPS (sxs, , TYPE 
## _, TYPE) \ + OCTAVE_SS_INT_ARITH_OPS (ssfx, TYPE ## _, float_, TYPE) \ + OCTAVE_SS_INT_ARITH_OPS (sfxs, float_, TYPE ## _, TYPE) \ + OCTAVE_SS_INT_CMP_OPS (ss, TYPE ## _, TYPE ## _) \ + OCTAVE_SS_INT_CMP_OPS (sx, TYPE ## _, ) \ + OCTAVE_SS_INT_CMP_OPS (xs, , TYPE ## _) \ + OCTAVE_SS_INT_CMP_OPS (sfx, TYPE ## _, float_) \ + OCTAVE_SS_INT_CMP_OPS (fxs, float_, TYPE ## _) \ OCTAVE_SS_INT_BOOL_OPS (ss, TYPE ## _, TYPE ## _, octave_ ## TYPE (0), octave_ ## TYPE (0)) \ - OCTAVE_SS_INT_BOOL_OPS (sx, TYPE ## _, , octave_ ## TYPE (0), 0) \ - OCTAVE_SS_INT_BOOL_OPS (xs, , TYPE ## _, 0, octave_ ## TYPE (0)) \ + OCTAVE_SS_INT_BOOL_OPS (sx, TYPE ## _, , octave_ ## TYPE (0), 0) \ + OCTAVE_SS_INT_BOOL_OPS (xs, , TYPE ## _, 0, octave_ ## TYPE (0)) \ OCTAVE_SS_INT_BOOL_OPS (sfx, TYPE ## _, float_, octave_ ## TYPE (0), 0) \ OCTAVE_SS_INT_BOOL_OPS (fxs, float_, TYPE ## _, 0, octave_ ## TYPE (0)) // scalar by matrix ops. -#define OCTAVE_SM_INT_ARITH_OPS(PFX, TS, TM, TI) \ - \ +#define OCTAVE_SM_INT_ARITH_OPS(PFX, TS, TM, TI) \ + \ DEFINTNDBINOP_OP (PFX ## _add, TS ## scalar, TM ## matrix, TS ## scalar, TM ## array, +, TI) \ DEFINTNDBINOP_OP (PFX ## _sub, TS ## scalar, TM ## matrix, TS ## scalar, TM ## array, -, TI) \ DEFINTNDBINOP_OP (PFX ## _mul, TS ## scalar, TM ## matrix, TS ## scalar, TM ## array, *, TI) \ - \ - /* DEFBINOP (PFX ## _div, TS ## scalar, TM ## matrix) */ \ - /* { */ \ + \ + /* DEFBINOP (PFX ## _div, TS ## scalar, TM ## matrix) */ \ + /* { */ \ /* const octave_ ## TS ## scalar& v1 = dynamic_cast (a1); */ \ /* const octave_ ## TM ## matrix& v2 = dynamic_cast (a2); */ \ - /* */ \ - /* Matrix m1 = v1.TM ## matrix_value (); */ \ - /* Matrix m2 = v2.TM ## matrix_value (); */ \ - /* */ \ - /* return octave_value (xdiv (m1, m2)); */ \ - /* } */ \ - \ - /* DEFBINOP_FN (PFX ## _pow, TS ## scalar, TM ## matrix, xpow) */ \ - \ - DEFBINOP (PFX ## _ldiv, TS ## scalar, TM ## matrix) \ - { \ - const octave_ ## TS ## scalar& v1 = dynamic_cast (a1); \ - const octave_ ## TM ## matrix& v2 = dynamic_cast (a2); \ - \ - if (! v1.TS ## scalar_value ()) \ - warn_divide_by_zero (); \ - \ - octave_value retval = octave_value (v2.TS ## scalar_value () / v1.TS ## scalar_value ()); \ - return retval; \ - } \ - \ + /* */ \ + /* Matrix m1 = v1.TM ## matrix_value (); */ \ + /* Matrix m2 = v2.TM ## matrix_value (); */ \ + /* */ \ + /* return octave_value (xdiv (m1, m2)); */ \ + /* } */ \ + \ + /* DEFBINOP_FN (PFX ## _pow, TS ## scalar, TM ## matrix, xpow) */ \ + \ + DEFBINOP (PFX ## _ldiv, TS ## scalar, TM ## matrix) \ + { \ + const octave_ ## TS ## scalar& v1 = dynamic_cast (a1); \ + const octave_ ## TM ## matrix& v2 = dynamic_cast (a2); \ + \ + if (! 
v1.TS ## scalar_value ()) \ + warn_divide_by_zero (); \ + \ + octave_value retval = octave_value (v2.TS ## scalar_value () / v1.TS ## scalar_value ()); \ + return retval; \ + } \ + \ DEFINTNDBINOP_OP (PFX ## _el_mul, TS ## scalar, TM ## matrix, TS ## scalar, TM ## array, *, TI) \ - DEFBINOP (PFX ## _el_div, TS ## scalar, TM ## matrix) \ - { \ - const octave_ ## TS ## scalar& v1 = dynamic_cast (a1); \ - const octave_ ## TM ## matrix& v2 = dynamic_cast (a2); \ - \ - octave_value retval = octave_value (v1.TS ## scalar_value () / v2.TM ## array_value ()); \ - return retval; \ - } \ - \ + DEFBINOP (PFX ## _el_div, TS ## scalar, TM ## matrix) \ + { \ + const octave_ ## TS ## scalar& v1 = dynamic_cast (a1); \ + const octave_ ## TM ## matrix& v2 = dynamic_cast (a2); \ + \ + octave_value retval = octave_value (v1.TS ## scalar_value () / v2.TM ## array_value ()); \ + return retval; \ + } \ + \ DEFINTNDBINOP_FN (PFX ## _el_pow, TS ## scalar, TM ## matrix, TS ## scalar, TM ## array, elem_xpow, TI, .^) \ - \ - DEFBINOP (PFX ## _el_ldiv, TS ## scalar, TM ## matrix) \ - { \ - const octave_ ## TS ## scalar& v1 = dynamic_cast (a1); \ - const octave_ ## TM ## matrix& v2 = dynamic_cast (a2); \ - \ - if (! v1.TS ## scalar_value ()) \ - warn_divide_by_zero (); \ - \ - octave_value retval = octave_value (v2.TM ## array_value () / v1.TS ## scalar_value ()); \ - return retval; \ - } + \ + DEFBINOP (PFX ## _el_ldiv, TS ## scalar, TM ## matrix) \ + { \ + const octave_ ## TS ## scalar& v1 = dynamic_cast (a1); \ + const octave_ ## TM ## matrix& v2 = dynamic_cast (a2); \ + \ + if (! v1.TS ## scalar_value ()) \ + warn_divide_by_zero (); \ + \ + octave_value retval = octave_value (v2.TM ## array_value () / v1.TS ## scalar_value ()); \ + return retval; \ + } -#define OCTAVE_SM_INT_CMP_OPS(PFX, TS, TM) \ +#define OCTAVE_SM_INT_CMP_OPS(PFX, TS, TM) \ DEFNDBINOP_FN (PFX ## _lt, TS ## scalar, TM ## matrix, TS ## scalar, TM ## array, mx_el_lt) \ DEFNDBINOP_FN (PFX ## _le, TS ## scalar, TM ## matrix, TS ## scalar, TM ## array, mx_el_le) \ DEFNDBINOP_FN (PFX ## _eq, TS ## scalar, TM ## matrix, TS ## scalar, TM ## array, mx_el_eq) \ @@ -386,160 +386,160 @@ DEFNDBINOP_FN (PFX ## _gt, TS ## scalar, TM ## matrix, TS ## scalar, TM ## array, mx_el_gt) \ DEFNDBINOP_FN (PFX ## _ne, TS ## scalar, TM ## matrix, TS ## scalar, TM ## array, mx_el_ne) -#define OCTAVE_SM_INT_BOOL_OPS(PFX, TS, TM) \ +#define OCTAVE_SM_INT_BOOL_OPS(PFX, TS, TM) \ DEFNDBINOP_FN (PFX ## _el_and, TS ## scalar, TM ## matrix, TS ## scalar, TM ## array, mx_el_and) \ DEFNDBINOP_FN (PFX ## _el_or, TS ## scalar, TM ## matrix, TS ## scalar, TM ## array, mx_el_or) \ DEFNDBINOP_FN (PFX ## _el_and_not, TS ## scalar, TM ## matrix, TS ## scalar, TM ## array, mx_el_and_not) \ DEFNDBINOP_FN (PFX ## _el_or_not, TS ## scalar, TM ## matrix, TS ## scalar, TM ## array, mx_el_or_not) -#define OCTAVE_SM_POW_OPS(T1, T2) \ - octave_value \ - elem_xpow (const octave_ ## T1& a, const T2 ## NDArray& b) \ - { \ - T2 ## NDArray result (b.dims ()); \ - for (int i = 0; i < b.numel (); i++) \ - { \ - OCTAVE_QUIT; \ - result (i) = pow (a, b(i)); \ - } \ - return octave_value (result); \ - } \ -\ - octave_value \ - elem_xpow (const octave_ ## T1& a, const NDArray& b) \ - { \ - T1 ## NDArray result (b.dims ()); \ - for (int i = 0; i < b.numel (); i++) \ - { \ - OCTAVE_QUIT; \ - result (i) = pow (a, b(i)); \ - } \ - return octave_value (result); \ - } \ - \ - octave_value \ - elem_xpow (double a, const T2 ## NDArray& b) \ - { \ - T2 ## NDArray result (b.dims ()); \ - for (int i = 0; i < b.numel (); 
i++) \ - { \ - OCTAVE_QUIT; \ - result (i) = pow (a, b(i)); \ - } \ - return octave_value (result); \ - } \ -\ - octave_value \ - elem_xpow (const octave_ ## T1& a, const FloatNDArray& b) \ - { \ - T1 ## NDArray result (b.dims ()); \ - for (int i = 0; i < b.numel (); i++) \ - { \ - OCTAVE_QUIT; \ - result (i) = powf (a, b(i)); \ - } \ - return octave_value (result); \ - } \ - \ - octave_value \ - elem_xpow (float a, const T2 ## NDArray& b) \ - { \ - T2 ## NDArray result (b.dims ()); \ - for (int i = 0; i < b.numel (); i++) \ - { \ - OCTAVE_QUIT; \ - result (i) = powf (a, b(i)); \ - } \ - return octave_value (result); \ +#define OCTAVE_SM_POW_OPS(T1, T2) \ + octave_value \ + elem_xpow (const octave_ ## T1& a, const T2 ## NDArray& b) \ + { \ + T2 ## NDArray result (b.dims ()); \ + for (int i = 0; i < b.numel (); i++) \ + { \ + OCTAVE_QUIT; \ + result (i) = pow (a, b(i)); \ + } \ + return octave_value (result); \ + } \ + \ + octave_value \ + elem_xpow (const octave_ ## T1& a, const NDArray& b) \ + { \ + T1 ## NDArray result (b.dims ()); \ + for (int i = 0; i < b.numel (); i++) \ + { \ + OCTAVE_QUIT; \ + result (i) = pow (a, b(i)); \ + } \ + return octave_value (result); \ + } \ + \ + octave_value \ + elem_xpow (double a, const T2 ## NDArray& b) \ + { \ + T2 ## NDArray result (b.dims ()); \ + for (int i = 0; i < b.numel (); i++) \ + { \ + OCTAVE_QUIT; \ + result (i) = pow (a, b(i)); \ + } \ + return octave_value (result); \ + } \ + \ + octave_value \ + elem_xpow (const octave_ ## T1& a, const FloatNDArray& b) \ + { \ + T1 ## NDArray result (b.dims ()); \ + for (int i = 0; i < b.numel (); i++) \ + { \ + OCTAVE_QUIT; \ + result (i) = powf (a, b(i)); \ + } \ + return octave_value (result); \ + } \ + \ + octave_value \ + elem_xpow (float a, const T2 ## NDArray& b) \ + { \ + T2 ## NDArray result (b.dims ()); \ + for (int i = 0; i < b.numel (); i++) \ + { \ + OCTAVE_QUIT; \ + result (i) = powf (a, b(i)); \ + } \ + return octave_value (result); \ } -#define OCTAVE_SM_CONV(TS, TM) \ - DEFCONV (TS ## s_ ## TM ## m_conv, TM ## scalar, TM ## matrix) \ - { \ +#define OCTAVE_SM_CONV(TS, TM) \ + DEFCONV (TS ## s_ ## TM ## m_conv, TM ## scalar, TM ## matrix) \ + { \ const octave_ ## TS ## scalar& v = dynamic_cast (a); \ - \ - return new octave_ ## TM ## matrix (v.TM ## array_value ()); \ + \ + return new octave_ ## TM ## matrix (v.TM ## array_value ()); \ } -#define OCTAVE_SM_INT_OPS(TYPE) \ - OCTAVE_SM_POW_OPS (TYPE, TYPE) \ - OCTAVE_SM_INT_ARITH_OPS (sm, TYPE ## _, TYPE ## _, TYPE) \ - OCTAVE_SM_INT_ARITH_OPS (smx, TYPE ## _, , TYPE) \ - OCTAVE_SM_INT_ARITH_OPS (sxm, , TYPE ## _, TYPE) \ - OCTAVE_SM_INT_ARITH_OPS (smfx, TYPE ## _, float_, TYPE) \ - OCTAVE_SM_INT_ARITH_OPS (sfxm, float_, TYPE ## _, TYPE) \ - OCTAVE_SM_INT_CMP_OPS (sm, TYPE ## _, TYPE ## _) \ - OCTAVE_SM_INT_CMP_OPS (xm, , TYPE ## _) \ - OCTAVE_SM_INT_CMP_OPS (smx, TYPE ## _, ) \ - OCTAVE_SM_INT_CMP_OPS (fxm, float_, TYPE ## _) \ - OCTAVE_SM_INT_CMP_OPS (smfx, TYPE ## _, float_) \ - OCTAVE_SM_INT_BOOL_OPS (sm, TYPE ## _, TYPE ## _) \ - OCTAVE_SM_INT_BOOL_OPS (xm, , TYPE ## _) \ - OCTAVE_SM_INT_BOOL_OPS (smx, TYPE ## _, ) \ - OCTAVE_SM_INT_BOOL_OPS (fxm, float_, TYPE ## _) \ - OCTAVE_SM_INT_BOOL_OPS (smfx, TYPE ## _, float_) \ - OCTAVE_SM_CONV (TYPE ## _, TYPE ## _) \ - OCTAVE_SM_CONV (TYPE ## _, complex_) \ +#define OCTAVE_SM_INT_OPS(TYPE) \ + OCTAVE_SM_POW_OPS (TYPE, TYPE) \ + OCTAVE_SM_INT_ARITH_OPS (sm, TYPE ## _, TYPE ## _, TYPE) \ + OCTAVE_SM_INT_ARITH_OPS (smx, TYPE ## _, , TYPE) \ + OCTAVE_SM_INT_ARITH_OPS (sxm, , TYPE ## _, TYPE) \ + 
OCTAVE_SM_INT_ARITH_OPS (smfx, TYPE ## _, float_, TYPE) \ + OCTAVE_SM_INT_ARITH_OPS (sfxm, float_, TYPE ## _, TYPE) \ + OCTAVE_SM_INT_CMP_OPS (sm, TYPE ## _, TYPE ## _) \ + OCTAVE_SM_INT_CMP_OPS (xm, , TYPE ## _) \ + OCTAVE_SM_INT_CMP_OPS (smx, TYPE ## _, ) \ + OCTAVE_SM_INT_CMP_OPS (fxm, float_, TYPE ## _) \ + OCTAVE_SM_INT_CMP_OPS (smfx, TYPE ## _, float_) \ + OCTAVE_SM_INT_BOOL_OPS (sm, TYPE ## _, TYPE ## _) \ + OCTAVE_SM_INT_BOOL_OPS (xm, , TYPE ## _) \ + OCTAVE_SM_INT_BOOL_OPS (smx, TYPE ## _, ) \ + OCTAVE_SM_INT_BOOL_OPS (fxm, float_, TYPE ## _) \ + OCTAVE_SM_INT_BOOL_OPS (smfx, TYPE ## _, float_) \ + OCTAVE_SM_CONV (TYPE ## _, TYPE ## _) \ + OCTAVE_SM_CONV (TYPE ## _, complex_) \ OCTAVE_SM_CONV (TYPE ## _, float_complex_) // matrix by scalar ops. -#define OCTAVE_MS_INT_ARITH_OPS(PFX, TM, TS, TI) \ - \ +#define OCTAVE_MS_INT_ARITH_OPS(PFX, TM, TS, TI) \ + \ DEFINTNDBINOP_OP (PFX ## _add, TM ## matrix, TS ## scalar, TM ## array, TS ## scalar, +, TI) \ DEFINTNDBINOP_OP (PFX ## _sub, TM ## matrix, TS ## scalar, TM ## array, TS ## scalar, -, TI) \ DEFINTNDBINOP_OP (PFX ## _mul, TM ## matrix, TS ## scalar, TM ## array, TS ## scalar, *, TI) \ - \ - DEFBINOP (PFX ## _div, TM ## matrix, TS ## scalar) \ - { \ + \ + DEFBINOP (PFX ## _div, TM ## matrix, TS ## scalar) \ + { \ const octave_ ## TM ## matrix& v1 = dynamic_cast (a1); \ const octave_ ## TS ## scalar& v2 = dynamic_cast (a2); \ - \ - if (! v2.TS ## scalar_value ()) \ - warn_divide_by_zero (); \ - \ + \ + if (! v2.TS ## scalar_value ()) \ + warn_divide_by_zero (); \ + \ octave_value retval = octave_value (v1.TM ## array_value () / v2.TS ## scalar_value ()); \ - return retval; \ - } \ - \ - /* DEFBINOP_FN (PFX ## _pow, TM ## matrix, TS ## scalar, xpow) */ \ - \ - /* DEFBINOP (PFX ## _ldiv, TM ## matrix, TS ## scalar) */ \ - /* { */ \ + return retval; \ + } \ + \ + /* DEFBINOP_FN (PFX ## _pow, TM ## matrix, TS ## scalar, xpow) */ \ + \ + /* DEFBINOP (PFX ## _ldiv, TM ## matrix, TS ## scalar) */ \ + /* { */ \ /* const octave_ ## TM ## matrix& v1 = dynamic_cast (a1); */ \ /* const octave_ ## TS ## scalar& v2 = dynamic_cast (a2); */ \ - /* */ \ - /* Matrix m1 = v1.TM ## matrix_value (); */ \ - /* Matrix m2 = v2.TM ## matrix_value (); */ \ - /* */ \ - /* return octave_value (xleftdiv (m1, m2)); */ \ - /* } */ \ - \ + /* */ \ + /* Matrix m1 = v1.TM ## matrix_value (); */ \ + /* Matrix m2 = v2.TM ## matrix_value (); */ \ + /* */ \ + /* return octave_value (xleftdiv (m1, m2)); */ \ + /* } */ \ + \ DEFINTNDBINOP_OP (PFX ## _el_mul, TM ## matrix, TS ## scalar, TM ## array, TS ## scalar, *, TI) \ - \ - DEFBINOP (PFX ## _el_div, TM ## matrix, TS ## scalar) \ - { \ + \ + DEFBINOP (PFX ## _el_div, TM ## matrix, TS ## scalar) \ + { \ const octave_ ## TM ## matrix& v1 = dynamic_cast (a1); \ const octave_ ## TS ## scalar& v2 = dynamic_cast (a2); \ - \ - if (! v2.TS ## scalar_value ()) \ - warn_divide_by_zero (); \ - \ + \ + if (! 
v2.TS ## scalar_value ()) \ + warn_divide_by_zero (); \ + \ octave_value retval = octave_value (v1.TM ## array_value () / v2.TS ## scalar_value ()); \ - return retval; \ - } \ - \ + return retval; \ + } \ + \ DEFINTNDBINOP_FN (PFX ## _el_pow, TM ## matrix, TS ## scalar, TM ## array, TS ## scalar, elem_xpow, TI, .^) \ - \ - DEFBINOP (PFX ## _el_ldiv, TM ## matrix, TS ## scalar) \ - { \ + \ + DEFBINOP (PFX ## _el_ldiv, TM ## matrix, TS ## scalar) \ + { \ const octave_ ## TM ## matrix& v1 = dynamic_cast (a1); \ const octave_ ## TS ## scalar& v2 = dynamic_cast (a2); \ - \ + \ octave_value retval = v2.TS ## scalar_value () / v1.TM ## array_value (); \ - return retval; \ + return retval; \ } -#define OCTAVE_MS_INT_CMP_OPS(PFX, TM, TS) \ +#define OCTAVE_MS_INT_CMP_OPS(PFX, TM, TS) \ DEFNDBINOP_FN (PFX ## _lt, TM ## matrix, TS ## scalar, TM ## array, TS ## scalar, mx_el_lt) \ DEFNDBINOP_FN (PFX ## _le, TM ## matrix, TS ## scalar, TM ## array, TS ## scalar, mx_el_le) \ DEFNDBINOP_FN (PFX ## _eq, TM ## matrix, TS ## scalar, TM ## array, TS ## scalar, mx_el_eq) \ @@ -547,157 +547,157 @@ DEFNDBINOP_FN (PFX ## _gt, TM ## matrix, TS ## scalar, TM ## array, TS ## scalar, mx_el_gt) \ DEFNDBINOP_FN (PFX ## _ne, TM ## matrix, TS ## scalar, TM ## array, TS ## scalar, mx_el_ne) -#define OCTAVE_MS_INT_BOOL_OPS(PFX, TM, TS) \ +#define OCTAVE_MS_INT_BOOL_OPS(PFX, TM, TS) \ DEFNDBINOP_FN (PFX ## _el_and, TM ## matrix, TS ## scalar, TM ## array, TS ## scalar, mx_el_and) \ DEFNDBINOP_FN (PFX ## _el_or, TM ## matrix, TS ## scalar, TM ## array, TS ## scalar, mx_el_or) \ DEFNDBINOP_FN (PFX ## _el_not_and, TM ## matrix, TS ## scalar, TM ## array, TS ## scalar, mx_el_not_and) \ DEFNDBINOP_FN (PFX ## _el_not_or, TM ## matrix, TS ## scalar, TM ## array, TS ## scalar, mx_el_not_or) -#define OCTAVE_MS_INT_ASSIGN_OPS(PFX, TM, TS, TE) \ +#define OCTAVE_MS_INT_ASSIGN_OPS(PFX, TM, TS, TE) \ DEFNDASSIGNOP_FN (PFX ## _assign, TM ## matrix, TS ## scalar, TM ## scalar, assign) -#define OCTAVE_MS_INT_ASSIGNEQ_OPS(PFX, TM) \ +#define OCTAVE_MS_INT_ASSIGNEQ_OPS(PFX, TM) \ DEFNDASSIGNOP_OP (PFX ## _assign_add, TM ## matrix, TM ## scalar, TM ## scalar, +=) \ DEFNDASSIGNOP_OP (PFX ## _assign_sub, TM ## matrix, TM ## scalar, TM ## scalar, -=) \ DEFNDASSIGNOP_OP (PFX ## _assign_mul, TM ## matrix, TM ## scalar, TM ## scalar, *=) \ DEFNDASSIGNOP_OP (PFX ## _assign_div, TM ## matrix, TM ## scalar, TM ## scalar, /=) -#define OCTAVE_MS_POW_OPS(T1, T2) \ -octave_value elem_xpow (T1 ## NDArray a, octave_ ## T2 b) \ -{ \ - T1 ## NDArray result (a.dims ()); \ - for (int i = 0; i < a.numel (); i++) \ - { \ - OCTAVE_QUIT; \ - result (i) = pow (a(i), b); \ - } \ - return octave_value (result); \ -} \ -\ -octave_value elem_xpow (T1 ## NDArray a, double b) \ -{ \ - T1 ## NDArray result (a.dims ()); \ - for (int i = 0; i < a.numel (); i++) \ - { \ - OCTAVE_QUIT; \ - result (i) = pow (a(i), b); \ - } \ - return octave_value (result); \ -} \ -\ -octave_value elem_xpow (NDArray a, octave_ ## T2 b) \ -{ \ - T2 ## NDArray result (a.dims ()); \ - for (int i = 0; i < a.numel (); i++) \ - { \ - OCTAVE_QUIT; \ - result (i) = pow (a(i), b); \ - } \ - return octave_value (result); \ -} \ -\ -octave_value elem_xpow (T1 ## NDArray a, float b) \ -{ \ - T1 ## NDArray result (a.dims ()); \ - for (int i = 0; i < a.numel (); i++) \ - { \ - OCTAVE_QUIT; \ - result (i) = powf (a(i), b); \ - } \ - return octave_value (result); \ -} \ -\ -octave_value elem_xpow (FloatNDArray a, octave_ ## T2 b) \ -{ \ - T2 ## NDArray result (a.dims ()); \ - for (int i = 0; i < a.numel (); 
i++) \ - { \ - OCTAVE_QUIT; \ - result (i) = powf (a(i), b); \ - } \ - return octave_value (result); \ -} +#define OCTAVE_MS_POW_OPS(T1, T2) \ + octave_value elem_xpow (T1 ## NDArray a, octave_ ## T2 b) \ + { \ + T1 ## NDArray result (a.dims ()); \ + for (int i = 0; i < a.numel (); i++) \ + { \ + OCTAVE_QUIT; \ + result (i) = pow (a(i), b); \ + } \ + return octave_value (result); \ + } \ + \ + octave_value elem_xpow (T1 ## NDArray a, double b) \ + { \ + T1 ## NDArray result (a.dims ()); \ + for (int i = 0; i < a.numel (); i++) \ + { \ + OCTAVE_QUIT; \ + result (i) = pow (a(i), b); \ + } \ + return octave_value (result); \ + } \ + \ + octave_value elem_xpow (NDArray a, octave_ ## T2 b) \ + { \ + T2 ## NDArray result (a.dims ()); \ + for (int i = 0; i < a.numel (); i++) \ + { \ + OCTAVE_QUIT; \ + result (i) = pow (a(i), b); \ + } \ + return octave_value (result); \ + } \ + \ + octave_value elem_xpow (T1 ## NDArray a, float b) \ + { \ + T1 ## NDArray result (a.dims ()); \ + for (int i = 0; i < a.numel (); i++) \ + { \ + OCTAVE_QUIT; \ + result (i) = powf (a(i), b); \ + } \ + return octave_value (result); \ + } \ + \ + octave_value elem_xpow (FloatNDArray a, octave_ ## T2 b) \ + { \ + T2 ## NDArray result (a.dims ()); \ + for (int i = 0; i < a.numel (); i++) \ + { \ + OCTAVE_QUIT; \ + result (i) = powf (a(i), b); \ + } \ + return octave_value (result); \ + } -#define OCTAVE_MS_INT_OPS(TYPE) \ - OCTAVE_MS_POW_OPS (TYPE, TYPE) \ - OCTAVE_MS_INT_ARITH_OPS (ms, TYPE ## _, TYPE ## _, TYPE) \ - OCTAVE_MS_INT_ARITH_OPS (msx, TYPE ## _, , TYPE) \ - OCTAVE_MS_INT_ARITH_OPS (mxs, , TYPE ## _, TYPE) \ - OCTAVE_MS_INT_ARITH_OPS (msfx, TYPE ## _, float_, TYPE) \ - OCTAVE_MS_INT_ARITH_OPS (mfxs, float_, TYPE ## _, TYPE) \ - OCTAVE_MS_INT_CMP_OPS (ms, TYPE ## _, TYPE ## _) \ - OCTAVE_MS_INT_CMP_OPS (mx, TYPE ## _, ) \ - OCTAVE_MS_INT_CMP_OPS (mxs, , TYPE ## _) \ - OCTAVE_MS_INT_CMP_OPS (mfx, TYPE ## _, float_) \ - OCTAVE_MS_INT_CMP_OPS (mfxs, float_, TYPE ## _) \ - OCTAVE_MS_INT_BOOL_OPS (ms, TYPE ## _, TYPE ## _) \ - OCTAVE_MS_INT_BOOL_OPS (mx, TYPE ## _, ) \ - OCTAVE_MS_INT_BOOL_OPS (mxs, , TYPE ## _) \ - OCTAVE_MS_INT_BOOL_OPS (mfx, TYPE ## _, float_) \ - OCTAVE_MS_INT_BOOL_OPS (mfxs, float_, TYPE ## _) \ - OCTAVE_MS_INT_ASSIGN_OPS (ms, TYPE ## _, TYPE ## _, TYPE ## _) \ - OCTAVE_MS_INT_ASSIGNEQ_OPS (mse, TYPE ## _) \ - OCTAVE_MS_INT_ASSIGN_OPS (mx, TYPE ## _, , ) \ +#define OCTAVE_MS_INT_OPS(TYPE) \ + OCTAVE_MS_POW_OPS (TYPE, TYPE) \ + OCTAVE_MS_INT_ARITH_OPS (ms, TYPE ## _, TYPE ## _, TYPE) \ + OCTAVE_MS_INT_ARITH_OPS (msx, TYPE ## _, , TYPE) \ + OCTAVE_MS_INT_ARITH_OPS (mxs, , TYPE ## _, TYPE) \ + OCTAVE_MS_INT_ARITH_OPS (msfx, TYPE ## _, float_, TYPE) \ + OCTAVE_MS_INT_ARITH_OPS (mfxs, float_, TYPE ## _, TYPE) \ + OCTAVE_MS_INT_CMP_OPS (ms, TYPE ## _, TYPE ## _) \ + OCTAVE_MS_INT_CMP_OPS (mx, TYPE ## _, ) \ + OCTAVE_MS_INT_CMP_OPS (mxs, , TYPE ## _) \ + OCTAVE_MS_INT_CMP_OPS (mfx, TYPE ## _, float_) \ + OCTAVE_MS_INT_CMP_OPS (mfxs, float_, TYPE ## _) \ + OCTAVE_MS_INT_BOOL_OPS (ms, TYPE ## _, TYPE ## _) \ + OCTAVE_MS_INT_BOOL_OPS (mx, TYPE ## _, ) \ + OCTAVE_MS_INT_BOOL_OPS (mxs, , TYPE ## _) \ + OCTAVE_MS_INT_BOOL_OPS (mfx, TYPE ## _, float_) \ + OCTAVE_MS_INT_BOOL_OPS (mfxs, float_, TYPE ## _) \ + OCTAVE_MS_INT_ASSIGN_OPS (ms, TYPE ## _, TYPE ## _, TYPE ## _) \ + OCTAVE_MS_INT_ASSIGNEQ_OPS (mse, TYPE ## _) \ + OCTAVE_MS_INT_ASSIGN_OPS (mx, TYPE ## _, , ) \ OCTAVE_MS_INT_ASSIGN_OPS (mfx, TYPE ## _, float_, float_) // matrix unary ops. 
-#define OCTAVE_M_INT_UNOPS(TYPE) \ - \ - DEFNDUNOP_OP (m_not, TYPE ## _matrix, TYPE ## _array, !) \ - DEFNDUNOP_OP (m_uplus, TYPE ## _matrix, TYPE ## _array, /* no-op */) \ - DEFUNOP (m_uminus, TYPE ## _matrix) \ - { \ +#define OCTAVE_M_INT_UNOPS(TYPE) \ + \ + DEFNDUNOP_OP (m_not, TYPE ## _matrix, TYPE ## _array, !) \ + DEFNDUNOP_OP (m_uplus, TYPE ## _matrix, TYPE ## _array, /* no-op */) \ + DEFUNOP (m_uminus, TYPE ## _matrix) \ + { \ const octave_ ## TYPE ## _matrix & v = dynamic_cast (a); \ - octave_value retval = octave_value (- v. TYPE ## _array_value ()); \ - return retval; \ - } \ - \ - DEFUNOP (m_transpose, TYPE ## _matrix) \ - { \ + octave_value retval = octave_value (- v. TYPE ## _array_value ()); \ + return retval; \ + } \ + \ + DEFUNOP (m_transpose, TYPE ## _matrix) \ + { \ const octave_ ## TYPE ## _matrix& v = dynamic_cast (a); \ - \ - if (v.ndims () > 2) \ - error ("transpose not defined for N-D objects"); \ - \ - return octave_value (v.TYPE ## _array_value ().transpose ()); \ - } \ - \ - DEFNCUNOP_METHOD (m_incr, TYPE ## _matrix, increment) \ - DEFNCUNOP_METHOD (m_decr, TYPE ## _matrix, decrement) \ + \ + if (v.ndims () > 2) \ + error ("transpose not defined for N-D objects"); \ + \ + return octave_value (v.TYPE ## _array_value ().transpose ()); \ + } \ + \ + DEFNCUNOP_METHOD (m_incr, TYPE ## _matrix, increment) \ + DEFNCUNOP_METHOD (m_decr, TYPE ## _matrix, decrement) \ DEFNCUNOP_METHOD (m_changesign, TYPE ## _matrix, changesign) // matrix by matrix ops. -#define OCTAVE_MM_INT_ARITH_OPS(PFX, T1, T2, T3) \ - \ +#define OCTAVE_MM_INT_ARITH_OPS(PFX, T1, T2, T3) \ + \ DEFINTNDBINOP_OP (PFX ## _add, T1 ## matrix, T2 ## matrix, T1 ## array, T2 ## array, +, T3) \ DEFINTNDBINOP_OP (PFX ## _sub, T1 ## matrix, T2 ## matrix, T1 ## array, T2 ## array, -, T3) \ - \ - /* DEFBINOP_OP (PFX ## _mul, T1 ## matrix, T2 ## matrix, *) */ \ - /* DEFBINOP_FN (PFX ## _div, T1 ## matrix, T2 ## matrix, xdiv) */ \ - \ - DEFBINOPX (PFX ## _pow, T1 ## matrix, T2 ## matrix) \ - { \ - error ("can't do A ^ B for A and B both matrices"); \ - } \ - \ + \ + /* DEFBINOP_OP (PFX ## _mul, T1 ## matrix, T2 ## matrix, *) */ \ + /* DEFBINOP_FN (PFX ## _div, T1 ## matrix, T2 ## matrix, xdiv) */ \ + \ + DEFBINOPX (PFX ## _pow, T1 ## matrix, T2 ## matrix) \ + { \ + error ("can't do A ^ B for A and B both matrices"); \ + } \ + \ /* DEFBINOP_FN (PFX ## _ldiv, T1 ## matrix, T2 ## matrix, xleftdiv) */ \ - \ + \ DEFINTNDBINOP_FN (PFX ## _el_mul, T1 ## matrix, T2 ## matrix, T1 ## array, T2 ## array, product, T3, .*) \ - \ + \ DEFINTNDBINOP_FN (PFX ## _el_div, T1 ## matrix, T2 ## matrix, T1 ## array, T2 ## array, quotient, T3, ./) \ - \ + \ DEFINTNDBINOP_FN (PFX ## _el_pow, T1 ## matrix, T2 ## matrix, T1 ## array, T2 ## array, elem_xpow, T3, .^) \ - \ - DEFBINOP (PFX ## _el_ldiv, T1 ## matrix, T2 ## matrix) \ - { \ + \ + DEFBINOP (PFX ## _el_ldiv, T1 ## matrix, T2 ## matrix) \ + { \ const octave_ ## T1 ## matrix& v1 = dynamic_cast (a1); \ const octave_ ## T2 ## matrix& v2 = dynamic_cast (a2); \ - \ + \ octave_value retval = octave_value (quotient (v2.T2 ## array_value (), v1.T1 ## array_value ())); \ - return retval; \ + return retval; \ } -#define OCTAVE_MM_INT_CMP_OPS(PFX, T1, T2) \ +#define OCTAVE_MM_INT_CMP_OPS(PFX, T1, T2) \ DEFNDBINOP_FN (PFX ## _lt, T1 ## matrix, T2 ## matrix, T1 ## array, T2 ## array, mx_el_lt) \ DEFNDBINOP_FN (PFX ## _le, T1 ## matrix, T2 ## matrix, T1 ## array, T2 ## array, mx_el_le) \ DEFNDBINOP_FN (PFX ## _eq, T1 ## matrix, T2 ## matrix, T1 ## array, T2 ## array, mx_el_eq) \ @@ -705,7 +705,7 @@ 
DEFNDBINOP_FN (PFX ## _gt, T1 ## matrix, T2 ## matrix, T1 ## array, T2 ## array, mx_el_gt) \ DEFNDBINOP_FN (PFX ## _ne, T1 ## matrix, T2 ## matrix, T1 ## array, T2 ## array, mx_el_ne) -#define OCTAVE_MM_INT_BOOL_OPS(PFX, T1, T2) \ +#define OCTAVE_MM_INT_BOOL_OPS(PFX, T1, T2) \ DEFNDBINOP_FN (PFX ## _el_and, T1 ## matrix, T2 ## matrix, T1 ## array, T2 ## array, mx_el_and) \ DEFNDBINOP_FN (PFX ## _el_or, T1 ## matrix, T2 ## matrix, T1 ## array, T2 ## array, mx_el_or) \ DEFNDBINOP_FN (PFX ## _el_not_and, T1 ## matrix, T2 ## matrix, T1 ## array, T2 ## array, mx_el_not_and) \ @@ -713,196 +713,196 @@ DEFNDBINOP_FN (PFX ## _el_and_not, T1 ## matrix, T2 ## matrix, T1 ## array, T2 ## array, mx_el_and_not) \ DEFNDBINOP_FN (PFX ## _el_or_not, T1 ## matrix, T2 ## matrix, T1 ## array, T2 ## array, mx_el_or_not) -#define OCTAVE_MM_INT_ASSIGN_OPS(PFX, TLHS, TRHS, TE) \ +#define OCTAVE_MM_INT_ASSIGN_OPS(PFX, TLHS, TRHS, TE) \ DEFNDASSIGNOP_FN (PFX ## _assign, TLHS ## matrix, TRHS ## matrix, TLHS ## array, assign) -#define OCTAVE_MM_INT_ASSIGNEQ_OPS(PFX, TM) \ +#define OCTAVE_MM_INT_ASSIGNEQ_OPS(PFX, TM) \ DEFNDASSIGNOP_OP (PFX ## _assign_add, TM ## matrix, TM ## matrix, TM ## array, +=) \ DEFNDASSIGNOP_OP (PFX ## _assign_sub, TM ## matrix, TM ## matrix, TM ## array, -=) \ DEFNDASSIGNOP_FNOP (PFX ## _assign_el_mul, TM ## matrix, TM ## matrix, TM ## array, product_eq) \ DEFNDASSIGNOP_FNOP (PFX ## _assign_el_div, TM ## matrix, TM ## matrix, TM ## array, quotient_eq) -#define OCTAVE_MM_POW_OPS(T1, T2) \ - octave_value \ - elem_xpow (const T1 ## NDArray& a, const T2 ## NDArray& b) \ - { \ - dim_vector a_dims = a.dims (); \ - dim_vector b_dims = b.dims (); \ - if (a_dims != b_dims) \ - { \ - if (! is_valid_bsxfun ("operator .^", a_dims, b_dims)) \ - err_nonconformant ("operator .^", a_dims, b_dims); \ - \ - return bsxfun_pow (a, b); \ - } \ - T1 ## NDArray result (a_dims); \ - for (int i = 0; i < a.numel (); i++) \ - { \ - OCTAVE_QUIT; \ - result (i) = pow (a(i), b(i)); \ - } \ - return octave_value (result); \ - } \ -\ - octave_value \ - elem_xpow (const T1 ## NDArray& a, const NDArray& b) \ - { \ - dim_vector a_dims = a.dims (); \ - dim_vector b_dims = b.dims (); \ - if (a_dims != b_dims) \ - { \ - if (! is_valid_bsxfun ("operator .^", a_dims, b_dims)) \ - err_nonconformant ("operator .^", a_dims, b_dims); \ - \ - return bsxfun_pow (a, b); \ - } \ - T1 ## NDArray result (a_dims); \ - for (int i = 0; i < a.numel (); i++) \ - { \ - OCTAVE_QUIT; \ - result (i) = pow (a(i), b(i)); \ - } \ - return octave_value (result); \ - } \ -\ - octave_value \ - elem_xpow (const NDArray& a, const T2 ## NDArray& b) \ - { \ - dim_vector a_dims = a.dims (); \ - dim_vector b_dims = b.dims (); \ - if (a_dims != b_dims) \ - { \ - if (! is_valid_bsxfun ("operator .^", a_dims, b_dims)) \ - err_nonconformant ("operator .^", a_dims, b_dims); \ - \ - return bsxfun_pow (a, b); \ - } \ - T2 ## NDArray result (a_dims); \ - for (int i = 0; i < a.numel (); i++) \ - { \ - OCTAVE_QUIT; \ - result (i) = pow (a(i), b(i)); \ - } \ - return octave_value (result); \ - } \ -\ - octave_value \ - elem_xpow (const T1 ## NDArray& a, const FloatNDArray& b) \ - { \ - dim_vector a_dims = a.dims (); \ - dim_vector b_dims = b.dims (); \ - if (a_dims != b_dims) \ - { \ - if (! 
is_valid_bsxfun ("operator .^", a_dims, b_dims)) \ - err_nonconformant ("operator .^", a_dims, b_dims); \ - \ - return bsxfun_pow (a, b); \ - } \ - T1 ## NDArray result (a_dims); \ - for (int i = 0; i < a.numel (); i++) \ - { \ - OCTAVE_QUIT; \ - result (i) = powf (a(i), b(i)); \ - } \ - return octave_value (result); \ - } \ -\ - octave_value \ - elem_xpow (const FloatNDArray& a, const T2 ## NDArray& b) \ - { \ - dim_vector a_dims = a.dims (); \ - dim_vector b_dims = b.dims (); \ - if (a_dims != b_dims) \ - { \ - if (! is_valid_bsxfun ("operator .^", a_dims, b_dims)) \ - err_nonconformant ("operator .^", a_dims, b_dims); \ - \ - return bsxfun_pow (a, b); \ - } \ - T2 ## NDArray result (a_dims); \ - for (int i = 0; i < a.numel (); i++) \ - { \ - OCTAVE_QUIT; \ - result (i) = powf (a(i), b(i)); \ - } \ - return octave_value (result); \ +#define OCTAVE_MM_POW_OPS(T1, T2) \ + octave_value \ + elem_xpow (const T1 ## NDArray& a, const T2 ## NDArray& b) \ + { \ + dim_vector a_dims = a.dims (); \ + dim_vector b_dims = b.dims (); \ + if (a_dims != b_dims) \ + { \ + if (! is_valid_bsxfun ("operator .^", a_dims, b_dims)) \ + err_nonconformant ("operator .^", a_dims, b_dims); \ + \ + return bsxfun_pow (a, b); \ + } \ + T1 ## NDArray result (a_dims); \ + for (int i = 0; i < a.numel (); i++) \ + { \ + OCTAVE_QUIT; \ + result (i) = pow (a(i), b(i)); \ + } \ + return octave_value (result); \ + } \ + \ + octave_value \ + elem_xpow (const T1 ## NDArray& a, const NDArray& b) \ + { \ + dim_vector a_dims = a.dims (); \ + dim_vector b_dims = b.dims (); \ + if (a_dims != b_dims) \ + { \ + if (! is_valid_bsxfun ("operator .^", a_dims, b_dims)) \ + err_nonconformant ("operator .^", a_dims, b_dims); \ + \ + return bsxfun_pow (a, b); \ + } \ + T1 ## NDArray result (a_dims); \ + for (int i = 0; i < a.numel (); i++) \ + { \ + OCTAVE_QUIT; \ + result (i) = pow (a(i), b(i)); \ + } \ + return octave_value (result); \ + } \ + \ + octave_value \ + elem_xpow (const NDArray& a, const T2 ## NDArray& b) \ + { \ + dim_vector a_dims = a.dims (); \ + dim_vector b_dims = b.dims (); \ + if (a_dims != b_dims) \ + { \ + if (! is_valid_bsxfun ("operator .^", a_dims, b_dims)) \ + err_nonconformant ("operator .^", a_dims, b_dims); \ + \ + return bsxfun_pow (a, b); \ + } \ + T2 ## NDArray result (a_dims); \ + for (int i = 0; i < a.numel (); i++) \ + { \ + OCTAVE_QUIT; \ + result (i) = pow (a(i), b(i)); \ + } \ + return octave_value (result); \ + } \ + \ + octave_value \ + elem_xpow (const T1 ## NDArray& a, const FloatNDArray& b) \ + { \ + dim_vector a_dims = a.dims (); \ + dim_vector b_dims = b.dims (); \ + if (a_dims != b_dims) \ + { \ + if (! is_valid_bsxfun ("operator .^", a_dims, b_dims)) \ + err_nonconformant ("operator .^", a_dims, b_dims); \ + \ + return bsxfun_pow (a, b); \ + } \ + T1 ## NDArray result (a_dims); \ + for (int i = 0; i < a.numel (); i++) \ + { \ + OCTAVE_QUIT; \ + result (i) = powf (a(i), b(i)); \ + } \ + return octave_value (result); \ + } \ + \ + octave_value \ + elem_xpow (const FloatNDArray& a, const T2 ## NDArray& b) \ + { \ + dim_vector a_dims = a.dims (); \ + dim_vector b_dims = b.dims (); \ + if (a_dims != b_dims) \ + { \ + if (! 
is_valid_bsxfun ("operator .^", a_dims, b_dims)) \ + err_nonconformant ("operator .^", a_dims, b_dims); \ + \ + return bsxfun_pow (a, b); \ + } \ + T2 ## NDArray result (a_dims); \ + for (int i = 0; i < a.numel (); i++) \ + { \ + OCTAVE_QUIT; \ + result (i) = powf (a(i), b(i)); \ + } \ + return octave_value (result); \ } -#define OCTAVE_MM_CONV(T1, T2) \ - DEFCONV (T1 ## m_ ## T2 ## m_conv, T1 ## matrix, T2 ## matrix) \ - { \ +#define OCTAVE_MM_CONV(T1, T2) \ + DEFCONV (T1 ## m_ ## T2 ## m_conv, T1 ## matrix, T2 ## matrix) \ + { \ const octave_ ## T1 ## matrix& v = dynamic_cast (a); \ - \ - return new octave_ ## T2 ## matrix (v.T2 ## array_value ()); \ + \ + return new octave_ ## T2 ## matrix (v.T2 ## array_value ()); \ } -#define OCTAVE_MM_INT_OPS(TYPE) \ - OCTAVE_M_INT_UNOPS (TYPE) \ - OCTAVE_MM_POW_OPS (TYPE, TYPE) \ - OCTAVE_MM_INT_ARITH_OPS (mm, TYPE ## _, TYPE ## _, TYPE) \ - OCTAVE_MM_INT_ARITH_OPS (mmx, TYPE ## _, , TYPE) \ - OCTAVE_MM_INT_ARITH_OPS (mxm, , TYPE ## _, TYPE) \ - OCTAVE_MM_INT_ARITH_OPS (mmfx, TYPE ## _, float_, TYPE) \ - OCTAVE_MM_INT_ARITH_OPS (mfxm, float_, TYPE ## _, TYPE) \ - OCTAVE_MM_INT_CMP_OPS (mm, TYPE ## _, TYPE ## _) \ - OCTAVE_MM_INT_CMP_OPS (mmx, TYPE ## _, ) \ - OCTAVE_MM_INT_CMP_OPS (mfxm, float_, TYPE ## _) \ - OCTAVE_MM_INT_CMP_OPS (mmfx, TYPE ## _, float_) \ - OCTAVE_MM_INT_CMP_OPS (mxm, , TYPE ## _) \ - OCTAVE_MM_INT_BOOL_OPS (mm, TYPE ## _, TYPE ## _) \ - OCTAVE_MM_INT_BOOL_OPS (mmx, TYPE ## _, ) \ - OCTAVE_MM_INT_BOOL_OPS (mxm, , TYPE ## _) \ - OCTAVE_MM_INT_BOOL_OPS (mmfx, TYPE ## _, float_) \ - OCTAVE_MM_INT_BOOL_OPS (mfxm, float_, TYPE ## _) \ - OCTAVE_MM_INT_ASSIGN_OPS (mm, TYPE ## _, TYPE ## _, TYPE ## _) \ - OCTAVE_MM_INT_ASSIGNEQ_OPS (mme, TYPE ## _) \ - OCTAVE_MM_INT_ASSIGN_OPS (mmx, TYPE ## _, , ) \ - OCTAVE_MM_INT_ASSIGN_OPS (mmfx, TYPE ## _, float_, float_) \ - OCTAVE_MM_CONV(TYPE ## _, complex_) \ +#define OCTAVE_MM_INT_OPS(TYPE) \ + OCTAVE_M_INT_UNOPS (TYPE) \ + OCTAVE_MM_POW_OPS (TYPE, TYPE) \ + OCTAVE_MM_INT_ARITH_OPS (mm, TYPE ## _, TYPE ## _, TYPE) \ + OCTAVE_MM_INT_ARITH_OPS (mmx, TYPE ## _, , TYPE) \ + OCTAVE_MM_INT_ARITH_OPS (mxm, , TYPE ## _, TYPE) \ + OCTAVE_MM_INT_ARITH_OPS (mmfx, TYPE ## _, float_, TYPE) \ + OCTAVE_MM_INT_ARITH_OPS (mfxm, float_, TYPE ## _, TYPE) \ + OCTAVE_MM_INT_CMP_OPS (mm, TYPE ## _, TYPE ## _) \ + OCTAVE_MM_INT_CMP_OPS (mmx, TYPE ## _, ) \ + OCTAVE_MM_INT_CMP_OPS (mfxm, float_, TYPE ## _) \ + OCTAVE_MM_INT_CMP_OPS (mmfx, TYPE ## _, float_) \ + OCTAVE_MM_INT_CMP_OPS (mxm, , TYPE ## _) \ + OCTAVE_MM_INT_BOOL_OPS (mm, TYPE ## _, TYPE ## _) \ + OCTAVE_MM_INT_BOOL_OPS (mmx, TYPE ## _, ) \ + OCTAVE_MM_INT_BOOL_OPS (mxm, , TYPE ## _) \ + OCTAVE_MM_INT_BOOL_OPS (mmfx, TYPE ## _, float_) \ + OCTAVE_MM_INT_BOOL_OPS (mfxm, float_, TYPE ## _) \ + OCTAVE_MM_INT_ASSIGN_OPS (mm, TYPE ## _, TYPE ## _, TYPE ## _) \ + OCTAVE_MM_INT_ASSIGNEQ_OPS (mme, TYPE ## _) \ + OCTAVE_MM_INT_ASSIGN_OPS (mmx, TYPE ## _, , ) \ + OCTAVE_MM_INT_ASSIGN_OPS (mmfx, TYPE ## _, float_, float_) \ + OCTAVE_MM_CONV(TYPE ## _, complex_) \ OCTAVE_MM_CONV(TYPE ## _, float_complex_) -#define OCTAVE_RE_INT_ASSIGN_OPS(TYPE) \ +#define OCTAVE_RE_INT_ASSIGN_OPS(TYPE) \ DEFNDASSIGNOP_FN (TYPE ## ms_assign, matrix, TYPE ## _scalar, array, assign) \ DEFNDASSIGNOP_FN (TYPE ## mm_assign, matrix, TYPE ## _matrix, array, assign) -#define OCTAVE_FLT_RE_INT_ASSIGN_OPS(TYPE) \ +#define OCTAVE_FLT_RE_INT_ASSIGN_OPS(TYPE) \ DEFNDASSIGNOP_FN (TYPE ## fms_assign, float_matrix, TYPE ## _scalar, float_array, assign) \ DEFNDASSIGNOP_FN (TYPE ## fmm_assign, 
float_matrix, TYPE ## _matrix, float_array, assign) -#define OCTAVE_CX_INT_ASSIGN_OPS(TYPE) \ +#define OCTAVE_CX_INT_ASSIGN_OPS(TYPE) \ DEFNDASSIGNOP_FN (TYPE ## cms_assign, complex_matrix, TYPE ## _scalar, complex_array, assign) \ DEFNDASSIGNOP_FN (TYPE ## cmm_assign, complex_matrix, TYPE ## _matrix, complex_array, assign) -#define OCTAVE_FLT_CX_INT_ASSIGN_OPS(TYPE) \ +#define OCTAVE_FLT_CX_INT_ASSIGN_OPS(TYPE) \ DEFNDASSIGNOP_FN (TYPE ## fcms_assign, float_complex_matrix, TYPE ## _scalar, float_complex_array, assign) \ DEFNDASSIGNOP_FN (TYPE ## fcmm_assign, float_complex_matrix, TYPE ## _matrix, float_complex_array, assign) -#define OCTAVE_INT_NULL_ASSIGN_OPS(TYPE) \ +#define OCTAVE_INT_NULL_ASSIGN_OPS(TYPE) \ DEFNULLASSIGNOP_FN (TYPE ## null_assign, TYPE ## _matrix, delete_elements) -#define OCTAVE_INT_OPS(TYPE) \ - OCTAVE_SS_INT_OPS (TYPE) \ - OCTAVE_SM_INT_OPS (TYPE) \ - OCTAVE_MS_INT_OPS (TYPE) \ - OCTAVE_MM_INT_OPS (TYPE) \ - OCTAVE_CONCAT_FN (TYPE) \ - OCTAVE_RE_INT_ASSIGN_OPS (TYPE) \ - OCTAVE_FLT_RE_INT_ASSIGN_OPS (TYPE) \ - OCTAVE_CX_INT_ASSIGN_OPS (TYPE) \ - OCTAVE_FLT_CX_INT_ASSIGN_OPS (TYPE) \ +#define OCTAVE_INT_OPS(TYPE) \ + OCTAVE_SS_INT_OPS (TYPE) \ + OCTAVE_SM_INT_OPS (TYPE) \ + OCTAVE_MS_INT_OPS (TYPE) \ + OCTAVE_MM_INT_OPS (TYPE) \ + OCTAVE_CONCAT_FN (TYPE) \ + OCTAVE_RE_INT_ASSIGN_OPS (TYPE) \ + OCTAVE_FLT_RE_INT_ASSIGN_OPS (TYPE) \ + OCTAVE_CX_INT_ASSIGN_OPS (TYPE) \ + OCTAVE_FLT_CX_INT_ASSIGN_OPS (TYPE) \ OCTAVE_INT_NULL_ASSIGN_OPS(TYPE) -#define OCTAVE_INSTALL_S_INT_UNOPS(TYPE) \ - INSTALL_UNOP (op_not, octave_ ## TYPE ## _scalar, s_not); \ - INSTALL_UNOP (op_uplus, octave_ ## TYPE ## _scalar, s_uplus); \ - INSTALL_UNOP (op_uminus, octave_ ## TYPE ## _scalar, s_uminus); \ +#define OCTAVE_INSTALL_S_INT_UNOPS(TYPE) \ + INSTALL_UNOP (op_not, octave_ ## TYPE ## _scalar, s_not); \ + INSTALL_UNOP (op_uplus, octave_ ## TYPE ## _scalar, s_uplus); \ + INSTALL_UNOP (op_uminus, octave_ ## TYPE ## _scalar, s_uminus); \ INSTALL_UNOP (op_transpose, octave_ ## TYPE ## _scalar, s_transpose); \ INSTALL_UNOP (op_hermitian, octave_ ## TYPE ## _scalar, s_hermitian); \ - \ - INSTALL_NCUNOP (op_incr, octave_ ## TYPE ## _scalar, s_incr); \ + \ + INSTALL_NCUNOP (op_incr, octave_ ## TYPE ## _scalar, s_incr); \ INSTALL_NCUNOP (op_decr, octave_ ## TYPE ## _scalar, s_decr); -#define OCTAVE_INSTALL_SS_INT_ARITH_OPS(PFX, T1, T2) \ +#define OCTAVE_INSTALL_SS_INT_ARITH_OPS(PFX, T1, T2) \ INSTALL_BINOP (op_add, octave_ ## T1 ## scalar, octave_ ## T2 ## scalar, PFX ## _add); \ INSTALL_BINOP (op_sub, octave_ ## T1 ## scalar, octave_ ## T2 ## scalar, PFX ## _sub); \ INSTALL_BINOP (op_mul, octave_ ## T1 ## scalar, octave_ ## T2 ## scalar, PFX ## _mul); \ @@ -914,7 +914,7 @@ INSTALL_BINOP (op_el_pow, octave_ ## T1 ## scalar, octave_ ## T2 ## scalar, PFX ## _el_pow); \ INSTALL_BINOP (op_el_ldiv, octave_ ## T1 ## scalar, octave_ ## T2 ## scalar, PFX ## _el_ldiv); -#define OCTAVE_INSTALL_SS_INT_CMP_OPS(PFX, T1, T2) \ +#define OCTAVE_INSTALL_SS_INT_CMP_OPS(PFX, T1, T2) \ INSTALL_BINOP (op_lt, octave_ ## T1 ## scalar, octave_ ## T2 ## scalar, PFX ## _lt); \ INSTALL_BINOP (op_le, octave_ ## T1 ## scalar, octave_ ## T2 ## scalar, PFX ## _le); \ INSTALL_BINOP (op_eq, octave_ ## T1 ## scalar, octave_ ## T2 ## scalar, PFX ## _eq); \ @@ -922,34 +922,34 @@ INSTALL_BINOP (op_gt, octave_ ## T1 ## scalar, octave_ ## T2 ## scalar, PFX ## _gt); \ INSTALL_BINOP (op_ne, octave_ ## T1 ## scalar, octave_ ## T2 ## scalar, PFX ## _ne); -#define OCTAVE_INSTALL_SS_INT_BOOL_OPS(PFX, T1, T2) \ +#define 
OCTAVE_INSTALL_SS_INT_BOOL_OPS(PFX, T1, T2) \ INSTALL_BINOP (op_el_and, octave_ ## T1 ## scalar, octave_ ## T2 ## scalar, PFX ## _el_and); \ INSTALL_BINOP (op_el_or, octave_ ## T1 ## scalar, octave_ ## T2 ## scalar, PFX ## _el_or); -#define OCTAVE_INSTALL_SS_INT_OPS(TYPE) \ - OCTAVE_INSTALL_S_INT_UNOPS (TYPE) \ - OCTAVE_INSTALL_SS_INT_ARITH_OPS (ss, TYPE ## _, TYPE ## _) \ - OCTAVE_INSTALL_SS_INT_ARITH_OPS (ssx, TYPE ## _, ) \ - OCTAVE_INSTALL_SS_INT_ARITH_OPS (sxs, , TYPE ## _) \ - OCTAVE_INSTALL_SS_INT_ARITH_OPS (ssfx, TYPE ## _, float_) \ - OCTAVE_INSTALL_SS_INT_ARITH_OPS (sfxs, float_, TYPE ## _) \ - OCTAVE_INSTALL_SS_INT_CMP_OPS (ss, TYPE ## _, TYPE ## _) \ - OCTAVE_INSTALL_SS_INT_CMP_OPS (sx, TYPE ## _, ) \ - OCTAVE_INSTALL_SS_INT_CMP_OPS (xs, , TYPE ## _) \ - OCTAVE_INSTALL_SS_INT_CMP_OPS (sfx, TYPE ## _, float_) \ - OCTAVE_INSTALL_SS_INT_CMP_OPS (fxs, float_, TYPE ## _) \ - OCTAVE_INSTALL_SS_INT_BOOL_OPS (ss, TYPE ## _, TYPE ## _) \ - OCTAVE_INSTALL_SS_INT_BOOL_OPS (sx, TYPE ## _, ) \ - OCTAVE_INSTALL_SS_INT_BOOL_OPS (xs, , TYPE ## _) \ - OCTAVE_INSTALL_SS_INT_BOOL_OPS (sfx, TYPE ## _, float_) \ - OCTAVE_INSTALL_SS_INT_BOOL_OPS (fxs, float_, TYPE ## _) \ +#define OCTAVE_INSTALL_SS_INT_OPS(TYPE) \ + OCTAVE_INSTALL_S_INT_UNOPS (TYPE) \ + OCTAVE_INSTALL_SS_INT_ARITH_OPS (ss, TYPE ## _, TYPE ## _) \ + OCTAVE_INSTALL_SS_INT_ARITH_OPS (ssx, TYPE ## _, ) \ + OCTAVE_INSTALL_SS_INT_ARITH_OPS (sxs, , TYPE ## _) \ + OCTAVE_INSTALL_SS_INT_ARITH_OPS (ssfx, TYPE ## _, float_) \ + OCTAVE_INSTALL_SS_INT_ARITH_OPS (sfxs, float_, TYPE ## _) \ + OCTAVE_INSTALL_SS_INT_CMP_OPS (ss, TYPE ## _, TYPE ## _) \ + OCTAVE_INSTALL_SS_INT_CMP_OPS (sx, TYPE ## _, ) \ + OCTAVE_INSTALL_SS_INT_CMP_OPS (xs, , TYPE ## _) \ + OCTAVE_INSTALL_SS_INT_CMP_OPS (sfx, TYPE ## _, float_) \ + OCTAVE_INSTALL_SS_INT_CMP_OPS (fxs, float_, TYPE ## _) \ + OCTAVE_INSTALL_SS_INT_BOOL_OPS (ss, TYPE ## _, TYPE ## _) \ + OCTAVE_INSTALL_SS_INT_BOOL_OPS (sx, TYPE ## _, ) \ + OCTAVE_INSTALL_SS_INT_BOOL_OPS (xs, , TYPE ## _) \ + OCTAVE_INSTALL_SS_INT_BOOL_OPS (sfx, TYPE ## _, float_) \ + OCTAVE_INSTALL_SS_INT_BOOL_OPS (fxs, float_, TYPE ## _) \ INSTALL_ASSIGNCONV (octave_ ## TYPE ## _scalar, octave_ ## TYPE ## _scalar, octave_ ## TYPE ## _matrix) \ INSTALL_ASSIGNCONV (octave_ ## TYPE ## _scalar, octave_scalar, octave_ ## TYPE ## _matrix) \ INSTALL_ASSIGNCONV (octave_ ## TYPE ## _scalar, octave_float_scalar, octave_ ## TYPE ## _matrix) \ INSTALL_ASSIGNCONV (octave_ ## TYPE ## _scalar, octave_complex_scalar, octave_complex_matrix) \ INSTALL_ASSIGNCONV (octave_ ## TYPE ## _scalar, octave_float_complex_scalar, octave_float_complex_matrix) -#define OCTAVE_INSTALL_SM_INT_ARITH_OPS(PFX, T1, T2) \ +#define OCTAVE_INSTALL_SM_INT_ARITH_OPS(PFX, T1, T2) \ INSTALL_BINOP (op_add, octave_ ## T1 ## scalar, octave_ ## T2 ## matrix, PFX ## _add); \ INSTALL_BINOP (op_sub, octave_ ## T1 ## scalar, octave_ ## T2 ## matrix, PFX ## _sub); \ INSTALL_BINOP (op_mul, octave_ ## T1 ## scalar, octave_ ## T2 ## matrix, PFX ## _mul); \ @@ -961,7 +961,7 @@ INSTALL_BINOP (op_el_pow, octave_ ## T1 ## scalar, octave_ ## T2 ## matrix, PFX ## _el_pow); \ INSTALL_BINOP (op_el_ldiv, octave_ ## T1 ## scalar, octave_ ## T2 ## matrix, PFX ## _el_ldiv); -#define OCTAVE_INSTALL_SM_INT_CMP_OPS(PFX, T1, T2) \ +#define OCTAVE_INSTALL_SM_INT_CMP_OPS(PFX, T1, T2) \ INSTALL_BINOP (op_lt, octave_ ## T1 ## scalar, octave_ ## T2 ## matrix, PFX ## _lt); \ INSTALL_BINOP (op_le, octave_ ## T1 ## scalar, octave_ ## T2 ## matrix, PFX ## _le); \ INSTALL_BINOP (op_eq, octave_ ## T1 ## scalar, 
octave_ ## T2 ## matrix, PFX ## _eq); \ @@ -969,28 +969,28 @@ INSTALL_BINOP (op_gt, octave_ ## T1 ## scalar, octave_ ## T2 ## matrix, PFX ## _gt); \ INSTALL_BINOP (op_ne, octave_ ## T1 ## scalar, octave_ ## T2 ## matrix, PFX ## _ne); -#define OCTAVE_INSTALL_SM_INT_BOOL_OPS(PFX, T1, T2) \ +#define OCTAVE_INSTALL_SM_INT_BOOL_OPS(PFX, T1, T2) \ INSTALL_BINOP (op_el_and, octave_ ## T1 ## scalar, octave_ ## T2 ## matrix, PFX ## _el_and); \ INSTALL_BINOP (op_el_or, octave_ ## T1 ## scalar, octave_ ## T2 ## matrix, PFX ## _el_or); \ INSTALL_BINOP (op_el_and_not, octave_ ## T1 ## scalar, octave_ ## T2 ## matrix, PFX ## _el_and_not); \ INSTALL_BINOP (op_el_or_not, octave_ ## T1 ## scalar, octave_ ## T2 ## matrix, PFX ## _el_or_not); -#define OCTAVE_INSTALL_SM_INT_OPS(TYPE) \ - OCTAVE_INSTALL_SM_INT_ARITH_OPS (sm, TYPE ## _, TYPE ## _) \ - OCTAVE_INSTALL_SM_INT_ARITH_OPS (smx, TYPE ## _, ) \ - OCTAVE_INSTALL_SM_INT_ARITH_OPS (sxm, , TYPE ## _) \ - OCTAVE_INSTALL_SM_INT_ARITH_OPS (smfx, TYPE ## _, float_) \ - OCTAVE_INSTALL_SM_INT_ARITH_OPS (sfxm, float_, TYPE ## _) \ - OCTAVE_INSTALL_SM_INT_CMP_OPS (sm, TYPE ## _, TYPE ## _) \ - OCTAVE_INSTALL_SM_INT_CMP_OPS (xm, , TYPE ## _) \ - OCTAVE_INSTALL_SM_INT_CMP_OPS (smx, TYPE ## _, ) \ - OCTAVE_INSTALL_SM_INT_CMP_OPS (fxm, float_, TYPE ## _) \ - OCTAVE_INSTALL_SM_INT_CMP_OPS (smfx, TYPE ## _, float_) \ - OCTAVE_INSTALL_SM_INT_BOOL_OPS (sm, TYPE ## _, TYPE ## _) \ - OCTAVE_INSTALL_SM_INT_BOOL_OPS (xm, , TYPE ## _) \ - OCTAVE_INSTALL_SM_INT_BOOL_OPS (smx, TYPE ## _, ) \ - OCTAVE_INSTALL_SM_INT_BOOL_OPS (fxm, float_, TYPE ## _) \ - OCTAVE_INSTALL_SM_INT_BOOL_OPS (smfx, TYPE ## _, float_) \ +#define OCTAVE_INSTALL_SM_INT_OPS(TYPE) \ + OCTAVE_INSTALL_SM_INT_ARITH_OPS (sm, TYPE ## _, TYPE ## _) \ + OCTAVE_INSTALL_SM_INT_ARITH_OPS (smx, TYPE ## _, ) \ + OCTAVE_INSTALL_SM_INT_ARITH_OPS (sxm, , TYPE ## _) \ + OCTAVE_INSTALL_SM_INT_ARITH_OPS (smfx, TYPE ## _, float_) \ + OCTAVE_INSTALL_SM_INT_ARITH_OPS (sfxm, float_, TYPE ## _) \ + OCTAVE_INSTALL_SM_INT_CMP_OPS (sm, TYPE ## _, TYPE ## _) \ + OCTAVE_INSTALL_SM_INT_CMP_OPS (xm, , TYPE ## _) \ + OCTAVE_INSTALL_SM_INT_CMP_OPS (smx, TYPE ## _, ) \ + OCTAVE_INSTALL_SM_INT_CMP_OPS (fxm, float_, TYPE ## _) \ + OCTAVE_INSTALL_SM_INT_CMP_OPS (smfx, TYPE ## _, float_) \ + OCTAVE_INSTALL_SM_INT_BOOL_OPS (sm, TYPE ## _, TYPE ## _) \ + OCTAVE_INSTALL_SM_INT_BOOL_OPS (xm, , TYPE ## _) \ + OCTAVE_INSTALL_SM_INT_BOOL_OPS (smx, TYPE ## _, ) \ + OCTAVE_INSTALL_SM_INT_BOOL_OPS (fxm, float_, TYPE ## _) \ + OCTAVE_INSTALL_SM_INT_BOOL_OPS (smfx, TYPE ## _, float_) \ INSTALL_WIDENOP (octave_ ## TYPE ## _scalar, octave_ ## TYPE ## _matrix, TYPE ## _s_ ## TYPE ## _m_conv) \ INSTALL_WIDENOP (octave_ ## TYPE ## _scalar, octave_complex_matrix, TYPE ## _s_complex_m_conv) \ INSTALL_WIDENOP (octave_ ## TYPE ## _scalar, octave_float_complex_matrix, TYPE ## _s_float_complex_m_conv) \ @@ -1000,20 +1000,20 @@ INSTALL_ASSIGNCONV (octave_ ## TYPE ## _scalar, octave_complex_matrix, octave_complex_matrix) \ INSTALL_ASSIGNCONV (octave_ ## TYPE ## _scalar, octave_float_complex_matrix, octave_float_complex_matrix) -#define OCTAVE_INSTALL_MS_INT_ARITH_OPS(PFX, T1, T2) \ +#define OCTAVE_INSTALL_MS_INT_ARITH_OPS(PFX, T1, T2) \ INSTALL_BINOP (op_add, octave_ ## T1 ## matrix, octave_ ## T2 ## scalar, PFX ## _add); \ INSTALL_BINOP (op_sub, octave_ ## T1 ## matrix, octave_ ## T2 ## scalar, PFX ## _sub); \ INSTALL_BINOP (op_mul, octave_ ## T1 ## matrix, octave_ ## T2 ## scalar, PFX ## _mul); \ INSTALL_BINOP (op_div, octave_ ## T1 ## matrix, octave_ ## T2 ## scalar, 
PFX ## _div); \ /* INSTALL_BINOP (op_pow, octave_ ## T1 ## matrix, octave_ ## T2 ## scalar, PFX ## _pow); */ \ /* INSTALL_BINOP (op_ldiv, octave_ ## T1 ## matrix, octave_ ## T2 ## scalar, PFX ## _ldiv); */ \ - \ + \ INSTALL_BINOP (op_el_mul, octave_ ## T1 ## matrix, octave_ ## T2 ## scalar, PFX ## _el_mul); \ INSTALL_BINOP (op_el_div, octave_ ## T1 ## matrix, octave_ ## T2 ## scalar, PFX ## _el_div); \ INSTALL_BINOP (op_el_pow, octave_ ## T1 ## matrix, octave_ ## T2 ## scalar, PFX ## _el_pow); \ INSTALL_BINOP (op_el_ldiv, octave_ ## T1 ## matrix, octave_ ## T2 ## scalar, PFX ## _el_ldiv); -#define OCTAVE_INSTALL_MS_INT_CMP_OPS(PFX, T1, T2) \ +#define OCTAVE_INSTALL_MS_INT_CMP_OPS(PFX, T1, T2) \ INSTALL_BINOP (op_lt, octave_ ## T1 ## matrix, octave_ ## T2 ## scalar, PFX ## _lt); \ INSTALL_BINOP (op_le, octave_ ## T1 ## matrix, octave_ ## T2 ## scalar, PFX ## _le); \ INSTALL_BINOP (op_eq, octave_ ## T1 ## matrix, octave_ ## T2 ## scalar, PFX ## _eq); \ @@ -1021,56 +1021,56 @@ INSTALL_BINOP (op_gt, octave_ ## T1 ## matrix, octave_ ## T2 ## scalar, PFX ## _gt); \ INSTALL_BINOP (op_ne, octave_ ## T1 ## matrix, octave_ ## T2 ## scalar, PFX ## _ne); -#define OCTAVE_INSTALL_MS_INT_BOOL_OPS(PFX, T1, T2) \ +#define OCTAVE_INSTALL_MS_INT_BOOL_OPS(PFX, T1, T2) \ INSTALL_BINOP (op_el_and, octave_ ## T1 ## matrix, octave_ ## T2 ## scalar, PFX ## _el_and); \ INSTALL_BINOP (op_el_or, octave_ ## T1 ## matrix, octave_ ## T2 ## scalar, PFX ## _el_or); \ INSTALL_BINOP (op_el_not_and, octave_ ## T1 ## matrix, octave_ ## T2 ## scalar, PFX ## _el_not_and); \ INSTALL_BINOP (op_el_not_or, octave_ ## T1 ## matrix, octave_ ## T2 ## scalar, PFX ## _el_not_or); -#define OCTAVE_INSTALL_MS_INT_ASSIGN_OPS(PFX, TLHS, TRHS) \ +#define OCTAVE_INSTALL_MS_INT_ASSIGN_OPS(PFX, TLHS, TRHS) \ INSTALL_ASSIGNOP (op_asn_eq, octave_ ## TLHS ## matrix, octave_ ## TRHS ## scalar, PFX ## _assign) -#define OCTAVE_INSTALL_MS_INT_ASSIGNEQ_OPS(PFX, TLHS, TRHS) \ +#define OCTAVE_INSTALL_MS_INT_ASSIGNEQ_OPS(PFX, TLHS, TRHS) \ INSTALL_ASSIGNOP (op_add_eq, octave_ ## TLHS ## matrix, octave_ ## TRHS ## scalar, PFX ## _assign_add) \ INSTALL_ASSIGNOP (op_sub_eq, octave_ ## TLHS ## matrix, octave_ ## TRHS ## scalar, PFX ## _assign_sub) \ INSTALL_ASSIGNOP (op_mul_eq, octave_ ## TLHS ## matrix, octave_ ## TRHS ## scalar, PFX ## _assign_mul) \ INSTALL_ASSIGNOP (op_div_eq, octave_ ## TLHS ## matrix, octave_ ## TRHS ## scalar, PFX ## _assign_div) -#define OCTAVE_INSTALL_MS_INT_OPS(TYPE) \ - OCTAVE_INSTALL_MS_INT_ARITH_OPS (ms, TYPE ## _, TYPE ## _) \ - OCTAVE_INSTALL_MS_INT_ARITH_OPS (msx, TYPE ## _, ) \ - OCTAVE_INSTALL_MS_INT_ARITH_OPS (mxs, , TYPE ## _) \ - OCTAVE_INSTALL_MS_INT_ARITH_OPS (msfx, TYPE ## _, float_) \ - OCTAVE_INSTALL_MS_INT_ARITH_OPS (mfxs, float_, TYPE ## _) \ - OCTAVE_INSTALL_MS_INT_CMP_OPS (ms, TYPE ## _, TYPE ## _) \ - OCTAVE_INSTALL_MS_INT_CMP_OPS (mx, TYPE ## _, ) \ - OCTAVE_INSTALL_MS_INT_CMP_OPS (mxs, , TYPE ## _) \ - OCTAVE_INSTALL_MS_INT_CMP_OPS (mfx, TYPE ## _, float_) \ - OCTAVE_INSTALL_MS_INT_CMP_OPS (mfxs, float_, TYPE ## _) \ - OCTAVE_INSTALL_MS_INT_BOOL_OPS (ms, TYPE ## _, TYPE ## _) \ - OCTAVE_INSTALL_MS_INT_BOOL_OPS (mx, TYPE ## _, ) \ - OCTAVE_INSTALL_MS_INT_BOOL_OPS (mxs, , TYPE ## _) \ - OCTAVE_INSTALL_MS_INT_BOOL_OPS (mfx, TYPE ## _, float_) \ - OCTAVE_INSTALL_MS_INT_BOOL_OPS (mfxs, float_, TYPE ## _) \ - OCTAVE_INSTALL_MS_INT_ASSIGN_OPS (ms, TYPE ## _, TYPE ## _) \ - OCTAVE_INSTALL_MS_INT_ASSIGNEQ_OPS (mse, TYPE ## _, TYPE ## _) \ - OCTAVE_INSTALL_MS_INT_ASSIGN_OPS (mx, TYPE ## _, ) \ - 
OCTAVE_INSTALL_MS_INT_ASSIGN_OPS (mfx, TYPE ## _, float_) \ +#define OCTAVE_INSTALL_MS_INT_OPS(TYPE) \ + OCTAVE_INSTALL_MS_INT_ARITH_OPS (ms, TYPE ## _, TYPE ## _) \ + OCTAVE_INSTALL_MS_INT_ARITH_OPS (msx, TYPE ## _, ) \ + OCTAVE_INSTALL_MS_INT_ARITH_OPS (mxs, , TYPE ## _) \ + OCTAVE_INSTALL_MS_INT_ARITH_OPS (msfx, TYPE ## _, float_) \ + OCTAVE_INSTALL_MS_INT_ARITH_OPS (mfxs, float_, TYPE ## _) \ + OCTAVE_INSTALL_MS_INT_CMP_OPS (ms, TYPE ## _, TYPE ## _) \ + OCTAVE_INSTALL_MS_INT_CMP_OPS (mx, TYPE ## _, ) \ + OCTAVE_INSTALL_MS_INT_CMP_OPS (mxs, , TYPE ## _) \ + OCTAVE_INSTALL_MS_INT_CMP_OPS (mfx, TYPE ## _, float_) \ + OCTAVE_INSTALL_MS_INT_CMP_OPS (mfxs, float_, TYPE ## _) \ + OCTAVE_INSTALL_MS_INT_BOOL_OPS (ms, TYPE ## _, TYPE ## _) \ + OCTAVE_INSTALL_MS_INT_BOOL_OPS (mx, TYPE ## _, ) \ + OCTAVE_INSTALL_MS_INT_BOOL_OPS (mxs, , TYPE ## _) \ + OCTAVE_INSTALL_MS_INT_BOOL_OPS (mfx, TYPE ## _, float_) \ + OCTAVE_INSTALL_MS_INT_BOOL_OPS (mfxs, float_, TYPE ## _) \ + OCTAVE_INSTALL_MS_INT_ASSIGN_OPS (ms, TYPE ## _, TYPE ## _) \ + OCTAVE_INSTALL_MS_INT_ASSIGNEQ_OPS (mse, TYPE ## _, TYPE ## _) \ + OCTAVE_INSTALL_MS_INT_ASSIGN_OPS (mx, TYPE ## _, ) \ + OCTAVE_INSTALL_MS_INT_ASSIGN_OPS (mfx, TYPE ## _, float_) \ INSTALL_ASSIGNCONV (octave_ ## TYPE ## _matrix, octave_complex_scalar, octave_complex_matrix) \ INSTALL_ASSIGNCONV (octave_ ## TYPE ## _matrix, octave_float_complex_scalar, octave_float_complex_matrix) -#define OCTAVE_INSTALL_M_INT_UNOPS(TYPE) \ - INSTALL_UNOP (op_not, octave_ ## TYPE ## _matrix, m_not); \ - INSTALL_UNOP (op_uplus, octave_ ## TYPE ## _matrix, m_uplus); \ - INSTALL_UNOP (op_uminus, octave_ ## TYPE ## _matrix, m_uminus); \ +#define OCTAVE_INSTALL_M_INT_UNOPS(TYPE) \ + INSTALL_UNOP (op_not, octave_ ## TYPE ## _matrix, m_not); \ + INSTALL_UNOP (op_uplus, octave_ ## TYPE ## _matrix, m_uplus); \ + INSTALL_UNOP (op_uminus, octave_ ## TYPE ## _matrix, m_uminus); \ INSTALL_UNOP (op_transpose, octave_ ## TYPE ## _matrix, m_transpose); \ INSTALL_UNOP (op_hermitian, octave_ ## TYPE ## _matrix, m_transpose); \ - \ - INSTALL_NCUNOP (op_incr, octave_ ## TYPE ## _matrix, m_incr); \ - INSTALL_NCUNOP (op_decr, octave_ ## TYPE ## _matrix, m_decr); \ + \ + INSTALL_NCUNOP (op_incr, octave_ ## TYPE ## _matrix, m_incr); \ + INSTALL_NCUNOP (op_decr, octave_ ## TYPE ## _matrix, m_decr); \ INSTALL_NCUNOP (op_uminus, octave_ ## TYPE ## _matrix, m_changesign); -#define OCTAVE_INSTALL_MM_INT_ARITH_OPS(PFX, T1, T2) \ +#define OCTAVE_INSTALL_MM_INT_ARITH_OPS(PFX, T1, T2) \ INSTALL_BINOP (op_add, octave_ ## T1 ## matrix, octave_ ## T2 ## matrix, PFX ## _add); \ INSTALL_BINOP (op_sub, octave_ ## T1 ## matrix, octave_ ## T2 ## matrix, PFX ## _sub); \ /* INSTALL_BINOP (op_mul, octave_ ## T1 ## matrix, octave_ ## T2 ## matrix, PFX ## _mul); */ \ @@ -1082,7 +1082,7 @@ INSTALL_BINOP (op_el_pow, octave_ ## T1 ## matrix, octave_ ## T2 ## matrix, PFX ## _el_pow); \ INSTALL_BINOP (op_el_ldiv, octave_ ## T1 ## matrix, octave_ ## T2 ## matrix, PFX ## _el_ldiv); -#define OCTAVE_INSTALL_MM_INT_CMP_OPS(PFX, T1, T2) \ +#define OCTAVE_INSTALL_MM_INT_CMP_OPS(PFX, T1, T2) \ INSTALL_BINOP (op_lt, octave_ ## T1 ## matrix, octave_ ## T2 ## matrix, PFX ## _lt); \ INSTALL_BINOP (op_le, octave_ ## T1 ## matrix, octave_ ## T2 ## matrix, PFX ## _le); \ INSTALL_BINOP (op_eq, octave_ ## T1 ## matrix, octave_ ## T2 ## matrix, PFX ## _eq); \ @@ -1090,7 +1090,7 @@ INSTALL_BINOP (op_gt, octave_ ## T1 ## matrix, octave_ ## T2 ## matrix, PFX ## _gt); \ INSTALL_BINOP (op_ne, octave_ ## T1 ## matrix, octave_ ## T2 ## matrix, PFX ## _ne); 
-#define OCTAVE_INSTALL_MM_INT_BOOL_OPS(PFX, T1, T2) \ +#define OCTAVE_INSTALL_MM_INT_BOOL_OPS(PFX, T1, T2) \ INSTALL_BINOP (op_el_and, octave_ ## T1 ## matrix, octave_ ## T2 ## matrix, PFX ## _el_and); \ INSTALL_BINOP (op_el_or, octave_ ## T1 ## matrix, octave_ ## T2 ## matrix, PFX ## _el_or); \ INSTALL_BINOP (op_el_not_and, octave_ ## T1 ## matrix, octave_ ## T2 ## matrix, PFX ## _el_not_and); \ @@ -1098,66 +1098,66 @@ INSTALL_BINOP (op_el_and_not, octave_ ## T1 ## matrix, octave_ ## T2 ## matrix, PFX ## _el_and_not); \ INSTALL_BINOP (op_el_or_not, octave_ ## T1 ## matrix, octave_ ## T2 ## matrix, PFX ## _el_or_not); -#define OCTAVE_INSTALL_MM_INT_ASSIGN_OPS(PFX, TLHS, TRHS) \ +#define OCTAVE_INSTALL_MM_INT_ASSIGN_OPS(PFX, TLHS, TRHS) \ INSTALL_ASSIGNOP (op_asn_eq, octave_ ## TLHS ## matrix, octave_ ## TRHS ## matrix, PFX ## _assign) -#define OCTAVE_INSTALL_MM_INT_ASSIGNEQ_OPS(PFX, TLHS, TRHS) \ +#define OCTAVE_INSTALL_MM_INT_ASSIGNEQ_OPS(PFX, TLHS, TRHS) \ INSTALL_ASSIGNOP (op_add_eq, octave_ ## TLHS ## matrix, octave_ ## TRHS ## matrix, PFX ## _assign_add) \ INSTALL_ASSIGNOP (op_sub_eq, octave_ ## TLHS ## matrix, octave_ ## TRHS ## matrix, PFX ## _assign_sub) \ INSTALL_ASSIGNOP (op_el_mul_eq, octave_ ## TLHS ## matrix, octave_ ## TRHS ## matrix, PFX ## _assign_el_mul) \ INSTALL_ASSIGNOP (op_el_div_eq, octave_ ## TLHS ## matrix, octave_ ## TRHS ## matrix, PFX ## _assign_el_div) -#define OCTAVE_INSTALL_MM_INT_OPS(TYPE) \ - OCTAVE_INSTALL_M_INT_UNOPS (TYPE) \ - OCTAVE_INSTALL_MM_INT_ARITH_OPS (mm, TYPE ##_, TYPE ## _) \ - OCTAVE_INSTALL_MM_INT_ARITH_OPS (mmx, TYPE ##_, ) \ - OCTAVE_INSTALL_MM_INT_ARITH_OPS (mxm, , TYPE ##_) \ - OCTAVE_INSTALL_MM_INT_ARITH_OPS (mmfx, TYPE ##_, float_) \ - OCTAVE_INSTALL_MM_INT_ARITH_OPS (mfxm, float_, TYPE ##_) \ - OCTAVE_INSTALL_MM_INT_CMP_OPS (mm, TYPE ## _, TYPE ## _) \ - OCTAVE_INSTALL_MM_INT_CMP_OPS (mmx, TYPE ## _, ) \ - OCTAVE_INSTALL_MM_INT_CMP_OPS (mxm, , TYPE ## _) \ - OCTAVE_INSTALL_MM_INT_CMP_OPS (mmfx, TYPE ## _, float_) \ - OCTAVE_INSTALL_MM_INT_CMP_OPS (mfxm, float_, TYPE ## _) \ - OCTAVE_INSTALL_MM_INT_BOOL_OPS (mm, TYPE ## _, TYPE ## _) \ - OCTAVE_INSTALL_MM_INT_BOOL_OPS (mmx, TYPE ## _, ) \ - OCTAVE_INSTALL_MM_INT_BOOL_OPS (mxm, , TYPE ## _) \ - OCTAVE_INSTALL_MM_INT_BOOL_OPS (mmfx, TYPE ## _, float_) \ - OCTAVE_INSTALL_MM_INT_BOOL_OPS (mfxm, float_, TYPE ## _) \ - OCTAVE_INSTALL_MM_INT_ASSIGN_OPS (mm, TYPE ## _, TYPE ## _) \ - OCTAVE_INSTALL_MM_INT_ASSIGNEQ_OPS (mme, TYPE ## _, TYPE ## _) \ - OCTAVE_INSTALL_MM_INT_ASSIGN_OPS (mmx, TYPE ## _, ) \ - OCTAVE_INSTALL_MM_INT_ASSIGN_OPS (mmfx, TYPE ## _, float_) \ +#define OCTAVE_INSTALL_MM_INT_OPS(TYPE) \ + OCTAVE_INSTALL_M_INT_UNOPS (TYPE) \ + OCTAVE_INSTALL_MM_INT_ARITH_OPS (mm, TYPE ##_, TYPE ## _) \ + OCTAVE_INSTALL_MM_INT_ARITH_OPS (mmx, TYPE ##_, ) \ + OCTAVE_INSTALL_MM_INT_ARITH_OPS (mxm, , TYPE ##_) \ + OCTAVE_INSTALL_MM_INT_ARITH_OPS (mmfx, TYPE ##_, float_) \ + OCTAVE_INSTALL_MM_INT_ARITH_OPS (mfxm, float_, TYPE ##_) \ + OCTAVE_INSTALL_MM_INT_CMP_OPS (mm, TYPE ## _, TYPE ## _) \ + OCTAVE_INSTALL_MM_INT_CMP_OPS (mmx, TYPE ## _, ) \ + OCTAVE_INSTALL_MM_INT_CMP_OPS (mxm, , TYPE ## _) \ + OCTAVE_INSTALL_MM_INT_CMP_OPS (mmfx, TYPE ## _, float_) \ + OCTAVE_INSTALL_MM_INT_CMP_OPS (mfxm, float_, TYPE ## _) \ + OCTAVE_INSTALL_MM_INT_BOOL_OPS (mm, TYPE ## _, TYPE ## _) \ + OCTAVE_INSTALL_MM_INT_BOOL_OPS (mmx, TYPE ## _, ) \ + OCTAVE_INSTALL_MM_INT_BOOL_OPS (mxm, , TYPE ## _) \ + OCTAVE_INSTALL_MM_INT_BOOL_OPS (mmfx, TYPE ## _, float_) \ + OCTAVE_INSTALL_MM_INT_BOOL_OPS (mfxm, float_, TYPE ## 
_) \ + OCTAVE_INSTALL_MM_INT_ASSIGN_OPS (mm, TYPE ## _, TYPE ## _) \ + OCTAVE_INSTALL_MM_INT_ASSIGNEQ_OPS (mme, TYPE ## _, TYPE ## _) \ + OCTAVE_INSTALL_MM_INT_ASSIGN_OPS (mmx, TYPE ## _, ) \ + OCTAVE_INSTALL_MM_INT_ASSIGN_OPS (mmfx, TYPE ## _, float_) \ INSTALL_WIDENOP (octave_ ## TYPE ## _matrix, octave_complex_matrix, TYPE ## _m_complex_m_conv) \ INSTALL_WIDENOP (octave_ ## TYPE ## _matrix, octave_float_complex_matrix, TYPE ## _m_float_complex_m_conv) \ INSTALL_ASSIGNCONV (octave_ ## TYPE ## _matrix, octave_complex_matrix, octave_complex_matrix) \ INSTALL_ASSIGNCONV (octave_ ## TYPE ## _matrix, octave_float_complex_matrix, octave_float_complex_matrix) -#define OCTAVE_INSTALL_RE_INT_ASSIGN_OPS(TYPE) \ +#define OCTAVE_INSTALL_RE_INT_ASSIGN_OPS(TYPE) \ INSTALL_ASSIGNOP (op_asn_eq, octave_matrix, octave_ ## TYPE ## _scalar, TYPE ## ms_assign) \ INSTALL_ASSIGNOP (op_asn_eq, octave_matrix, octave_ ## TYPE ## _matrix, TYPE ## mm_assign) \ INSTALL_ASSIGNCONV (octave_scalar, octave_ ## TYPE ## _scalar, octave_matrix) \ INSTALL_ASSIGNCONV (octave_matrix, octave_ ## TYPE ## _matrix, octave_matrix) -#define OCTAVE_INSTALL_FLT_RE_INT_ASSIGN_OPS(TYPE) \ +#define OCTAVE_INSTALL_FLT_RE_INT_ASSIGN_OPS(TYPE) \ INSTALL_ASSIGNOP (op_asn_eq, octave_float_matrix, octave_ ## TYPE ## _scalar, TYPE ## fms_assign) \ INSTALL_ASSIGNOP (op_asn_eq, octave_float_matrix, octave_ ## TYPE ## _matrix, TYPE ## fmm_assign) \ INSTALL_ASSIGNCONV (octave_float_scalar, octave_ ## TYPE ## _scalar, octave_float_matrix) \ INSTALL_ASSIGNCONV (octave_float_matrix, octave_ ## TYPE ## _matrix, octave_float_matrix) -#define OCTAVE_INSTALL_CX_INT_ASSIGN_OPS(TYPE) \ +#define OCTAVE_INSTALL_CX_INT_ASSIGN_OPS(TYPE) \ INSTALL_ASSIGNOP (op_asn_eq, octave_complex_matrix, octave_ ## TYPE ## _scalar, TYPE ## cms_assign) \ INSTALL_ASSIGNOP (op_asn_eq, octave_complex_matrix, octave_ ## TYPE ## _matrix, TYPE ## cmm_assign) \ INSTALL_ASSIGNCONV (octave_complex_scalar, octave_ ## TYPE ## _scalar, octave_complex_matrix) \ INSTALL_ASSIGNCONV (octave_complex_matrix, octave_ ## TYPE ## _matrix, octave_complex_matrix) -#define OCTAVE_INSTALL_FLT_CX_INT_ASSIGN_OPS(TYPE) \ +#define OCTAVE_INSTALL_FLT_CX_INT_ASSIGN_OPS(TYPE) \ INSTALL_ASSIGNOP (op_asn_eq, octave_float_complex_matrix, octave_ ## TYPE ## _scalar, TYPE ## fcms_assign) \ INSTALL_ASSIGNOP (op_asn_eq, octave_float_complex_matrix, octave_ ## TYPE ## _matrix, TYPE ## fcmm_assign) \ INSTALL_ASSIGNCONV (octave_float_complex_scalar, octave_ ## TYPE ## _scalar, octave_complex_matrix) \ INSTALL_ASSIGNCONV (octave_float_complex_matrix, octave_ ## TYPE ## _matrix, octave_complex_matrix) -#define OCTAVE_INSTALL_INT_NULL_ASSIGN_OPS(TYPE) \ +#define OCTAVE_INSTALL_INT_NULL_ASSIGN_OPS(TYPE) \ INSTALL_ASSIGNOP (op_asn_eq, octave_ ## TYPE ## _matrix, octave_null_matrix, TYPE ## null_assign) \ INSTALL_ASSIGNOP (op_asn_eq, octave_ ## TYPE ## _matrix, octave_null_str, TYPE ## null_assign) \ INSTALL_ASSIGNOP (op_asn_eq, octave_ ## TYPE ## _matrix, octave_null_sq_str, TYPE ## null_assign) \ @@ -1165,29 +1165,29 @@ INSTALL_ASSIGNCONV (octave_## TYPE ## _scalar, octave_null_str, octave_ ## TYPE ## _matrix) \ INSTALL_ASSIGNCONV (octave_## TYPE ## _scalar, octave_null_sq_str, octave_ ## TYPE ## _matrix) -#define OCTAVE_INSTALL_INT_OPS(TYPE) \ - OCTAVE_INSTALL_SS_INT_OPS (TYPE) \ - OCTAVE_INSTALL_SM_INT_OPS (TYPE) \ - OCTAVE_INSTALL_MS_INT_OPS (TYPE) \ - OCTAVE_INSTALL_MM_INT_OPS (TYPE) \ - OCTAVE_INSTALL_CONCAT_FN (TYPE) \ - OCTAVE_INSTALL_RE_INT_ASSIGN_OPS (TYPE) \ - OCTAVE_INSTALL_FLT_RE_INT_ASSIGN_OPS (TYPE) \ - 
OCTAVE_INSTALL_CX_INT_ASSIGN_OPS (TYPE) \ - OCTAVE_INSTALL_FLT_CX_INT_ASSIGN_OPS (TYPE) \ +#define OCTAVE_INSTALL_INT_OPS(TYPE) \ + OCTAVE_INSTALL_SS_INT_OPS (TYPE) \ + OCTAVE_INSTALL_SM_INT_OPS (TYPE) \ + OCTAVE_INSTALL_MS_INT_OPS (TYPE) \ + OCTAVE_INSTALL_MM_INT_OPS (TYPE) \ + OCTAVE_INSTALL_CONCAT_FN (TYPE) \ + OCTAVE_INSTALL_RE_INT_ASSIGN_OPS (TYPE) \ + OCTAVE_INSTALL_FLT_RE_INT_ASSIGN_OPS (TYPE) \ + OCTAVE_INSTALL_CX_INT_ASSIGN_OPS (TYPE) \ + OCTAVE_INSTALL_FLT_CX_INT_ASSIGN_OPS (TYPE) \ OCTAVE_INSTALL_INT_NULL_ASSIGN_OPS(TYPE) -#define OCTAVE_INSTALL_SM_INT_ASSIGNCONV(TLHS, TRHS) \ +#define OCTAVE_INSTALL_SM_INT_ASSIGNCONV(TLHS, TRHS) \ INSTALL_ASSIGNCONV (octave_ ## TLHS ## _scalar, octave_ ## TRHS ## _scalar, octave_ ## TLHS ## _matrix) \ INSTALL_ASSIGNCONV (octave_ ## TLHS ## _scalar, octave_ ## TRHS ## _matrix, octave_ ## TLHS ## _matrix) -#define OCTAVE_MIXED_INT_CMP_OPS(T1, T2) \ - OCTAVE_SS_INT_CMP_OPS (T1 ## _ ## T2 ## _ss, T1 ## _, T2 ## _) \ - OCTAVE_SM_INT_CMP_OPS (T1 ## _ ## T2 ## _sm, T1 ## _, T2 ## _) \ - OCTAVE_MS_INT_CMP_OPS (T1 ## _ ## T2 ## _ms, T1 ## _, T2 ## _) \ +#define OCTAVE_MIXED_INT_CMP_OPS(T1, T2) \ + OCTAVE_SS_INT_CMP_OPS (T1 ## _ ## T2 ## _ss, T1 ## _, T2 ## _) \ + OCTAVE_SM_INT_CMP_OPS (T1 ## _ ## T2 ## _sm, T1 ## _, T2 ## _) \ + OCTAVE_MS_INT_CMP_OPS (T1 ## _ ## T2 ## _ms, T1 ## _, T2 ## _) \ OCTAVE_MM_INT_CMP_OPS (T1 ## _ ## T2 ## _mm, T1 ## _, T2 ## _) -#define OCTAVE_INSTALL_MIXED_INT_CMP_OPS(T1, T2) \ +#define OCTAVE_INSTALL_MIXED_INT_CMP_OPS(T1, T2) \ OCTAVE_INSTALL_SS_INT_CMP_OPS (T1 ## _ ## T2 ## _ss, T1 ## _, T2 ## _) \ OCTAVE_INSTALL_SM_INT_CMP_OPS (T1 ## _ ## T2 ## _sm, T1 ## _, T2 ## _) \ OCTAVE_INSTALL_MS_INT_CMP_OPS (T1 ## _ ## T2 ## _ms, T1 ## _, T2 ## _) \ diff -r dd992fd74fce -r e43d83253e28 libinterp/operators/op-str-str.cc --- a/libinterp/operators/op-str-str.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/operators/op-str-str.cc Mon Aug 01 12:40:18 2016 -0400 @@ -47,33 +47,38 @@ // string by string ops. 
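
The umbrella macros refilled above (OCTAVE_INT_OPS, OCTAVE_INSTALL_INT_OPS and their *_ARITH_OPS / *_CMP_OPS / *_BOOL_OPS helpers) are designed to be expanded once per integer type, each invocation composing the smaller generators. The standalone sketch below mirrors only that composition pattern; DEF_NEG, DEF_ABS and DEF_INT_HELPERS are invented names, not Octave code, and the idea that the real macros are invoked as a single OCTAVE_INT_OPS (int8)-style line in a per-type operator file is an assumption about usage, not something shown in this patch.

  // Toy composition of generator macros, loosely analogous to
  // OCTAVE_INT_OPS (TYPE).  All names here are hypothetical.

  #include <cstdio>

  #define DEF_NEG(T)                            \
    static T                                    \
    T ## _neg (T x)                             \
    {                                           \
      return -x;                                \
    }

  #define DEF_ABS(T)                            \
    static T                                    \
    T ## _abs (T x)                             \
    {                                           \
      return x < 0 ? -x : x;                    \
    }

  // The umbrella macro only stitches the generators together.
  #define DEF_INT_HELPERS(T)                    \
    DEF_NEG (T)                                 \
    DEF_ABS (T)

  DEF_INT_HELPERS (int)    // one invocation per type
  DEF_INT_HELPERS (long)

  int
  main (void)
  {
    std::printf ("%d %d %ld %ld\n",
                 int_neg (5), int_abs (-5), long_neg (7L), long_abs (-7L));
    return 0;
  }
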
-#define DEFCHARNDBINOP_FN(name, op, t1, t2, e1, e2, f) \ - static octave_value \ - CONCAT2(oct_binop_, name) (const octave_base_value& a1, const octave_base_value& a2) \ - { \ - dim_vector a1_dims = a1.dims (); \ - dim_vector a2_dims = a2.dims (); \ - \ - bool a1_is_scalar = a1_dims.all_ones (); \ - bool a2_is_scalar = a2_dims.all_ones (); \ - \ - const octave_ ## t1& v1 = dynamic_cast (a1); \ - const octave_ ## t2& v2 = dynamic_cast (a2); \ - \ - if (a1_is_scalar) \ - { \ - if (a2_is_scalar) \ - return octave_value ((v1.e1 ## _value ())(0) op (v2.e2 ## _value ())(0)); \ - else \ - return octave_value (f ((v1.e1 ## _value ())(0), v2.e2 ## _value ())); \ - } \ - else \ - { \ - if (a2_is_scalar) \ - return octave_value (f (v1.e1 ## _value (), (v2.e2 ## _value ())(0))); \ - else \ - return octave_value (f (v1.e1 ## _value (), v2.e2 ## _value ())); \ - } \ +#define DEFCHARNDBINOP_FN(name, op, t1, t2, e1, e2, f) \ + static octave_value \ + CONCAT2(oct_binop_, name) (const octave_base_value& a1, \ + const octave_base_value& a2) \ + { \ + dim_vector a1_dims = a1.dims (); \ + dim_vector a2_dims = a2.dims (); \ + \ + bool a1_is_scalar = a1_dims.all_ones (); \ + bool a2_is_scalar = a2_dims.all_ones (); \ + \ + const octave_ ## t1& v1 = dynamic_cast (a1); \ + const octave_ ## t2& v2 = dynamic_cast (a2); \ + \ + if (a1_is_scalar) \ + { \ + if (a2_is_scalar) \ + return octave_value ((v1.e1 ## _value ())(0) \ + op (v2.e2 ## _value ())(0)); \ + else \ + return octave_value (f ((v1.e1 ## _value ())(0), \ + v2.e2 ## _value ())); \ + } \ + else \ + { \ + if (a2_is_scalar) \ + return octave_value (f (v1.e1 ## _value (), \ + (v2.e2 ## _value ())(0))); \ + else \ + return octave_value (f (v1.e1 ## _value (), \ + v2.e2 ## _value ())); \ + } \ } DEFCHARNDBINOP_FN (lt, <, char_matrix_str, char_matrix_str, char_array, diff -r dd992fd74fce -r e43d83253e28 libinterp/operators/ops.h --- a/libinterp/operators/ops.h Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/operators/ops.h Mon Aug 01 12:40:18 2016 -0400 @@ -37,400 +37,400 @@ extern void install_ops (void); -#define INSTALL_UNOP(op, t, f) \ - octave_value_typeinfo::register_unary_op \ - (octave_value::op, t::static_type_id (), CONCAT2 (oct_unop_, f)); +#define INSTALL_UNOP(op, t, f) \ + octave_value_typeinfo::register_unary_op \ + (octave_value::op, t::static_type_id (), CONCAT2 (oct_unop_, f)); -#define INSTALL_NCUNOP(op, t, f) \ - octave_value_typeinfo::register_non_const_unary_op \ - (octave_value::op, t::static_type_id (), CONCAT2 (oct_unop_, f)); +#define INSTALL_NCUNOP(op, t, f) \ + octave_value_typeinfo::register_non_const_unary_op \ + (octave_value::op, t::static_type_id (), CONCAT2 (oct_unop_, f)); -#define INSTALL_BINOP(op, t1, t2, f) \ - octave_value_typeinfo::register_binary_op \ - (octave_value::op, t1::static_type_id (), t2::static_type_id (), \ - CONCAT2 (oct_binop_, f)); +#define INSTALL_BINOP(op, t1, t2, f) \ + octave_value_typeinfo::register_binary_op \ + (octave_value::op, t1::static_type_id (), t2::static_type_id (), \ + CONCAT2 (oct_binop_, f)); -#define INSTALL_CATOP(t1, t2, f) \ - octave_value_typeinfo::register_cat_op \ - (t1::static_type_id (), t2::static_type_id (), CONCAT2 (oct_catop_, f)); +#define INSTALL_CATOP(t1, t2, f) \ + octave_value_typeinfo::register_cat_op \ + (t1::static_type_id (), t2::static_type_id (), CONCAT2 (oct_catop_, f)); -#define INSTALL_ASSIGNOP(op, t1, t2, f) \ - octave_value_typeinfo::register_assign_op \ - (octave_value::op, t1::static_type_id (), t2::static_type_id (), \ - CONCAT2 (oct_assignop_, f)); +#define 
INSTALL_ASSIGNOP(op, t1, t2, f) \ + octave_value_typeinfo::register_assign_op \ + (octave_value::op, t1::static_type_id (), t2::static_type_id (), \ + CONCAT2 (oct_assignop_, f)); -#define INSTALL_ASSIGNANYOP(op, t1, f) \ - octave_value_typeinfo::register_assignany_op \ - (octave_value::op, t1::static_type_id (), CONCAT2 (oct_assignop_, f)); +#define INSTALL_ASSIGNANYOP(op, t1, f) \ + octave_value_typeinfo::register_assignany_op \ + (octave_value::op, t1::static_type_id (), CONCAT2 (oct_assignop_, f)); -#define INSTALL_ASSIGNCONV(t1, t2, tr) \ - octave_value_typeinfo::register_pref_assign_conv \ - (t1::static_type_id (), t2::static_type_id (), tr::static_type_id ()); +#define INSTALL_ASSIGNCONV(t1, t2, tr) \ + octave_value_typeinfo::register_pref_assign_conv \ + (t1::static_type_id (), t2::static_type_id (), tr::static_type_id ()); -#define INSTALL_CONVOP(t1, t2, f) \ - octave_value_typeinfo::register_type_conv_op \ - (t1::static_type_id (), t2::static_type_id (), CONCAT2 (oct_conv_, f)); +#define INSTALL_CONVOP(t1, t2, f) \ + octave_value_typeinfo::register_type_conv_op \ + (t1::static_type_id (), t2::static_type_id (), CONCAT2 (oct_conv_, f)); -#define INSTALL_WIDENOP(t1, t2, f) \ - octave_value_typeinfo::register_widening_op \ - (t1::static_type_id (), t2::static_type_id (), CONCAT2 (oct_conv_, f)); +#define INSTALL_WIDENOP(t1, t2, f) \ + octave_value_typeinfo::register_widening_op \ + (t1::static_type_id (), t2::static_type_id (), CONCAT2 (oct_conv_, f)); -#define DEFASSIGNOP(name, t1, t2) \ - static octave_value \ - CONCAT2 (oct_assignop_, name) (octave_base_value& a1, \ - const octave_value_list& idx, \ +#define DEFASSIGNOP(name, t1, t2) \ + static octave_value \ + CONCAT2 (oct_assignop_, name) (octave_base_value& a1, \ + const octave_value_list& idx, \ const octave_base_value& a2) -#define DEFASSIGNOP_FN(name, t1, t2, f) \ - static octave_value \ - CONCAT2 (oct_assignop_, name) (octave_base_value& a1, \ - const octave_value_list& idx, \ - const octave_base_value& a2) \ - { \ +#define DEFASSIGNOP_FN(name, t1, t2, f) \ + static octave_value \ + CONCAT2 (oct_assignop_, name) (octave_base_value& a1, \ + const octave_value_list& idx, \ + const octave_base_value& a2) \ + { \ CONCAT2 (octave_, t1)& v1 = dynamic_cast (a1); \ const CONCAT2 (octave_, t2)& v2 = dynamic_cast (a2); \ - \ - v1.f (idx, v2.CONCAT2 (t1, _value) ()); \ - return octave_value (); \ + \ + v1.f (idx, v2.CONCAT2 (t1, _value) ()); \ + return octave_value (); \ } -#define DEFNULLASSIGNOP_FN(name, t, f) \ - static octave_value \ - CONCAT2 (oct_assignop_, name) (octave_base_value& a, \ - const octave_value_list& idx, \ - const octave_base_value&) \ - { \ - CONCAT2 (octave_, t)& v = dynamic_cast (a); \ - \ - v.f (idx); \ - return octave_value (); \ +#define DEFNULLASSIGNOP_FN(name, t, f) \ + static octave_value \ + CONCAT2 (oct_assignop_, name) (octave_base_value& a, \ + const octave_value_list& idx, \ + const octave_base_value&) \ + { \ + CONCAT2 (octave_, t)& v = dynamic_cast (a); \ + \ + v.f (idx); \ + return octave_value (); \ } -#define DEFNDASSIGNOP_FN(name, t1, t2, e, f) \ - static octave_value \ - CONCAT2 (oct_assignop_, name) (octave_base_value& a1, \ - const octave_value_list& idx, \ - const octave_base_value& a2) \ - { \ +#define DEFNDASSIGNOP_FN(name, t1, t2, e, f) \ + static octave_value \ + CONCAT2 (oct_assignop_, name) (octave_base_value& a1, \ + const octave_value_list& idx, \ + const octave_base_value& a2) \ + { \ CONCAT2 (octave_, t1)& v1 = dynamic_cast (a1); \ const CONCAT2 (octave_, t2)& v2 = dynamic_cast 
(a2); \ - \ - v1.f (idx, v2.CONCAT2 (e, _value) ()); \ - return octave_value (); \ + \ + v1.f (idx, v2.CONCAT2 (e, _value) ()); \ + return octave_value (); \ } // FIXME: the following currently don't handle index. -#define DEFNDASSIGNOP_OP(name, t1, t2, f, op) \ - static octave_value \ - CONCAT2 (oct_assignop_, name) (octave_base_value& a1, \ - const octave_value_list& idx, \ - const octave_base_value& a2) \ - { \ +#define DEFNDASSIGNOP_OP(name, t1, t2, f, op) \ + static octave_value \ + CONCAT2 (oct_assignop_, name) (octave_base_value& a1, \ + const octave_value_list& idx, \ + const octave_base_value& a2) \ + { \ CONCAT2 (octave_, t1)& v1 = dynamic_cast (a1); \ const CONCAT2 (octave_, t2)& v2 = dynamic_cast (a2); \ - \ - assert (idx.empty ()); \ - v1.matrix_ref () op v2.CONCAT2 (f, _value) (); \ - \ - return octave_value (); \ + \ + assert (idx.empty ()); \ + v1.matrix_ref () op v2.CONCAT2 (f, _value) (); \ + \ + return octave_value (); \ } -#define DEFNDASSIGNOP_FNOP(name, t1, t2, f, fnop) \ - static octave_value \ - CONCAT2 (oct_assignop_, name) (octave_base_value& a1, \ - const octave_value_list& idx, \ - const octave_base_value& a2) \ - { \ +#define DEFNDASSIGNOP_FNOP(name, t1, t2, f, fnop) \ + static octave_value \ + CONCAT2 (oct_assignop_, name) (octave_base_value& a1, \ + const octave_value_list& idx, \ + const octave_base_value& a2) \ + { \ CONCAT2 (octave_, t1)& v1 = dynamic_cast (a1); \ const CONCAT2 (octave_, t2)& v2 = dynamic_cast (a2); \ - \ - assert (idx.empty ()); \ - fnop (v1.matrix_ref (), v2.CONCAT2 (f, _value) ()); \ - \ - return octave_value (); \ + \ + assert (idx.empty ()); \ + fnop (v1.matrix_ref (), v2.CONCAT2 (f, _value) ()); \ + \ + return octave_value (); \ } -#define DEFASSIGNANYOP_FN(name, t1, f) \ - static octave_value \ - CONCAT2 (oct_assignop_, name) (octave_base_value& a1, \ - const octave_value_list& idx, \ - const octave_value& a2) \ - { \ +#define DEFASSIGNANYOP_FN(name, t1, f) \ + static octave_value \ + CONCAT2 (oct_assignop_, name) (octave_base_value& a1, \ + const octave_value_list& idx, \ + const octave_value& a2) \ + { \ CONCAT2 (octave_, t1)& v1 = dynamic_cast (a1); \ - \ - v1.f (idx, a2); \ - return octave_value (); \ + \ + v1.f (idx, a2); \ + return octave_value (); \ } -#define CONVDECL(name) \ - static octave_base_value * \ +#define CONVDECL(name) \ + static octave_base_value * \ CONCAT2 (oct_conv_, name) (const octave_base_value& a) -#define CONVDECLX(name) \ - static octave_base_value * \ +#define CONVDECLX(name) \ + static octave_base_value * \ CONCAT2 (oct_conv_, name) (const octave_base_value&) -#define DEFCONV(name, a_dummy, b_dummy) \ +#define DEFCONV(name, a_dummy, b_dummy) \ CONVDECL (name) -#define DEFCONVFNX(name, tfrom, ovtto, tto, e) \ - CONVDECL (name) \ - { \ +#define DEFCONVFNX(name, tfrom, ovtto, tto, e) \ + CONVDECL (name) \ + { \ const CONCAT2 (octave_, tfrom)& v = dynamic_cast (a); \ - \ + \ return new CONCAT2 (octave_, ovtto) (CONCAT2 (tto, NDArray) (v.CONCAT2 (e, array_value) ())); \ } -#define DEFCONVFNX2(name, tfrom, ovtto, e) \ - CONVDECL (name) \ - { \ +#define DEFCONVFNX2(name, tfrom, ovtto, e) \ + CONVDECL (name) \ + { \ const CONCAT2 (octave_, tfrom)& v = dynamic_cast (a); \ - \ + \ return new CONCAT2 (octave_, ovtto) (v.CONCAT2 (e, array_value) ()); \ } -#define DEFDBLCONVFN(name, ovtfrom, e) \ - CONVDECL (name) \ - { \ +#define DEFDBLCONVFN(name, ovtfrom, e) \ + CONVDECL (name) \ + { \ const CONCAT2 (octave_, ovtfrom)& v = dynamic_cast (a); \ - \ - return new octave_matrix (NDArray (v.CONCAT2 (e, _value) ())); \ 
+ \ + return new octave_matrix (NDArray (v.CONCAT2 (e, _value) ())); \ } -#define DEFFLTCONVFN(name, ovtfrom, e) \ - CONVDECL (name) \ - { \ +#define DEFFLTCONVFN(name, ovtfrom, e) \ + CONVDECL (name) \ + { \ const CONCAT2 (octave_, ovtfrom)& v = dynamic_cast (a); \ - \ + \ return new octave_float_matrix (FloatNDArray (v.CONCAT2 (e, _value) ())); \ } -#define DEFSTRINTCONVFN(name, tto) \ +#define DEFSTRINTCONVFN(name, tto) \ DEFCONVFNX(name, char_matrix_str, CONCAT2 (tto, _matrix), tto, char_) -#define DEFSTRDBLCONVFN(name, tfrom) \ +#define DEFSTRDBLCONVFN(name, tfrom) \ DEFCONVFNX(name, tfrom, matrix, , char_) -#define DEFSTRFLTCONVFN(name, tfrom) \ +#define DEFSTRFLTCONVFN(name, tfrom) \ DEFCONVFNX(name, tfrom, float_matrix, Float, char_) -#define DEFCONVFN(name, tfrom, tto) \ +#define DEFCONVFN(name, tfrom, tto) \ DEFCONVFNX2 (name, tfrom, CONCAT2 (tto, _matrix), CONCAT2 (tto, _)) -#define DEFCONVFN2(name, tfrom, sm, tto) \ +#define DEFCONVFN2(name, tfrom, sm, tto) \ DEFCONVFNX2 (name, CONCAT3 (tfrom, _, sm), CONCAT2 (tto, _matrix), CONCAT2 (tto, _)) -#define DEFUNOPX(name, t) \ - static octave_value \ +#define DEFUNOPX(name, t) \ + static octave_value \ CONCAT2 (oct_unop_, name) (const octave_base_value&) -#define DEFUNOP(name, t) \ - static octave_value \ +#define DEFUNOP(name, t) \ + static octave_value \ CONCAT2 (oct_unop_, name) (const octave_base_value& a) -#define DEFUNOP_OP(name, t, op) \ - static octave_value \ - CONCAT2 (oct_unop_, name) (const octave_base_value& a) \ - { \ +#define DEFUNOP_OP(name, t, op) \ + static octave_value \ + CONCAT2 (oct_unop_, name) (const octave_base_value& a) \ + { \ const CONCAT2 (octave_, t)& v = dynamic_cast (a); \ - return octave_value (op v.CONCAT2 (t, _value) ()); \ + return octave_value (op v.CONCAT2 (t, _value) ()); \ } -#define DEFNDUNOP_OP(name, t, e, op) \ - static octave_value \ - CONCAT2 (oct_unop_, name) (const octave_base_value& a) \ - { \ +#define DEFNDUNOP_OP(name, t, e, op) \ + static octave_value \ + CONCAT2 (oct_unop_, name) (const octave_base_value& a) \ + { \ const CONCAT2 (octave_, t)& v = dynamic_cast (a); \ - return octave_value (op v.CONCAT2 (e, _value) ()); \ + return octave_value (op v.CONCAT2 (e, _value) ()); \ } // FIXME: in some cases, the constructor isn't necessary. 
-#define DEFUNOP_FN(name, t, f) \ - static octave_value \ - CONCAT2 (oct_unop_, name) (const octave_base_value& a) \ - { \ +#define DEFUNOP_FN(name, t, f) \ + static octave_value \ + CONCAT2 (oct_unop_, name) (const octave_base_value& a) \ + { \ const CONCAT2 (octave_, t)& v = dynamic_cast (a); \ - return octave_value (f (v.CONCAT2 (t, _value) ())); \ + return octave_value (f (v.CONCAT2 (t, _value) ())); \ } -#define DEFNDUNOP_FN(name, t, e, f) \ - static octave_value \ - CONCAT2 (oct_unop_, name) (const octave_base_value& a) \ - { \ +#define DEFNDUNOP_FN(name, t, e, f) \ + static octave_value \ + CONCAT2 (oct_unop_, name) (const octave_base_value& a) \ + { \ const CONCAT2 (octave_, t)& v = dynamic_cast (a); \ - return octave_value (f (v.CONCAT2 (e, _value) ())); \ + return octave_value (f (v.CONCAT2 (e, _value) ())); \ } -#define DEFNCUNOP_METHOD(name, t, method) \ - static void \ - CONCAT2 (oct_unop_, name) (octave_base_value& a) \ - { \ - CONCAT2 (octave_, t)& v = dynamic_cast (a); \ - v.method (); \ +#define DEFNCUNOP_METHOD(name, t, method) \ + static void \ + CONCAT2 (oct_unop_, name) (octave_base_value& a) \ + { \ + CONCAT2 (octave_, t)& v = dynamic_cast (a); \ + v.method (); \ } -#define DEFBINOPX(name, t1, t2) \ - static octave_value \ +#define DEFBINOPX(name, t1, t2) \ + static octave_value \ CONCAT2 (oct_binop_, name) (const octave_base_value&, \ const octave_base_value&) -#define DEFBINOP(name, t1, t2) \ - static octave_value \ - CONCAT2 (oct_binop_, name) (const octave_base_value& a1, \ +#define DEFBINOP(name, t1, t2) \ + static octave_value \ + CONCAT2 (oct_binop_, name) (const octave_base_value& a1, \ const octave_base_value& a2) -#define DEFBINOP_OP(name, t1, t2, op) \ - static octave_value \ - CONCAT2 (oct_binop_, name) (const octave_base_value& a1, \ - const octave_base_value& a2) \ - { \ +#define DEFBINOP_OP(name, t1, t2, op) \ + static octave_value \ + CONCAT2 (oct_binop_, name) (const octave_base_value& a1, \ + const octave_base_value& a2) \ + { \ const CONCAT2 (octave_, t1)& v1 = dynamic_cast (a1); \ const CONCAT2 (octave_, t2)& v2 = dynamic_cast (a2); \ - \ - return octave_value \ - (v1.CONCAT2 (t1, _value) () op v2.CONCAT2 (t2, _value) ()); \ + \ + return octave_value \ + (v1.CONCAT2 (t1, _value) () op v2.CONCAT2 (t2, _value) ()); \ } -#define DEFCMPLXCMPOP_OP(name, t1, t2, op) \ - static octave_value \ - CONCAT2 (oct_binop_, name) (const octave_base_value& a1, \ - const octave_base_value& a2) \ - { \ +#define DEFCMPLXCMPOP_OP(name, t1, t2, op) \ + static octave_value \ + CONCAT2 (oct_binop_, name) (const octave_base_value& a1, \ + const octave_base_value& a2) \ + { \ const CONCAT2 (octave_, t1)& v1 = dynamic_cast (a1); \ const CONCAT2 (octave_, t2)& v2 = dynamic_cast (a2); \ - \ - warn_complex_cmp (); \ - \ - return octave_value \ - (v1.CONCAT2 (t1, _value) () op v2.CONCAT2 (t2, _value) ()); \ + \ + warn_complex_cmp (); \ + \ + return octave_value \ + (v1.CONCAT2 (t1, _value) () op v2.CONCAT2 (t2, _value) ()); \ } -#define DEFSCALARBOOLOP_OP(name, t1, t2, op) \ - static octave_value \ - CONCAT2 (oct_binop_, name) (const octave_base_value& a1, \ - const octave_base_value& a2) \ - { \ +#define DEFSCALARBOOLOP_OP(name, t1, t2, op) \ + static octave_value \ + CONCAT2 (oct_binop_, name) (const octave_base_value& a1, \ + const octave_base_value& a2) \ + { \ const CONCAT2 (octave_, t1)& v1 = dynamic_cast (a1); \ const CONCAT2 (octave_, t2)& v2 = dynamic_cast (a2); \ - \ + \ if (octave::math::isnan (v1.CONCAT2 (t1, _value) ()) || octave::math::isnan (v2.CONCAT2 (t2, 
_value) ())) \ - err_nan_to_logical_conversion (); \ - \ - return octave_value \ - (v1.CONCAT2 (t1, _value) () op v2.CONCAT2 (t2, _value) ()); \ + err_nan_to_logical_conversion (); \ + \ + return octave_value \ + (v1.CONCAT2 (t1, _value) () op v2.CONCAT2 (t2, _value) ()); \ } -#define DEFNDBINOP_OP(name, t1, t2, e1, e2, op) \ - static octave_value \ - CONCAT2 (oct_binop_, name) (const octave_base_value& a1, \ - const octave_base_value& a2) \ - { \ +#define DEFNDBINOP_OP(name, t1, t2, e1, e2, op) \ + static octave_value \ + CONCAT2 (oct_binop_, name) (const octave_base_value& a1, \ + const octave_base_value& a2) \ + { \ const CONCAT2 (octave_, t1)& v1 = dynamic_cast (a1); \ const CONCAT2 (octave_, t2)& v2 = dynamic_cast (a2); \ - \ - return octave_value \ - (v1.CONCAT2 (e1, _value) () op v2.CONCAT2 (e2, _value) ()); \ + \ + return octave_value \ + (v1.CONCAT2 (e1, _value) () op v2.CONCAT2 (e2, _value) ()); \ } // FIXME: in some cases, the constructor isn't necessary. -#define DEFBINOP_FN(name, t1, t2, f) \ - static octave_value \ - CONCAT2 (oct_binop_, name) (const octave_base_value& a1, \ - const octave_base_value& a2) \ - { \ +#define DEFBINOP_FN(name, t1, t2, f) \ + static octave_value \ + CONCAT2 (oct_binop_, name) (const octave_base_value& a1, \ + const octave_base_value& a2) \ + { \ const CONCAT2 (octave_, t1)& v1 = dynamic_cast (a1); \ const CONCAT2 (octave_, t2)& v2 = dynamic_cast (a2); \ - \ + \ return octave_value (f (v1.CONCAT2 (t1, _value) (), v2.CONCAT2 (t2, _value) ())); \ } -#define DEFNDBINOP_FN(name, t1, t2, e1, e2, f) \ - static octave_value \ - CONCAT2 (oct_binop_, name) (const octave_base_value& a1, \ - const octave_base_value& a2) \ - { \ +#define DEFNDBINOP_FN(name, t1, t2, e1, e2, f) \ + static octave_value \ + CONCAT2 (oct_binop_, name) (const octave_base_value& a1, \ + const octave_base_value& a2) \ + { \ const CONCAT2 (octave_, t1)& v1 = dynamic_cast (a1); \ const CONCAT2 (octave_, t2)& v2 = dynamic_cast (a2); \ - \ + \ return octave_value (f (v1.CONCAT2 (e1, _value) (), v2.CONCAT2 (e2, _value) ())); \ } -#define DEFNDCMPLXCMPOP_FN(name, t1, t2, e1, e2, f) \ - static octave_value \ - CONCAT2 (oct_binop_, name) (const octave_base_value& a1, \ - const octave_base_value& a2) \ - { \ +#define DEFNDCMPLXCMPOP_FN(name, t1, t2, e1, e2, f) \ + static octave_value \ + CONCAT2 (oct_binop_, name) (const octave_base_value& a1, \ + const octave_base_value& a2) \ + { \ const CONCAT2 (octave_, t1)& v1 = dynamic_cast (a1); \ const CONCAT2 (octave_, t2)& v2 = dynamic_cast (a2); \ - \ + \ return octave_value (f (v1.CONCAT2 (e1, _value) (), v2.CONCAT2 (e2, _value) ())); \ } -#define DEFCATOPX(name, t1, t2) \ - static octave_value \ +#define DEFCATOPX(name, t1, t2) \ + static octave_value \ CONCAT2 (oct_catop_, name) (octave_base_value&, const octave_base_value&, \ const Array& ra_idx) -#define DEFCATOP(name, t1, t2) \ - static octave_value \ - CONCAT2 (oct_catop_, name) (octave_base_value& a1, \ - const octave_base_value& a2, \ +#define DEFCATOP(name, t1, t2) \ + static octave_value \ + CONCAT2 (oct_catop_, name) (octave_base_value& a1, \ + const octave_base_value& a2, \ const Array& ra_idx) // FIXME: in some cases, the constructor isn't necessary. 
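
Each of the DEFBINOP_OP / DEFNDBINOP_OP-style generators above pastes its name argument onto oct_binop_ and emits one static dispatch function; the refill only moves the continuation backslashes, so the expansion is unchanged. The toy program below reproduces the pattern in isolation so the expansion is easy to see. DEF_TOY_BINOP and the toy_binop_* functions are invented for illustration; the real macros dispatch on octave_base_value subclasses rather than plain int.

  // Minimal stand-alone imitation of the DEFBINOP_OP pattern
  // (hypothetical names, not Octave API).

  #include <iostream>

  #define CONCAT2(x, y) x ## y

  #define DEF_TOY_BINOP(name, op)               \
    static int                                  \
    CONCAT2 (toy_binop_, name) (int a, int b)   \
    {                                           \
      return a op b;                            \
    }

  // Expands to:  static int toy_binop_add (int a, int b) { return a + b; }
  DEF_TOY_BINOP (add, +)
  DEF_TOY_BINOP (mul, *)

  int
  main (void)
  {
    std::cout << toy_binop_add (2, 3) << ' ' << toy_binop_mul (2, 3) << '\n';
    return 0;
  }
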
-#define DEFCATOP_FN(name, t1, t2, f) \ - static octave_value \ - CONCAT2 (oct_catop_, name) (octave_base_value& a1, \ - const octave_base_value& a2, \ - const Array& ra_idx) \ - { \ +#define DEFCATOP_FN(name, t1, t2, f) \ + static octave_value \ + CONCAT2 (oct_catop_, name) (octave_base_value& a1, \ + const octave_base_value& a2, \ + const Array& ra_idx) \ + { \ CONCAT2 (octave_, t1)& v1 = dynamic_cast (a1); \ const CONCAT2 (octave_, t2)& v2 = dynamic_cast (a2); \ - \ + \ return octave_value (v1.CONCAT2 (t1, _value) () . f (v2.CONCAT2 (t2, _value) (), ra_idx)); \ } -#define DEFNDCATOP_FN(name, t1, t2, e1, e2, f) \ - static octave_value \ - CONCAT2 (oct_catop_, name) (octave_base_value& a1, \ - const octave_base_value& a2, \ - const Array& ra_idx) \ - { \ +#define DEFNDCATOP_FN(name, t1, t2, e1, e2, f) \ + static octave_value \ + CONCAT2 (oct_catop_, name) (octave_base_value& a1, \ + const octave_base_value& a2, \ + const Array& ra_idx) \ + { \ CONCAT2 (octave_, t1)& v1 = dynamic_cast (a1); \ const CONCAT2 (octave_, t2)& v2 = dynamic_cast (a2); \ - \ + \ return octave_value (v1.CONCAT2 (e1, _value) () . f (v2.CONCAT2 (e2, _value) (), ra_idx)); \ } -#define DEFNDCHARCATOP_FN(name, t1, t2, f) \ - static octave_value \ - CONCAT2 (oct_catop_, name) (octave_base_value& a1, \ - const octave_base_value& a2, \ - const Array& ra_idx) \ - { \ +#define DEFNDCHARCATOP_FN(name, t1, t2, f) \ + static octave_value \ + CONCAT2 (oct_catop_, name) (octave_base_value& a1, \ + const octave_base_value& a2, \ + const Array& ra_idx) \ + { \ CONCAT2 (octave_, t1)& v1 = dynamic_cast (a1); \ const CONCAT2 (octave_, t2)& v2 = dynamic_cast (a2); \ - \ + \ return octave_value (v1.char_array_value () . f (v2.char_array_value (), ra_idx), \ - ((a1.is_sq_string () || a2.is_sq_string ()) \ - ? '\'' : '"')); \ + ((a1.is_sq_string () || a2.is_sq_string ()) \ + ? '\'' : '"')); \ } // For compatibility, the second arg is always converted to the type // of the first. Hmm. -#define DEFNDCATOP_FN2(name, t1, t2, tc1, tc2, e1, e2, f) \ - static octave_value \ - CONCAT2 (oct_catop_, name) (octave_base_value& a1, \ - const octave_base_value& a2, \ - const Array& ra_idx) \ - { \ +#define DEFNDCATOP_FN2(name, t1, t2, tc1, tc2, e1, e2, f) \ + static octave_value \ + CONCAT2 (oct_catop_, name) (octave_base_value& a1, \ + const octave_base_value& a2, \ + const Array& ra_idx) \ + { \ CONCAT2 (octave_, t1)& v1 = dynamic_cast (a1); \ const CONCAT2 (octave_, t2)& v2 = dynamic_cast (a2); \ - \ + \ return octave_value (tc1 (v1.CONCAT2 (e1, _value) ()) . f (tc2 (v2.CONCAT2 (e2, _value) ()), ra_idx)); \ } diff -r dd992fd74fce -r e43d83253e28 libinterp/parse-tree/lex.ll --- a/libinterp/parse-tree/lex.ll Tue Jul 12 14:28:07 2016 -0400 +++ b/libinterp/parse-tree/lex.ll Mon Aug 01 12:40:18 2016 -0400 @@ -133,7 +133,7 @@ #define YY_NO_UNISTD_H 1 #define isatty octave_isatty_wrapper -#if ! (defined (FLEX_SCANNER) \ +#if ! (defined (FLEX_SCANNER) \ && defined (YY_FLEX_MAJOR_VERSION) && YY_FLEX_MAJOR_VERSION >= 2 \ && defined (YY_FLEX_MINOR_VERSION) && YY_FLEX_MINOR_VERSION >= 5) #error lex.l requires flex version 2.5.4 or later @@ -147,7 +147,7 @@ #if defined (YY_INPUT) # undef YY_INPUT #endif -#define YY_INPUT(buf, result, max_size) \ +#define YY_INPUT(buf, result, max_size) \ result = curr_lexer->fill_flex_buffer (buf, max_size) // Try to avoid crashing out completely on fatal scanner errors. 
@@ -155,86 +155,83 @@ #if defined (YY_FATAL_ERROR) # undef YY_FATAL_ERROR #endif -#define YY_FATAL_ERROR(msg) \ - (yyget_extra (yyscanner))->fatal_error (msg) - -#define CMD_OR_OP(PATTERN, TOK, COMPAT) \ - \ - do \ - { \ - curr_lexer->lexer_debug (PATTERN); \ - \ - if (curr_lexer->looks_like_command_arg ()) \ - { \ - yyless (0); \ - curr_lexer->push_start_state (COMMAND_START); \ - } \ - else \ - { \ - return curr_lexer->handle_op_internal (TOK, false, COMPAT); \ - } \ - } \ - while (0) - -#define CMD_OR_COMPUTED_ASSIGN_OP(PATTERN, TOK) \ - \ - do \ - { \ - curr_lexer->lexer_debug (PATTERN); \ - \ - if (curr_lexer->previous_token_may_be_command () \ - && curr_lexer->space_follows_previous_token ()) \ - { \ - yyless (0); \ - curr_lexer->push_start_state (COMMAND_START); \ - } \ - else \ - { \ - return curr_lexer->handle_language_extension_op (PATTERN, TOK, \ - false); \ - } \ - } \ - while (0) - -#define CMD_OR_UNARY_OP(PATTERN, TOK, COMPAT) \ - \ - do \ - { \ - curr_lexer->lexer_debug (PATTERN); \ - \ - if (curr_lexer->previous_token_may_be_command ()) \ - { \ - if (curr_lexer->looks_like_command_arg ()) \ - { \ - yyless (0); \ - curr_lexer->push_start_state (COMMAND_START); \ - } \ - else \ - { \ - return curr_lexer->handle_op_internal (TOK, false, COMPAT); \ - } \ - } \ - else \ - { \ - int tok \ - = (COMPAT \ - ? curr_lexer->handle_unary_op (TOK) \ - : curr_lexer->handle_language_extension_unary_op (TOK)); \ - \ - if (tok < 0) \ - { \ - yyless (0); \ - curr_lexer->xunput (','); \ - /* Adjust for comma that was not really in the input stream. */ \ - curr_lexer->current_input_column--; \ - } \ - else \ - { \ - return tok; \ - } \ - } \ - } \ - while (0) +#define YY_FATAL_ERROR(msg) \ + (yyget_extra (yyscanner))->fatal_error (msg) + +#define CMD_OR_OP(PATTERN, TOK, COMPAT) \ + do \ + { \ + curr_lexer->lexer_debug (PATTERN); \ + \ + if (curr_lexer->looks_like_command_arg ()) \ + { \ + yyless (0); \ + curr_lexer->push_start_state (COMMAND_START); \ + } \ + else \ + { \ + return curr_lexer->handle_op_internal (TOK, false, COMPAT); \ + } \ + } \ + while (0) + +#define CMD_OR_COMPUTED_ASSIGN_OP(PATTERN, TOK) \ + do \ + { \ + curr_lexer->lexer_debug (PATTERN); \ + \ + if (curr_lexer->previous_token_may_be_command () \ + && curr_lexer->space_follows_previous_token ()) \ + { \ + yyless (0); \ + curr_lexer->push_start_state (COMMAND_START); \ + } \ + else \ + { \ + return curr_lexer->handle_language_extension_op (PATTERN, TOK, \ + false); \ + } \ + } \ + while (0) + +#define CMD_OR_UNARY_OP(PATTERN, TOK, COMPAT) \ + do \ + { \ + curr_lexer->lexer_debug (PATTERN); \ + \ + if (curr_lexer->previous_token_may_be_command ()) \ + { \ + if (curr_lexer->looks_like_command_arg ()) \ + { \ + yyless (0); \ + curr_lexer->push_start_state (COMMAND_START); \ + } \ + else \ + { \ + return curr_lexer->handle_op_internal (TOK, false, COMPAT); \ + } \ + } \ + else \ + { \ + int tok \ + = (COMPAT \ + ? curr_lexer->handle_unary_op (TOK) \ + : curr_lexer->handle_language_extension_unary_op (TOK)); \ + \ + if (tok < 0) \ + { \ + yyless (0); \ + curr_lexer->xunput (','); \ + /* Adjust for comma that was not really in the input stream. */ \ + curr_lexer->current_input_column--; \ + } \ + else \ + { \ + return tok; \ + } \ + } \ + } \ + while (0) // We can't rely on the trick used elsewhere of sticking ASCII 1 in // the input buffer and recognizing it as a special case because ASCII @@ -242,85 +239,85 @@ // end of the buffer, ask for more input. If we are at the end of the // file, deal with it. 
Otherwise, just keep going with the text from // the current buffer. -#define HANDLE_STRING_CONTINUATION \ - do \ - { \ - curr_lexer->decrement_promptflag (); \ - curr_lexer->input_line_number++; \ - curr_lexer->current_input_column = 1; \ - \ - if (curr_lexer->is_push_lexer ()) \ - { \ - if (curr_lexer->at_end_of_buffer ()) \ - return -1; \ - \ - if (curr_lexer->at_end_of_file ()) \ - return curr_lexer->handle_end_of_input (); \ - } \ - } \ - while (0) +#define HANDLE_STRING_CONTINUATION \ + do \ + { \ + curr_lexer->decrement_promptflag (); \ + curr_lexer->input_line_number++; \ + curr_lexer->current_input_column = 1; \ + \ + if (curr_lexer->is_push_lexer ()) \ + { \ + if (curr_lexer->at_end_of_buffer ()) \ + return -1; \ + \ + if (curr_lexer->at_end_of_file ()) \ + return curr_lexer->handle_end_of_input (); \ + } \ + } \ + while (0) // When a command argument boundary is detected, push out the // current argument being built. This one seems like a good // candidate for a function call. -#define COMMAND_ARG_FINISH \ - do \ - { \ - if (curr_lexer->string_text.empty ()) \ - break; \ - \ - int retval = curr_lexer->handle_token (curr_lexer->string_text, \ - SQ_STRING); \ - \ - curr_lexer->string_text = ""; \ - curr_lexer->command_arg_paren_count = 0; \ - \ - yyless (0); \ - \ - return retval; \ - } \ - while (0) - -#define HANDLE_IDENTIFIER(pattern, get_set) \ - do \ - { \ - curr_lexer->lexer_debug (pattern); \ - \ - int tok = curr_lexer->previous_token_value (); \ - \ - if (curr_lexer->whitespace_is_significant () \ - && curr_lexer->space_follows_previous_token () \ - && ! (tok == '[' || tok == '{' \ - || curr_lexer->previous_token_is_binop ())) \ - { \ - yyless (0); \ - unput (','); \ - } \ - else \ - { \ - if (! curr_lexer->looking_at_decl_list \ - && curr_lexer->previous_token_may_be_command ()) \ - { \ - yyless (0); \ - curr_lexer->push_start_state (COMMAND_START); \ - } \ - else \ - { \ - if (get_set) \ - { \ - yyless (3); \ - curr_lexer->maybe_classdef_get_set_method = false; \ - } \ - \ - int id_tok = curr_lexer->handle_identifier (); \ - \ - if (id_tok >= 0) \ - return curr_lexer->count_token_internal (id_tok); \ - } \ - } \ - } \ - while (0) +#define COMMAND_ARG_FINISH \ + do \ + { \ + if (curr_lexer->string_text.empty ()) \ + break; \ + \ + int retval = curr_lexer->handle_token (curr_lexer->string_text, \ + SQ_STRING); \ + \ + curr_lexer->string_text = ""; \ + curr_lexer->command_arg_paren_count = 0; \ + \ + yyless (0); \ + \ + return retval; \ + } \ + while (0) + +#define HANDLE_IDENTIFIER(pattern, get_set) \ + do \ + { \ + curr_lexer->lexer_debug (pattern); \ + \ + int tok = curr_lexer->previous_token_value (); \ + \ + if (curr_lexer->whitespace_is_significant () \ + && curr_lexer->space_follows_previous_token () \ + && ! (tok == '[' || tok == '{' \ + || curr_lexer->previous_token_is_binop ())) \ + { \ + yyless (0); \ + unput (','); \ + } \ + else \ + { \ + if (! 
curr_lexer->looking_at_decl_list \ + && curr_lexer->previous_token_may_be_command ()) \ + { \ + yyless (0); \ + curr_lexer->push_start_state (COMMAND_START); \ + } \ + else \ + { \ + if (get_set) \ + { \ + yyless (3); \ + curr_lexer->maybe_classdef_get_set_method = false; \ + } \ + \ + int id_tok = curr_lexer->handle_identifier (); \ + \ + if (id_tok >= 0) \ + return curr_lexer->count_token_internal (id_tok); \ + } \ + } \ + } \ + while (0) static bool Vdisplay_tokens = false; diff -r dd992fd74fce -r e43d83253e28 liboctave/array/Array.cc --- a/liboctave/array/Array.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/array/Array.cc Mon Aug 01 12:40:18 2016 -0400 @@ -2466,47 +2466,62 @@ return m; } -#define NO_INSTANTIATE_ARRAY_SORT(T) \ - \ -template <> Array \ -Array::sort (int, sortmode) const { return *this; } \ - \ -template <> Array \ -Array::sort (Array &sidx, int, sortmode) const \ -{ sidx = Array (); return *this; } \ - \ -template <> sortmode \ -Array::is_sorted (sortmode) const \ -{ return UNSORTED; } \ - \ -Array::compare_fcn_type \ -safe_comparator (sortmode, const Array&, bool) \ -{ return 0; } \ - \ -template <> Array \ -Array::sort_rows_idx (sortmode) const \ -{ return Array (); } \ - \ -template <> sortmode \ -Array::is_sorted_rows (sortmode) const \ -{ return UNSORTED; } \ - \ -template <> octave_idx_type \ -Array::lookup (T const &, sortmode) const \ -{ return 0; } \ -template <> Array \ -Array::lookup (const Array&, sortmode) const \ -{ return Array (); } \ - \ -template <> octave_idx_type \ -Array::nnz (void) const\ -{ return 0; } \ -template <> Array \ -Array::find (octave_idx_type, bool) const\ -{ return Array (); } \ - \ -template <> Array \ -Array::nth_element (const idx_vector&, int) const { return Array (); } +#define NO_INSTANTIATE_ARRAY_SORT(T) \ + template <> Array \ + Array::sort (int, sortmode) const \ + { \ + return *this; \ + } \ + template <> Array \ + Array::sort (Array &sidx, int, sortmode) const \ + { \ + sidx = Array (); \ + return *this; \ + } \ + template <> sortmode \ + Array::is_sorted (sortmode) const \ + { \ + return UNSORTED; \ + } \ + Array::compare_fcn_type \ + safe_comparator (sortmode, const Array&, bool) \ + { \ + return 0; \ + } \ + template <> Array \ + Array::sort_rows_idx (sortmode) const \ + { \ + return Array (); \ + } \ + template <> sortmode \ + Array::is_sorted_rows (sortmode) const \ + { \ + return UNSORTED; \ + } \ + template <> octave_idx_type \ + Array::lookup (T const &, sortmode) const \ + { \ + return 0; \ + } \ + template <> Array \ + Array::lookup (const Array&, sortmode) const \ + { \ + return Array (); \ + } \ + template <> octave_idx_type \ + Array::nnz (void) const \ + { \ + return 0; \ + } \ + template <> Array \ + Array::find (octave_idx_type, bool) const \ + { \ + return Array (); \ + } \ + template <> Array \ + Array::nth_element (const idx_vector&, int) const { \ + return Array (); \ + } template Array @@ -2748,7 +2763,7 @@ T::__xXxXx__ (); } -#define INSTANTIATE_ARRAY(T, API) \ +#define INSTANTIATE_ARRAY(T, API) \ template <> void Array::instantiation_guard () { } \ template class API Array diff -r dd992fd74fce -r e43d83253e28 liboctave/array/CMatrix.cc --- a/liboctave/array/CMatrix.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/array/CMatrix.cc Mon Aug 01 12:40:18 2016 -0400 @@ -3706,8 +3706,8 @@ // FIXME: it would be nice to share code among the min/max functions below. 
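
Stepping back to the lexer macros refilled above (CMD_OR_OP, HANDLE_STRING_CONTINUATION, COMMAND_ARG_FINISH, HANDLE_IDENTIFIER): they keep their bodies inside do { ... } while (0) so that a multi-statement macro behaves as a single statement and composes safely with if/else at the call site, and the refill leaves that structure alone. The self-contained reminder below uses invented LOG_* names purely for illustration.

  #include <cstdio>

  // Without the wrapper, the second statement would escape an 'if'.
  #define LOG_BAD(msg)                          \
    std::printf ("log: ");                      \
    std::printf ("%s\n", msg)

  // With do { ... } while (0), the macro is one statement and an 'else'
  // can legally follow the call.
  #define LOG_GOOD(msg)                         \
    do                                          \
      {                                         \
        std::printf ("log: ");                  \
        std::printf ("%s\n", msg);              \
      }                                         \
    while (0)

  int
  main (void)
  {
    bool verbose = false;

    if (verbose)
      LOG_GOOD ("enabled");
    else
      std::printf ("quiet\n");   // pairs with the 'if' as intended

    return 0;
  }
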
-#define EMPTY_RETURN_CHECK(T) \ - if (nr == 0 || nc == 0) \ +#define EMPTY_RETURN_CHECK(T) \ + if (nr == 0 || nc == 0) \ return T (nr, nc); ComplexMatrix diff -r dd992fd74fce -r e43d83253e28 liboctave/array/CSparse.cc --- a/liboctave/array/CSparse.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/array/CSparse.cc Mon Aug 01 12:40:18 2016 -0400 @@ -7320,12 +7320,12 @@ SparseComplexMatrix SparseComplexMatrix::sumsq (int dim) const { -#define ROW_EXPR \ - Complex d = data (i); \ +#define ROW_EXPR \ + Complex d = data (i); \ tmp[ridx (i)] += d * conj (d) -#define COL_EXPR \ - Complex d = data (i); \ +#define COL_EXPR \ + Complex d = data (i); \ tmp[j] += d * conj (d) SPARSE_BASE_REDUCTION_OP (SparseComplexMatrix, Complex, ROW_EXPR, @@ -7579,8 +7579,8 @@ // FIXME: it would be nice to share code among the min/max functions below. -#define EMPTY_RETURN_CHECK(T) \ - if (nr == 0 || nc == 0) \ +#define EMPTY_RETURN_CHECK(T) \ + if (nr == 0 || nc == 0) \ return T (nr, nc); SparseComplexMatrix diff -r dd992fd74fce -r e43d83253e28 liboctave/array/MArray.cc --- a/liboctave/array/MArray.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/array/MArray.cc Mon Aug 01 12:40:18 2016 -0400 @@ -303,11 +303,11 @@ // Element by element MArray by scalar ops. -#define MARRAY_NDS_OP(OP, FN) \ - template \ - MArray \ - operator OP (const MArray& a, const T& s) \ - { \ +#define MARRAY_NDS_OP(OP, FN) \ + template \ + MArray \ + operator OP (const MArray& a, const T& s) \ + { \ return do_ms_binary_op (a, s, FN); \ } @@ -318,11 +318,11 @@ // Element by element scalar by MArray ops. -#define MARRAY_SND_OP(OP, FN) \ - template \ - MArray \ - operator OP (const T& s, const MArray& a) \ - { \ +#define MARRAY_SND_OP(OP, FN) \ + template \ + MArray \ + operator OP (const T& s, const MArray& a) \ + { \ return do_sm_binary_op (s, a, FN); \ } diff -r dd992fd74fce -r e43d83253e28 liboctave/array/MArray.h --- a/liboctave/array/MArray.h Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/array/MArray.h Mon Aug 01 12:40:18 2016 -0400 @@ -123,97 +123,141 @@ // Define all the MArray forwarding functions for return type R and // MArray element type T -#define MARRAY_FORWARD_DEFS(B, R, T) \ - inline R operator += (R& x, const T& y) \ - { return R (operator += (dynamic_cast&> (x), (y))); } \ - inline R operator -= (R& x, const T& y) \ - { return R (operator -= (dynamic_cast&> (x), (y))); } \ - inline R operator *= (R& x, const T& y) \ - { return R (operator *= (dynamic_cast&> (x), (y))); } \ - inline R operator /= (R& x, const T& y) \ - { return R (operator /= (dynamic_cast&> (x), (y))); } \ - inline R operator += (R& x, const R& y) \ - { return R (operator += (dynamic_cast&> (x), \ - dynamic_cast&> (y))); } \ - inline R operator -= (R& x, const R& y) \ - { return R (operator -= (dynamic_cast&> (x), \ - dynamic_cast&> (y))); } \ - inline R product_eq (R& x, const R& y) \ - { return R (product_eq (dynamic_cast&> (x), \ - dynamic_cast&> (y))); } \ - inline R quotient_eq (R& x, const R& y) \ - { return R (quotient_eq (dynamic_cast&> (x), \ - dynamic_cast&> (y))); } \ - inline R operator + (const R& x) \ - { return R (operator + (dynamic_cast&> (x))); } \ - inline R operator - (const R& x) \ - { return R (operator - (dynamic_cast&> (x))); } \ - inline R operator + (const R& x, const T& y) \ - { return R (operator + (dynamic_cast&> (x), (y))); } \ - inline R operator - (const R& x, const T& y) \ - { return R (operator - (dynamic_cast&> (x), (y))); } \ - inline R operator * (const R& x, const T& y) \ - { return R (operator * (dynamic_cast&> (x), 
(y))); } \ - inline R operator / (const R& x, const T& y) \ - { return R (operator / (dynamic_cast&> (x), (y))); } \ - inline R operator + (const T& x, const R& y) \ - { return R (operator + ( (x), dynamic_cast&> (y))); } \ - inline R operator - (const T& x, const R& y) \ - { return R (operator - ( (x), dynamic_cast&> (y))); } \ - inline R operator * (const T& x, const R& y) \ - { return R (operator * ( (x), dynamic_cast&> (y))); } \ - inline R operator / (const T& x, const R& y) \ - { return R (operator / ( (x), dynamic_cast&> (y))); } \ - inline R operator + (const R& x, const R& y) \ - { return R (operator + (dynamic_cast&> (x), \ - dynamic_cast&> (y))); } \ - inline R operator - (const R& x, const R& y) \ - { return R (operator - (dynamic_cast&> (x), \ - dynamic_cast&> (y))); } \ - inline R product (const R& x, const R& y) \ - { return R (product (dynamic_cast&> (x), \ - dynamic_cast&> (y))); } \ - inline R quotient (const R& x, const R& y) \ - { return R (quotient (dynamic_cast&> (x), \ - dynamic_cast&> (y))); } +#define MARRAY_FORWARD_DEFS(B, R, T) \ + inline R operator += (R& x, const T& y) \ + { \ + return R (operator += (dynamic_cast&> (x), (y))); \ + } \ + inline R operator -= (R& x, const T& y) \ + { \ + return R (operator -= (dynamic_cast&> (x), (y))); \ + } \ + inline R operator *= (R& x, const T& y) \ + { \ + return R (operator *= (dynamic_cast&> (x), (y))); \ + } \ + inline R operator /= (R& x, const T& y) \ + { \ + return R (operator /= (dynamic_cast&> (x), (y))); \ + } \ + inline R operator += (R& x, const R& y) \ + { \ + return R (operator += (dynamic_cast&> (x), \ + dynamic_cast&> (y))); \ + } \ + inline R operator -= (R& x, const R& y) \ + { \ + return R (operator -= (dynamic_cast&> (x), \ + dynamic_cast&> (y))); \ + } \ + inline R product_eq (R& x, const R& y) \ + { \ + return R (product_eq (dynamic_cast&> (x), \ + dynamic_cast&> (y))); \ + } \ + inline R quotient_eq (R& x, const R& y) \ + { \ + return R (quotient_eq (dynamic_cast&> (x), \ + dynamic_cast&> (y))); \ + } \ + inline R operator + (const R& x) \ + { \ + return R (operator + (dynamic_cast&> (x))); \ + } \ + inline R operator - (const R& x) \ + { \ + return R (operator - (dynamic_cast&> (x))); \ + } \ + inline R operator + (const R& x, const T& y) \ + { \ + return R (operator + (dynamic_cast&> (x), (y))); \ + } \ + inline R operator - (const R& x, const T& y) \ + { \ + return R (operator - (dynamic_cast&> (x), (y))); \ + } \ + inline R operator * (const R& x, const T& y) \ + { \ + return R (operator * (dynamic_cast&> (x), (y))); \ + } \ + inline R operator / (const R& x, const T& y) \ + { \ + return R (operator / (dynamic_cast&> (x), (y))); \ + } \ + inline R operator + (const T& x, const R& y) \ + { \ + return R (operator + ( (x), dynamic_cast&> (y))); \ + } \ + inline R operator - (const T& x, const R& y) \ + { \ + return R (operator - ( (x), dynamic_cast&> (y))); \ + } \ + inline R operator * (const T& x, const R& y) \ + { \ + return R (operator * ( (x), dynamic_cast&> (y))); \ + } \ + inline R operator / (const T& x, const R& y) \ + { \ + return R (operator / ( (x), dynamic_cast&> (y))); \ + } \ + inline R operator + (const R& x, const R& y) \ + { \ + return R (operator + (dynamic_cast&> (x), \ + dynamic_cast&> (y))); \ + } \ + inline R operator - (const R& x, const R& y) \ + { \ + return R (operator - (dynamic_cast&> (x), \ + dynamic_cast&> (y))); \ + } \ + inline R product (const R& x, const R& y) \ + { \ + return R (product (dynamic_cast&> (x), \ + dynamic_cast&> (y))); \ + } \ + inline R quotient 
(const R& x, const R& y) \ + { \ + return R (quotient (dynamic_cast&> (x), \ + dynamic_cast&> (y))); \ + } // Instantiate all the MArray friends for MArray element type T. -#define INSTANTIATE_MARRAY_FRIENDS(T, API) \ - template API MArray& operator += (MArray&, const T&); \ - template API MArray& operator -= (MArray&, const T&); \ - template API MArray& operator *= (MArray&, const T&); \ - template API MArray& operator /= (MArray&, const T&); \ - template API MArray& operator += (MArray&, const MArray&); \ - template API MArray& operator -= (MArray&, const MArray&); \ - template API MArray& product_eq (MArray&, const MArray&); \ - template API MArray& quotient_eq (MArray&, const MArray&); \ - template API MArray operator + (const MArray&); \ - template API MArray operator - (const MArray&); \ - template API MArray operator + (const MArray&, const T&); \ - template API MArray operator - (const MArray&, const T&); \ - template API MArray operator * (const MArray&, const T&); \ - template API MArray operator / (const MArray&, const T&); \ - template API MArray operator + (const T&, const MArray&); \ - template API MArray operator - (const T&, const MArray&); \ - template API MArray operator * (const T&, const MArray&); \ - template API MArray operator / (const T&, const MArray&); \ +#define INSTANTIATE_MARRAY_FRIENDS(T, API) \ + template API MArray& operator += (MArray&, const T&); \ + template API MArray& operator -= (MArray&, const T&); \ + template API MArray& operator *= (MArray&, const T&); \ + template API MArray& operator /= (MArray&, const T&); \ + template API MArray& operator += (MArray&, const MArray&); \ + template API MArray& operator -= (MArray&, const MArray&); \ + template API MArray& product_eq (MArray&, const MArray&); \ + template API MArray& quotient_eq (MArray&, const MArray&); \ + template API MArray operator + (const MArray&); \ + template API MArray operator - (const MArray&); \ + template API MArray operator + (const MArray&, const T&); \ + template API MArray operator - (const MArray&, const T&); \ + template API MArray operator * (const MArray&, const T&); \ + template API MArray operator / (const MArray&, const T&); \ + template API MArray operator + (const T&, const MArray&); \ + template API MArray operator - (const T&, const MArray&); \ + template API MArray operator * (const T&, const MArray&); \ + template API MArray operator / (const T&, const MArray&); \ template API MArray operator + (const MArray&, const MArray&); \ template API MArray operator - (const MArray&, const MArray&); \ template API MArray quotient (const MArray&, const MArray&); \ template API MArray product (const MArray&, const MArray&); // Instantiate all the MDiagArray2 friends for MDiagArray2 element type T. 
-#define INSTANTIATE_MDIAGARRAY2_FRIENDS(T, API) \ - template API MDiagArray2 operator + (const MDiagArray2&); \ - template API MDiagArray2 operator - (const MDiagArray2&); \ +#define INSTANTIATE_MDIAGARRAY2_FRIENDS(T, API) \ + template API MDiagArray2 operator + (const MDiagArray2&); \ + template API MDiagArray2 operator - (const MDiagArray2&); \ template API MDiagArray2 operator * (const MDiagArray2&, const T&); \ template API MDiagArray2 operator / (const MDiagArray2&, const T&); \ template API MDiagArray2 operator * (const T&, const MDiagArray2&); \ - template API MDiagArray2 operator + (const MDiagArray2&, \ - const MDiagArray2&); \ - template API MDiagArray2 operator - (const MDiagArray2&, \ - const MDiagArray2&); \ - template API MDiagArray2 product (const MDiagArray2&, \ + template API MDiagArray2 operator + (const MDiagArray2&, \ + const MDiagArray2&); \ + template API MDiagArray2 operator - (const MDiagArray2&, \ + const MDiagArray2&); \ + template API MDiagArray2 product (const MDiagArray2&, \ const MDiagArray2&); #endif diff -r dd992fd74fce -r e43d83253e28 liboctave/array/MDiagArray2.cc --- a/liboctave/array/MDiagArray2.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/array/MDiagArray2.cc Mon Aug 01 12:40:18 2016 -0400 @@ -52,11 +52,11 @@ // Element by element MDiagArray2 by scalar ops. -#define MARRAY_DAS_OP(OP, FN) \ - template \ - MDiagArray2 \ - operator OP (const MDiagArray2& a, const T& s) \ - { \ +#define MARRAY_DAS_OP(OP, FN) \ + template \ + MDiagArray2 \ + operator OP (const MDiagArray2& a, const T& s) \ + { \ return MDiagArray2 (do_ms_binary_op (a, s, FN), a.d1, a.d2); \ } @@ -75,14 +75,14 @@ // Element by element MDiagArray2 by MDiagArray2 ops. -#define MARRAY_DADA_OP(FCN, OP, FN) \ - template \ - MDiagArray2 \ - FCN (const MDiagArray2& a, const MDiagArray2& b) \ - { \ - if (a.d1 != b.d1 || a.d2 != b.d2) \ - err_nonconformant (#FCN, a.d1, a.d2, b.d1, b.d2); \ - \ +#define MARRAY_DADA_OP(FCN, OP, FN) \ + template \ + MDiagArray2 \ + FCN (const MDiagArray2& a, const MDiagArray2& b) \ + { \ + if (a.d1 != b.d1 || a.d2 != b.d2) \ + err_nonconformant (#FCN, a.d1, a.d2, b.d1, b.d2); \ + \ return MDiagArray2 (do_mm_binary_op (a, b, FN, FN, FN, #FCN), a.d1, a.d2); \ } diff -r dd992fd74fce -r e43d83253e28 liboctave/array/MDiagArray2.h --- a/liboctave/array/MDiagArray2.h Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/array/MDiagArray2.h Mon Aug 01 12:40:18 2016 -0400 @@ -129,25 +129,49 @@ }; -#define MDIAGARRAY2_FORWARD_DEFS(B, R, T) \ - inline R operator + (const R& x) \ - { return R (operator + (dynamic_cast&> (x))); } \ - inline R operator - (const R& x) \ - { return R (operator - (dynamic_cast&> (x))); } \ - inline R operator * (const R& x, const T& y) \ - { return R (operator * (dynamic_cast&> (x), (y))); } \ - inline R operator / (const R& x, const T& y) \ - { return R (operator / (dynamic_cast&> (x), (y))); } \ - inline R operator * (const T& x, const R& y) \ - { return R (operator * ( (x), dynamic_cast&> (y))); } \ - inline R operator + (const R& x, const R& y) \ - { return R (operator + (dynamic_cast&> (x), \ - dynamic_cast&> (y))); } \ - inline R operator - (const R& x, const R& y) \ - { return R (operator - (dynamic_cast&> (x), \ - dynamic_cast&> (y))); } \ - inline R product (const R& x, const R& y) \ - { return R (product (dynamic_cast&> (x), \ - dynamic_cast&> (y))); } +#define MDIAGARRAY2_FORWARD_DEFS(B, R, T) \ + inline R \ + operator + (const R& x) \ + { \ + return R (operator + (dynamic_cast&> (x))); \ + } \ + inline R \ + operator - (const R& x) \ + { \ + 
return R (operator - (dynamic_cast&> (x))); \ + } \ + inline R \ + operator * (const R& x, const T& y) \ + { \ + return R (operator * (dynamic_cast&> (x), (y))); \ + } \ + inline R \ + operator / (const R& x, const T& y) \ + { \ + return R (operator / (dynamic_cast&> (x), (y))); \ + } \ + inline R \ + operator * (const T& x, const R& y) \ + { \ + return R (operator * ( (x), dynamic_cast&> (y))); \ + } \ + inline R \ + operator + (const R& x, const R& y) \ + { \ + return R (operator + (dynamic_cast&> (x), \ + dynamic_cast&> (y))); \ + } \ + inline R \ + operator - (const R& x, const R& y) \ + { \ + return R (operator - (dynamic_cast&> (x), \ + dynamic_cast&> (y))); \ + } \ + inline R \ + product (const R& x, const R& y) \ + { \ + return R (product (dynamic_cast&> (x), \ + dynamic_cast&> (y))); \ + } #endif diff -r dd992fd74fce -r e43d83253e28 liboctave/array/MSparse.h --- a/liboctave/array/MSparse.h Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/array/MSparse.h Mon Aug 01 12:40:18 2016 -0400 @@ -123,61 +123,61 @@ #include "MSparse.cc" // A macro that can be used to declare and instantiate OP= operators. -#define SPARSE_OP_ASSIGN_DECL(T, OP, API) \ - template API MSparse& \ +#define SPARSE_OP_ASSIGN_DECL(T, OP, API) \ + template API MSparse& \ operator OP (MSparse&, const MSparse&) // A macro that can be used to declare and instantiate unary operators. -#define SPARSE_UNOP_DECL(T, OP, API) \ - template API MSparse \ +#define SPARSE_UNOP_DECL(T, OP, API) \ + template API MSparse \ operator OP (const MSparse&) // A macro that can be used to declare and instantiate binary operators. -#define SPARSE_BINOP_DECL(A_T, T, F, API, X_T, Y_T) \ - template API A_T \ +#define SPARSE_BINOP_DECL(A_T, T, F, API, X_T, Y_T) \ + template API A_T \ F (const X_T&, const Y_T&) // A function that can be used to forward OP= operations from derived // classes back to us. -#define SPARSE_OP_ASSIGN_FWD_FCN(R, F, T, C_X, X_T, C_Y, Y_T) \ - inline R \ - F (X_T& x, const Y_T& y) \ - { \ - return R (F (C_X (x), C_Y (y))); \ +#define SPARSE_OP_ASSIGN_FWD_FCN(R, F, T, C_X, X_T, C_Y, Y_T) \ + inline R \ + F (X_T& x, const Y_T& y) \ + { \ + return R (F (C_X (x), C_Y (y))); \ } // A function that can be used to forward unary operations from derived // classes back to us. -#define SPARSE_UNOP_FWD_FCN(R, F, T, C_X, X_T) \ - inline R \ - F (const X_T& x) \ - { \ - return R (F (C_X (x))); \ +#define SPARSE_UNOP_FWD_FCN(R, F, T, C_X, X_T) \ + inline R \ + F (const X_T& x) \ + { \ + return R (F (C_X (x))); \ } // A function that can be used to forward binary operations from derived // classes back to us. -#define SPARSE_BINOP_FWD_FCN(R, F, T, C_X, X_T, C_Y, Y_T) \ - inline R \ - F (const X_T& x, const Y_T& y) \ - { \ - return R (F (C_X (x), C_Y (y))); \ +#define SPARSE_BINOP_FWD_FCN(R, F, T, C_X, X_T, C_Y, Y_T) \ + inline R \ + F (const X_T& x, const Y_T& y) \ + { \ + return R (F (C_X (x), C_Y (y))); \ } // Instantiate all the MSparse friends for MSparse element type T. 
-#define INSTANTIATE_SPARSE_FRIENDS(T, API) \ - SPARSE_OP_ASSIGN_DECL (T, +=, API); \ - SPARSE_OP_ASSIGN_DECL (T, -=, API); \ - SPARSE_UNOP_DECL (T, +, API); \ - SPARSE_UNOP_DECL (T, -, API); \ - SPARSE_BINOP_DECL (MArray, T, operator +, API, MSparse, T); \ - SPARSE_BINOP_DECL (MArray, T, operator -, API, MSparse, T); \ - SPARSE_BINOP_DECL (MSparse, T, operator *, API, MSparse, T); \ - SPARSE_BINOP_DECL (MSparse, T, operator /, API, MSparse, T); \ - SPARSE_BINOP_DECL (MArray, T, operator +, API, T, MSparse); \ - SPARSE_BINOP_DECL (MArray, T, operator -, API, T, MSparse); \ - SPARSE_BINOP_DECL (MSparse, T, operator *, API, T, MSparse); \ - SPARSE_BINOP_DECL (MSparse, T, operator /, API, T, MSparse); \ +#define INSTANTIATE_SPARSE_FRIENDS(T, API) \ + SPARSE_OP_ASSIGN_DECL (T, +=, API); \ + SPARSE_OP_ASSIGN_DECL (T, -=, API); \ + SPARSE_UNOP_DECL (T, +, API); \ + SPARSE_UNOP_DECL (T, -, API); \ + SPARSE_BINOP_DECL (MArray, T, operator +, API, MSparse, T); \ + SPARSE_BINOP_DECL (MArray, T, operator -, API, MSparse, T); \ + SPARSE_BINOP_DECL (MSparse, T, operator *, API, MSparse, T); \ + SPARSE_BINOP_DECL (MSparse, T, operator /, API, MSparse, T); \ + SPARSE_BINOP_DECL (MArray, T, operator +, API, T, MSparse); \ + SPARSE_BINOP_DECL (MArray, T, operator -, API, T, MSparse); \ + SPARSE_BINOP_DECL (MSparse, T, operator *, API, T, MSparse); \ + SPARSE_BINOP_DECL (MSparse, T, operator /, API, T, MSparse); \ SPARSE_BINOP_DECL (MSparse, T, operator +, API, MSparse, MSparse); \ SPARSE_BINOP_DECL (MSparse, T, operator -, API, MSparse, MSparse); \ SPARSE_BINOP_DECL (MSparse, T, quotient, API, MSparse, MSparse); \ @@ -185,13 +185,13 @@ // Define all the MSparse forwarding functions for return type R and // MSparse element type T -#define SPARSE_FORWARD_DEFS(B, R, F, T) \ - SPARSE_OP_ASSIGN_FWD_FCN \ - (R, operator +=, T, dynamic_cast&>, R, dynamic_cast&>, R) \ - SPARSE_OP_ASSIGN_FWD_FCN \ - (R, operator -=, T, dynamic_cast&>, R, dynamic_cast&>, R) \ - SPARSE_UNOP_FWD_FCN (R, operator +, T, dynamic_cast&>, R) \ - SPARSE_UNOP_FWD_FCN (R, operator -, T, dynamic_cast&>, R) \ +#define SPARSE_FORWARD_DEFS(B, R, F, T) \ + SPARSE_OP_ASSIGN_FWD_FCN (R, operator +=, T, dynamic_cast&>, \ + R, dynamic_cast&>, R) \ + SPARSE_OP_ASSIGN_FWD_FCN (R, operator -=, T, dynamic_cast&>, \ + R, dynamic_cast&>, R) \ + SPARSE_UNOP_FWD_FCN (R, operator +, T, dynamic_cast&>, R) \ + SPARSE_UNOP_FWD_FCN (R, operator -, T, dynamic_cast&>, R) \ SPARSE_BINOP_FWD_FCN (F, operator +, T, dynamic_cast&>, R, , T) \ SPARSE_BINOP_FWD_FCN (F, operator -, T, dynamic_cast&>, R, , T) \ SPARSE_BINOP_FWD_FCN (R, operator *, T, dynamic_cast&>, R, , T) \ @@ -200,17 +200,13 @@ SPARSE_BINOP_FWD_FCN (F, operator -, T, , T, dynamic_cast&>, R) \ SPARSE_BINOP_FWD_FCN (R, operator *, T, , T, dynamic_cast&>, R) \ SPARSE_BINOP_FWD_FCN (R, operator /, T, , T, dynamic_cast&>, R) \ - SPARSE_BINOP_FWD_FCN \ - (R, operator +, T, dynamic_cast&>, R, \ - dynamic_cast&>, R) \ - SPARSE_BINOP_FWD_FCN \ - (R, operator -, T, dynamic_cast&>, R, \ - dynamic_cast&>, R) \ - SPARSE_BINOP_FWD_FCN \ - (R, product, T, dynamic_cast&>, R, \ - dynamic_cast&>, R) \ - SPARSE_BINOP_FWD_FCN \ - (R, quotient, T, dynamic_cast&>, R, \ - dynamic_cast&>, R) + SPARSE_BINOP_FWD_FCN (R, operator +, T, dynamic_cast&>, \ + R, dynamic_cast&>, R) \ + SPARSE_BINOP_FWD_FCN (R, operator -, T, dynamic_cast&>, \ + R, dynamic_cast&>, R) \ + SPARSE_BINOP_FWD_FCN (R, product, T, dynamic_cast&>, \ + R, dynamic_cast&>, R) \ + SPARSE_BINOP_FWD_FCN (R, quotient, T, dynamic_cast&>, \ + R, dynamic_cast&>, R) #endif 
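Note: the convention applied throughout these hunks is the Emacs C++ mode layout for multi-line macros: the macro body is indented beneath the #define and every line-continuation backslash is refilled to a single column. As an illustration only (SQUARE_AND_STORE and tmp_val are hypothetical names, not part of this changeset), a statement-like macro written in that style looks like:

/* Illustrative sketch only: a hypothetical macro, not one touched by
   this patch.  It shows the refilled layout -- body indented under the
   #define, continuation backslashes aligned in one column, and the
   usual do { ... } while (0) wrapper so the macro behaves as a single
   statement at its use site.  */

#define SQUARE_AND_STORE(dest, x)               \
  do                                            \
    {                                           \
      double tmp_val = (x);                     \
      (dest) = tmp_val * tmp_val;               \
    }                                           \
  while (0)

With the markers kept in one column, a line added to or removed from the body only needs its own backslash placed at that column instead of forcing a re-alignment of the whole definition, which is the effect of the refill in the hunks above and below.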
diff -r dd992fd74fce -r e43d83253e28 liboctave/array/Sparse.cc --- a/liboctave/array/Sparse.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/array/Sparse.cc Mon Aug 01 12:40:18 2016 -0400 @@ -3011,7 +3011,8 @@ << prefix << "rep->count: " << rep->count << "\n"; } -#define INSTANTIATE_SPARSE(T, API) \ - template class API Sparse; \ - template std::istream& read_sparse_matrix \ - (std::istream& is, Sparse& a, T (*read_fcn) (std::istream&)); +#define INSTANTIATE_SPARSE(T, API) \ + template class API Sparse; \ + template std::istream& \ + read_sparse_matrix (std::istream& is, Sparse& a, \ + T (*read_fcn) (std::istream&)); diff -r dd992fd74fce -r e43d83253e28 liboctave/array/dMatrix.cc --- a/liboctave/array/dMatrix.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/array/dMatrix.cc Mon Aug 01 12:40:18 2016 -0400 @@ -3117,8 +3117,8 @@ // FIXME: it would be nice to share code among the min/max functions below. -#define EMPTY_RETURN_CHECK(T) \ - if (nr == 0 || nc == 0) \ +#define EMPTY_RETURN_CHECK(T) \ + if (nr == 0 || nc == 0) \ return T (nr, nc); Matrix diff -r dd992fd74fce -r e43d83253e28 liboctave/array/dSparse.cc --- a/liboctave/array/dSparse.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/array/dSparse.cc Mon Aug 01 12:40:18 2016 -0400 @@ -7477,12 +7477,12 @@ SparseMatrix SparseMatrix::sumsq (int dim) const { -#define ROW_EXPR \ - double d = data (i); \ +#define ROW_EXPR \ + double d = data (i); \ tmp[ridx (i)] += d * d -#define COL_EXPR \ - double d = data (i); \ +#define COL_EXPR \ + double d = data (i); \ tmp[j] += d * d SPARSE_BASE_REDUCTION_OP (SparseMatrix, double, ROW_EXPR, COL_EXPR, @@ -7656,8 +7656,8 @@ // FIXME: it would be nice to share code among the min/max functions below. -#define EMPTY_RETURN_CHECK(T) \ - if (nr == 0 || nc == 0) \ +#define EMPTY_RETURN_CHECK(T) \ + if (nr == 0 || nc == 0) \ return T (nr, nc); SparseMatrix diff -r dd992fd74fce -r e43d83253e28 liboctave/array/fCMatrix.cc --- a/liboctave/array/fCMatrix.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/array/fCMatrix.cc Mon Aug 01 12:40:18 2016 -0400 @@ -3735,8 +3735,8 @@ // FIXME: it would be nice to share code among the min/max // functions below. -#define EMPTY_RETURN_CHECK(T) \ - if (nr == 0 || nc == 0) \ +#define EMPTY_RETURN_CHECK(T) \ + if (nr == 0 || nc == 0) \ return T (nr, nc); FloatComplexMatrix diff -r dd992fd74fce -r e43d83253e28 liboctave/array/fMatrix.cc --- a/liboctave/array/fMatrix.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/array/fMatrix.cc Mon Aug 01 12:40:18 2016 -0400 @@ -3128,8 +3128,8 @@ // FIXME: it would be nice to share code among the min/max functions below. -#define EMPTY_RETURN_CHECK(T) \ - if (nr == 0 || nc == 0) \ +#define EMPTY_RETURN_CHECK(T) \ + if (nr == 0 || nc == 0) \ return T (nr, nc); FloatMatrix diff -r dd992fd74fce -r e43d83253e28 liboctave/array/idx-vector.cc --- a/liboctave/array/idx-vector.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/array/idx-vector.cc Mon Aug 01 12:40:18 2016 -0400 @@ -1304,8 +1304,8 @@ } // Instantiate the octave_int constructors we want. 
-#define INSTANTIATE_SCALAR_VECTOR_REP_CONST(T) \ - template OCTAVE_API idx_vector::idx_scalar_rep::idx_scalar_rep (T); \ +#define INSTANTIATE_SCALAR_VECTOR_REP_CONST(T) \ + template OCTAVE_API idx_vector::idx_scalar_rep::idx_scalar_rep (T); \ template OCTAVE_API idx_vector::idx_vector_rep::idx_vector_rep (const Array&); INSTANTIATE_SCALAR_VECTOR_REP_CONST (float) diff -r dd992fd74fce -r e43d83253e28 liboctave/cruft/misc/f77-fcn.h --- a/liboctave/cruft/misc/f77-fcn.h Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/cruft/misc/f77-fcn.h Mon Aug 01 12:40:18 2016 -0400 @@ -37,9 +37,9 @@ /* How to print an error for the F77_XFCN macro. */ -#define F77_XFCN_ERROR(f, F) \ - (*current_liboctave_error_handler) \ - ("exception encountered in Fortran subroutine %s", \ +#define F77_XFCN_ERROR(f, F) \ + (*current_liboctave_error_handler) \ + ("exception encountered in Fortran subroutine %s", \ STRINGIZE (F77_FUNC (f, F))) /* This can be used to call a Fortran subroutine that might call @@ -49,30 +49,30 @@ subroutine is called. In that case, we resotre the context and go to the top level. */ -#define F77_XFCN(f, F, args) \ - do \ - { \ - octave_jmp_buf saved_context; \ +#define F77_XFCN(f, F, args) \ + do \ + { \ + octave_jmp_buf saved_context; \ sig_atomic_t saved_octave_interrupt_immediately = octave_interrupt_immediately; \ - f77_exception_encountered = 0; \ - octave_save_current_context (saved_context); \ - if (octave_set_current_context) \ - { \ + f77_exception_encountered = 0; \ + octave_save_current_context (saved_context); \ + if (octave_set_current_context) \ + { \ octave_interrupt_immediately = saved_octave_interrupt_immediately; \ - octave_restore_current_context (saved_context); \ - if (f77_exception_encountered) \ - F77_XFCN_ERROR (f, F); \ - else \ - octave_rethrow_exception (); \ - } \ - else \ - { \ - octave_interrupt_immediately++; \ - F77_FUNC (f, F) args; \ - octave_interrupt_immediately--; \ - octave_restore_current_context (saved_context); \ - } \ - } \ + octave_restore_current_context (saved_context); \ + if (f77_exception_encountered) \ + F77_XFCN_ERROR (f, F); \ + else \ + octave_rethrow_exception (); \ + } \ + else \ + { \ + octave_interrupt_immediately++; \ + F77_FUNC (f, F) args; \ + octave_interrupt_immediately--; \ + octave_restore_current_context (saved_context); \ + } \ + } \ while (0) /* So we can check to see if an exception has occurred. */ @@ -146,11 +146,11 @@ and the length in a single argument. 
*/ #define F77_CHAR_ARG(x) octave_make_cray_ftn_ch_dsc (x, strlen (x)) -#define F77_CONST_CHAR_ARG(x) \ +#define F77_CONST_CHAR_ARG(x) \ octave_make_cray_const_ftn_ch_dsc (x, strlen (x)) #define F77_CHAR_ARG2(x, l) octave_make_cray_ftn_ch_dsc (x, l) #define F77_CONST_CHAR_ARG2(x, l) octave_make_cray_const_ftn_ch_dsc (x, l) -#define F77_CXX_STRING_ARG(x) \ +#define F77_CXX_STRING_ARG(x) \ octave_make_cray_const_ftn_ch_dsc (x.c_str (), x.length ()) #define F77_CHAR_ARG_LEN(l) #define F77_CHAR_ARG_LEN_TYPE @@ -336,16 +336,16 @@ #define F77_INT4 int32_t #define F77_LOGICAL octave_idx_type -#define F77_CMPLX_ARG(x) \ +#define F77_CMPLX_ARG(x) \ reinterpret_cast (x) -#define F77_CONST_CMPLX_ARG(x) \ +#define F77_CONST_CMPLX_ARG(x) \ reinterpret_cast (x) -#define F77_DBLE_CMPLX_ARG(x) \ +#define F77_DBLE_CMPLX_ARG(x) \ reinterpret_cast (x) -#define F77_CONST_DBLE_CMPLX_ARG(x) \ +#define F77_CONST_DBLE_CMPLX_ARG(x) \ reinterpret_cast (x) /* Build a C string local variable CS from the Fortran string parameter S @@ -353,10 +353,10 @@ The string will be cleaned up at the end of the current block. Needs to include and . */ -#define F77_CSTRING(s, len, cs) \ - OCTAVE_LOCAL_BUFFER (char, cs, F77_CHAR_ARG_LEN_USE (s, len) + 1); \ - memcpy (cs, F77_CHAR_ARG_USE (s), F77_CHAR_ARG_LEN_USE (s, len)); \ - cs[F77_CHAR_ARG_LEN_USE(s, len)] = '\0' +#define F77_CSTRING(s, len, cs) \ + OCTAVE_LOCAL_BUFFER (char, cs, F77_CHAR_ARG_LEN_USE (s, len) + 1); \ + memcpy (cs, F77_CHAR_ARG_USE (s), F77_CHAR_ARG_LEN_USE (s, len)); \ + cs[F77_CHAR_ARG_LEN_USE(s, len)] = '\0' OCTAVE_NORETURN OCTAVE_API extern F77_RET_T diff -r dd992fd74fce -r e43d83253e28 liboctave/cruft/misc/quit.h --- a/liboctave/cruft/misc/quit.h Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/cruft/misc/quit.h Mon Aug 01 12:40:18 2016 -0400 @@ -197,15 +197,15 @@ #else -#define OCTAVE_QUIT \ - do \ - { \ - if (octave_signal_caught) \ - { \ - octave_signal_caught = 0; \ - octave_handle_signal (); \ - } \ - } \ +#define OCTAVE_QUIT \ + do \ + { \ + if (octave_signal_caught) \ + { \ + octave_signal_caught = 0; \ + octave_handle_signal (); \ + } \ + } \ while (0) #endif @@ -225,72 +225,72 @@ so that you can perform extra clean up operations before throwing the interrupt exception. 
*/ -#define BEGIN_INTERRUPT_IMMEDIATELY_IN_FOREIGN_CODE \ - BEGIN_INTERRUPT_IMMEDIATELY_IN_FOREIGN_CODE_1; \ - octave_rethrow_exception (); \ +#define BEGIN_INTERRUPT_IMMEDIATELY_IN_FOREIGN_CODE \ + BEGIN_INTERRUPT_IMMEDIATELY_IN_FOREIGN_CODE_1; \ + octave_rethrow_exception (); \ BEGIN_INTERRUPT_IMMEDIATELY_IN_FOREIGN_CODE_2 -#define BEGIN_INTERRUPT_IMMEDIATELY_IN_FOREIGN_CODE_1 \ - do \ - { \ - octave_jmp_buf saved_context; \ - \ - octave_save_current_context (saved_context); \ - \ - if (octave_set_current_context) \ - { \ +#define BEGIN_INTERRUPT_IMMEDIATELY_IN_FOREIGN_CODE_1 \ + do \ + { \ + octave_jmp_buf saved_context; \ + \ + octave_save_current_context (saved_context); \ + \ + if (octave_set_current_context) \ + { \ octave_restore_current_context (saved_context) -#define BEGIN_INTERRUPT_IMMEDIATELY_IN_FOREIGN_CODE_2 \ - } \ - else \ - { \ - octave_interrupt_immediately++ +#define BEGIN_INTERRUPT_IMMEDIATELY_IN_FOREIGN_CODE_2 \ + } \ + else \ + { \ + octave_interrupt_immediately++ -#define END_INTERRUPT_IMMEDIATELY_IN_FOREIGN_CODE \ - octave_interrupt_immediately--; \ - octave_restore_current_context (saved_context); \ - } \ - } \ +#define END_INTERRUPT_IMMEDIATELY_IN_FOREIGN_CODE \ + octave_interrupt_immediately--; \ + octave_restore_current_context (saved_context); \ +} \ +} \ while (0) #if defined (__cplusplus) -#define BEGIN_INTERRUPT_WITH_EXCEPTIONS \ +#define BEGIN_INTERRUPT_WITH_EXCEPTIONS \ sig_atomic_t saved_octave_interrupt_immediately = octave_interrupt_immediately; \ - \ - try \ - { \ + \ + try \ + { \ octave_interrupt_immediately = 0; -#define END_INTERRUPT_WITH_EXCEPTIONS \ - } \ - catch (const octave_interrupt_exception&) \ - { \ +#define END_INTERRUPT_WITH_EXCEPTIONS \ + } \ + catch (const octave_interrupt_exception&) \ + { \ octave_interrupt_immediately = saved_octave_interrupt_immediately; \ - octave_jump_to_enclosing_context (); \ - } \ - catch (const octave_execution_exception&) \ - { \ + octave_jump_to_enclosing_context (); \ + } \ + catch (const octave_execution_exception&) \ + { \ octave_interrupt_immediately = saved_octave_interrupt_immediately; \ - octave_exception_state = octave_exec_exception; \ - octave_jump_to_enclosing_context (); \ - } \ - catch (const std::bad_alloc&) \ - { \ + octave_exception_state = octave_exec_exception; \ + octave_jump_to_enclosing_context (); \ + } \ + catch (const std::bad_alloc&) \ + { \ octave_interrupt_immediately = saved_octave_interrupt_immediately; \ - octave_exception_state = octave_alloc_exception; \ - octave_jump_to_enclosing_context (); \ - } \ - catch (const octave_exit_exception& ex) \ - { \ + octave_exception_state = octave_alloc_exception; \ + octave_jump_to_enclosing_context (); \ + } \ + catch (const octave_exit_exception& ex) \ + { \ octave_interrupt_immediately = saved_octave_interrupt_immediately; \ - octave_exception_state = octave_quit_exception; \ - octave_exit_exception_status = ex.exit_status (); \ - octave_exit_exception_safe_to_return = ex.safe_to_return (); \ - octave_jump_to_enclosing_context (); \ - } \ - \ + octave_exception_state = octave_quit_exception; \ + octave_exit_exception_status = ex.exit_status (); \ + octave_exit_exception_safe_to_return = ex.safe_to_return (); \ + octave_jump_to_enclosing_context (); \ + } \ + \ octave_interrupt_immediately = saved_octave_interrupt_immediately #endif diff -r dd992fd74fce -r e43d83253e28 liboctave/numeric/bsxfun-decl.h --- a/liboctave/numeric/bsxfun-decl.h Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/numeric/bsxfun-decl.h Mon Aug 01 12:40:18 2016 -0400 
@@ -26,36 +26,36 @@ #include "octave-config.h" -#define BSXFUN_OP_DECL(OP, ARRAY, API) \ -extern API ARRAY bsxfun_ ## OP (const ARRAY&, const ARRAY&); +#define BSXFUN_OP_DECL(OP, ARRAY, API) \ + extern API ARRAY bsxfun_ ## OP (const ARRAY&, const ARRAY&); -#define BSXFUN_OP2_DECL(OP, ARRAY, ARRAY1, ARRAY2, API) \ -extern API ARRAY bsxfun_ ## OP (const ARRAY1&, const ARRAY2&); +#define BSXFUN_OP2_DECL(OP, ARRAY, ARRAY1, ARRAY2, API) \ + extern API ARRAY bsxfun_ ## OP (const ARRAY1&, const ARRAY2&); -#define BSXFUN_REL_DECL(OP, ARRAY, API) \ -extern API boolNDArray bsxfun_ ## OP (const ARRAY&, const ARRAY&); +#define BSXFUN_REL_DECL(OP, ARRAY, API) \ + extern API boolNDArray bsxfun_ ## OP (const ARRAY&, const ARRAY&); -#define BSXFUN_STDOP_DECLS(ARRAY, API) \ - BSXFUN_OP_DECL (add, ARRAY, API) \ - BSXFUN_OP_DECL (sub, ARRAY, API) \ - BSXFUN_OP_DECL (mul, ARRAY, API) \ - BSXFUN_OP_DECL (div, ARRAY, API) \ - BSXFUN_OP_DECL (pow, ARRAY, API) \ - BSXFUN_OP_DECL (min, ARRAY, API) \ +#define BSXFUN_STDOP_DECLS(ARRAY, API) \ + BSXFUN_OP_DECL (add, ARRAY, API) \ + BSXFUN_OP_DECL (sub, ARRAY, API) \ + BSXFUN_OP_DECL (mul, ARRAY, API) \ + BSXFUN_OP_DECL (div, ARRAY, API) \ + BSXFUN_OP_DECL (pow, ARRAY, API) \ + BSXFUN_OP_DECL (min, ARRAY, API) \ BSXFUN_OP_DECL (max, ARRAY, API) -#define BSXFUN_MIXED_INT_DECLS(INT_TYPE, API) \ +#define BSXFUN_MIXED_INT_DECLS(INT_TYPE, API) \ BSXFUN_OP2_DECL (pow, INT_TYPE, INT_TYPE, NDArray, API) \ BSXFUN_OP2_DECL (pow, INT_TYPE, INT_TYPE, FloatNDArray, API) \ BSXFUN_OP2_DECL (pow, INT_TYPE, NDArray, INT_TYPE, API) \ BSXFUN_OP2_DECL (pow, INT_TYPE, FloatNDArray, INT_TYPE, API) -#define BSXFUN_STDREL_DECLS(ARRAY, API) \ - BSXFUN_REL_DECL (eq, ARRAY, API) \ - BSXFUN_REL_DECL (ne, ARRAY, API) \ - BSXFUN_REL_DECL (lt, ARRAY, API) \ - BSXFUN_REL_DECL (le, ARRAY, API) \ - BSXFUN_REL_DECL (gt, ARRAY, API) \ +#define BSXFUN_STDREL_DECLS(ARRAY, API) \ + BSXFUN_REL_DECL (eq, ARRAY, API) \ + BSXFUN_REL_DECL (ne, ARRAY, API) \ + BSXFUN_REL_DECL (lt, ARRAY, API) \ + BSXFUN_REL_DECL (le, ARRAY, API) \ + BSXFUN_REL_DECL (gt, ARRAY, API) \ BSXFUN_REL_DECL (ge, ARRAY, API) #endif diff -r dd992fd74fce -r e43d83253e28 liboctave/numeric/bsxfun-defs.cc --- a/liboctave/numeric/bsxfun-defs.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/numeric/bsxfun-defs.cc Mon Aug 01 12:40:18 2016 -0400 @@ -210,50 +210,50 @@ } } -#define BSXFUN_OP_DEF(OP, ARRAY) \ -ARRAY bsxfun_ ## OP (const ARRAY& x, const ARRAY& y) +#define BSXFUN_OP_DEF(OP, ARRAY) \ + ARRAY bsxfun_ ## OP (const ARRAY& x, const ARRAY& y) -#define BSXFUN_OP2_DEF(OP, ARRAY, ARRAY1, ARRAY2) \ -ARRAY bsxfun_ ## OP (const ARRAY1& x, const ARRAY2& y) +#define BSXFUN_OP2_DEF(OP, ARRAY, ARRAY1, ARRAY2) \ + ARRAY bsxfun_ ## OP (const ARRAY1& x, const ARRAY2& y) -#define BSXFUN_REL_DEF(OP, ARRAY) \ -boolNDArray bsxfun_ ## OP (const ARRAY& x, const ARRAY& y) +#define BSXFUN_REL_DEF(OP, ARRAY) \ + boolNDArray bsxfun_ ## OP (const ARRAY& x, const ARRAY& y) -#define BSXFUN_OP_DEF_MXLOOP(OP, ARRAY, LOOP) \ - BSXFUN_OP_DEF(OP, ARRAY) \ +#define BSXFUN_OP_DEF_MXLOOP(OP, ARRAY, LOOP) \ + BSXFUN_OP_DEF(OP, ARRAY) \ { return do_bsxfun_op \ - (x, y, LOOP, LOOP, LOOP); } + (x, y, LOOP, LOOP, LOOP); } -#define BSXFUN_OP2_DEF_MXLOOP(OP, ARRAY, ARRAY1, ARRAY2, LOOP) \ - BSXFUN_OP2_DEF(OP, ARRAY, ARRAY1, ARRAY2) \ +#define BSXFUN_OP2_DEF_MXLOOP(OP, ARRAY, ARRAY1, ARRAY2, LOOP) \ + BSXFUN_OP2_DEF(OP, ARRAY, ARRAY1, ARRAY2) \ { return do_bsxfun_op \ - (x, y, LOOP, LOOP, LOOP); } + (x, y, LOOP, LOOP, LOOP); } -#define BSXFUN_REL_DEF_MXLOOP(OP, ARRAY, LOOP) \ - 
BSXFUN_REL_DEF(OP, ARRAY) \ +#define BSXFUN_REL_DEF_MXLOOP(OP, ARRAY, LOOP) \ + BSXFUN_REL_DEF(OP, ARRAY) \ { return do_bsxfun_op \ - (x, y, LOOP, LOOP, LOOP); } + (x, y, LOOP, LOOP, LOOP); } -#define BSXFUN_STDOP_DEFS_MXLOOP(ARRAY) \ - BSXFUN_OP_DEF_MXLOOP (add, ARRAY, mx_inline_add) \ - BSXFUN_OP_DEF_MXLOOP (sub, ARRAY, mx_inline_sub) \ - BSXFUN_OP_DEF_MXLOOP (mul, ARRAY, mx_inline_mul) \ - BSXFUN_OP_DEF_MXLOOP (div, ARRAY, mx_inline_div) \ - BSXFUN_OP_DEF_MXLOOP (min, ARRAY, mx_inline_xmin) \ +#define BSXFUN_STDOP_DEFS_MXLOOP(ARRAY) \ + BSXFUN_OP_DEF_MXLOOP (add, ARRAY, mx_inline_add) \ + BSXFUN_OP_DEF_MXLOOP (sub, ARRAY, mx_inline_sub) \ + BSXFUN_OP_DEF_MXLOOP (mul, ARRAY, mx_inline_mul) \ + BSXFUN_OP_DEF_MXLOOP (div, ARRAY, mx_inline_div) \ + BSXFUN_OP_DEF_MXLOOP (min, ARRAY, mx_inline_xmin) \ BSXFUN_OP_DEF_MXLOOP (max, ARRAY, mx_inline_xmax) -#define BSXFUN_STDREL_DEFS_MXLOOP(ARRAY) \ - BSXFUN_REL_DEF_MXLOOP (eq, ARRAY, mx_inline_eq) \ - BSXFUN_REL_DEF_MXLOOP (ne, ARRAY, mx_inline_ne) \ - BSXFUN_REL_DEF_MXLOOP (lt, ARRAY, mx_inline_lt) \ - BSXFUN_REL_DEF_MXLOOP (le, ARRAY, mx_inline_le) \ - BSXFUN_REL_DEF_MXLOOP (gt, ARRAY, mx_inline_gt) \ +#define BSXFUN_STDREL_DEFS_MXLOOP(ARRAY) \ + BSXFUN_REL_DEF_MXLOOP (eq, ARRAY, mx_inline_eq) \ + BSXFUN_REL_DEF_MXLOOP (ne, ARRAY, mx_inline_ne) \ + BSXFUN_REL_DEF_MXLOOP (lt, ARRAY, mx_inline_lt) \ + BSXFUN_REL_DEF_MXLOOP (le, ARRAY, mx_inline_le) \ + BSXFUN_REL_DEF_MXLOOP (gt, ARRAY, mx_inline_gt) \ BSXFUN_REL_DEF_MXLOOP (ge, ARRAY, mx_inline_ge) //For bsxfun power with mixed integer/float types -#define BSXFUN_POW_MIXED_MXLOOP(INT_TYPE) \ +#define BSXFUN_POW_MIXED_MXLOOP(INT_TYPE) \ BSXFUN_OP2_DEF_MXLOOP (pow, INT_TYPE, INT_TYPE, NDArray, mx_inline_pow) \ - BSXFUN_OP2_DEF_MXLOOP (pow, INT_TYPE, INT_TYPE, FloatNDArray, mx_inline_pow)\ + BSXFUN_OP2_DEF_MXLOOP (pow, INT_TYPE, INT_TYPE, FloatNDArray, mx_inline_pow) \ BSXFUN_OP2_DEF_MXLOOP (pow, INT_TYPE, NDArray, INT_TYPE, mx_inline_pow) \ BSXFUN_OP2_DEF_MXLOOP (pow, INT_TYPE, FloatNDArray, INT_TYPE, mx_inline_pow) diff -r dd992fd74fce -r e43d83253e28 liboctave/numeric/lo-specfun.cc --- a/liboctave/numeric/lo-specfun.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/numeric/lo-specfun.cc Mon Aug 01 12:40:18 2016 -0400 @@ -1402,77 +1402,77 @@ return retval; } -#define SS_BESSEL(name, fcn) \ - Complex \ +#define SS_BESSEL(name, fcn) \ + Complex \ name (double alpha, const Complex& x, bool scaled, octave_idx_type& ierr) \ - { \ - return do_bessel (fcn, #name, alpha, x, scaled, ierr); \ + { \ + return do_bessel (fcn, #name, alpha, x, scaled, ierr); \ } -#define SM_BESSEL(name, fcn) \ - ComplexMatrix \ - name (double alpha, const ComplexMatrix& x, bool scaled, \ - Array& ierr) \ - { \ - return do_bessel (fcn, #name, alpha, x, scaled, ierr); \ +#define SM_BESSEL(name, fcn) \ + ComplexMatrix \ + name (double alpha, const ComplexMatrix& x, bool scaled, \ + Array& ierr) \ + { \ + return do_bessel (fcn, #name, alpha, x, scaled, ierr); \ } -#define MS_BESSEL(name, fcn) \ - ComplexMatrix \ - name (const Matrix& alpha, const Complex& x, bool scaled, \ - Array& ierr) \ - { \ - return do_bessel (fcn, #name, alpha, x, scaled, ierr); \ +#define MS_BESSEL(name, fcn) \ + ComplexMatrix \ + name (const Matrix& alpha, const Complex& x, bool scaled, \ + Array& ierr) \ + { \ + return do_bessel (fcn, #name, alpha, x, scaled, ierr); \ } -#define MM_BESSEL(name, fcn) \ - ComplexMatrix \ - name (const Matrix& alpha, const ComplexMatrix& x, bool scaled, \ - Array& ierr) \ - { \ - return do_bessel (fcn, #name, alpha, x, scaled, 
ierr); \ +#define MM_BESSEL(name, fcn) \ + ComplexMatrix \ + name (const Matrix& alpha, const ComplexMatrix& x, bool scaled, \ + Array& ierr) \ + { \ + return do_bessel (fcn, #name, alpha, x, scaled, ierr); \ } -#define SN_BESSEL(name, fcn) \ - ComplexNDArray \ - name (double alpha, const ComplexNDArray& x, bool scaled, \ - Array& ierr) \ - { \ - return do_bessel (fcn, #name, alpha, x, scaled, ierr); \ +#define SN_BESSEL(name, fcn) \ + ComplexNDArray \ + name (double alpha, const ComplexNDArray& x, bool scaled, \ + Array& ierr) \ + { \ + return do_bessel (fcn, #name, alpha, x, scaled, ierr); \ } -#define NS_BESSEL(name, fcn) \ - ComplexNDArray \ - name (const NDArray& alpha, const Complex& x, bool scaled, \ - Array& ierr) \ - { \ - return do_bessel (fcn, #name, alpha, x, scaled, ierr); \ +#define NS_BESSEL(name, fcn) \ + ComplexNDArray \ + name (const NDArray& alpha, const Complex& x, bool scaled, \ + Array& ierr) \ + { \ + return do_bessel (fcn, #name, alpha, x, scaled, ierr); \ } -#define NN_BESSEL(name, fcn) \ - ComplexNDArray \ - name (const NDArray& alpha, const ComplexNDArray& x, bool scaled, \ - Array& ierr) \ - { \ - return do_bessel (fcn, #name, alpha, x, scaled, ierr); \ +#define NN_BESSEL(name, fcn) \ + ComplexNDArray \ + name (const NDArray& alpha, const ComplexNDArray& x, bool scaled, \ + Array& ierr) \ + { \ + return do_bessel (fcn, #name, alpha, x, scaled, ierr); \ } -#define RC_BESSEL(name, fcn) \ - ComplexMatrix \ +#define RC_BESSEL(name, fcn) \ + ComplexMatrix \ name (const RowVector& alpha, const ComplexColumnVector& x, bool scaled, \ - Array& ierr) \ - { \ - return do_bessel (fcn, #name, alpha, x, scaled, ierr); \ + Array& ierr) \ + { \ + return do_bessel (fcn, #name, alpha, x, scaled, ierr); \ } -#define ALL_BESSEL(name, fcn) \ - SS_BESSEL (name, fcn) \ - SM_BESSEL (name, fcn) \ - MS_BESSEL (name, fcn) \ - MM_BESSEL (name, fcn) \ - SN_BESSEL (name, fcn) \ - NS_BESSEL (name, fcn) \ - NN_BESSEL (name, fcn) \ +#define ALL_BESSEL(name, fcn) \ + SS_BESSEL (name, fcn) \ + SM_BESSEL (name, fcn) \ + MS_BESSEL (name, fcn) \ + MM_BESSEL (name, fcn) \ + SN_BESSEL (name, fcn) \ + NS_BESSEL (name, fcn) \ + NN_BESSEL (name, fcn) \ RC_BESSEL (name, fcn) ALL_BESSEL (besselj, zbesj) @@ -2015,77 +2015,77 @@ return retval; } -#define SS_BESSEL(name, fcn) \ - FloatComplex \ +#define SS_BESSEL(name, fcn) \ + FloatComplex \ name (float alpha, const FloatComplex& x, bool scaled, octave_idx_type& ierr) \ - { \ - return do_bessel (fcn, #name, alpha, x, scaled, ierr); \ + { \ + return do_bessel (fcn, #name, alpha, x, scaled, ierr); \ } -#define SM_BESSEL(name, fcn) \ - FloatComplexMatrix \ - name (float alpha, const FloatComplexMatrix& x, bool scaled, \ - Array& ierr) \ - { \ - return do_bessel (fcn, #name, alpha, x, scaled, ierr); \ +#define SM_BESSEL(name, fcn) \ + FloatComplexMatrix \ + name (float alpha, const FloatComplexMatrix& x, bool scaled, \ + Array& ierr) \ + { \ + return do_bessel (fcn, #name, alpha, x, scaled, ierr); \ } -#define MS_BESSEL(name, fcn) \ - FloatComplexMatrix \ +#define MS_BESSEL(name, fcn) \ + FloatComplexMatrix \ name (const FloatMatrix& alpha, const FloatComplex& x, bool scaled, \ - Array& ierr) \ - { \ - return do_bessel (fcn, #name, alpha, x, scaled, ierr); \ + Array& ierr) \ + { \ + return do_bessel (fcn, #name, alpha, x, scaled, ierr); \ } -#define MM_BESSEL(name, fcn) \ - FloatComplexMatrix \ +#define MM_BESSEL(name, fcn) \ + FloatComplexMatrix \ name (const FloatMatrix& alpha, const FloatComplexMatrix& x, bool scaled, \ - Array& ierr) \ - { \ - return 
do_bessel (fcn, #name, alpha, x, scaled, ierr); \ + Array& ierr) \ + { \ + return do_bessel (fcn, #name, alpha, x, scaled, ierr); \ } -#define SN_BESSEL(name, fcn) \ - FloatComplexNDArray \ - name (float alpha, const FloatComplexNDArray& x, bool scaled, \ - Array& ierr) \ - { \ - return do_bessel (fcn, #name, alpha, x, scaled, ierr); \ +#define SN_BESSEL(name, fcn) \ + FloatComplexNDArray \ + name (float alpha, const FloatComplexNDArray& x, bool scaled, \ + Array& ierr) \ + { \ + return do_bessel (fcn, #name, alpha, x, scaled, ierr); \ } -#define NS_BESSEL(name, fcn) \ - FloatComplexNDArray \ +#define NS_BESSEL(name, fcn) \ + FloatComplexNDArray \ name (const FloatNDArray& alpha, const FloatComplex& x, bool scaled, \ - Array& ierr) \ - { \ - return do_bessel (fcn, #name, alpha, x, scaled, ierr); \ + Array& ierr) \ + { \ + return do_bessel (fcn, #name, alpha, x, scaled, ierr); \ } -#define NN_BESSEL(name, fcn) \ - FloatComplexNDArray \ +#define NN_BESSEL(name, fcn) \ + FloatComplexNDArray \ name (const FloatNDArray& alpha, const FloatComplexNDArray& x, bool scaled, \ - Array& ierr) \ - { \ - return do_bessel (fcn, #name, alpha, x, scaled, ierr); \ + Array& ierr) \ + { \ + return do_bessel (fcn, #name, alpha, x, scaled, ierr); \ } -#define RC_BESSEL(name, fcn) \ - FloatComplexMatrix \ +#define RC_BESSEL(name, fcn) \ + FloatComplexMatrix \ name (const FloatRowVector& alpha, const FloatComplexColumnVector& x, bool scaled, \ - Array& ierr) \ - { \ - return do_bessel (fcn, #name, alpha, x, scaled, ierr); \ + Array& ierr) \ + { \ + return do_bessel (fcn, #name, alpha, x, scaled, ierr); \ } -#define ALL_BESSEL(name, fcn) \ - SS_BESSEL (name, fcn) \ - SM_BESSEL (name, fcn) \ - MS_BESSEL (name, fcn) \ - MM_BESSEL (name, fcn) \ - SN_BESSEL (name, fcn) \ - NS_BESSEL (name, fcn) \ - NN_BESSEL (name, fcn) \ +#define ALL_BESSEL(name, fcn) \ + SS_BESSEL (name, fcn) \ + SM_BESSEL (name, fcn) \ + MS_BESSEL (name, fcn) \ + MM_BESSEL (name, fcn) \ + SN_BESSEL (name, fcn) \ + NS_BESSEL (name, fcn) \ + NN_BESSEL (name, fcn) \ RC_BESSEL (name, fcn) ALL_BESSEL (besselj, cbesj) diff -r dd992fd74fce -r e43d83253e28 liboctave/numeric/oct-convn.cc --- a/liboctave/numeric/oct-convn.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/numeric/oct-convn.cc Mon Aug 01 12:40:18 2016 -0400 @@ -40,34 +40,36 @@ T *c, bool inner); // Forward instances to our Fortran implementations. 
-#define FORWARD_IMPL(T_CXX, R_CXX, T, R, T_CAST, T_CONST_CAST, \ - R_CONST_CAST, f, F) \ -extern "C" \ -F77_RET_T \ -F77_FUNC (f##conv2o, F##CONV2O) (const F77_INT&, \ - const F77_INT&, \ - const T*, const F77_INT&, \ - const F77_INT&, const R*, T *); \ -\ -extern "C" \ -F77_RET_T \ -F77_FUNC (f##conv2i, F##CONV2I) (const F77_INT&, \ - const F77_INT&, \ - const T*, const F77_INT&, \ - const F77_INT&, const R*, T *); \ -\ -template <> void \ -convolve_2d (const T_CXX *a, F77_INT ma, F77_INT na, \ - const R_CXX *b, F77_INT mb, F77_INT nb, \ - T_CXX *c, bool inner) \ -{ \ - if (inner) \ - F77_XFCN (f##conv2i, F##CONV2I, (ma, na, T_CONST_CAST (a), \ - mb, nb, R_CONST_CAST (b), T_CAST (c))); \ - else \ - F77_XFCN (f##conv2o, F##CONV2O, (ma, na, T_CONST_CAST (a), \ - mb, nb, R_CONST_CAST (b), T_CAST (c))); \ -} +#define FORWARD_IMPL(T_CXX, R_CXX, T, R, T_CAST, T_CONST_CAST, \ + R_CONST_CAST, f, F) \ + extern "C" \ + F77_RET_T \ + F77_FUNC (f##conv2o, F##CONV2O) (const F77_INT&, \ + const F77_INT&, \ + const T*, const F77_INT&, \ + const F77_INT&, const R*, T *); \ + \ + extern "C" \ + F77_RET_T \ + F77_FUNC (f##conv2i, F##CONV2I) (const F77_INT&, \ + const F77_INT&, \ + const T*, const F77_INT&, \ + const F77_INT&, const R*, T *); \ + \ + template <> void \ + convolve_2d (const T_CXX *a, F77_INT ma, F77_INT na, \ + const R_CXX *b, F77_INT mb, F77_INT nb, \ + T_CXX *c, bool inner) \ + { \ + if (inner) \ + F77_XFCN (f##conv2i, F##CONV2I, (ma, na, T_CONST_CAST (a), \ + mb, nb, R_CONST_CAST (b), \ + T_CAST (c))); \ + else \ + F77_XFCN (f##conv2o, F##CONV2O, (ma, na, T_CONST_CAST (a), \ + mb, nb, R_CONST_CAST (b), \ + T_CAST (c))); \ + } FORWARD_IMPL (double, double, F77_DBLE, F77_DBLE, , , , d, D) FORWARD_IMPL (float, float, F77_REAL, F77_REAL, , , , s, S) @@ -162,23 +164,25 @@ return c; } -#define CONV_DEFS(TPREF, RPREF) \ -TPREF ## NDArray \ -convn (const TPREF ## NDArray& a, const RPREF ## NDArray& b, convn_type ct) \ -{ \ - return convolve (a, b, ct); \ -} \ -TPREF ## Matrix \ -convn (const TPREF ## Matrix& a, const RPREF ## Matrix& b, convn_type ct) \ -{ \ - return convolve (a, b, ct); \ -} \ -TPREF ## Matrix \ -convn (const TPREF ## Matrix& a, const RPREF ## ColumnVector& c, \ - const RPREF ## RowVector& r, convn_type ct) \ -{ \ - return convolve (a, c * r, ct); \ -} +#define CONV_DEFS(TPREF, RPREF) \ + TPREF ## NDArray \ + convn (const TPREF ## NDArray& a, const RPREF ## NDArray& b, \ + convn_type ct) \ + { \ + return convolve (a, b, ct); \ + } \ + TPREF ## Matrix \ + convn (const TPREF ## Matrix& a, const RPREF ## Matrix& b, \ + convn_type ct) \ + { \ + return convolve (a, b, ct); \ + } \ + TPREF ## Matrix \ + convn (const TPREF ## Matrix& a, const RPREF ## ColumnVector& c, \ + const RPREF ## RowVector& r, convn_type ct) \ + { \ + return convolve (a, c * r, ct); \ + } CONV_DEFS ( , ) CONV_DEFS (Complex, ) diff -r dd992fd74fce -r e43d83253e28 liboctave/numeric/oct-convn.h --- a/liboctave/numeric/oct-convn.h Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/numeric/oct-convn.h Mon Aug 01 12:40:18 2016 -0400 @@ -53,14 +53,17 @@ convn_valid }; -#define CONV_DECLS(TPREF, RPREF) \ -extern OCTAVE_API TPREF ## NDArray \ -convn (const TPREF ## NDArray& a, const RPREF ## NDArray& b, convn_type ct); \ -extern OCTAVE_API TPREF ## Matrix \ -convn (const TPREF ## Matrix& a, const RPREF ## Matrix& b, convn_type ct); \ -extern OCTAVE_API TPREF ## Matrix \ -convn (const TPREF ## Matrix& a, const RPREF ## ColumnVector& c, \ - const RPREF ## RowVector& r, convn_type ct) +#define CONV_DECLS(TPREF, RPREF) \ + 
extern OCTAVE_API TPREF ## NDArray \ + convn (const TPREF ## NDArray& a, const RPREF ## NDArray& b, \ + convn_type ct); \ + extern OCTAVE_API TPREF ## Matrix \ + convn (const TPREF ## Matrix& a, const RPREF ## Matrix& b, \ + convn_type ct); \ + extern OCTAVE_API TPREF ## Matrix \ + convn (const TPREF ## Matrix& a, const RPREF ## ColumnVector& c, \ + const RPREF ## RowVector& r, convn_type ct) + CONV_DECLS ( , ); CONV_DECLS (Complex, ); diff -r dd992fd74fce -r e43d83253e28 liboctave/numeric/oct-norm.cc --- a/liboctave/numeric/oct-norm.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/numeric/oct-norm.cc Mon Aug 01 12:40:18 2016 -0400 @@ -292,30 +292,30 @@ } // now the dispatchers -#define DEFINE_DISPATCHER(FUNC_NAME, ARG_TYPE, RES_TYPE) \ -template \ -RES_TYPE FUNC_NAME (const ARG_TYPE& v, R p) \ -{ \ - RES_TYPE res; \ - if (p == 2) \ - FUNC_NAME (v, res, norm_accumulator_2 ()); \ - else if (p == 1) \ - FUNC_NAME (v, res, norm_accumulator_1 ()); \ - else if (lo_ieee_isinf (p)) \ - { \ - if (p > 0) \ - FUNC_NAME (v, res, norm_accumulator_inf ()); \ - else \ - FUNC_NAME (v, res, norm_accumulator_minf ()); \ - } \ - else if (p == 0) \ - FUNC_NAME (v, res, norm_accumulator_0 ()); \ - else if (p > 0) \ - FUNC_NAME (v, res, norm_accumulator_p (p)); \ - else \ - FUNC_NAME (v, res, norm_accumulator_mp (p)); \ - return res; \ -} +#define DEFINE_DISPATCHER(FUNC_NAME, ARG_TYPE, RES_TYPE) \ + template \ + RES_TYPE FUNC_NAME (const ARG_TYPE& v, R p) \ + { \ + RES_TYPE res; \ + if (p == 2) \ + FUNC_NAME (v, res, norm_accumulator_2 ()); \ + else if (p == 1) \ + FUNC_NAME (v, res, norm_accumulator_1 ()); \ + else if (lo_ieee_isinf (p)) \ + { \ + if (p > 0) \ + FUNC_NAME (v, res, norm_accumulator_inf ()); \ + else \ + FUNC_NAME (v, res, norm_accumulator_minf ()); \ + } \ + else if (p == 0) \ + FUNC_NAME (v, res, norm_accumulator_0 ()); \ + else if (p > 0) \ + FUNC_NAME (v, res, norm_accumulator_p (p)); \ + else \ + FUNC_NAME (v, res, norm_accumulator_mp (p)); \ + return res; \ + } DEFINE_DISPATCHER (vector_norm, MArray, R) DEFINE_DISPATCHER (column_norms, MArray, MArray) @@ -527,15 +527,23 @@ // and finally, here's what we've promised in the header file -#define DEFINE_XNORM_FUNCS(PREFIX, RTYPE) \ - OCTAVE_API RTYPE xnorm (const PREFIX##ColumnVector& x, RTYPE p) \ - { return vector_norm (x, p); } \ - OCTAVE_API RTYPE xnorm (const PREFIX##RowVector& x, RTYPE p) \ - { return vector_norm (x, p); } \ - OCTAVE_API RTYPE xnorm (const PREFIX##Matrix& x, RTYPE p) \ - { return svd_matrix_norm (x, p, PREFIX##Matrix ()); } \ - OCTAVE_API RTYPE xfrobnorm (const PREFIX##Matrix& x) \ - { return vector_norm (x, static_cast (2)); } +#define DEFINE_XNORM_FUNCS(PREFIX, RTYPE) \ + OCTAVE_API RTYPE xnorm (const PREFIX##ColumnVector& x, RTYPE p) \ + { \ + return vector_norm (x, p); \ + } \ + OCTAVE_API RTYPE xnorm (const PREFIX##RowVector& x, RTYPE p) \ + { \ + return vector_norm (x, p); \ + } \ + OCTAVE_API RTYPE xnorm (const PREFIX##Matrix& x, RTYPE p) \ + { \ + return svd_matrix_norm (x, p, PREFIX##Matrix ()); \ + } \ + OCTAVE_API RTYPE xfrobnorm (const PREFIX##Matrix& x) \ + { \ + return vector_norm (x, static_cast (2)); \ + } DEFINE_XNORM_FUNCS(, double) DEFINE_XNORM_FUNCS(Complex, double) @@ -553,24 +561,32 @@ res = acc; } -#define DEFINE_XNORM_SPARSE_FUNCS(PREFIX, RTYPE) \ - OCTAVE_API RTYPE xnorm (const Sparse##PREFIX##Matrix& x, RTYPE p) \ - { return matrix_norm (x, p, PREFIX##Matrix ()); } \ - OCTAVE_API RTYPE xfrobnorm (const Sparse##PREFIX##Matrix& x) \ - { \ - RTYPE res; \ - array_norm_2 (x.data (), x.nnz (), res); 
\ - return res; \ +#define DEFINE_XNORM_SPARSE_FUNCS(PREFIX, RTYPE) \ + OCTAVE_API RTYPE xnorm (const Sparse##PREFIX##Matrix& x, RTYPE p) \ + { \ + return matrix_norm (x, p, PREFIX##Matrix ()); \ + } \ + OCTAVE_API RTYPE xfrobnorm (const Sparse##PREFIX##Matrix& x) \ + { \ + RTYPE res; \ + array_norm_2 (x.data (), x.nnz (), res); \ + return res; \ } DEFINE_XNORM_SPARSE_FUNCS(, double) DEFINE_XNORM_SPARSE_FUNCS(Complex, double) -#define DEFINE_COLROW_NORM_FUNCS(PREFIX, RPREFIX, RTYPE) \ - extern OCTAVE_API RPREFIX##RowVector xcolnorms (const PREFIX##Matrix& m, RTYPE p) \ - { return column_norms (m, p); } \ - extern OCTAVE_API RPREFIX##ColumnVector xrownorms (const PREFIX##Matrix& m, RTYPE p) \ - { return row_norms (m, p); } \ +#define DEFINE_COLROW_NORM_FUNCS(PREFIX, RPREFIX, RTYPE) \ + extern OCTAVE_API RPREFIX##RowVector \ + xcolnorms (const PREFIX##Matrix& m, RTYPE p) \ + { \ + return column_norms (m, p); \ + } \ + extern OCTAVE_API RPREFIX##ColumnVector \ + xrownorms (const PREFIX##Matrix& m, RTYPE p) \ + { \ + return row_norms (m, p); \ + } \ DEFINE_COLROW_NORM_FUNCS(, , double) DEFINE_COLROW_NORM_FUNCS(Complex, , double) diff -r dd992fd74fce -r e43d83253e28 liboctave/numeric/oct-norm.h --- a/liboctave/numeric/oct-norm.h Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/numeric/oct-norm.h Mon Aug 01 12:40:18 2016 -0400 @@ -29,15 +29,19 @@ #include "oct-cmplx.h" -#define DECLARE_XNORM_FUNCS(PREFIX, RTYPE) \ - class PREFIX##Matrix; \ - class PREFIX##ColumnVector; \ - class PREFIX##RowVector; \ - \ - extern OCTAVE_API RTYPE xnorm (const PREFIX##ColumnVector&, RTYPE p = 2); \ - extern OCTAVE_API RTYPE xnorm (const PREFIX##RowVector&, RTYPE p = 2); \ - extern OCTAVE_API RTYPE xnorm (const PREFIX##Matrix&, RTYPE p = 2); \ - extern OCTAVE_API RTYPE xfrobnorm (const PREFIX##Matrix&); +#define DECLARE_XNORM_FUNCS(PREFIX, RTYPE) \ + class PREFIX##Matrix; \ + class PREFIX##ColumnVector; \ + class PREFIX##RowVector; \ + \ + extern OCTAVE_API RTYPE \ + xnorm (const PREFIX##ColumnVector&, RTYPE p = 2); \ + extern OCTAVE_API RTYPE \ + xnorm (const PREFIX##RowVector&, RTYPE p = 2); \ + extern OCTAVE_API RTYPE \ + xnorm (const PREFIX##Matrix&, RTYPE p = 2); \ + extern OCTAVE_API RTYPE \ + xfrobnorm (const PREFIX##Matrix&); DECLARE_XNORM_FUNCS(, double) DECLARE_XNORM_FUNCS(Complex, double) @@ -47,9 +51,11 @@ DECLARE_XNORM_FUNCS(Sparse, double) DECLARE_XNORM_FUNCS(SparseComplex, double) -#define DECLARE_COLROW_NORM_FUNCS(PREFIX, RPREFIX, RTYPE) \ - extern OCTAVE_API RPREFIX##RowVector xcolnorms (const PREFIX##Matrix&, RTYPE p = 2); \ - extern OCTAVE_API RPREFIX##ColumnVector xrownorms (const PREFIX##Matrix&, RTYPE p = 2); \ +#define DECLARE_COLROW_NORM_FUNCS(PREFIX, RPREFIX, RTYPE) \ + extern OCTAVE_API RPREFIX##RowVector \ + xcolnorms (const PREFIX##Matrix&, RTYPE p = 2); \ + extern OCTAVE_API RPREFIX##ColumnVector \ + xrownorms (const PREFIX##Matrix&, RTYPE p = 2); \ DECLARE_COLROW_NORM_FUNCS(, , double) DECLARE_COLROW_NORM_FUNCS(Complex, , double) diff -r dd992fd74fce -r e43d83253e28 liboctave/numeric/oct-rand.cc --- a/liboctave/numeric/oct-rand.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/numeric/oct-rand.cc Mon Aug 01 12:40:18 2016 -0400 @@ -710,17 +710,17 @@ } } -#define MAKE_RAND(len) \ - do \ - { \ - double val; \ - for (volatile octave_idx_type i = 0; i < len; i++) \ - { \ - octave_quit (); \ - RAND_FUNC (val); \ - v[i] = val; \ - } \ - } \ +#define MAKE_RAND(len) \ + do \ + { \ + double val; \ + for (volatile octave_idx_type i = 0; i < len; i++) \ + { \ + octave_quit (); \ + RAND_FUNC (val); \ 
+ v[i] = val; \ + } \ + } \ while (0) void diff -r dd992fd74fce -r e43d83253e28 liboctave/operators/Sparse-op-decls.h --- a/liboctave/operators/Sparse-op-decls.h Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/operators/Sparse-op-decls.h Mon Aug 01 12:40:18 2016 -0400 @@ -29,158 +29,158 @@ class SparseBoolMatrix; -#define SPARSE_BIN_OP_DECL(R, OP, X, Y, API) \ +#define SPARSE_BIN_OP_DECL(R, OP, X, Y, API) \ extern API R OP (const X&, const Y&) -#define SPARSE_CMP_OP_DECL(OP, X, Y, API) \ +#define SPARSE_CMP_OP_DECL(OP, X, Y, API) \ extern API SparseBoolMatrix OP (const X&, const Y&) -#define SPARSE_BOOL_OP_DECL(OP, X, Y, API) \ +#define SPARSE_BOOL_OP_DECL(OP, X, Y, API) \ extern API SparseBoolMatrix OP (const X&, const Y&) // sparse matrix by scalar operations. -#define SPARSE_SMS_BIN_OP_DECLS(R1, R2, M, S, API) \ - SPARSE_BIN_OP_DECL (R1, operator +, M, S, API); \ - SPARSE_BIN_OP_DECL (R1, operator -, M, S, API); \ - SPARSE_BIN_OP_DECL (R2, operator *, M, S, API); \ +#define SPARSE_SMS_BIN_OP_DECLS(R1, R2, M, S, API) \ + SPARSE_BIN_OP_DECL (R1, operator +, M, S, API); \ + SPARSE_BIN_OP_DECL (R1, operator -, M, S, API); \ + SPARSE_BIN_OP_DECL (R2, operator *, M, S, API); \ SPARSE_BIN_OP_DECL (R2, operator /, M, S, API); -#define SPARSE_SMS_CMP_OP_DECLS(M, S, API) \ - SPARSE_CMP_OP_DECL (mx_el_lt, M, S, API); \ - SPARSE_CMP_OP_DECL (mx_el_le, M, S, API); \ - SPARSE_CMP_OP_DECL (mx_el_ge, M, S, API); \ - SPARSE_CMP_OP_DECL (mx_el_gt, M, S, API); \ - SPARSE_CMP_OP_DECL (mx_el_eq, M, S, API); \ +#define SPARSE_SMS_CMP_OP_DECLS(M, S, API) \ + SPARSE_CMP_OP_DECL (mx_el_lt, M, S, API); \ + SPARSE_CMP_OP_DECL (mx_el_le, M, S, API); \ + SPARSE_CMP_OP_DECL (mx_el_ge, M, S, API); \ + SPARSE_CMP_OP_DECL (mx_el_gt, M, S, API); \ + SPARSE_CMP_OP_DECL (mx_el_eq, M, S, API); \ SPARSE_CMP_OP_DECL (mx_el_ne, M, S, API); -#define SPARSE_SMS_EQNE_OP_DECLS(M, S, API) \ - SPARSE_CMP_OP_DECL (mx_el_eq, M, S, API); \ +#define SPARSE_SMS_EQNE_OP_DECLS(M, S, API) \ + SPARSE_CMP_OP_DECL (mx_el_eq, M, S, API); \ SPARSE_CMP_OP_DECL (mx_el_ne, M, S, API); -#define SPARSE_SMS_BOOL_OP_DECLS(M, S, API) \ - SPARSE_BOOL_OP_DECL (mx_el_and, M, S, API); \ +#define SPARSE_SMS_BOOL_OP_DECLS(M, S, API) \ + SPARSE_BOOL_OP_DECL (mx_el_and, M, S, API); \ SPARSE_BOOL_OP_DECL (mx_el_or, M, S, API); -#define SPARSE_SMS_OP_DECLS(R1, R2, M, S, API) \ - SPARSE_SMS_BIN_OP_DECLS (R1, R2, M, S, API) \ - SPARSE_SMS_CMP_OP_DECLS (M, S, API) \ +#define SPARSE_SMS_OP_DECLS(R1, R2, M, S, API) \ + SPARSE_SMS_BIN_OP_DECLS (R1, R2, M, S, API) \ + SPARSE_SMS_CMP_OP_DECLS (M, S, API) \ SPARSE_SMS_BOOL_OP_DECLS (M, S, API) // scalar by sparse matrix operations. 
-#define SPARSE_SSM_BIN_OP_DECLS(R1, R2, S, M, API) \ - SPARSE_BIN_OP_DECL (R1, operator +, S, M, API); \ - SPARSE_BIN_OP_DECL (R1, operator -, S, M, API); \ - SPARSE_BIN_OP_DECL (R2, operator *, S, M, API); \ +#define SPARSE_SSM_BIN_OP_DECLS(R1, R2, S, M, API) \ + SPARSE_BIN_OP_DECL (R1, operator +, S, M, API); \ + SPARSE_BIN_OP_DECL (R1, operator -, S, M, API); \ + SPARSE_BIN_OP_DECL (R2, operator *, S, M, API); \ SPARSE_BIN_OP_DECL (R2, operator /, S, M, API); -#define SPARSE_SSM_CMP_OP_DECLS(S, M, API) \ - SPARSE_CMP_OP_DECL (mx_el_lt, S, M, API); \ - SPARSE_CMP_OP_DECL (mx_el_le, S, M, API); \ - SPARSE_CMP_OP_DECL (mx_el_ge, S, M, API); \ - SPARSE_CMP_OP_DECL (mx_el_gt, S, M, API); \ - SPARSE_CMP_OP_DECL (mx_el_eq, S, M, API); \ +#define SPARSE_SSM_CMP_OP_DECLS(S, M, API) \ + SPARSE_CMP_OP_DECL (mx_el_lt, S, M, API); \ + SPARSE_CMP_OP_DECL (mx_el_le, S, M, API); \ + SPARSE_CMP_OP_DECL (mx_el_ge, S, M, API); \ + SPARSE_CMP_OP_DECL (mx_el_gt, S, M, API); \ + SPARSE_CMP_OP_DECL (mx_el_eq, S, M, API); \ SPARSE_CMP_OP_DECL (mx_el_ne, S, M, API); -#define SPARSE_SSM_EQNE_OP_DECLS(S, M, API) \ - SPARSE_CMP_OP_DECL (mx_el_eq, S, M, API); \ +#define SPARSE_SSM_EQNE_OP_DECLS(S, M, API) \ + SPARSE_CMP_OP_DECL (mx_el_eq, S, M, API); \ SPARSE_CMP_OP_DECL (mx_el_ne, S, M, API); -#define SPARSE_SSM_BOOL_OP_DECLS(S, M, API) \ - SPARSE_BOOL_OP_DECL (mx_el_and, S, M, API); \ - SPARSE_BOOL_OP_DECL (mx_el_or, S, M, API); \ +#define SPARSE_SSM_BOOL_OP_DECLS(S, M, API) \ + SPARSE_BOOL_OP_DECL (mx_el_and, S, M, API); \ + SPARSE_BOOL_OP_DECL (mx_el_or, S, M, API); \ -#define SPARSE_SSM_OP_DECLS(R1, R2, S, M, API) \ - SPARSE_SSM_BIN_OP_DECLS (R1, R2, S, M, API) \ - SPARSE_SSM_CMP_OP_DECLS (S, M, API) \ - SPARSE_SSM_BOOL_OP_DECLS (S, M, API) \ +#define SPARSE_SSM_OP_DECLS(R1, R2, S, M, API) \ + SPARSE_SSM_BIN_OP_DECLS (R1, R2, S, M, API) \ + SPARSE_SSM_CMP_OP_DECLS (S, M, API) \ + SPARSE_SSM_BOOL_OP_DECLS (S, M, API) \ // sparse matrix by sparse matrix operations. 
#define SPARSE_SMSM_BIN_OP_DECLS(R1, R2, M1, M2, API) \ - SPARSE_BIN_OP_DECL (R1, operator +, M1, M2, API); \ - SPARSE_BIN_OP_DECL (R1, operator -, M1, M2, API); \ - SPARSE_BIN_OP_DECL (R2, product, M1, M2, API); \ + SPARSE_BIN_OP_DECL (R1, operator +, M1, M2, API); \ + SPARSE_BIN_OP_DECL (R1, operator -, M1, M2, API); \ + SPARSE_BIN_OP_DECL (R2, product, M1, M2, API); \ SPARSE_BIN_OP_DECL (R2, quotient, M1, M2, API); -#define SPARSE_SMSM_CMP_OP_DECLS(M1, M2, API) \ - SPARSE_CMP_OP_DECL (mx_el_lt, M1, M2, API); \ - SPARSE_CMP_OP_DECL (mx_el_le, M1, M2, API); \ - SPARSE_CMP_OP_DECL (mx_el_ge, M1, M2, API); \ - SPARSE_CMP_OP_DECL (mx_el_gt, M1, M2, API); \ - SPARSE_CMP_OP_DECL (mx_el_eq, M1, M2, API); \ +#define SPARSE_SMSM_CMP_OP_DECLS(M1, M2, API) \ + SPARSE_CMP_OP_DECL (mx_el_lt, M1, M2, API); \ + SPARSE_CMP_OP_DECL (mx_el_le, M1, M2, API); \ + SPARSE_CMP_OP_DECL (mx_el_ge, M1, M2, API); \ + SPARSE_CMP_OP_DECL (mx_el_gt, M1, M2, API); \ + SPARSE_CMP_OP_DECL (mx_el_eq, M1, M2, API); \ SPARSE_CMP_OP_DECL (mx_el_ne, M1, M2, API); -#define SPARSE_SMSM_EQNE_OP_DECLS(M1, M2, API) \ - SPARSE_CMP_OP_DECL (mx_el_eq, M1, M2, API); \ +#define SPARSE_SMSM_EQNE_OP_DECLS(M1, M2, API) \ + SPARSE_CMP_OP_DECL (mx_el_eq, M1, M2, API); \ SPARSE_CMP_OP_DECL (mx_el_ne, M1, M2, API); -#define SPARSE_SMSM_BOOL_OP_DECLS(M1, M2, API) \ +#define SPARSE_SMSM_BOOL_OP_DECLS(M1, M2, API) \ SPARSE_BOOL_OP_DECL (mx_el_and, M1, M2, API); \ SPARSE_BOOL_OP_DECL (mx_el_or, M1, M2, API); -#define SPARSE_SMSM_OP_DECLS(R1, R2, M1, M2, API) \ - SPARSE_SMSM_BIN_OP_DECLS (R1, R2, M1, M2, API) \ - SPARSE_SMSM_CMP_OP_DECLS (M1, M2, API) \ +#define SPARSE_SMSM_OP_DECLS(R1, R2, M1, M2, API) \ + SPARSE_SMSM_BIN_OP_DECLS (R1, R2, M1, M2, API) \ + SPARSE_SMSM_CMP_OP_DECLS (M1, M2, API) \ SPARSE_SMSM_BOOL_OP_DECLS (M1, M2, API) // matrix by sparse matrix operations. 
 #define SPARSE_MSM_BIN_OP_DECLS(R1, R2, M1, M2, API) \
-  SPARSE_BIN_OP_DECL (R1, operator +, M1, M2, API); \
-  SPARSE_BIN_OP_DECL (R1, operator -, M1, M2, API); \
-  SPARSE_BIN_OP_DECL (R2, product, M1, M2, API); \
+  SPARSE_BIN_OP_DECL (R1, operator +, M1, M2, API);   \
+  SPARSE_BIN_OP_DECL (R1, operator -, M1, M2, API);   \
+  SPARSE_BIN_OP_DECL (R2, product, M1, M2, API);      \
   SPARSE_BIN_OP_DECL (R2, quotient, M1, M2, API);
 
-#define SPARSE_MSM_CMP_OP_DECLS(M1, M2, API) \
-  SPARSE_CMP_OP_DECL (mx_el_lt, M1, M2, API); \
-  SPARSE_CMP_OP_DECL (mx_el_le, M1, M2, API); \
-  SPARSE_CMP_OP_DECL (mx_el_ge, M1, M2, API); \
-  SPARSE_CMP_OP_DECL (mx_el_gt, M1, M2, API); \
-  SPARSE_CMP_OP_DECL (mx_el_eq, M1, M2, API); \
+#define SPARSE_MSM_CMP_OP_DECLS(M1, M2, API)          \
+  SPARSE_CMP_OP_DECL (mx_el_lt, M1, M2, API);         \
+  SPARSE_CMP_OP_DECL (mx_el_le, M1, M2, API);         \
+  SPARSE_CMP_OP_DECL (mx_el_ge, M1, M2, API);         \
+  SPARSE_CMP_OP_DECL (mx_el_gt, M1, M2, API);         \
+  SPARSE_CMP_OP_DECL (mx_el_eq, M1, M2, API);         \
   SPARSE_CMP_OP_DECL (mx_el_ne, M1, M2, API);
 
-#define SPARSE_MSM_EQNE_OP_DECLS(M1, M2, API) \
-  SPARSE_CMP_OP_DECL (mx_el_eq, M1, M2, API); \
+#define SPARSE_MSM_EQNE_OP_DECLS(M1, M2, API)         \
+  SPARSE_CMP_OP_DECL (mx_el_eq, M1, M2, API);         \
   SPARSE_CMP_OP_DECL (mx_el_ne, M1, M2, API);
 
-#define SPARSE_MSM_BOOL_OP_DECLS(M1, M2, API) \
+#define SPARSE_MSM_BOOL_OP_DECLS(M1, M2, API)         \
   SPARSE_BOOL_OP_DECL (mx_el_and, M1, M2, API); \
   SPARSE_BOOL_OP_DECL (mx_el_or, M1, M2, API);
 
-#define SPARSE_MSM_OP_DECLS(R1, R2, M1, M2, API) \
-  SPARSE_MSM_BIN_OP_DECLS (R1, R2, M1, M2, API) \
-  SPARSE_MSM_CMP_OP_DECLS (M1, M2, API) \
+#define SPARSE_MSM_OP_DECLS(R1, R2, M1, M2, API)      \
+  SPARSE_MSM_BIN_OP_DECLS (R1, R2, M1, M2, API)       \
+  SPARSE_MSM_CMP_OP_DECLS (M1, M2, API)               \
   SPARSE_MSM_BOOL_OP_DECLS (M1, M2, API)
 
 // sparse matrix by matrix operations.
 #define SPARSE_SMM_BIN_OP_DECLS(R1, R2, M1, M2, API) \
-  SPARSE_BIN_OP_DECL (R1, operator +, M1, M2, API); \
-  SPARSE_BIN_OP_DECL (R1, operator -, M1, M2, API); \
-  SPARSE_BIN_OP_DECL (R2, product, M1, M2, API); \
+  SPARSE_BIN_OP_DECL (R1, operator +, M1, M2, API);   \
+  SPARSE_BIN_OP_DECL (R1, operator -, M1, M2, API);   \
+  SPARSE_BIN_OP_DECL (R2, product, M1, M2, API);      \
   SPARSE_BIN_OP_DECL (R2, quotient, M1, M2, API);
 
-#define SPARSE_SMM_CMP_OP_DECLS(M1, M2, API) \
-  SPARSE_CMP_OP_DECL (mx_el_lt, M1, M2, API); \
-  SPARSE_CMP_OP_DECL (mx_el_le, M1, M2, API); \
-  SPARSE_CMP_OP_DECL (mx_el_ge, M1, M2, API); \
-  SPARSE_CMP_OP_DECL (mx_el_gt, M1, M2, API); \
-  SPARSE_CMP_OP_DECL (mx_el_eq, M1, M2, API); \
+#define SPARSE_SMM_CMP_OP_DECLS(M1, M2, API)          \
+  SPARSE_CMP_OP_DECL (mx_el_lt, M1, M2, API);         \
+  SPARSE_CMP_OP_DECL (mx_el_le, M1, M2, API);         \
+  SPARSE_CMP_OP_DECL (mx_el_ge, M1, M2, API);         \
+  SPARSE_CMP_OP_DECL (mx_el_gt, M1, M2, API);         \
+  SPARSE_CMP_OP_DECL (mx_el_eq, M1, M2, API);         \
   SPARSE_CMP_OP_DECL (mx_el_ne, M1, M2, API);
 
-#define SPARSE_SMM_EQNE_OP_DECLS(M1, M2, API) \
-  SPARSE_CMP_OP_DECL (mx_el_eq, M1, M2, API); \
+#define SPARSE_SMM_EQNE_OP_DECLS(M1, M2, API)         \
+  SPARSE_CMP_OP_DECL (mx_el_eq, M1, M2, API);         \
   SPARSE_CMP_OP_DECL (mx_el_ne, M1, M2, API);
 
-#define SPARSE_SMM_BOOL_OP_DECLS(M1, M2, API) \
+#define SPARSE_SMM_BOOL_OP_DECLS(M1, M2, API)         \
   SPARSE_BOOL_OP_DECL (mx_el_and, M1, M2, API); \
   SPARSE_BOOL_OP_DECL (mx_el_or, M1, M2, API);
 
-#define SPARSE_SMM_OP_DECLS(R1, R2, M1, M2, API) \
-  SPARSE_SMM_BIN_OP_DECLS (R1, R2, M1, M2, API) \
-  SPARSE_SMM_CMP_OP_DECLS (M1, M2, API) \
+#define SPARSE_SMM_OP_DECLS(R1, R2, M1, M2, API)      \
+  SPARSE_SMM_BIN_OP_DECLS (R1, R2, M1, M2, API)       \
+  SPARSE_SMM_CMP_OP_DECLS (M1, M2, API)               \
   SPARSE_SMM_BOOL_OP_DECLS (M1, M2, API)
 
 #endif
diff -r dd992fd74fce -r e43d83253e28 liboctave/operators/Sparse-op-defs.h
--- a/liboctave/operators/Sparse-op-defs.h Tue Jul 12 14:28:07 2016 -0400
+++ b/liboctave/operators/Sparse-op-defs.h Mon Aug 01 12:40:18 2016 -0400
@@ -33,87 +33,87 @@
 
 // sparse matrix by scalar operations.
-#define SPARSE_SMS_BIN_OP_1(R, F, OP, M, S) \ - R \ - F (const M& m, const S& s) \ - { \ - octave_idx_type nr = m.rows (); \ - octave_idx_type nc = m.cols (); \ - \ - R r (nr, nc, (0.0 OP s)); \ - \ - for (octave_idx_type j = 0; j < nc; j++) \ - for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \ - r.xelem (m.ridx (i), j) = m.data (i) OP s; \ - return r; \ +#define SPARSE_SMS_BIN_OP_1(R, F, OP, M, S) \ + R \ + F (const M& m, const S& s) \ + { \ + octave_idx_type nr = m.rows (); \ + octave_idx_type nc = m.cols (); \ + \ + R r (nr, nc, (0.0 OP s)); \ + \ + for (octave_idx_type j = 0; j < nc; j++) \ + for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \ + r.xelem (m.ridx (i), j) = m.data (i) OP s; \ + return r; \ } -#define SPARSE_SMS_BIN_OP_2(R, F, OP, M, S) \ - R \ - F (const M& m, const S& s) \ - { \ - octave_idx_type nr = m.rows (); \ - octave_idx_type nc = m.cols (); \ - octave_idx_type nz = m.nnz (); \ - \ - R r (nr, nc, nz); \ - \ - for (octave_idx_type i = 0; i < nz; i++) \ - { \ - r.xdata (i) = m.data (i) OP s; \ - r.xridx (i) = m.ridx (i); \ - } \ - for (octave_idx_type i = 0; i < nc + 1; i++) \ - r.xcidx (i) = m.cidx (i); \ - \ - r.maybe_compress (true); \ - return r; \ +#define SPARSE_SMS_BIN_OP_2(R, F, OP, M, S) \ + R \ + F (const M& m, const S& s) \ + { \ + octave_idx_type nr = m.rows (); \ + octave_idx_type nc = m.cols (); \ + octave_idx_type nz = m.nnz (); \ + \ + R r (nr, nc, nz); \ + \ + for (octave_idx_type i = 0; i < nz; i++) \ + { \ + r.xdata (i) = m.data (i) OP s; \ + r.xridx (i) = m.ridx (i); \ + } \ + for (octave_idx_type i = 0; i < nc + 1; i++) \ + r.xcidx (i) = m.cidx (i); \ + \ + r.maybe_compress (true); \ + return r; \ } -#define SPARSE_SMS_BIN_OPS(R1, R2, M, S) \ +#define SPARSE_SMS_BIN_OPS(R1, R2, M, S) \ SPARSE_SMS_BIN_OP_1 (R1, operator +, +, M, S) \ SPARSE_SMS_BIN_OP_1 (R1, operator -, -, M, S) \ SPARSE_SMS_BIN_OP_2 (R2, operator *, *, M, S) \ SPARSE_SMS_BIN_OP_2 (R2, operator /, /, M, S) -#define SPARSE_SMS_CMP_OP(F, OP, M, MZ, MC, S, SZ, SC) \ - SparseBoolMatrix \ - F (const M& m, const S& s) \ - { \ - octave_idx_type nr = m.rows (); \ - octave_idx_type nc = m.cols (); \ - SparseBoolMatrix r; \ - \ - if (MC (MZ) OP SC (s)) \ - { \ - r = SparseBoolMatrix (nr, nc, true); \ - for (octave_idx_type j = 0; j < nc; j++) \ - for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \ - if (! (MC (m.data (i)) OP SC (s))) \ - r.data (m.ridx (i) + j * nr) = false; \ - r.maybe_compress (true); \ - } \ - else \ - { \ - r = SparseBoolMatrix (nr, nc, m.nnz ()); \ - r.cidx (0) = static_cast (0); \ - octave_idx_type nel = 0; \ - for (octave_idx_type j = 0; j < nc; j++) \ - { \ +#define SPARSE_SMS_CMP_OP(F, OP, M, MZ, MC, S, SZ, SC) \ + SparseBoolMatrix \ + F (const M& m, const S& s) \ + { \ + octave_idx_type nr = m.rows (); \ + octave_idx_type nc = m.cols (); \ + SparseBoolMatrix r; \ + \ + if (MC (MZ) OP SC (s)) \ + { \ + r = SparseBoolMatrix (nr, nc, true); \ + for (octave_idx_type j = 0; j < nc; j++) \ + for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \ + if (! 
(MC (m.data (i)) OP SC (s))) \ + r.data (m.ridx (i) + j * nr) = false; \ + r.maybe_compress (true); \ + } \ + else \ + { \ + r = SparseBoolMatrix (nr, nc, m.nnz ()); \ + r.cidx (0) = static_cast (0); \ + octave_idx_type nel = 0; \ + for (octave_idx_type j = 0; j < nc; j++) \ + { \ for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \ - if (MC (m.data (i)) OP SC (s)) \ - { \ - r.ridx (nel) = m.ridx (i); \ - r.data (nel++) = true; \ - } \ - r.cidx (j + 1) = nel; \ - } \ - r.maybe_compress (false); \ - } \ - return r; \ + if (MC (m.data (i)) OP SC (s)) \ + { \ + r.ridx (nel) = m.ridx (i); \ + r.data (nel++) = true; \ + } \ + r.cidx (j + 1) = nel; \ + } \ + r.maybe_compress (false); \ + } \ + return r; \ } -#define SPARSE_SMS_CMP_OPS(M, MZ, CM, S, SZ, CS) \ +#define SPARSE_SMS_CMP_OPS(M, MZ, CM, S, SZ, CS) \ SPARSE_SMS_CMP_OP (mx_el_lt, <, M, MZ, , S, SZ, ) \ SPARSE_SMS_CMP_OP (mx_el_le, <=, M, MZ, , S, SZ, ) \ SPARSE_SMS_CMP_OP (mx_el_ge, >=, M, MZ, , S, SZ, ) \ @@ -121,141 +121,141 @@ SPARSE_SMS_CMP_OP (mx_el_eq, ==, M, MZ, , S, SZ, ) \ SPARSE_SMS_CMP_OP (mx_el_ne, !=, M, MZ, , S, SZ, ) -#define SPARSE_SMS_EQNE_OPS(M, MZ, CM, S, SZ, CS) \ +#define SPARSE_SMS_EQNE_OPS(M, MZ, CM, S, SZ, CS) \ SPARSE_SMS_CMP_OP (mx_el_eq, ==, M, MZ, , S, SZ, ) \ SPARSE_SMS_CMP_OP (mx_el_ne, !=, M, MZ, , S, SZ, ) -#define SPARSE_SMS_BOOL_OP(F, OP, M, S, LHS_ZERO, RHS_ZERO) \ - SparseBoolMatrix \ - F (const M& m, const S& s) \ - { \ - octave_idx_type nr = m.rows (); \ - octave_idx_type nc = m.cols (); \ - SparseBoolMatrix r; \ - \ - if (nr > 0 && nc > 0) \ - { \ - if (LHS_ZERO OP (s != RHS_ZERO)) \ - { \ - r = SparseBoolMatrix (nr, nc, true); \ - for (octave_idx_type j = 0; j < nc; j++) \ +#define SPARSE_SMS_BOOL_OP(F, OP, M, S, LHS_ZERO, RHS_ZERO) \ + SparseBoolMatrix \ + F (const M& m, const S& s) \ + { \ + octave_idx_type nr = m.rows (); \ + octave_idx_type nc = m.cols (); \ + SparseBoolMatrix r; \ + \ + if (nr > 0 && nc > 0) \ + { \ + if (LHS_ZERO OP (s != RHS_ZERO)) \ + { \ + r = SparseBoolMatrix (nr, nc, true); \ + for (octave_idx_type j = 0; j < nc; j++) \ for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \ - if (! ((m.data (i) != LHS_ZERO) OP (s != RHS_ZERO))) \ - r.data (m.ridx (i) + j * nr) = false; \ - r.maybe_compress (true); \ - } \ - else \ - { \ - r = SparseBoolMatrix (nr, nc, m.nnz ()); \ - r.cidx (0) = static_cast (0); \ - octave_idx_type nel = 0; \ - for (octave_idx_type j = 0; j < nc; j++) \ - { \ + if (! 
((m.data (i) != LHS_ZERO) OP (s != RHS_ZERO))) \ + r.data (m.ridx (i) + j * nr) = false; \ + r.maybe_compress (true); \ + } \ + else \ + { \ + r = SparseBoolMatrix (nr, nc, m.nnz ()); \ + r.cidx (0) = static_cast (0); \ + octave_idx_type nel = 0; \ + for (octave_idx_type j = 0; j < nc; j++) \ + { \ for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \ - if ((m.data (i) != LHS_ZERO) OP (s != RHS_ZERO)) \ - { \ - r.ridx (nel) = m.ridx (i); \ - r.data (nel++) = true; \ - } \ - r.cidx (j + 1) = nel; \ - } \ - r.maybe_compress (false); \ - } \ - } \ - return r; \ + if ((m.data (i) != LHS_ZERO) OP (s != RHS_ZERO)) \ + { \ + r.ridx (nel) = m.ridx (i); \ + r.data (nel++) = true; \ + } \ + r.cidx (j + 1) = nel; \ + } \ + r.maybe_compress (false); \ + } \ + } \ + return r; \ } -#define SPARSE_SMS_BOOL_OPS2(M, S, LHS_ZERO, RHS_ZERO) \ - SPARSE_SMS_BOOL_OP (mx_el_and, &&, M, S, LHS_ZERO, RHS_ZERO) \ +#define SPARSE_SMS_BOOL_OPS2(M, S, LHS_ZERO, RHS_ZERO) \ + SPARSE_SMS_BOOL_OP (mx_el_and, &&, M, S, LHS_ZERO, RHS_ZERO) \ SPARSE_SMS_BOOL_OP (mx_el_or, ||, M, S, LHS_ZERO, RHS_ZERO) -#define SPARSE_SMS_BOOL_OPS(M, S, ZERO) \ +#define SPARSE_SMS_BOOL_OPS(M, S, ZERO) \ SPARSE_SMS_BOOL_OPS2(M, S, ZERO, ZERO) // scalar by sparse matrix operations. -#define SPARSE_SSM_BIN_OP_1(R, F, OP, S, M) \ - R \ - F (const S& s, const M& m) \ - { \ - octave_idx_type nr = m.rows (); \ - octave_idx_type nc = m.cols (); \ - \ - R r (nr, nc, (s OP 0.0)); \ - \ - for (octave_idx_type j = 0; j < nc; j++) \ - for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \ - r.xelem (m.ridx (i), j) = s OP m.data (i); \ - \ - return r; \ +#define SPARSE_SSM_BIN_OP_1(R, F, OP, S, M) \ + R \ + F (const S& s, const M& m) \ + { \ + octave_idx_type nr = m.rows (); \ + octave_idx_type nc = m.cols (); \ + \ + R r (nr, nc, (s OP 0.0)); \ + \ + for (octave_idx_type j = 0; j < nc; j++) \ + for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \ + r.xelem (m.ridx (i), j) = s OP m.data (i); \ + \ + return r; \ } -#define SPARSE_SSM_BIN_OP_2(R, F, OP, S, M) \ - R \ - F (const S& s, const M& m) \ - { \ - octave_idx_type nr = m.rows (); \ - octave_idx_type nc = m.cols (); \ - octave_idx_type nz = m.nnz (); \ - \ - R r (nr, nc, nz); \ - \ - for (octave_idx_type i = 0; i < nz; i++) \ - { \ - r.xdata (i) = s OP m.data (i); \ - r.xridx (i) = m.ridx (i); \ - } \ - for (octave_idx_type i = 0; i < nc + 1; i++) \ - r.xcidx (i) = m.cidx (i); \ - \ - r.maybe_compress(true); \ - return r; \ +#define SPARSE_SSM_BIN_OP_2(R, F, OP, S, M) \ + R \ + F (const S& s, const M& m) \ + { \ + octave_idx_type nr = m.rows (); \ + octave_idx_type nc = m.cols (); \ + octave_idx_type nz = m.nnz (); \ + \ + R r (nr, nc, nz); \ + \ + for (octave_idx_type i = 0; i < nz; i++) \ + { \ + r.xdata (i) = s OP m.data (i); \ + r.xridx (i) = m.ridx (i); \ + } \ + for (octave_idx_type i = 0; i < nc + 1; i++) \ + r.xcidx (i) = m.cidx (i); \ + \ + r.maybe_compress(true); \ + return r; \ } -#define SPARSE_SSM_BIN_OPS(R1, R2, S, M) \ +#define SPARSE_SSM_BIN_OPS(R1, R2, S, M) \ SPARSE_SSM_BIN_OP_1 (R1, operator +, +, S, M) \ SPARSE_SSM_BIN_OP_1 (R1, operator -, -, S, M) \ SPARSE_SSM_BIN_OP_2 (R2, operator *, *, S, M) \ SPARSE_SSM_BIN_OP_2 (R2, operator /, /, S, M) -#define SPARSE_SSM_CMP_OP(F, OP, S, SZ, SC, M, MZ, MC) \ - SparseBoolMatrix \ - F (const S& s, const M& m) \ - { \ - octave_idx_type nr = m.rows (); \ - octave_idx_type nc = m.cols (); \ - SparseBoolMatrix r; \ - \ - if (SC (s) OP SC (MZ)) \ - { \ - r = SparseBoolMatrix (nr, nc, true); \ - for (octave_idx_type j = 0; 
j < nc; j++) \ - for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \ - if (! (SC (s) OP MC (m.data (i)))) \ - r.data (m.ridx (i) + j * nr) = false; \ - r.maybe_compress (true); \ - } \ - else \ - { \ - r = SparseBoolMatrix (nr, nc, m.nnz ()); \ - r.cidx (0) = static_cast (0); \ - octave_idx_type nel = 0; \ - for (octave_idx_type j = 0; j < nc; j++) \ - { \ +#define SPARSE_SSM_CMP_OP(F, OP, S, SZ, SC, M, MZ, MC) \ + SparseBoolMatrix \ + F (const S& s, const M& m) \ + { \ + octave_idx_type nr = m.rows (); \ + octave_idx_type nc = m.cols (); \ + SparseBoolMatrix r; \ + \ + if (SC (s) OP SC (MZ)) \ + { \ + r = SparseBoolMatrix (nr, nc, true); \ + for (octave_idx_type j = 0; j < nc; j++) \ + for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \ + if (! (SC (s) OP MC (m.data (i)))) \ + r.data (m.ridx (i) + j * nr) = false; \ + r.maybe_compress (true); \ + } \ + else \ + { \ + r = SparseBoolMatrix (nr, nc, m.nnz ()); \ + r.cidx (0) = static_cast (0); \ + octave_idx_type nel = 0; \ + for (octave_idx_type j = 0; j < nc; j++) \ + { \ for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \ - if (SC (s) OP MC (m.data (i))) \ - { \ - r.ridx (nel) = m.ridx (i); \ - r.data (nel++) = true; \ - } \ - r.cidx (j + 1) = nel; \ - } \ - r.maybe_compress (false); \ - } \ - return r; \ + if (SC (s) OP MC (m.data (i))) \ + { \ + r.ridx (nel) = m.ridx (i); \ + r.data (nel++) = true; \ + } \ + r.cidx (j + 1) = nel; \ + } \ + r.maybe_compress (false); \ + } \ + return r; \ } -#define SPARSE_SSM_CMP_OPS(S, SZ, SC, M, MZ, MC) \ +#define SPARSE_SSM_CMP_OPS(S, SZ, SC, M, MZ, MC) \ SPARSE_SSM_CMP_OP (mx_el_lt, <, S, SZ, , M, MZ, ) \ SPARSE_SSM_CMP_OP (mx_el_le, <=, S, SZ, , M, MZ, ) \ SPARSE_SSM_CMP_OP (mx_el_ge, >=, S, SZ, , M, MZ, ) \ @@ -263,1728 +263,1728 @@ SPARSE_SSM_CMP_OP (mx_el_eq, ==, S, SZ, , M, MZ, ) \ SPARSE_SSM_CMP_OP (mx_el_ne, !=, S, SZ, , M, MZ, ) -#define SPARSE_SSM_EQNE_OPS(S, SZ, SC, M, MZ, MC) \ +#define SPARSE_SSM_EQNE_OPS(S, SZ, SC, M, MZ, MC) \ SPARSE_SSM_CMP_OP (mx_el_eq, ==, S, SZ, , M, MZ, ) \ SPARSE_SSM_CMP_OP (mx_el_ne, !=, S, SZ, , M, MZ, ) -#define SPARSE_SSM_BOOL_OP(F, OP, S, M, LHS_ZERO, RHS_ZERO) \ - SparseBoolMatrix \ - F (const S& s, const M& m) \ - { \ - octave_idx_type nr = m.rows (); \ - octave_idx_type nc = m.cols (); \ - SparseBoolMatrix r; \ - \ - if (nr > 0 && nc > 0) \ - { \ - if ((s != LHS_ZERO) OP RHS_ZERO) \ - { \ - r = SparseBoolMatrix (nr, nc, true); \ - for (octave_idx_type j = 0; j < nc; j++) \ +#define SPARSE_SSM_BOOL_OP(F, OP, S, M, LHS_ZERO, RHS_ZERO) \ + SparseBoolMatrix \ + F (const S& s, const M& m) \ + { \ + octave_idx_type nr = m.rows (); \ + octave_idx_type nc = m.cols (); \ + SparseBoolMatrix r; \ + \ + if (nr > 0 && nc > 0) \ + { \ + if ((s != LHS_ZERO) OP RHS_ZERO) \ + { \ + r = SparseBoolMatrix (nr, nc, true); \ + for (octave_idx_type j = 0; j < nc; j++) \ for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \ - if (! ((s != LHS_ZERO) OP (m.data (i) != RHS_ZERO))) \ - r.data (m.ridx (i) + j * nr) = false; \ - r.maybe_compress (true); \ - } \ - else \ - { \ - r = SparseBoolMatrix (nr, nc, m.nnz ()); \ - r.cidx (0) = static_cast (0); \ - octave_idx_type nel = 0; \ - for (octave_idx_type j = 0; j < nc; j++) \ - { \ + if (! 
((s != LHS_ZERO) OP (m.data (i) != RHS_ZERO))) \ + r.data (m.ridx (i) + j * nr) = false; \ + r.maybe_compress (true); \ + } \ + else \ + { \ + r = SparseBoolMatrix (nr, nc, m.nnz ()); \ + r.cidx (0) = static_cast (0); \ + octave_idx_type nel = 0; \ + for (octave_idx_type j = 0; j < nc; j++) \ + { \ for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \ - if ((s != LHS_ZERO) OP (m.data (i) != RHS_ZERO)) \ - { \ - r.ridx (nel) = m.ridx (i); \ - r.data (nel++) = true; \ - } \ - r.cidx (j + 1) = nel; \ - } \ - r.maybe_compress (false); \ - } \ - } \ - return r; \ + if ((s != LHS_ZERO) OP (m.data (i) != RHS_ZERO)) \ + { \ + r.ridx (nel) = m.ridx (i); \ + r.data (nel++) = true; \ + } \ + r.cidx (j + 1) = nel; \ + } \ + r.maybe_compress (false); \ + } \ + } \ + return r; \ } -#define SPARSE_SSM_BOOL_OPS2(S, M, LHS_ZERO, RHS_ZERO) \ - SPARSE_SSM_BOOL_OP (mx_el_and, &&, S, M, LHS_ZERO, RHS_ZERO) \ +#define SPARSE_SSM_BOOL_OPS2(S, M, LHS_ZERO, RHS_ZERO) \ + SPARSE_SSM_BOOL_OP (mx_el_and, &&, S, M, LHS_ZERO, RHS_ZERO) \ SPARSE_SSM_BOOL_OP (mx_el_or, ||, S, M, LHS_ZERO, RHS_ZERO) -#define SPARSE_SSM_BOOL_OPS(S, M, ZERO) \ +#define SPARSE_SSM_BOOL_OPS(S, M, ZERO) \ SPARSE_SSM_BOOL_OPS2(S, M, ZERO, ZERO) // sparse matrix by sparse matrix operations. -#define SPARSE_SMSM_BIN_OP_1(R, F, OP, M1, M2) \ - R \ - F (const M1& m1, const M2& m2) \ - { \ - R r; \ - \ - octave_idx_type m1_nr = m1.rows (); \ - octave_idx_type m1_nc = m1.cols (); \ - \ - octave_idx_type m2_nr = m2.rows (); \ - octave_idx_type m2_nc = m2.cols (); \ - \ - if (m1_nr == 1 && m1_nc == 1) \ - { \ - if (m1.elem (0,0) == 0.) \ - r = OP R (m2); \ - else \ - { \ - r = R (m2_nr, m2_nc, m1.data (0) OP 0.); \ - \ - for (octave_idx_type j = 0 ; j < m2_nc ; j++) \ - { \ - octave_quit (); \ - octave_idx_type idxj = j * m2_nr; \ +#define SPARSE_SMSM_BIN_OP_1(R, F, OP, M1, M2) \ + R \ + F (const M1& m1, const M2& m2) \ + { \ + R r; \ + \ + octave_idx_type m1_nr = m1.rows (); \ + octave_idx_type m1_nc = m1.cols (); \ + \ + octave_idx_type m2_nr = m2.rows (); \ + octave_idx_type m2_nc = m2.cols (); \ + \ + if (m1_nr == 1 && m1_nc == 1) \ + { \ + if (m1.elem (0,0) == 0.) \ + r = OP R (m2); \ + else \ + { \ + r = R (m2_nr, m2_nc, m1.data (0) OP 0.); \ + \ + for (octave_idx_type j = 0 ; j < m2_nc ; j++) \ + { \ + octave_quit (); \ + octave_idx_type idxj = j * m2_nr; \ for (octave_idx_type i = m2.cidx (j) ; i < m2.cidx (j+1) ; i++) \ - { \ - octave_quit (); \ + { \ + octave_quit (); \ r.data (idxj + m2.ridx (i)) = m1.data (0) OP m2.data (i); \ - } \ - } \ - r.maybe_compress (); \ - } \ - } \ - else if (m2_nr == 1 && m2_nc == 1) \ - { \ - if (m2.elem (0,0) == 0.) \ - r = R (m1); \ - else \ - { \ - r = R (m1_nr, m1_nc, 0. OP m2.data (0)); \ - \ - for (octave_idx_type j = 0 ; j < m1_nc ; j++) \ - { \ - octave_quit (); \ - octave_idx_type idxj = j * m1_nr; \ + } \ + } \ + r.maybe_compress (); \ + } \ + } \ + else if (m2_nr == 1 && m2_nc == 1) \ + { \ + if (m2.elem (0,0) == 0.) \ + r = R (m1); \ + else \ + { \ + r = R (m1_nr, m1_nc, 0. 
OP m2.data (0)); \ + \ + for (octave_idx_type j = 0 ; j < m1_nc ; j++) \ + { \ + octave_quit (); \ + octave_idx_type idxj = j * m1_nr; \ for (octave_idx_type i = m1.cidx (j) ; i < m1.cidx (j+1) ; i++) \ - { \ - octave_quit (); \ + { \ + octave_quit (); \ r.data (idxj + m1.ridx (i)) = m1.data (i) OP m2.data (0); \ - } \ - } \ - r.maybe_compress (); \ - } \ - } \ - else if (m1_nr != m2_nr || m1_nc != m2_nc) \ - err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \ - else \ - { \ - r = R (m1_nr, m1_nc, (m1.nnz () + m2.nnz ())); \ - \ - octave_idx_type jx = 0; \ - r.cidx (0) = 0; \ - for (octave_idx_type i = 0 ; i < m1_nc ; i++) \ - { \ - octave_idx_type ja = m1.cidx (i); \ - octave_idx_type ja_max = m1.cidx (i+1); \ - bool ja_lt_max = ja < ja_max; \ - \ - octave_idx_type jb = m2.cidx (i); \ - octave_idx_type jb_max = m2.cidx (i+1); \ - bool jb_lt_max = jb < jb_max; \ - \ - while (ja_lt_max || jb_lt_max) \ - { \ - octave_quit (); \ - if ((! jb_lt_max) || \ - (ja_lt_max && (m1.ridx (ja) < m2.ridx (jb)))) \ - { \ - r.ridx (jx) = m1.ridx (ja); \ - r.data (jx) = m1.data (ja) OP 0.; \ - jx++; \ - ja++; \ - ja_lt_max= ja < ja_max; \ - } \ - else if ((! ja_lt_max) || \ - (jb_lt_max && (m2.ridx (jb) < m1.ridx (ja)))) \ - { \ - r.ridx (jx) = m2.ridx (jb); \ - r.data (jx) = 0. OP m2.data (jb); \ - jx++; \ - jb++; \ - jb_lt_max= jb < jb_max; \ - } \ - else \ - { \ - if ((m1.data (ja) OP m2.data (jb)) != 0.) \ - { \ - r.data (jx) = m1.data (ja) OP m2.data (jb); \ - r.ridx (jx) = m1.ridx (ja); \ - jx++; \ - } \ - ja++; \ - ja_lt_max= ja < ja_max; \ - jb++; \ - jb_lt_max= jb < jb_max; \ - } \ - } \ - r.cidx (i+1) = jx; \ - } \ - \ - r.maybe_compress (); \ - } \ - \ - return r; \ + } \ + } \ + r.maybe_compress (); \ + } \ + } \ + else if (m1_nr != m2_nr || m1_nc != m2_nc) \ + err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \ + else \ + { \ + r = R (m1_nr, m1_nc, (m1.nnz () + m2.nnz ())); \ + \ + octave_idx_type jx = 0; \ + r.cidx (0) = 0; \ + for (octave_idx_type i = 0 ; i < m1_nc ; i++) \ + { \ + octave_idx_type ja = m1.cidx (i); \ + octave_idx_type ja_max = m1.cidx (i+1); \ + bool ja_lt_max = ja < ja_max; \ + \ + octave_idx_type jb = m2.cidx (i); \ + octave_idx_type jb_max = m2.cidx (i+1); \ + bool jb_lt_max = jb < jb_max; \ + \ + while (ja_lt_max || jb_lt_max) \ + { \ + octave_quit (); \ + if ((! jb_lt_max) || \ + (ja_lt_max && (m1.ridx (ja) < m2.ridx (jb)))) \ + { \ + r.ridx (jx) = m1.ridx (ja); \ + r.data (jx) = m1.data (ja) OP 0.; \ + jx++; \ + ja++; \ + ja_lt_max= ja < ja_max; \ + } \ + else if ((! ja_lt_max) || \ + (jb_lt_max && (m2.ridx (jb) < m1.ridx (ja)))) \ + { \ + r.ridx (jx) = m2.ridx (jb); \ + r.data (jx) = 0. OP m2.data (jb); \ + jx++; \ + jb++; \ + jb_lt_max= jb < jb_max; \ + } \ + else \ + { \ + if ((m1.data (ja) OP m2.data (jb)) != 0.) \ + { \ + r.data (jx) = m1.data (ja) OP m2.data (jb); \ + r.ridx (jx) = m1.ridx (ja); \ + jx++; \ + } \ + ja++; \ + ja_lt_max= ja < ja_max; \ + jb++; \ + jb_lt_max= jb < jb_max; \ + } \ + } \ + r.cidx (i+1) = jx; \ + } \ + \ + r.maybe_compress (); \ + } \ + \ + return r; \ } -#define SPARSE_SMSM_BIN_OP_2(R, F, OP, M1, M2) \ - R \ - F (const M1& m1, const M2& m2) \ - { \ - R r; \ - \ - octave_idx_type m1_nr = m1.rows (); \ - octave_idx_type m1_nc = m1.cols (); \ - \ - octave_idx_type m2_nr = m2.rows (); \ - octave_idx_type m2_nc = m2.cols (); \ - \ - if (m1_nr == 1 && m1_nc == 1) \ - { \ - if (m1.elem (0,0) == 0.) 
\ - r = R (m2_nr, m2_nc); \ - else \ - { \ - r = R (m2); \ - octave_idx_type m2_nnz = m2.nnz (); \ - \ - for (octave_idx_type i = 0 ; i < m2_nnz ; i++) \ - { \ - octave_quit (); \ - r.data (i) = m1.data (0) OP r.data (i); \ - } \ - r.maybe_compress (); \ - } \ - } \ - else if (m2_nr == 1 && m2_nc == 1) \ - { \ - if (m2.elem (0,0) == 0.) \ - r = R (m1_nr, m1_nc); \ - else \ - { \ - r = R (m1); \ - octave_idx_type m1_nnz = m1.nnz (); \ - \ - for (octave_idx_type i = 0 ; i < m1_nnz ; i++) \ - { \ - octave_quit (); \ - r.data (i) = r.data (i) OP m2.data (0); \ - } \ - r.maybe_compress (); \ - } \ - } \ - else if (m1_nr != m2_nr || m1_nc != m2_nc) \ - err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \ - else \ - { \ +#define SPARSE_SMSM_BIN_OP_2(R, F, OP, M1, M2) \ + R \ + F (const M1& m1, const M2& m2) \ + { \ + R r; \ + \ + octave_idx_type m1_nr = m1.rows (); \ + octave_idx_type m1_nc = m1.cols (); \ + \ + octave_idx_type m2_nr = m2.rows (); \ + octave_idx_type m2_nc = m2.cols (); \ + \ + if (m1_nr == 1 && m1_nc == 1) \ + { \ + if (m1.elem (0,0) == 0.) \ + r = R (m2_nr, m2_nc); \ + else \ + { \ + r = R (m2); \ + octave_idx_type m2_nnz = m2.nnz (); \ + \ + for (octave_idx_type i = 0 ; i < m2_nnz ; i++) \ + { \ + octave_quit (); \ + r.data (i) = m1.data (0) OP r.data (i); \ + } \ + r.maybe_compress (); \ + } \ + } \ + else if (m2_nr == 1 && m2_nc == 1) \ + { \ + if (m2.elem (0,0) == 0.) \ + r = R (m1_nr, m1_nc); \ + else \ + { \ + r = R (m1); \ + octave_idx_type m1_nnz = m1.nnz (); \ + \ + for (octave_idx_type i = 0 ; i < m1_nnz ; i++) \ + { \ + octave_quit (); \ + r.data (i) = r.data (i) OP m2.data (0); \ + } \ + r.maybe_compress (); \ + } \ + } \ + else if (m1_nr != m2_nr || m1_nc != m2_nc) \ + err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \ + else \ + { \ r = R (m1_nr, m1_nc, (m1.nnz () > m2.nnz () ? m1.nnz () : m2.nnz ())); \ - \ - octave_idx_type jx = 0; \ - r.cidx (0) = 0; \ - for (octave_idx_type i = 0 ; i < m1_nc ; i++) \ - { \ - octave_idx_type ja = m1.cidx (i); \ - octave_idx_type ja_max = m1.cidx (i+1); \ - bool ja_lt_max = ja < ja_max; \ - \ - octave_idx_type jb = m2.cidx (i); \ - octave_idx_type jb_max = m2.cidx (i+1); \ - bool jb_lt_max = jb < jb_max; \ - \ - while (ja_lt_max || jb_lt_max) \ - { \ - octave_quit (); \ - if ((! jb_lt_max) || \ - (ja_lt_max && (m1.ridx (ja) < m2.ridx (jb)))) \ - { \ - ja++; ja_lt_max= ja < ja_max; \ - } \ - else if ((! ja_lt_max) || \ - (jb_lt_max && (m2.ridx (jb) < m1.ridx (ja)))) \ - { \ - jb++; jb_lt_max= jb < jb_max; \ - } \ - else \ - { \ - if ((m1.data (ja) OP m2.data (jb)) != 0.) \ - { \ - r.data (jx) = m1.data (ja) OP m2.data (jb); \ - r.ridx (jx) = m1.ridx (ja); \ - jx++; \ - } \ - ja++; ja_lt_max= ja < ja_max; \ - jb++; jb_lt_max= jb < jb_max; \ - } \ - } \ - r.cidx (i+1) = jx; \ - } \ - \ - r.maybe_compress (); \ - } \ - \ - return r; \ + \ + octave_idx_type jx = 0; \ + r.cidx (0) = 0; \ + for (octave_idx_type i = 0 ; i < m1_nc ; i++) \ + { \ + octave_idx_type ja = m1.cidx (i); \ + octave_idx_type ja_max = m1.cidx (i+1); \ + bool ja_lt_max = ja < ja_max; \ + \ + octave_idx_type jb = m2.cidx (i); \ + octave_idx_type jb_max = m2.cidx (i+1); \ + bool jb_lt_max = jb < jb_max; \ + \ + while (ja_lt_max || jb_lt_max) \ + { \ + octave_quit (); \ + if ((! jb_lt_max) || \ + (ja_lt_max && (m1.ridx (ja) < m2.ridx (jb)))) \ + { \ + ja++; ja_lt_max= ja < ja_max; \ + } \ + else if ((! 
ja_lt_max) || \ + (jb_lt_max && (m2.ridx (jb) < m1.ridx (ja)))) \ + { \ + jb++; jb_lt_max= jb < jb_max; \ + } \ + else \ + { \ + if ((m1.data (ja) OP m2.data (jb)) != 0.) \ + { \ + r.data (jx) = m1.data (ja) OP m2.data (jb); \ + r.ridx (jx) = m1.ridx (ja); \ + jx++; \ + } \ + ja++; ja_lt_max= ja < ja_max; \ + jb++; jb_lt_max= jb < jb_max; \ + } \ + } \ + r.cidx (i+1) = jx; \ + } \ + \ + r.maybe_compress (); \ + } \ + \ + return r; \ } -#define SPARSE_SMSM_BIN_OP_3(R, F, OP, M1, M2) \ - R \ - F (const M1& m1, const M2& m2) \ - { \ - R r; \ - \ - octave_idx_type m1_nr = m1.rows (); \ - octave_idx_type m1_nc = m1.cols (); \ - \ - octave_idx_type m2_nr = m2.rows (); \ - octave_idx_type m2_nc = m2.cols (); \ - \ - if (m1_nr == 1 && m1_nc == 1) \ - { \ - if ((m1.elem (0,0) OP Complex ()) == Complex ()) \ - { \ - octave_idx_type m2_nnz = m2.nnz (); \ - r = R (m2); \ - for (octave_idx_type i = 0 ; i < m2_nnz ; i++) \ - r.data (i) = m1.elem (0,0) OP r.data (i); \ - r.maybe_compress (); \ - } \ - else \ - { \ - r = R (m2_nr, m2_nc, m1.elem (0,0) OP Complex ()); \ - for (octave_idx_type j = 0 ; j < m2_nc ; j++) \ - { \ - octave_quit (); \ - octave_idx_type idxj = j * m2_nr; \ +#define SPARSE_SMSM_BIN_OP_3(R, F, OP, M1, M2) \ + R \ + F (const M1& m1, const M2& m2) \ + { \ + R r; \ + \ + octave_idx_type m1_nr = m1.rows (); \ + octave_idx_type m1_nc = m1.cols (); \ + \ + octave_idx_type m2_nr = m2.rows (); \ + octave_idx_type m2_nc = m2.cols (); \ + \ + if (m1_nr == 1 && m1_nc == 1) \ + { \ + if ((m1.elem (0,0) OP Complex ()) == Complex ()) \ + { \ + octave_idx_type m2_nnz = m2.nnz (); \ + r = R (m2); \ + for (octave_idx_type i = 0 ; i < m2_nnz ; i++) \ + r.data (i) = m1.elem (0,0) OP r.data (i); \ + r.maybe_compress (); \ + } \ + else \ + { \ + r = R (m2_nr, m2_nc, m1.elem (0,0) OP Complex ()); \ + for (octave_idx_type j = 0 ; j < m2_nc ; j++) \ + { \ + octave_quit (); \ + octave_idx_type idxj = j * m2_nr; \ for (octave_idx_type i = m2.cidx (j) ; i < m2.cidx (j+1) ; i++) \ - { \ - octave_quit (); \ + { \ + octave_quit (); \ r.data (idxj + m2.ridx (i)) = m1.elem (0,0) OP m2.data (i); \ - } \ - } \ - r.maybe_compress (); \ - } \ - } \ - else if (m2_nr == 1 && m2_nc == 1) \ - { \ - if ((Complex () OP m1.elem (0,0)) == Complex ()) \ - { \ - octave_idx_type m1_nnz = m1.nnz (); \ - r = R (m1); \ - for (octave_idx_type i = 0 ; i < m1_nnz ; i++) \ - r.data (i) = r.data (i) OP m2.elem (0,0); \ - r.maybe_compress (); \ - } \ - else \ - { \ - r = R (m1_nr, m1_nc, Complex () OP m2.elem (0,0)); \ - for (octave_idx_type j = 0 ; j < m1_nc ; j++) \ - { \ - octave_quit (); \ - octave_idx_type idxj = j * m1_nr; \ + } \ + } \ + r.maybe_compress (); \ + } \ + } \ + else if (m2_nr == 1 && m2_nc == 1) \ + { \ + if ((Complex () OP m1.elem (0,0)) == Complex ()) \ + { \ + octave_idx_type m1_nnz = m1.nnz (); \ + r = R (m1); \ + for (octave_idx_type i = 0 ; i < m1_nnz ; i++) \ + r.data (i) = r.data (i) OP m2.elem (0,0); \ + r.maybe_compress (); \ + } \ + else \ + { \ + r = R (m1_nr, m1_nc, Complex () OP m2.elem (0,0)); \ + for (octave_idx_type j = 0 ; j < m1_nc ; j++) \ + { \ + octave_quit (); \ + octave_idx_type idxj = j * m1_nr; \ for (octave_idx_type i = m1.cidx (j) ; i < m1.cidx (j+1) ; i++) \ - { \ - octave_quit (); \ + { \ + octave_quit (); \ r.data (idxj + m1.ridx (i)) = m1.data (i) OP m2.elem (0,0); \ - } \ - } \ - r.maybe_compress (); \ - } \ - } \ - else if (m1_nr != m2_nr || m1_nc != m2_nc) \ - err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \ - else \ - { \ - \ - /* FIXME: Kludge... 
Always double/Complex, so Complex () */ \ - r = R (m1_nr, m1_nc, (Complex () OP Complex ())); \ - \ - for (octave_idx_type i = 0 ; i < m1_nc ; i++) \ - { \ - octave_idx_type ja = m1.cidx (i); \ - octave_idx_type ja_max = m1.cidx (i+1); \ - bool ja_lt_max = ja < ja_max; \ - \ - octave_idx_type jb = m2.cidx (i); \ - octave_idx_type jb_max = m2.cidx (i+1); \ - bool jb_lt_max = jb < jb_max; \ - \ - while (ja_lt_max || jb_lt_max) \ - { \ - octave_quit (); \ - if ((! jb_lt_max) || \ - (ja_lt_max && (m1.ridx (ja) < m2.ridx (jb)))) \ - { \ - /* keep those kludges coming */ \ + } \ + } \ + r.maybe_compress (); \ + } \ + } \ + else if (m1_nr != m2_nr || m1_nc != m2_nc) \ + err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \ + else \ + { \ + \ + /* FIXME: Kludge... Always double/Complex, so Complex () */ \ + r = R (m1_nr, m1_nc, (Complex () OP Complex ())); \ + \ + for (octave_idx_type i = 0 ; i < m1_nc ; i++) \ + { \ + octave_idx_type ja = m1.cidx (i); \ + octave_idx_type ja_max = m1.cidx (i+1); \ + bool ja_lt_max = ja < ja_max; \ + \ + octave_idx_type jb = m2.cidx (i); \ + octave_idx_type jb_max = m2.cidx (i+1); \ + bool jb_lt_max = jb < jb_max; \ + \ + while (ja_lt_max || jb_lt_max) \ + { \ + octave_quit (); \ + if ((! jb_lt_max) || \ + (ja_lt_max && (m1.ridx (ja) < m2.ridx (jb)))) \ + { \ + /* keep those kludges coming */ \ r.elem (m1.ridx (ja),i) = m1.data (ja) OP Complex (); \ - ja++; \ - ja_lt_max= ja < ja_max; \ - } \ - else if ((! ja_lt_max) || \ - (jb_lt_max && (m2.ridx (jb) < m1.ridx (ja)))) \ - { \ - /* keep those kludges coming */ \ - r.elem (m2.ridx (jb),i) = Complex () OP m2.data (jb); \ - jb++; \ - jb_lt_max= jb < jb_max; \ - } \ - else \ - { \ + ja++; \ + ja_lt_max= ja < ja_max; \ + } \ + else if ((! ja_lt_max) || \ + (jb_lt_max && (m2.ridx (jb) < m1.ridx (ja)))) \ + { \ + /* keep those kludges coming */ \ + r.elem (m2.ridx (jb),i) = Complex () OP m2.data (jb); \ + jb++; \ + jb_lt_max= jb < jb_max; \ + } \ + else \ + { \ r.elem (m1.ridx (ja),i) = m1.data (ja) OP m2.data (jb); \ - ja++; \ - ja_lt_max= ja < ja_max; \ - jb++; \ - jb_lt_max= jb < jb_max; \ - } \ - } \ - } \ - r.maybe_compress (true); \ - } \ - \ - return r; \ + ja++; \ + ja_lt_max= ja < ja_max; \ + jb++; \ + jb_lt_max= jb < jb_max; \ + } \ + } \ + } \ + r.maybe_compress (true); \ + } \ + \ + return r; \ } // Note that SM ./ SM needs to take into account the NaN and Inf values // implied by the division by zero. // FIXME: Are the NaNs double(NaN) or Complex(NaN,Nan) in the complex case? -#define SPARSE_SMSM_BIN_OPS(R1, R2, M1, M2) \ - SPARSE_SMSM_BIN_OP_1 (R1, operator +, +, M1, M2) \ - SPARSE_SMSM_BIN_OP_1 (R1, operator -, -, M1, M2) \ - SPARSE_SMSM_BIN_OP_2 (R2, product, *, M1, M2) \ +#define SPARSE_SMSM_BIN_OPS(R1, R2, M1, M2) \ + SPARSE_SMSM_BIN_OP_1 (R1, operator +, +, M1, M2) \ + SPARSE_SMSM_BIN_OP_1 (R1, operator -, -, M1, M2) \ + SPARSE_SMSM_BIN_OP_2 (R2, product, *, M1, M2) \ SPARSE_SMSM_BIN_OP_3 (R2, quotient, /, M1, M2) // FIXME: this macro duplicates the bodies of the template functions // defined in the SPARSE_SSM_CMP_OP and SPARSE_SMS_CMP_OP macros. 
-#define SPARSE_SMSM_CMP_OP(F, OP, M1, Z1, C1, M2, Z2, C2) \ - SparseBoolMatrix \ - F (const M1& m1, const M2& m2) \ - { \ - SparseBoolMatrix r; \ - \ - octave_idx_type m1_nr = m1.rows (); \ - octave_idx_type m1_nc = m1.cols (); \ - \ - octave_idx_type m2_nr = m2.rows (); \ - octave_idx_type m2_nc = m2.cols (); \ - \ - if (m1_nr == 1 && m1_nc == 1) \ - { \ - if (C1 (m1.elem (0,0)) OP C2 (Z2)) \ - { \ - r = SparseBoolMatrix (m2_nr, m2_nc, true); \ - for (octave_idx_type j = 0; j < m2_nc; j++) \ +#define SPARSE_SMSM_CMP_OP(F, OP, M1, Z1, C1, M2, Z2, C2) \ + SparseBoolMatrix \ + F (const M1& m1, const M2& m2) \ + { \ + SparseBoolMatrix r; \ + \ + octave_idx_type m1_nr = m1.rows (); \ + octave_idx_type m1_nc = m1.cols (); \ + \ + octave_idx_type m2_nr = m2.rows (); \ + octave_idx_type m2_nc = m2.cols (); \ + \ + if (m1_nr == 1 && m1_nc == 1) \ + { \ + if (C1 (m1.elem (0,0)) OP C2 (Z2)) \ + { \ + r = SparseBoolMatrix (m2_nr, m2_nc, true); \ + for (octave_idx_type j = 0; j < m2_nc; j++) \ for (octave_idx_type i = m2.cidx (j); i < m2.cidx (j+1); i++) \ - if (! (C1 (m1.elem (0,0)) OP C2 (m2.data (i)))) \ - r.data (m2.ridx (i) + j * m2_nr) = false; \ - r.maybe_compress (true); \ - } \ - else \ - { \ - r = SparseBoolMatrix (m2_nr, m2_nc, m2.nnz ()); \ - r.cidx (0) = static_cast (0); \ - octave_idx_type nel = 0; \ - for (octave_idx_type j = 0; j < m2_nc; j++) \ - { \ + if (! (C1 (m1.elem (0,0)) OP C2 (m2.data (i)))) \ + r.data (m2.ridx (i) + j * m2_nr) = false; \ + r.maybe_compress (true); \ + } \ + else \ + { \ + r = SparseBoolMatrix (m2_nr, m2_nc, m2.nnz ()); \ + r.cidx (0) = static_cast (0); \ + octave_idx_type nel = 0; \ + for (octave_idx_type j = 0; j < m2_nc; j++) \ + { \ for (octave_idx_type i = m2.cidx (j); i < m2.cidx (j+1); i++) \ - if (C1 (m1.elem (0,0)) OP C2 (m2.data (i))) \ - { \ - r.ridx (nel) = m2.ridx (i); \ - r.data (nel++) = true; \ - } \ - r.cidx (j + 1) = nel; \ - } \ - r.maybe_compress (false); \ - } \ - } \ - else if (m2_nr == 1 && m2_nc == 1) \ - { \ - if (C1 (Z1) OP C2 (m2.elem (0,0))) \ - { \ - r = SparseBoolMatrix (m1_nr, m1_nc, true); \ - for (octave_idx_type j = 0; j < m1_nc; j++) \ + if (C1 (m1.elem (0,0)) OP C2 (m2.data (i))) \ + { \ + r.ridx (nel) = m2.ridx (i); \ + r.data (nel++) = true; \ + } \ + r.cidx (j + 1) = nel; \ + } \ + r.maybe_compress (false); \ + } \ + } \ + else if (m2_nr == 1 && m2_nc == 1) \ + { \ + if (C1 (Z1) OP C2 (m2.elem (0,0))) \ + { \ + r = SparseBoolMatrix (m1_nr, m1_nc, true); \ + for (octave_idx_type j = 0; j < m1_nc; j++) \ for (octave_idx_type i = m1.cidx (j); i < m1.cidx (j+1); i++) \ - if (! (C1 (m1.data (i)) OP C2 (m2.elem (0,0)))) \ - r.data (m1.ridx (i) + j * m1_nr) = false; \ - r.maybe_compress (true); \ - } \ - else \ - { \ - r = SparseBoolMatrix (m1_nr, m1_nc, m1.nnz ()); \ - r.cidx (0) = static_cast (0); \ - octave_idx_type nel = 0; \ - for (octave_idx_type j = 0; j < m1_nc; j++) \ - { \ + if (! 
(C1 (m1.data (i)) OP C2 (m2.elem (0,0)))) \ + r.data (m1.ridx (i) + j * m1_nr) = false; \ + r.maybe_compress (true); \ + } \ + else \ + { \ + r = SparseBoolMatrix (m1_nr, m1_nc, m1.nnz ()); \ + r.cidx (0) = static_cast (0); \ + octave_idx_type nel = 0; \ + for (octave_idx_type j = 0; j < m1_nc; j++) \ + { \ for (octave_idx_type i = m1.cidx (j); i < m1.cidx (j+1); i++) \ - if (C1 (m1.data (i)) OP C2 (m2.elem (0,0))) \ - { \ - r.ridx (nel) = m1.ridx (i); \ - r.data (nel++) = true; \ - } \ - r.cidx (j + 1) = nel; \ - } \ - r.maybe_compress (false); \ - } \ - } \ - else if (m1_nr == m2_nr && m1_nc == m2_nc) \ - { \ - if (m1_nr != 0 || m1_nc != 0) \ - { \ - if (C1 (Z1) OP C2 (Z2)) \ - { \ - r = SparseBoolMatrix (m1_nr, m1_nc, true); \ - for (octave_idx_type j = 0; j < m1_nc; j++) \ - { \ - octave_idx_type i1 = m1.cidx (j); \ - octave_idx_type e1 = m1.cidx (j+1); \ - octave_idx_type i2 = m2.cidx (j); \ - octave_idx_type e2 = m2.cidx (j+1); \ - while (i1 < e1 || i2 < e2) \ - { \ - if (i1 == e1 || (i2 < e2 && m1.ridx (i1) > m2.ridx (i2))) \ - { \ - if (! (C1 (Z1) OP C2 (m2.data (i2)))) \ - r.data (m2.ridx (i2) + j * m1_nr) = false; \ - i2++; \ - } \ - else if (i2 == e2 || m1.ridx (i1) < m2.ridx (i2)) \ - { \ - if (! (C1 (m1.data (i1)) OP C2 (Z2))) \ - r.data (m1.ridx (i1) + j * m1_nr) = false; \ - i1++; \ - } \ - else \ - { \ - if (! (C1 (m1.data (i1)) OP C2 (m2.data (i2)))) \ - r.data (m1.ridx (i1) + j * m1_nr) = false; \ - i1++; \ - i2++; \ - } \ - } \ - } \ - r.maybe_compress (true); \ - } \ - else \ - { \ + if (C1 (m1.data (i)) OP C2 (m2.elem (0,0))) \ + { \ + r.ridx (nel) = m1.ridx (i); \ + r.data (nel++) = true; \ + } \ + r.cidx (j + 1) = nel; \ + } \ + r.maybe_compress (false); \ + } \ + } \ + else if (m1_nr == m2_nr && m1_nc == m2_nc) \ + { \ + if (m1_nr != 0 || m1_nc != 0) \ + { \ + if (C1 (Z1) OP C2 (Z2)) \ + { \ + r = SparseBoolMatrix (m1_nr, m1_nc, true); \ + for (octave_idx_type j = 0; j < m1_nc; j++) \ + { \ + octave_idx_type i1 = m1.cidx (j); \ + octave_idx_type e1 = m1.cidx (j+1); \ + octave_idx_type i2 = m2.cidx (j); \ + octave_idx_type e2 = m2.cidx (j+1); \ + while (i1 < e1 || i2 < e2) \ + { \ + if (i1 == e1 || (i2 < e2 && m1.ridx (i1) > m2.ridx (i2))) \ + { \ + if (! (C1 (Z1) OP C2 (m2.data (i2)))) \ + r.data (m2.ridx (i2) + j * m1_nr) = false; \ + i2++; \ + } \ + else if (i2 == e2 || m1.ridx (i1) < m2.ridx (i2)) \ + { \ + if (! (C1 (m1.data (i1)) OP C2 (Z2))) \ + r.data (m1.ridx (i1) + j * m1_nr) = false; \ + i1++; \ + } \ + else \ + { \ + if (! 
(C1 (m1.data (i1)) OP C2 (m2.data (i2)))) \ + r.data (m1.ridx (i1) + j * m1_nr) = false; \ + i1++; \ + i2++; \ + } \ + } \ + } \ + r.maybe_compress (true); \ + } \ + else \ + { \ r = SparseBoolMatrix (m1_nr, m1_nc, m1.nnz () + m2.nnz ()); \ - r.cidx (0) = static_cast (0); \ - octave_idx_type nel = 0; \ - for (octave_idx_type j = 0; j < m1_nc; j++) \ - { \ - octave_idx_type i1 = m1.cidx (j); \ - octave_idx_type e1 = m1.cidx (j+1); \ - octave_idx_type i2 = m2.cidx (j); \ - octave_idx_type e2 = m2.cidx (j+1); \ - while (i1 < e1 || i2 < e2) \ - { \ - if (i1 == e1 || (i2 < e2 && m1.ridx (i1) > m2.ridx (i2))) \ - { \ - if (C1 (Z1) OP C2 (m2.data (i2))) \ - { \ - r.ridx (nel) = m2.ridx (i2); \ - r.data (nel++) = true; \ - } \ - i2++; \ - } \ - else if (i2 == e2 || m1.ridx (i1) < m2.ridx (i2)) \ - { \ - if (C1 (m1.data (i1)) OP C2 (Z2)) \ - { \ - r.ridx (nel) = m1.ridx (i1); \ - r.data (nel++) = true; \ - } \ - i1++; \ - } \ - else \ - { \ - if (C1 (m1.data (i1)) OP C2 (m2.data (i2))) \ - { \ - r.ridx (nel) = m1.ridx (i1); \ - r.data (nel++) = true; \ - } \ - i1++; \ - i2++; \ - } \ - } \ - r.cidx (j + 1) = nel; \ - } \ - r.maybe_compress (false); \ - } \ - } \ - } \ - else \ - { \ - if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0)) \ - err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \ - } \ - return r; \ + r.cidx (0) = static_cast (0); \ + octave_idx_type nel = 0; \ + for (octave_idx_type j = 0; j < m1_nc; j++) \ + { \ + octave_idx_type i1 = m1.cidx (j); \ + octave_idx_type e1 = m1.cidx (j+1); \ + octave_idx_type i2 = m2.cidx (j); \ + octave_idx_type e2 = m2.cidx (j+1); \ + while (i1 < e1 || i2 < e2) \ + { \ + if (i1 == e1 || (i2 < e2 && m1.ridx (i1) > m2.ridx (i2))) \ + { \ + if (C1 (Z1) OP C2 (m2.data (i2))) \ + { \ + r.ridx (nel) = m2.ridx (i2); \ + r.data (nel++) = true; \ + } \ + i2++; \ + } \ + else if (i2 == e2 || m1.ridx (i1) < m2.ridx (i2)) \ + { \ + if (C1 (m1.data (i1)) OP C2 (Z2)) \ + { \ + r.ridx (nel) = m1.ridx (i1); \ + r.data (nel++) = true; \ + } \ + i1++; \ + } \ + else \ + { \ + if (C1 (m1.data (i1)) OP C2 (m2.data (i2))) \ + { \ + r.ridx (nel) = m1.ridx (i1); \ + r.data (nel++) = true; \ + } \ + i1++; \ + i2++; \ + } \ + } \ + r.cidx (j + 1) = nel; \ + } \ + r.maybe_compress (false); \ + } \ + } \ + } \ + else \ + { \ + if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0)) \ + err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \ + } \ + return r; \ } -#define SPARSE_SMSM_CMP_OPS(M1, Z1, C1, M2, Z2, C2) \ - SPARSE_SMSM_CMP_OP (mx_el_lt, <, M1, Z1, , M2, Z2, ) \ - SPARSE_SMSM_CMP_OP (mx_el_le, <=, M1, Z1, , M2, Z2, ) \ - SPARSE_SMSM_CMP_OP (mx_el_ge, >=, M1, Z1, , M2, Z2, ) \ - SPARSE_SMSM_CMP_OP (mx_el_gt, >, M1, Z1, , M2, Z2, ) \ - SPARSE_SMSM_CMP_OP (mx_el_eq, ==, M1, Z1, , M2, Z2, ) \ +#define SPARSE_SMSM_CMP_OPS(M1, Z1, C1, M2, Z2, C2) \ + SPARSE_SMSM_CMP_OP (mx_el_lt, <, M1, Z1, , M2, Z2, ) \ + SPARSE_SMSM_CMP_OP (mx_el_le, <=, M1, Z1, , M2, Z2, ) \ + SPARSE_SMSM_CMP_OP (mx_el_ge, >=, M1, Z1, , M2, Z2, ) \ + SPARSE_SMSM_CMP_OP (mx_el_gt, >, M1, Z1, , M2, Z2, ) \ + SPARSE_SMSM_CMP_OP (mx_el_eq, ==, M1, Z1, , M2, Z2, ) \ SPARSE_SMSM_CMP_OP (mx_el_ne, !=, M1, Z1, , M2, Z2, ) -#define SPARSE_SMSM_EQNE_OPS(M1, Z1, C1, M2, Z2, C2) \ - SPARSE_SMSM_CMP_OP (mx_el_eq, ==, M1, Z1, , M2, Z2, ) \ +#define SPARSE_SMSM_EQNE_OPS(M1, Z1, C1, M2, Z2, C2) \ + SPARSE_SMSM_CMP_OP (mx_el_eq, ==, M1, Z1, , M2, Z2, ) \ SPARSE_SMSM_CMP_OP (mx_el_ne, !=, M1, Z1, , M2, Z2, ) // FIXME: this macro duplicates the bodies of the template functions // defined in the 
SPARSE_SSM_BOOL_OP and SPARSE_SMS_BOOL_OP macros. -#define SPARSE_SMSM_BOOL_OP(F, OP, M1, M2, LHS_ZERO, RHS_ZERO) \ - SparseBoolMatrix \ - F (const M1& m1, const M2& m2) \ - { \ - SparseBoolMatrix r; \ - \ - octave_idx_type m1_nr = m1.rows (); \ - octave_idx_type m1_nc = m1.cols (); \ - \ - octave_idx_type m2_nr = m2.rows (); \ - octave_idx_type m2_nc = m2.cols (); \ - \ - if (m1_nr == 1 && m1_nc == 1) \ - { \ - if (m2_nr > 0 && m2_nc > 0) \ - { \ - if ((m1.elem (0,0) != LHS_ZERO) OP RHS_ZERO) \ - { \ - r = SparseBoolMatrix (m2_nr, m2_nc, true); \ - for (octave_idx_type j = 0; j < m2_nc; j++) \ +#define SPARSE_SMSM_BOOL_OP(F, OP, M1, M2, LHS_ZERO, RHS_ZERO) \ + SparseBoolMatrix \ + F (const M1& m1, const M2& m2) \ + { \ + SparseBoolMatrix r; \ + \ + octave_idx_type m1_nr = m1.rows (); \ + octave_idx_type m1_nc = m1.cols (); \ + \ + octave_idx_type m2_nr = m2.rows (); \ + octave_idx_type m2_nc = m2.cols (); \ + \ + if (m1_nr == 1 && m1_nc == 1) \ + { \ + if (m2_nr > 0 && m2_nc > 0) \ + { \ + if ((m1.elem (0,0) != LHS_ZERO) OP RHS_ZERO) \ + { \ + r = SparseBoolMatrix (m2_nr, m2_nc, true); \ + for (octave_idx_type j = 0; j < m2_nc; j++) \ for (octave_idx_type i = m2.cidx (j); i < m2.cidx (j+1); i++) \ if (! ((m1.elem (0,0) != LHS_ZERO) OP (m2.data (i) != RHS_ZERO))) \ - r.data (m2.ridx (i) + j * m2_nr) = false; \ - r.maybe_compress (true); \ - } \ - else \ - { \ - r = SparseBoolMatrix (m2_nr, m2_nc, m2.nnz ()); \ - r.cidx (0) = static_cast (0); \ - octave_idx_type nel = 0; \ - for (octave_idx_type j = 0; j < m2_nc; j++) \ - { \ + r.data (m2.ridx (i) + j * m2_nr) = false; \ + r.maybe_compress (true); \ + } \ + else \ + { \ + r = SparseBoolMatrix (m2_nr, m2_nc, m2.nnz ()); \ + r.cidx (0) = static_cast (0); \ + octave_idx_type nel = 0; \ + for (octave_idx_type j = 0; j < m2_nc; j++) \ + { \ for (octave_idx_type i = m2.cidx (j); i < m2.cidx (j+1); i++) \ if ((m1.elem (0,0) != LHS_ZERO) OP (m2.data (i) != RHS_ZERO)) \ - { \ - r.ridx (nel) = m2.ridx (i); \ - r.data (nel++) = true; \ - } \ - r.cidx (j + 1) = nel; \ - } \ - r.maybe_compress (false); \ - } \ - } \ - } \ - else if (m2_nr == 1 && m2_nc == 1) \ - { \ - if (m1_nr > 0 && m1_nc > 0) \ - { \ - if (LHS_ZERO OP (m2.elem (0,0) != RHS_ZERO)) \ - { \ - r = SparseBoolMatrix (m1_nr, m1_nc, true); \ - for (octave_idx_type j = 0; j < m1_nc; j++) \ + { \ + r.ridx (nel) = m2.ridx (i); \ + r.data (nel++) = true; \ + } \ + r.cidx (j + 1) = nel; \ + } \ + r.maybe_compress (false); \ + } \ + } \ + } \ + else if (m2_nr == 1 && m2_nc == 1) \ + { \ + if (m1_nr > 0 && m1_nc > 0) \ + { \ + if (LHS_ZERO OP (m2.elem (0,0) != RHS_ZERO)) \ + { \ + r = SparseBoolMatrix (m1_nr, m1_nc, true); \ + for (octave_idx_type j = 0; j < m1_nc; j++) \ for (octave_idx_type i = m1.cidx (j); i < m1.cidx (j+1); i++) \ if (! 
((m1.data (i) != LHS_ZERO) OP (m2.elem (0,0) != RHS_ZERO))) \ - r.data (m1.ridx (i) + j * m1_nr) = false; \ - r.maybe_compress (true); \ - } \ - else \ - { \ - r = SparseBoolMatrix (m1_nr, m1_nc, m1.nnz ()); \ - r.cidx (0) = static_cast (0); \ - octave_idx_type nel = 0; \ - for (octave_idx_type j = 0; j < m1_nc; j++) \ - { \ + r.data (m1.ridx (i) + j * m1_nr) = false; \ + r.maybe_compress (true); \ + } \ + else \ + { \ + r = SparseBoolMatrix (m1_nr, m1_nc, m1.nnz ()); \ + r.cidx (0) = static_cast (0); \ + octave_idx_type nel = 0; \ + for (octave_idx_type j = 0; j < m1_nc; j++) \ + { \ for (octave_idx_type i = m1.cidx (j); i < m1.cidx (j+1); i++) \ if ((m1.data (i) != LHS_ZERO) OP (m2.elem (0,0) != RHS_ZERO)) \ - { \ - r.ridx (nel) = m1.ridx (i); \ - r.data (nel++) = true; \ - } \ - r.cidx (j + 1) = nel; \ - } \ - r.maybe_compress (false); \ - } \ - } \ - } \ - else if (m1_nr == m2_nr && m1_nc == m2_nc) \ - { \ - if (m1_nr != 0 || m1_nc != 0) \ - { \ + { \ + r.ridx (nel) = m1.ridx (i); \ + r.data (nel++) = true; \ + } \ + r.cidx (j + 1) = nel; \ + } \ + r.maybe_compress (false); \ + } \ + } \ + } \ + else if (m1_nr == m2_nr && m1_nc == m2_nc) \ + { \ + if (m1_nr != 0 || m1_nc != 0) \ + { \ r = SparseBoolMatrix (m1_nr, m1_nc, m1.nnz () + m2.nnz ()); \ - r.cidx (0) = static_cast (0); \ - octave_idx_type nel = 0; \ - for (octave_idx_type j = 0; j < m1_nc; j++) \ - { \ - octave_idx_type i1 = m1.cidx (j); \ - octave_idx_type e1 = m1.cidx (j+1); \ - octave_idx_type i2 = m2.cidx (j); \ - octave_idx_type e2 = m2.cidx (j+1); \ - while (i1 < e1 || i2 < e2) \ - { \ + r.cidx (0) = static_cast (0); \ + octave_idx_type nel = 0; \ + for (octave_idx_type j = 0; j < m1_nc; j++) \ + { \ + octave_idx_type i1 = m1.cidx (j); \ + octave_idx_type e1 = m1.cidx (j+1); \ + octave_idx_type i2 = m2.cidx (j); \ + octave_idx_type e2 = m2.cidx (j+1); \ + while (i1 < e1 || i2 < e2) \ + { \ if (i1 == e1 || (i2 < e2 && m1.ridx (i1) > m2.ridx (i2))) \ - { \ - if (LHS_ZERO OP m2.data (i2) != RHS_ZERO) \ - { \ - r.ridx (nel) = m2.ridx (i2); \ - r.data (nel++) = true; \ - } \ - i2++; \ - } \ - else if (i2 == e2 || m1.ridx (i1) < m2.ridx (i2)) \ - { \ - if (m1.data (i1) != LHS_ZERO OP RHS_ZERO) \ - { \ - r.ridx (nel) = m1.ridx (i1); \ - r.data (nel++) = true; \ - } \ - i1++; \ - } \ - else \ - { \ + { \ + if (LHS_ZERO OP m2.data (i2) != RHS_ZERO) \ + { \ + r.ridx (nel) = m2.ridx (i2); \ + r.data (nel++) = true; \ + } \ + i2++; \ + } \ + else if (i2 == e2 || m1.ridx (i1) < m2.ridx (i2)) \ + { \ + if (m1.data (i1) != LHS_ZERO OP RHS_ZERO) \ + { \ + r.ridx (nel) = m1.ridx (i1); \ + r.data (nel++) = true; \ + } \ + i1++; \ + } \ + else \ + { \ if (m1.data (i1) != LHS_ZERO OP m2.data (i2) != RHS_ZERO) \ - { \ - r.ridx (nel) = m1.ridx (i1); \ - r.data (nel++) = true; \ - } \ - i1++; \ - i2++; \ - } \ - } \ - r.cidx (j + 1) = nel; \ - } \ - r.maybe_compress (false); \ - } \ - } \ - else \ - { \ - if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0)) \ - err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \ - } \ - return r; \ + { \ + r.ridx (nel) = m1.ridx (i1); \ + r.data (nel++) = true; \ + } \ + i1++; \ + i2++; \ + } \ + } \ + r.cidx (j + 1) = nel; \ + } \ + r.maybe_compress (false); \ + } \ + } \ + else \ + { \ + if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0)) \ + err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \ + } \ + return r; \ } -#define SPARSE_SMSM_BOOL_OPS2(M1, M2, LHS_ZERO, RHS_ZERO) \ - SPARSE_SMSM_BOOL_OP (mx_el_and, &&, M1, M2, LHS_ZERO, RHS_ZERO) \ +#define SPARSE_SMSM_BOOL_OPS2(M1, M2, 
LHS_ZERO, RHS_ZERO) \ + SPARSE_SMSM_BOOL_OP (mx_el_and, &&, M1, M2, LHS_ZERO, RHS_ZERO) \ SPARSE_SMSM_BOOL_OP (mx_el_or, ||, M1, M2, LHS_ZERO, RHS_ZERO) -#define SPARSE_SMSM_BOOL_OPS(M1, M2, ZERO) \ +#define SPARSE_SMSM_BOOL_OPS(M1, M2, ZERO) \ SPARSE_SMSM_BOOL_OPS2(M1, M2, ZERO, ZERO) // matrix by sparse matrix operations. -#define SPARSE_MSM_BIN_OP_1(R, F, OP, M1, M2) \ - R \ - F (const M1& m1, const M2& m2) \ - { \ - R r; \ - \ - octave_idx_type m1_nr = m1.rows (); \ - octave_idx_type m1_nc = m1.cols (); \ - \ - octave_idx_type m2_nr = m2.rows (); \ - octave_idx_type m2_nc = m2.cols (); \ - \ - if (m2_nr == 1 && m2_nc == 1) \ - r = R (m1 OP m2.elem (0,0)); \ - else if (m1_nr != m2_nr || m1_nc != m2_nc) \ - err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \ - else \ - { \ - r = R (F (m1, m2.matrix_value ())); \ - } \ - return r; \ +#define SPARSE_MSM_BIN_OP_1(R, F, OP, M1, M2) \ + R \ + F (const M1& m1, const M2& m2) \ + { \ + R r; \ + \ + octave_idx_type m1_nr = m1.rows (); \ + octave_idx_type m1_nc = m1.cols (); \ + \ + octave_idx_type m2_nr = m2.rows (); \ + octave_idx_type m2_nc = m2.cols (); \ + \ + if (m2_nr == 1 && m2_nc == 1) \ + r = R (m1 OP m2.elem (0,0)); \ + else if (m1_nr != m2_nr || m1_nc != m2_nc) \ + err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \ + else \ + { \ + r = R (F (m1, m2.matrix_value ())); \ + } \ + return r; \ } -#define SPARSE_MSM_BIN_OP_2(R, F, OP, M1, M2) \ - R \ - F (const M1& m1, const M2& m2) \ - { \ - R r; \ - \ - octave_idx_type m1_nr = m1.rows (); \ - octave_idx_type m1_nc = m1.cols (); \ - \ - octave_idx_type m2_nr = m2.rows (); \ - octave_idx_type m2_nc = m2.cols (); \ - \ - if (m2_nr == 1 && m2_nc == 1) \ - r = R (m1 OP m2.elem (0,0)); \ - else if (m1_nr != m2_nr || m1_nc != m2_nc) \ - err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \ - else \ - { \ - if (do_mx_check (m1, mx_inline_all_finite)) \ - { \ - /* Sparsity pattern is preserved. */ \ - octave_idx_type m2_nz = m2.nnz (); \ - r = R (m2_nr, m2_nc, m2_nz); \ - for (octave_idx_type j = 0, k = 0; j < m2_nc; j++) \ - { \ - octave_quit (); \ +#define SPARSE_MSM_BIN_OP_2(R, F, OP, M1, M2) \ + R \ + F (const M1& m1, const M2& m2) \ + { \ + R r; \ + \ + octave_idx_type m1_nr = m1.rows (); \ + octave_idx_type m1_nc = m1.cols (); \ + \ + octave_idx_type m2_nr = m2.rows (); \ + octave_idx_type m2_nc = m2.cols (); \ + \ + if (m2_nr == 1 && m2_nc == 1) \ + r = R (m1 OP m2.elem (0,0)); \ + else if (m1_nr != m2_nr || m1_nc != m2_nc) \ + err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \ + else \ + { \ + if (do_mx_check (m1, mx_inline_all_finite)) \ + { \ + /* Sparsity pattern is preserved. 
*/ \ + octave_idx_type m2_nz = m2.nnz (); \ + r = R (m2_nr, m2_nc, m2_nz); \ + for (octave_idx_type j = 0, k = 0; j < m2_nc; j++) \ + { \ + octave_quit (); \ for (octave_idx_type i = m2.cidx (j); i < m2.cidx (j+1); i++) \ - { \ - octave_idx_type mri = m2.ridx (i); \ - R::element_type x = m1(mri, j) OP m2.data (i); \ - if (x != 0.0) \ - { \ - r.xdata (k) = x; \ - r.xridx (k) = m2.ridx (i); \ - k++; \ - } \ - } \ - r.xcidx (j+1) = k; \ - } \ - r.maybe_compress (false); \ - return r; \ - } \ - else \ - r = R (F (m1, m2.matrix_value ())); \ - } \ - \ - return r; \ + { \ + octave_idx_type mri = m2.ridx (i); \ + R::element_type x = m1(mri, j) OP m2.data (i); \ + if (x != 0.0) \ + { \ + r.xdata (k) = x; \ + r.xridx (k) = m2.ridx (i); \ + k++; \ + } \ + } \ + r.xcidx (j+1) = k; \ + } \ + r.maybe_compress (false); \ + return r; \ + } \ + else \ + r = R (F (m1, m2.matrix_value ())); \ + } \ + \ + return r; \ } // FIXME: Pass a specific ZERO value -#define SPARSE_MSM_BIN_OPS(R1, R2, M1, M2) \ - SPARSE_MSM_BIN_OP_1 (R1, operator +, +, M1, M2) \ - SPARSE_MSM_BIN_OP_1 (R1, operator -, -, M1, M2) \ - SPARSE_MSM_BIN_OP_2 (R2, product, *, M1, M2) \ +#define SPARSE_MSM_BIN_OPS(R1, R2, M1, M2) \ + SPARSE_MSM_BIN_OP_1 (R1, operator +, +, M1, M2) \ + SPARSE_MSM_BIN_OP_1 (R1, operator -, -, M1, M2) \ + SPARSE_MSM_BIN_OP_2 (R2, product, *, M1, M2) \ SPARSE_MSM_BIN_OP_1 (R2, quotient, /, M1, M2) -#define SPARSE_MSM_CMP_OP(F, OP, M1, C1, M2, C2) \ - SparseBoolMatrix \ - F (const M1& m1, const M2& m2) \ - { \ - SparseBoolMatrix r; \ - \ - octave_idx_type m1_nr = m1.rows (); \ - octave_idx_type m1_nc = m1.cols (); \ - \ - octave_idx_type m2_nr = m2.rows (); \ - octave_idx_type m2_nc = m2.cols (); \ - \ - if (m2_nr == 1 && m2_nc == 1) \ - r = SparseBoolMatrix (F (m1, m2.elem (0,0))); \ - else if (m1_nr == m2_nr && m1_nc == m2_nc) \ - { \ - if (m1_nr != 0 || m1_nc != 0) \ - { \ - /* Count num of nonzero elements */ \ - octave_idx_type nel = 0; \ - for (octave_idx_type j = 0; j < m1_nc; j++) \ - for (octave_idx_type i = 0; i < m1_nr; i++) \ - if (C1 (m1.elem (i, j)) OP C2 (m2.elem (i, j))) \ - nel++; \ - \ - r = SparseBoolMatrix (m1_nr, m1_nc, nel); \ - \ - octave_idx_type ii = 0; \ - r.cidx (0) = 0; \ - for (octave_idx_type j = 0; j < m1_nc; j++) \ - { \ - for (octave_idx_type i = 0; i < m1_nr; i++) \ - { \ +#define SPARSE_MSM_CMP_OP(F, OP, M1, C1, M2, C2) \ + SparseBoolMatrix \ + F (const M1& m1, const M2& m2) \ + { \ + SparseBoolMatrix r; \ + \ + octave_idx_type m1_nr = m1.rows (); \ + octave_idx_type m1_nc = m1.cols (); \ + \ + octave_idx_type m2_nr = m2.rows (); \ + octave_idx_type m2_nc = m2.cols (); \ + \ + if (m2_nr == 1 && m2_nc == 1) \ + r = SparseBoolMatrix (F (m1, m2.elem (0,0))); \ + else if (m1_nr == m2_nr && m1_nc == m2_nc) \ + { \ + if (m1_nr != 0 || m1_nc != 0) \ + { \ + /* Count num of nonzero elements */ \ + octave_idx_type nel = 0; \ + for (octave_idx_type j = 0; j < m1_nc; j++) \ + for (octave_idx_type i = 0; i < m1_nr; i++) \ + if (C1 (m1.elem (i, j)) OP C2 (m2.elem (i, j))) \ + nel++; \ + \ + r = SparseBoolMatrix (m1_nr, m1_nc, nel); \ + \ + octave_idx_type ii = 0; \ + r.cidx (0) = 0; \ + for (octave_idx_type j = 0; j < m1_nc; j++) \ + { \ + for (octave_idx_type i = 0; i < m1_nr; i++) \ + { \ bool el = C1 (m1.elem (i, j)) OP C2 (m2.elem (i, j)); \ - if (el) \ - { \ - r.data (ii) = el; \ - r.ridx (ii++) = i; \ - } \ - } \ - r.cidx (j+1) = ii; \ - } \ - } \ - } \ - else \ - { \ - if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0)) \ - err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \ 
- } \ - return r; \ + if (el) \ + { \ + r.data (ii) = el; \ + r.ridx (ii++) = i; \ + } \ + } \ + r.cidx (j+1) = ii; \ + } \ + } \ + } \ + else \ + { \ + if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0)) \ + err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \ + } \ + return r; \ } -#define SPARSE_MSM_CMP_OPS(M1, Z1, C1, M2, Z2, C2) \ - SPARSE_MSM_CMP_OP (mx_el_lt, <, M1, , M2, ) \ - SPARSE_MSM_CMP_OP (mx_el_le, <=, M1, , M2, ) \ - SPARSE_MSM_CMP_OP (mx_el_ge, >=, M1, , M2, ) \ - SPARSE_MSM_CMP_OP (mx_el_gt, >, M1, , M2, ) \ - SPARSE_MSM_CMP_OP (mx_el_eq, ==, M1, , M2, ) \ +#define SPARSE_MSM_CMP_OPS(M1, Z1, C1, M2, Z2, C2) \ + SPARSE_MSM_CMP_OP (mx_el_lt, <, M1, , M2, ) \ + SPARSE_MSM_CMP_OP (mx_el_le, <=, M1, , M2, ) \ + SPARSE_MSM_CMP_OP (mx_el_ge, >=, M1, , M2, ) \ + SPARSE_MSM_CMP_OP (mx_el_gt, >, M1, , M2, ) \ + SPARSE_MSM_CMP_OP (mx_el_eq, ==, M1, , M2, ) \ SPARSE_MSM_CMP_OP (mx_el_ne, !=, M1, , M2, ) -#define SPARSE_MSM_EQNE_OPS(M1, Z1, C1, M2, Z2, C2) \ - SPARSE_MSM_CMP_OP (mx_el_eq, ==, M1, , M2, ) \ +#define SPARSE_MSM_EQNE_OPS(M1, Z1, C1, M2, Z2, C2) \ + SPARSE_MSM_CMP_OP (mx_el_eq, ==, M1, , M2, ) \ SPARSE_MSM_CMP_OP (mx_el_ne, !=, M1, , M2, ) -#define SPARSE_MSM_BOOL_OP(F, OP, M1, M2, LHS_ZERO, RHS_ZERO) \ - SparseBoolMatrix \ - F (const M1& m1, const M2& m2) \ - { \ - SparseBoolMatrix r; \ - \ - octave_idx_type m1_nr = m1.rows (); \ - octave_idx_type m1_nc = m1.cols (); \ - \ - octave_idx_type m2_nr = m2.rows (); \ - octave_idx_type m2_nc = m2.cols (); \ - \ - if (m2_nr == 1 && m2_nc == 1) \ - r = SparseBoolMatrix (F (m1, m2.elem (0,0))); \ - else if (m1_nr == m2_nr && m1_nc == m2_nc) \ - { \ - if (m1_nr != 0 || m1_nc != 0) \ - { \ - /* Count num of nonzero elements */ \ - octave_idx_type nel = 0; \ - for (octave_idx_type j = 0; j < m1_nc; j++) \ - for (octave_idx_type i = 0; i < m1_nr; i++) \ - if ((m1.elem (i, j) != LHS_ZERO) \ - OP (m2.elem (i, j) != RHS_ZERO)) \ - nel++; \ - \ - r = SparseBoolMatrix (m1_nr, m1_nc, nel); \ - \ - octave_idx_type ii = 0; \ - r.cidx (0) = 0; \ - for (octave_idx_type j = 0; j < m1_nc; j++) \ - { \ - for (octave_idx_type i = 0; i < m1_nr; i++) \ - { \ - bool el = (m1.elem (i, j) != LHS_ZERO) \ - OP (m2.elem (i, j) != RHS_ZERO); \ - if (el) \ - { \ - r.data (ii) = el; \ - r.ridx (ii++) = i; \ - } \ - } \ - r.cidx (j+1) = ii; \ - } \ - } \ - } \ - else \ - { \ - if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0)) \ - err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \ - } \ - return r; \ +#define SPARSE_MSM_BOOL_OP(F, OP, M1, M2, LHS_ZERO, RHS_ZERO) \ + SparseBoolMatrix \ + F (const M1& m1, const M2& m2) \ + { \ + SparseBoolMatrix r; \ + \ + octave_idx_type m1_nr = m1.rows (); \ + octave_idx_type m1_nc = m1.cols (); \ + \ + octave_idx_type m2_nr = m2.rows (); \ + octave_idx_type m2_nc = m2.cols (); \ + \ + if (m2_nr == 1 && m2_nc == 1) \ + r = SparseBoolMatrix (F (m1, m2.elem (0,0))); \ + else if (m1_nr == m2_nr && m1_nc == m2_nc) \ + { \ + if (m1_nr != 0 || m1_nc != 0) \ + { \ + /* Count num of nonzero elements */ \ + octave_idx_type nel = 0; \ + for (octave_idx_type j = 0; j < m1_nc; j++) \ + for (octave_idx_type i = 0; i < m1_nr; i++) \ + if ((m1.elem (i, j) != LHS_ZERO) \ + OP (m2.elem (i, j) != RHS_ZERO)) \ + nel++; \ + \ + r = SparseBoolMatrix (m1_nr, m1_nc, nel); \ + \ + octave_idx_type ii = 0; \ + r.cidx (0) = 0; \ + for (octave_idx_type j = 0; j < m1_nc; j++) \ + { \ + for (octave_idx_type i = 0; i < m1_nr; i++) \ + { \ + bool el = (m1.elem (i, j) != LHS_ZERO) \ + OP (m2.elem (i, j) != RHS_ZERO); \ + if (el) \ + 
{ \ + r.data (ii) = el; \ + r.ridx (ii++) = i; \ + } \ + } \ + r.cidx (j+1) = ii; \ + } \ + } \ + } \ + else \ + { \ + if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0)) \ + err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \ + } \ + return r; \ } -#define SPARSE_MSM_BOOL_OPS2(M1, M2, LHS_ZERO, RHS_ZERO) \ - SPARSE_MSM_BOOL_OP (mx_el_and, &&, M1, M2, LHS_ZERO, RHS_ZERO) \ +#define SPARSE_MSM_BOOL_OPS2(M1, M2, LHS_ZERO, RHS_ZERO) \ + SPARSE_MSM_BOOL_OP (mx_el_and, &&, M1, M2, LHS_ZERO, RHS_ZERO) \ SPARSE_MSM_BOOL_OP (mx_el_or, ||, M1, M2, LHS_ZERO, RHS_ZERO) -#define SPARSE_MSM_BOOL_OPS(M1, M2, ZERO) \ +#define SPARSE_MSM_BOOL_OPS(M1, M2, ZERO) \ SPARSE_MSM_BOOL_OPS2(M1, M2, ZERO, ZERO) // sparse matrix by matrix operations. -#define SPARSE_SMM_BIN_OP_1(R, F, OP, M1, M2) \ - R \ - F (const M1& m1, const M2& m2) \ - { \ - R r; \ - \ - octave_idx_type m1_nr = m1.rows (); \ - octave_idx_type m1_nc = m1.cols (); \ - \ - octave_idx_type m2_nr = m2.rows (); \ - octave_idx_type m2_nc = m2.cols (); \ - \ - if (m1_nr == 1 && m1_nc == 1) \ - r = R (m1.elem (0,0) OP m2); \ - else if (m1_nr != m2_nr || m1_nc != m2_nc) \ - err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \ - else \ - { \ - r = R (m1.matrix_value () OP m2); \ - } \ - return r; \ +#define SPARSE_SMM_BIN_OP_1(R, F, OP, M1, M2) \ + R \ + F (const M1& m1, const M2& m2) \ + { \ + R r; \ + \ + octave_idx_type m1_nr = m1.rows (); \ + octave_idx_type m1_nc = m1.cols (); \ + \ + octave_idx_type m2_nr = m2.rows (); \ + octave_idx_type m2_nc = m2.cols (); \ + \ + if (m1_nr == 1 && m1_nc == 1) \ + r = R (m1.elem (0,0) OP m2); \ + else if (m1_nr != m2_nr || m1_nc != m2_nc) \ + err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \ + else \ + { \ + r = R (m1.matrix_value () OP m2); \ + } \ + return r; \ } // sm .* m preserves sparsity if m contains no Infs nor Nans. -#define SPARSE_SMM_BIN_OP_2_CHECK_product(ET) \ +#define SPARSE_SMM_BIN_OP_2_CHECK_product(ET) \ do_mx_check (m2, mx_inline_all_finite) // sm ./ m preserves sparsity if m contains no NaNs or zeros. -#define SPARSE_SMM_BIN_OP_2_CHECK_quotient(ET) \ +#define SPARSE_SMM_BIN_OP_2_CHECK_quotient(ET) \ ! do_mx_check (m2, mx_inline_any_nan) && m2.nnz () == m2.numel () -#define SPARSE_SMM_BIN_OP_2(R, F, OP, M1, M2) \ - R \ - F (const M1& m1, const M2& m2) \ - { \ - R r; \ - \ - octave_idx_type m1_nr = m1.rows (); \ - octave_idx_type m1_nc = m1.cols (); \ - \ - octave_idx_type m2_nr = m2.rows (); \ - octave_idx_type m2_nc = m2.cols (); \ - \ - if (m1_nr == 1 && m1_nc == 1) \ - r = R (m1.elem (0,0) OP m2); \ - else if (m1_nr != m2_nr || m1_nc != m2_nc) \ - err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \ - else \ - { \ - if (SPARSE_SMM_BIN_OP_2_CHECK_ ## F(M2::element_type)) \ - { \ - /* Sparsity pattern is preserved. */ \ - octave_idx_type m1_nz = m1.nnz (); \ - r = R (m1_nr, m1_nc, m1_nz); \ - for (octave_idx_type j = 0, k = 0; j < m1_nc; j++) \ - { \ - octave_quit (); \ +#define SPARSE_SMM_BIN_OP_2(R, F, OP, M1, M2) \ + R \ + F (const M1& m1, const M2& m2) \ + { \ + R r; \ + \ + octave_idx_type m1_nr = m1.rows (); \ + octave_idx_type m1_nc = m1.cols (); \ + \ + octave_idx_type m2_nr = m2.rows (); \ + octave_idx_type m2_nc = m2.cols (); \ + \ + if (m1_nr == 1 && m1_nc == 1) \ + r = R (m1.elem (0,0) OP m2); \ + else if (m1_nr != m2_nr || m1_nc != m2_nc) \ + err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \ + else \ + { \ + if (SPARSE_SMM_BIN_OP_2_CHECK_ ## F(M2::element_type)) \ + { \ + /* Sparsity pattern is preserved. 
*/ \ + octave_idx_type m1_nz = m1.nnz (); \ + r = R (m1_nr, m1_nc, m1_nz); \ + for (octave_idx_type j = 0, k = 0; j < m1_nc; j++) \ + { \ + octave_quit (); \ for (octave_idx_type i = m1.cidx (j); i < m1.cidx (j+1); i++) \ - { \ - octave_idx_type mri = m1.ridx (i); \ - R::element_type x = m1.data (i) OP m2 (mri, j); \ - if (x != 0.0) \ - { \ - r.xdata (k) = x; \ - r.xridx (k) = m1.ridx (i); \ - k++; \ - } \ - } \ - r.xcidx (j+1) = k; \ - } \ - r.maybe_compress (false); \ - return r; \ - } \ - else \ - r = R (F (m1.matrix_value (), m2)); \ - } \ - \ - return r; \ + { \ + octave_idx_type mri = m1.ridx (i); \ + R::element_type x = m1.data (i) OP m2 (mri, j); \ + if (x != 0.0) \ + { \ + r.xdata (k) = x; \ + r.xridx (k) = m1.ridx (i); \ + k++; \ + } \ + } \ + r.xcidx (j+1) = k; \ + } \ + r.maybe_compress (false); \ + return r; \ + } \ + else \ + r = R (F (m1.matrix_value (), m2)); \ + } \ + \ + return r; \ } -#define SPARSE_SMM_BIN_OPS(R1, R2, M1, M2) \ - SPARSE_SMM_BIN_OP_1 (R1, operator +, +, M1, M2) \ - SPARSE_SMM_BIN_OP_1 (R1, operator -, -, M1, M2) \ - SPARSE_SMM_BIN_OP_2 (R2, product, *, M1, M2) \ +#define SPARSE_SMM_BIN_OPS(R1, R2, M1, M2) \ + SPARSE_SMM_BIN_OP_1 (R1, operator +, +, M1, M2) \ + SPARSE_SMM_BIN_OP_1 (R1, operator -, -, M1, M2) \ + SPARSE_SMM_BIN_OP_2 (R2, product, *, M1, M2) \ SPARSE_SMM_BIN_OP_2 (R2, quotient, /, M1, M2) -#define SPARSE_SMM_CMP_OP(F, OP, M1, C1, M2, C2) \ - SparseBoolMatrix \ - F (const M1& m1, const M2& m2) \ - { \ - SparseBoolMatrix r; \ - \ - octave_idx_type m1_nr = m1.rows (); \ - octave_idx_type m1_nc = m1.cols (); \ - \ - octave_idx_type m2_nr = m2.rows (); \ - octave_idx_type m2_nc = m2.cols (); \ - \ - if (m1_nr == 1 && m1_nc == 1) \ - r = SparseBoolMatrix (F (m1.elem (0,0), m2)); \ - else if (m1_nr == m2_nr && m1_nc == m2_nc) \ - { \ - if (m1_nr != 0 || m1_nc != 0) \ - { \ - /* Count num of nonzero elements */ \ - octave_idx_type nel = 0; \ - for (octave_idx_type j = 0; j < m1_nc; j++) \ - for (octave_idx_type i = 0; i < m1_nr; i++) \ - if (C1 (m1.elem (i, j)) OP C2 (m2.elem (i, j))) \ - nel++; \ - \ - r = SparseBoolMatrix (m1_nr, m1_nc, nel); \ - \ - octave_idx_type ii = 0; \ - r.cidx (0) = 0; \ - for (octave_idx_type j = 0; j < m1_nc; j++) \ - { \ - for (octave_idx_type i = 0; i < m1_nr; i++) \ - { \ +#define SPARSE_SMM_CMP_OP(F, OP, M1, C1, M2, C2) \ + SparseBoolMatrix \ + F (const M1& m1, const M2& m2) \ + { \ + SparseBoolMatrix r; \ + \ + octave_idx_type m1_nr = m1.rows (); \ + octave_idx_type m1_nc = m1.cols (); \ + \ + octave_idx_type m2_nr = m2.rows (); \ + octave_idx_type m2_nc = m2.cols (); \ + \ + if (m1_nr == 1 && m1_nc == 1) \ + r = SparseBoolMatrix (F (m1.elem (0,0), m2)); \ + else if (m1_nr == m2_nr && m1_nc == m2_nc) \ + { \ + if (m1_nr != 0 || m1_nc != 0) \ + { \ + /* Count num of nonzero elements */ \ + octave_idx_type nel = 0; \ + for (octave_idx_type j = 0; j < m1_nc; j++) \ + for (octave_idx_type i = 0; i < m1_nr; i++) \ + if (C1 (m1.elem (i, j)) OP C2 (m2.elem (i, j))) \ + nel++; \ + \ + r = SparseBoolMatrix (m1_nr, m1_nc, nel); \ + \ + octave_idx_type ii = 0; \ + r.cidx (0) = 0; \ + for (octave_idx_type j = 0; j < m1_nc; j++) \ + { \ + for (octave_idx_type i = 0; i < m1_nr; i++) \ + { \ bool el = C1 (m1.elem (i, j)) OP C2 (m2.elem (i, j)); \ - if (el) \ - { \ - r.data (ii) = el; \ - r.ridx (ii++) = i; \ - } \ - } \ - r.cidx (j+1) = ii; \ - } \ - } \ - } \ - else \ - { \ - if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0)) \ - err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \ - } \ - return r; \ + if (el) \ + { 
\ + r.data (ii) = el; \ + r.ridx (ii++) = i; \ + } \ + } \ + r.cidx (j+1) = ii; \ + } \ + } \ + } \ + else \ + { \ + if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0)) \ + err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \ + } \ + return r; \ } -#define SPARSE_SMM_CMP_OPS(M1, Z1, C1, M2, Z2, C2) \ - SPARSE_SMM_CMP_OP (mx_el_lt, <, M1, , M2, ) \ - SPARSE_SMM_CMP_OP (mx_el_le, <=, M1, , M2, ) \ - SPARSE_SMM_CMP_OP (mx_el_ge, >=, M1, , M2, ) \ - SPARSE_SMM_CMP_OP (mx_el_gt, >, M1, , M2, ) \ - SPARSE_SMM_CMP_OP (mx_el_eq, ==, M1, , M2, ) \ +#define SPARSE_SMM_CMP_OPS(M1, Z1, C1, M2, Z2, C2) \ + SPARSE_SMM_CMP_OP (mx_el_lt, <, M1, , M2, ) \ + SPARSE_SMM_CMP_OP (mx_el_le, <=, M1, , M2, ) \ + SPARSE_SMM_CMP_OP (mx_el_ge, >=, M1, , M2, ) \ + SPARSE_SMM_CMP_OP (mx_el_gt, >, M1, , M2, ) \ + SPARSE_SMM_CMP_OP (mx_el_eq, ==, M1, , M2, ) \ SPARSE_SMM_CMP_OP (mx_el_ne, !=, M1, , M2, ) -#define SPARSE_SMM_EQNE_OPS(M1, Z1, C1, M2, Z2, C2) \ - SPARSE_SMM_CMP_OP (mx_el_eq, ==, M1, , M2, ) \ +#define SPARSE_SMM_EQNE_OPS(M1, Z1, C1, M2, Z2, C2) \ + SPARSE_SMM_CMP_OP (mx_el_eq, ==, M1, , M2, ) \ SPARSE_SMM_CMP_OP (mx_el_ne, !=, M1, , M2, ) -#define SPARSE_SMM_BOOL_OP(F, OP, M1, M2, LHS_ZERO, RHS_ZERO) \ - SparseBoolMatrix \ - F (const M1& m1, const M2& m2) \ - { \ - SparseBoolMatrix r; \ - \ - octave_idx_type m1_nr = m1.rows (); \ - octave_idx_type m1_nc = m1.cols (); \ - \ - octave_idx_type m2_nr = m2.rows (); \ - octave_idx_type m2_nc = m2.cols (); \ - \ - if (m1_nr == 1 && m1_nc == 1) \ - r = SparseBoolMatrix (F (m1.elem (0,0), m2)); \ - else if (m1_nr == m2_nr && m1_nc == m2_nc) \ - { \ - if (m1_nr != 0 || m1_nc != 0) \ - { \ - /* Count num of nonzero elements */ \ - octave_idx_type nel = 0; \ - for (octave_idx_type j = 0; j < m1_nc; j++) \ - for (octave_idx_type i = 0; i < m1_nr; i++) \ - if ((m1.elem (i, j) != LHS_ZERO) \ - OP (m2.elem (i, j) != RHS_ZERO)) \ - nel++; \ - \ - r = SparseBoolMatrix (m1_nr, m1_nc, nel); \ - \ - octave_idx_type ii = 0; \ - r.cidx (0) = 0; \ - for (octave_idx_type j = 0; j < m1_nc; j++) \ - { \ - for (octave_idx_type i = 0; i < m1_nr; i++) \ - { \ - bool el = (m1.elem (i, j) != LHS_ZERO) \ - OP (m2.elem (i, j) != RHS_ZERO); \ - if (el) \ - { \ - r.data (ii) = el; \ - r.ridx (ii++) = i; \ - } \ - } \ - r.cidx (j+1) = ii; \ - } \ - } \ - } \ - else \ - { \ - if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0)) \ - err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \ - } \ - return r; \ +#define SPARSE_SMM_BOOL_OP(F, OP, M1, M2, LHS_ZERO, RHS_ZERO) \ + SparseBoolMatrix \ + F (const M1& m1, const M2& m2) \ + { \ + SparseBoolMatrix r; \ + \ + octave_idx_type m1_nr = m1.rows (); \ + octave_idx_type m1_nc = m1.cols (); \ + \ + octave_idx_type m2_nr = m2.rows (); \ + octave_idx_type m2_nc = m2.cols (); \ + \ + if (m1_nr == 1 && m1_nc == 1) \ + r = SparseBoolMatrix (F (m1.elem (0,0), m2)); \ + else if (m1_nr == m2_nr && m1_nc == m2_nc) \ + { \ + if (m1_nr != 0 || m1_nc != 0) \ + { \ + /* Count num of nonzero elements */ \ + octave_idx_type nel = 0; \ + for (octave_idx_type j = 0; j < m1_nc; j++) \ + for (octave_idx_type i = 0; i < m1_nr; i++) \ + if ((m1.elem (i, j) != LHS_ZERO) \ + OP (m2.elem (i, j) != RHS_ZERO)) \ + nel++; \ + \ + r = SparseBoolMatrix (m1_nr, m1_nc, nel); \ + \ + octave_idx_type ii = 0; \ + r.cidx (0) = 0; \ + for (octave_idx_type j = 0; j < m1_nc; j++) \ + { \ + for (octave_idx_type i = 0; i < m1_nr; i++) \ + { \ + bool el = (m1.elem (i, j) != LHS_ZERO) \ + OP (m2.elem (i, j) != RHS_ZERO); \ + if (el) \ + { \ + r.data (ii) = el; \ + r.ridx 
(ii++) = i; \ + } \ + } \ + r.cidx (j+1) = ii; \ + } \ + } \ + } \ + else \ + { \ + if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0)) \ + err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \ + } \ + return r; \ } -#define SPARSE_SMM_BOOL_OPS2(M1, M2, LHS_ZERO, RHS_ZERO) \ - SPARSE_SMM_BOOL_OP (mx_el_and, &&, M1, M2, LHS_ZERO, RHS_ZERO) \ +#define SPARSE_SMM_BOOL_OPS2(M1, M2, LHS_ZERO, RHS_ZERO) \ + SPARSE_SMM_BOOL_OP (mx_el_and, &&, M1, M2, LHS_ZERO, RHS_ZERO) \ SPARSE_SMM_BOOL_OP (mx_el_or, ||, M1, M2, LHS_ZERO, RHS_ZERO) -#define SPARSE_SMM_BOOL_OPS(M1, M2, ZERO) \ +#define SPARSE_SMM_BOOL_OPS(M1, M2, ZERO) \ SPARSE_SMM_BOOL_OPS2(M1, M2, ZERO, ZERO) // Avoid some code duplication. Maybe we should use templates. -#define SPARSE_CUMSUM(RET_TYPE, ELT_TYPE, FCN) \ - \ - octave_idx_type nr = rows (); \ - octave_idx_type nc = cols (); \ - \ - RET_TYPE retval; \ - \ - if (nr > 0 && nc > 0) \ - { \ - if ((nr == 1 && dim == -1) || dim == 1) \ - /* Ugly!! Is there a better way? */ \ - retval = transpose (). FCN (0) .transpose (); \ - else \ - { \ - octave_idx_type nel = 0; \ - for (octave_idx_type i = 0; i < nc; i++) \ - { \ - ELT_TYPE t = ELT_TYPE (); \ +#define SPARSE_CUMSUM(RET_TYPE, ELT_TYPE, FCN) \ + \ + octave_idx_type nr = rows (); \ + octave_idx_type nc = cols (); \ + \ + RET_TYPE retval; \ + \ + if (nr > 0 && nc > 0) \ + { \ + if ((nr == 1 && dim == -1) || dim == 1) \ + /* Ugly!! Is there a better way? */ \ + retval = transpose (). FCN (0) .transpose (); \ + else \ + { \ + octave_idx_type nel = 0; \ + for (octave_idx_type i = 0; i < nc; i++) \ + { \ + ELT_TYPE t = ELT_TYPE (); \ for (octave_idx_type j = cidx (i); j < cidx (i+1); j++) \ - { \ - t += data (j); \ - if (t != ELT_TYPE ()) \ - { \ - if (j == cidx (i+1) - 1) \ - nel += nr - ridx (j); \ - else \ - nel += ridx (j+1) - ridx (j); \ - } \ - } \ - } \ - retval = RET_TYPE (nr, nc, nel); \ - retval.cidx (0) = 0; \ - octave_idx_type ii = 0; \ - for (octave_idx_type i = 0; i < nc; i++) \ - { \ - ELT_TYPE t = ELT_TYPE (); \ + { \ + t += data (j); \ + if (t != ELT_TYPE ()) \ + { \ + if (j == cidx (i+1) - 1) \ + nel += nr - ridx (j); \ + else \ + nel += ridx (j+1) - ridx (j); \ + } \ + } \ + } \ + retval = RET_TYPE (nr, nc, nel); \ + retval.cidx (0) = 0; \ + octave_idx_type ii = 0; \ + for (octave_idx_type i = 0; i < nc; i++) \ + { \ + ELT_TYPE t = ELT_TYPE (); \ for (octave_idx_type j = cidx (i); j < cidx (i+1); j++) \ - { \ - t += data (j); \ - if (t != ELT_TYPE ()) \ - { \ - if (j == cidx (i+1) - 1) \ - { \ + { \ + t += data (j); \ + if (t != ELT_TYPE ()) \ + { \ + if (j == cidx (i+1) - 1) \ + { \ for (octave_idx_type k = ridx (j); k < nr; k++) \ - { \ - retval.data (ii) = t; \ - retval.ridx (ii++) = k; \ - } \ - } \ - else \ - { \ + { \ + retval.data (ii) = t; \ + retval.ridx (ii++) = k; \ + } \ + } \ + else \ + { \ for (octave_idx_type k = ridx (j); k < ridx (j+1); k++) \ - { \ - retval.data (ii) = t; \ - retval.ridx (ii++) = k; \ - } \ - } \ - } \ - } \ - retval.cidx (i+1) = ii; \ - } \ - } \ - } \ - else \ - retval = RET_TYPE (nr,nc); \ - \ + { \ + retval.data (ii) = t; \ + retval.ridx (ii++) = k; \ + } \ + } \ + } \ + } \ + retval.cidx (i+1) = ii; \ + } \ + } \ + } \ + else \ + retval = RET_TYPE (nr,nc); \ + \ return retval -#define SPARSE_CUMPROD(RET_TYPE, ELT_TYPE, FCN) \ - \ - octave_idx_type nr = rows (); \ - octave_idx_type nc = cols (); \ - \ - RET_TYPE retval; \ - \ - if (nr > 0 && nc > 0) \ - { \ - if ((nr == 1 && dim == -1) || dim == 1) \ - /* Ugly!! Is there a better way? */ \ - retval = transpose (). 
FCN (0) .transpose (); \ - else \ - { \ - octave_idx_type nel = 0; \ - for (octave_idx_type i = 0; i < nc; i++) \ - { \ - octave_idx_type jj = 0; \ - for (octave_idx_type j = cidx (i); j < cidx (i+1); j++) \ - { \ - if (jj == ridx (j)) \ - { \ - nel++; \ - jj++; \ - } \ - else \ - break; \ - } \ - } \ - retval = RET_TYPE (nr, nc, nel); \ - retval.cidx (0) = 0; \ - octave_idx_type ii = 0; \ - for (octave_idx_type i = 0; i < nc; i++) \ - { \ - ELT_TYPE t = ELT_TYPE (1.); \ - octave_idx_type jj = 0; \ - for (octave_idx_type j = cidx (i); j < cidx (i+1); j++) \ - { \ - if (jj == ridx (j)) \ - { \ - t *= data (j); \ - retval.data (ii) = t; \ - retval.ridx (ii++) = jj++; \ - } \ - else \ - break; \ - } \ - retval.cidx (i+1) = ii; \ - } \ - } \ - } \ - else \ - retval = RET_TYPE (nr,nc); \ - \ +#define SPARSE_CUMPROD(RET_TYPE, ELT_TYPE, FCN) \ + \ + octave_idx_type nr = rows (); \ + octave_idx_type nc = cols (); \ + \ + RET_TYPE retval; \ + \ + if (nr > 0 && nc > 0) \ + { \ + if ((nr == 1 && dim == -1) || dim == 1) \ + /* Ugly!! Is there a better way? */ \ + retval = transpose (). FCN (0) .transpose (); \ + else \ + { \ + octave_idx_type nel = 0; \ + for (octave_idx_type i = 0; i < nc; i++) \ + { \ + octave_idx_type jj = 0; \ + for (octave_idx_type j = cidx (i); j < cidx (i+1); j++) \ + { \ + if (jj == ridx (j)) \ + { \ + nel++; \ + jj++; \ + } \ + else \ + break; \ + } \ + } \ + retval = RET_TYPE (nr, nc, nel); \ + retval.cidx (0) = 0; \ + octave_idx_type ii = 0; \ + for (octave_idx_type i = 0; i < nc; i++) \ + { \ + ELT_TYPE t = ELT_TYPE (1.); \ + octave_idx_type jj = 0; \ + for (octave_idx_type j = cidx (i); j < cidx (i+1); j++) \ + { \ + if (jj == ridx (j)) \ + { \ + t *= data (j); \ + retval.data (ii) = t; \ + retval.ridx (ii++) = jj++; \ + } \ + else \ + break; \ + } \ + retval.cidx (i+1) = ii; \ + } \ + } \ + } \ + else \ + retval = RET_TYPE (nr,nc); \ + \ return retval #define SPARSE_BASE_REDUCTION_OP(RET_TYPE, EL_TYPE, ROW_EXPR, COL_EXPR, \ - INIT_VAL, MT_RESULT) \ - \ - octave_idx_type nr = rows (); \ - octave_idx_type nc = cols (); \ - \ - RET_TYPE retval; \ - \ - if (nr > 0 && nc > 0) \ - { \ - if ((nr == 1 && dim == -1) || dim == 1) \ - { \ + INIT_VAL, MT_RESULT) \ + \ + octave_idx_type nr = rows (); \ + octave_idx_type nc = cols (); \ + \ + RET_TYPE retval; \ + \ + if (nr > 0 && nc > 0) \ + { \ + if ((nr == 1 && dim == -1) || dim == 1) \ + { \ /* Define j here to allow fancy definition for prod method */ \ - octave_idx_type j = 0; \ - OCTAVE_LOCAL_BUFFER (EL_TYPE, tmp, nr); \ - \ - for (octave_idx_type i = 0; i < nr; i++) \ - tmp[i] = INIT_VAL; \ - for (j = 0; j < nc; j++) \ - { \ + octave_idx_type j = 0; \ + OCTAVE_LOCAL_BUFFER (EL_TYPE, tmp, nr); \ + \ + for (octave_idx_type i = 0; i < nr; i++) \ + tmp[i] = INIT_VAL; \ + for (j = 0; j < nc; j++) \ + { \ for (octave_idx_type i = cidx (j); i < cidx (j + 1); i++) \ - { \ - ROW_EXPR; \ - } \ - } \ - octave_idx_type nel = 0; \ - for (octave_idx_type i = 0; i < nr; i++) \ - if (tmp[i] != EL_TYPE ()) \ - nel++; \ + { \ + ROW_EXPR; \ + } \ + } \ + octave_idx_type nel = 0; \ + for (octave_idx_type i = 0; i < nr; i++) \ + if (tmp[i] != EL_TYPE ()) \ + nel++; \ retval = RET_TYPE (nr, static_cast (1), nel); \ - retval.cidx (0) = 0; \ - retval.cidx (1) = nel; \ - nel = 0; \ - for (octave_idx_type i = 0; i < nr; i++) \ - if (tmp[i] != EL_TYPE ()) \ - { \ - retval.data (nel) = tmp[i]; \ - retval.ridx (nel++) = i; \ - } \ - } \ - else \ - { \ - OCTAVE_LOCAL_BUFFER (EL_TYPE, tmp, nc); \ - \ - for (octave_idx_type j = 0; j < nc; j++) \ - { \ - 
tmp[j] = INIT_VAL; \ + retval.cidx (0) = 0; \ + retval.cidx (1) = nel; \ + nel = 0; \ + for (octave_idx_type i = 0; i < nr; i++) \ + if (tmp[i] != EL_TYPE ()) \ + { \ + retval.data (nel) = tmp[i]; \ + retval.ridx (nel++) = i; \ + } \ + } \ + else \ + { \ + OCTAVE_LOCAL_BUFFER (EL_TYPE, tmp, nc); \ + \ + for (octave_idx_type j = 0; j < nc; j++) \ + { \ + tmp[j] = INIT_VAL; \ for (octave_idx_type i = cidx (j); i < cidx (j + 1); i++) \ - { \ - COL_EXPR; \ - } \ - } \ - octave_idx_type nel = 0; \ - for (octave_idx_type i = 0; i < nc; i++) \ - if (tmp[i] != EL_TYPE ()) \ - nel++; \ + { \ + COL_EXPR; \ + } \ + } \ + octave_idx_type nel = 0; \ + for (octave_idx_type i = 0; i < nc; i++) \ + if (tmp[i] != EL_TYPE ()) \ + nel++; \ retval = RET_TYPE (static_cast (1), nc, nel); \ - retval.cidx (0) = 0; \ - nel = 0; \ - for (octave_idx_type i = 0; i < nc; i++) \ - if (tmp[i] != EL_TYPE ()) \ - { \ - retval.data (nel) = tmp[i]; \ - retval.ridx (nel++) = 0; \ - retval.cidx (i+1) = retval.cidx (i) + 1; \ - } \ - else \ - retval.cidx (i+1) = retval.cidx (i); \ - } \ - } \ - else if (nc == 0 && (nr == 0 || (nr == 1 && dim == -1))) \ - { \ - if (MT_RESULT) \ - { \ - retval = RET_TYPE (static_cast (1), \ - static_cast (1), \ - static_cast (1)); \ - retval.cidx (0) = 0; \ - retval.cidx (1) = 1; \ - retval.ridx (0) = 0; \ - retval.data (0) = MT_RESULT; \ - } \ - else \ - retval = RET_TYPE (static_cast (1), \ - static_cast (1), \ - static_cast (0)); \ - } \ - else if (nr == 0 && (dim == 0 || dim == -1)) \ - { \ - if (MT_RESULT) \ - { \ + retval.cidx (0) = 0; \ + nel = 0; \ + for (octave_idx_type i = 0; i < nc; i++) \ + if (tmp[i] != EL_TYPE ()) \ + { \ + retval.data (nel) = tmp[i]; \ + retval.ridx (nel++) = 0; \ + retval.cidx (i+1) = retval.cidx (i) + 1; \ + } \ + else \ + retval.cidx (i+1) = retval.cidx (i); \ + } \ + } \ + else if (nc == 0 && (nr == 0 || (nr == 1 && dim == -1))) \ + { \ + if (MT_RESULT) \ + { \ + retval = RET_TYPE (static_cast (1), \ + static_cast (1), \ + static_cast (1)); \ + retval.cidx (0) = 0; \ + retval.cidx (1) = 1; \ + retval.ridx (0) = 0; \ + retval.data (0) = MT_RESULT; \ + } \ + else \ + retval = RET_TYPE (static_cast (1), \ + static_cast (1), \ + static_cast (0)); \ + } \ + else if (nr == 0 && (dim == 0 || dim == -1)) \ + { \ + if (MT_RESULT) \ + { \ retval = RET_TYPE (static_cast (1), nc, nc); \ - retval.cidx (0) = 0; \ - for (octave_idx_type i = 0; i < nc ; i++) \ - { \ - retval.ridx (i) = 0; \ - retval.cidx (i+1) = i+1; \ - retval.data (i) = MT_RESULT; \ - } \ - } \ - else \ - retval = RET_TYPE (static_cast (1), nc, \ - static_cast (0)); \ - } \ - else if (nc == 0 && dim == 1) \ - { \ - if (MT_RESULT) \ - { \ + retval.cidx (0) = 0; \ + for (octave_idx_type i = 0; i < nc ; i++) \ + { \ + retval.ridx (i) = 0; \ + retval.cidx (i+1) = i+1; \ + retval.data (i) = MT_RESULT; \ + } \ + } \ + else \ + retval = RET_TYPE (static_cast (1), nc, \ + static_cast (0)); \ + } \ + else if (nc == 0 && dim == 1) \ + { \ + if (MT_RESULT) \ + { \ retval = RET_TYPE (nr, static_cast (1), nr); \ - retval.cidx (0) = 0; \ - retval.cidx (1) = nr; \ - for (octave_idx_type i = 0; i < nr; i++) \ - { \ - retval.ridx (i) = i; \ - retval.data (i) = MT_RESULT; \ - } \ - } \ - else \ - retval = RET_TYPE (nr, static_cast (1), \ - static_cast (0)); \ - } \ - else \ - retval.resize (nr > 0, nc > 0); \ - \ + retval.cidx (0) = 0; \ + retval.cidx (1) = nr; \ + for (octave_idx_type i = 0; i < nr; i++) \ + { \ + retval.ridx (i) = i; \ + retval.data (i) = MT_RESULT; \ + } \ + } \ + else \ + retval = RET_TYPE (nr, 
static_cast (1), \ + static_cast (0)); \ + } \ + else \ + retval.resize (nr > 0, nc > 0); \ + \ return retval -#define SPARSE_REDUCTION_OP_ROW_EXPR(OP) \ +#define SPARSE_REDUCTION_OP_ROW_EXPR(OP) \ tmp[ridx (i)] OP data (i) -#define SPARSE_REDUCTION_OP_COL_EXPR(OP) \ +#define SPARSE_REDUCTION_OP_COL_EXPR(OP) \ tmp[j] OP data (i) #define SPARSE_REDUCTION_OP(RET_TYPE, EL_TYPE, OP, INIT_VAL, MT_RESULT) \ - SPARSE_BASE_REDUCTION_OP (RET_TYPE, EL_TYPE, \ - SPARSE_REDUCTION_OP_ROW_EXPR (OP), \ - SPARSE_REDUCTION_OP_COL_EXPR (OP), \ - INIT_VAL, MT_RESULT) + SPARSE_BASE_REDUCTION_OP (RET_TYPE, EL_TYPE, \ + SPARSE_REDUCTION_OP_ROW_EXPR (OP), \ + SPARSE_REDUCTION_OP_COL_EXPR (OP), \ + INIT_VAL, MT_RESULT) // Don't break from this loop if the test succeeds because // we are looping over the rows and not the columns in the inner loop. -#define SPARSE_ANY_ALL_OP_ROW_CODE(TEST_OP, TEST_TRUE_VAL) \ - if (data (i) TEST_OP 0.0) \ +#define SPARSE_ANY_ALL_OP_ROW_CODE(TEST_OP, TEST_TRUE_VAL) \ + if (data (i) TEST_OP 0.0) \ tmp[ridx (i)] = TEST_TRUE_VAL; -#define SPARSE_ANY_ALL_OP_COL_CODE(TEST_OP, TEST_TRUE_VAL) \ - if (data (i) TEST_OP 0.0) \ - { \ - tmp[j] = TEST_TRUE_VAL; \ - break; \ +#define SPARSE_ANY_ALL_OP_COL_CODE(TEST_OP, TEST_TRUE_VAL) \ + if (data (i) TEST_OP 0.0) \ + { \ + tmp[j] = TEST_TRUE_VAL; \ + break; \ } #define SPARSE_ANY_ALL_OP(DIM, INIT_VAL, MT_RESULT, TEST_OP, TEST_TRUE_VAL) \ - SPARSE_BASE_REDUCTION_OP (SparseBoolMatrix, char, \ - SPARSE_ANY_ALL_OP_ROW_CODE (TEST_OP, TEST_TRUE_VAL), \ - SPARSE_ANY_ALL_OP_COL_CODE (TEST_OP, TEST_TRUE_VAL), \ - INIT_VAL, MT_RESULT) + SPARSE_BASE_REDUCTION_OP (SparseBoolMatrix, char, \ + SPARSE_ANY_ALL_OP_ROW_CODE (TEST_OP, TEST_TRUE_VAL), \ + SPARSE_ANY_ALL_OP_COL_CODE (TEST_OP, TEST_TRUE_VAL), \ + INIT_VAL, MT_RESULT) -#define SPARSE_ALL_OP(DIM) \ - if ((rows () == 1 && dim == -1) || dim == 1) \ - return transpose (). all (0). transpose (); \ - else \ - { \ +#define SPARSE_ALL_OP(DIM) \ + if ((rows () == 1 && dim == -1) || dim == 1) \ + return transpose (). all (0). transpose (); \ + else \ + { \ SPARSE_ANY_ALL_OP (DIM, (cidx (j+1) - cidx (j) < nr ? 
false : true), \ - true, ==, false); \ + true, ==, false); \ } #define SPARSE_ANY_OP(DIM) SPARSE_ANY_ALL_OP (DIM, false, false, !=, true) -#define SPARSE_SPARSE_MUL(RET_TYPE, RET_EL_TYPE, EL_TYPE) \ - octave_idx_type nr = m.rows (); \ - octave_idx_type nc = m.cols (); \ - \ - octave_idx_type a_nr = a.rows (); \ - octave_idx_type a_nc = a.cols (); \ - \ - if (nr == 1 && nc == 1) \ - { \ - RET_EL_TYPE s = m.elem (0,0); \ - octave_idx_type nz = a.nnz (); \ - RET_TYPE r (a_nr, a_nc, nz); \ - \ - for (octave_idx_type i = 0; i < nz; i++) \ - { \ - octave_quit (); \ - r.data (i) = s * a.data (i); \ - r.ridx (i) = a.ridx (i); \ - } \ - for (octave_idx_type i = 0; i < a_nc + 1; i++) \ - { \ - octave_quit (); \ - r.cidx (i) = a.cidx (i); \ - } \ - \ - r.maybe_compress (true); \ - return r; \ - } \ - else if (a_nr == 1 && a_nc == 1) \ - { \ - RET_EL_TYPE s = a.elem (0,0); \ - octave_idx_type nz = m.nnz (); \ - RET_TYPE r (nr, nc, nz); \ - \ - for (octave_idx_type i = 0; i < nz; i++) \ - { \ - octave_quit (); \ - r.data (i) = m.data (i) * s; \ - r.ridx (i) = m.ridx (i); \ - } \ - for (octave_idx_type i = 0; i < nc + 1; i++) \ - { \ - octave_quit (); \ - r.cidx (i) = m.cidx (i); \ - } \ - \ - r.maybe_compress (true); \ - return r; \ - } \ - else if (nc != a_nr) \ - err_nonconformant ("operator *", nr, nc, a_nr, a_nc); \ - else \ - { \ - OCTAVE_LOCAL_BUFFER (octave_idx_type, w, nr); \ - RET_TYPE retval (nr, a_nc, static_cast (0)); \ - for (octave_idx_type i = 0; i < nr; i++) \ - w[i] = 0; \ - retval.xcidx (0) = 0; \ - \ - octave_idx_type nel = 0; \ - \ - for (octave_idx_type i = 0; i < a_nc; i++) \ - { \ - for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++) \ - { \ - octave_idx_type col = a.ridx (j); \ +#define SPARSE_SPARSE_MUL(RET_TYPE, RET_EL_TYPE, EL_TYPE) \ + octave_idx_type nr = m.rows (); \ + octave_idx_type nc = m.cols (); \ + \ + octave_idx_type a_nr = a.rows (); \ + octave_idx_type a_nc = a.cols (); \ + \ + if (nr == 1 && nc == 1) \ + { \ + RET_EL_TYPE s = m.elem (0,0); \ + octave_idx_type nz = a.nnz (); \ + RET_TYPE r (a_nr, a_nc, nz); \ + \ + for (octave_idx_type i = 0; i < nz; i++) \ + { \ + octave_quit (); \ + r.data (i) = s * a.data (i); \ + r.ridx (i) = a.ridx (i); \ + } \ + for (octave_idx_type i = 0; i < a_nc + 1; i++) \ + { \ + octave_quit (); \ + r.cidx (i) = a.cidx (i); \ + } \ + \ + r.maybe_compress (true); \ + return r; \ + } \ + else if (a_nr == 1 && a_nc == 1) \ + { \ + RET_EL_TYPE s = a.elem (0,0); \ + octave_idx_type nz = m.nnz (); \ + RET_TYPE r (nr, nc, nz); \ + \ + for (octave_idx_type i = 0; i < nz; i++) \ + { \ + octave_quit (); \ + r.data (i) = m.data (i) * s; \ + r.ridx (i) = m.ridx (i); \ + } \ + for (octave_idx_type i = 0; i < nc + 1; i++) \ + { \ + octave_quit (); \ + r.cidx (i) = m.cidx (i); \ + } \ + \ + r.maybe_compress (true); \ + return r; \ + } \ + else if (nc != a_nr) \ + err_nonconformant ("operator *", nr, nc, a_nr, a_nc); \ + else \ + { \ + OCTAVE_LOCAL_BUFFER (octave_idx_type, w, nr); \ + RET_TYPE retval (nr, a_nc, static_cast (0)); \ + for (octave_idx_type i = 0; i < nr; i++) \ + w[i] = 0; \ + retval.xcidx (0) = 0; \ + \ + octave_idx_type nel = 0; \ + \ + for (octave_idx_type i = 0; i < a_nc; i++) \ + { \ + for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++) \ + { \ + octave_idx_type col = a.ridx (j); \ for (octave_idx_type k = m.cidx (col) ; k < m.cidx (col+1); k++) \ - { \ - if (w[m.ridx (k)] < i + 1) \ - { \ - w[m.ridx (k)] = i + 1; \ - nel++; \ - } \ - octave_quit (); \ - } \ - } \ - retval.xcidx (i+1) = nel; \ - } \ - \ - if (nel == 
0) \ - return RET_TYPE (nr, a_nc); \ - else \ - { \ - for (octave_idx_type i = 0; i < nr; i++) \ - w[i] = 0; \ - \ - OCTAVE_LOCAL_BUFFER (RET_EL_TYPE, Xcol, nr); \ - \ - retval.change_capacity (nel); \ - /* The optimal break-point as estimated from simulations */ \ + { \ + if (w[m.ridx (k)] < i + 1) \ + { \ + w[m.ridx (k)] = i + 1; \ + nel++; \ + } \ + octave_quit (); \ + } \ + } \ + retval.xcidx (i+1) = nel; \ + } \ + \ + if (nel == 0) \ + return RET_TYPE (nr, a_nc); \ + else \ + { \ + for (octave_idx_type i = 0; i < nr; i++) \ + w[i] = 0; \ + \ + OCTAVE_LOCAL_BUFFER (RET_EL_TYPE, Xcol, nr); \ + \ + retval.change_capacity (nel); \ + /* The optimal break-point as estimated from simulations */ \ /* Note that Mergesort is O(nz log(nz)) while searching all */ \ - /* values is O(nr), where nz here is nonzero per row of */ \ - /* length nr. The test itself was then derived from the */ \ + /* values is O(nr), where nz here is nonzero per row of */ \ + /* length nr. The test itself was then derived from the */ \ /* simulation with random square matrices and the observation */ \ - /* of the number of nonzero elements in the output matrix */ \ - /* it was found that the breakpoints were */ \ - /* nr: 500 1000 2000 5000 10000 */ \ - /* nz: 6 25 97 585 2202 */ \ + /* of the number of nonzero elements in the output matrix */ \ + /* it was found that the breakpoints were */ \ + /* nr: 500 1000 2000 5000 10000 */ \ + /* nz: 6 25 97 585 2202 */ \ /* The below is a simplication of the 'polyfit'-ed parameters */ \ - /* to these breakpoints */ \ - octave_idx_type n_per_col = (a_nc > 43000 ? 43000 : \ - (a_nc * a_nc) / 43000); \ - octave_idx_type ii = 0; \ - octave_idx_type *ri = retval.xridx (); \ - octave_sort sort; \ - \ - for (octave_idx_type i = 0; i < a_nc ; i++) \ - { \ - if (retval.xcidx (i+1) - retval.xcidx (i) > n_per_col) \ - { \ + /* to these breakpoints */ \ + octave_idx_type n_per_col = (a_nc > 43000 ? 
43000 : \ + (a_nc * a_nc) / 43000); \ + octave_idx_type ii = 0; \ + octave_idx_type *ri = retval.xridx (); \ + octave_sort sort; \ + \ + for (octave_idx_type i = 0; i < a_nc ; i++) \ + { \ + if (retval.xcidx (i+1) - retval.xcidx (i) > n_per_col) \ + { \ for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++) \ - { \ - octave_idx_type col = a.ridx (j); \ - EL_TYPE tmpval = a.data (j); \ - for (octave_idx_type k = m.cidx (col) ; \ - k < m.cidx (col+1); k++) \ - { \ - octave_quit (); \ - octave_idx_type row = m.ridx (k); \ - if (w[row] < i + 1) \ - { \ - w[row] = i + 1; \ - Xcol[row] = tmpval * m.data (k); \ - } \ - else \ - Xcol[row] += tmpval * m.data (k); \ - } \ - } \ - for (octave_idx_type k = 0; k < nr; k++) \ - if (w[k] == i + 1) \ - { \ - retval.xdata (ii) = Xcol[k]; \ - retval.xridx (ii++) = k; \ - } \ - } \ - else \ - { \ + { \ + octave_idx_type col = a.ridx (j); \ + EL_TYPE tmpval = a.data (j); \ + for (octave_idx_type k = m.cidx (col) ; \ + k < m.cidx (col+1); k++) \ + { \ + octave_quit (); \ + octave_idx_type row = m.ridx (k); \ + if (w[row] < i + 1) \ + { \ + w[row] = i + 1; \ + Xcol[row] = tmpval * m.data (k); \ + } \ + else \ + Xcol[row] += tmpval * m.data (k); \ + } \ + } \ + for (octave_idx_type k = 0; k < nr; k++) \ + if (w[k] == i + 1) \ + { \ + retval.xdata (ii) = Xcol[k]; \ + retval.xridx (ii++) = k; \ + } \ + } \ + else \ + { \ for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++) \ - { \ - octave_idx_type col = a.ridx (j); \ - EL_TYPE tmpval = a.data (j); \ - for (octave_idx_type k = m.cidx (col) ; \ - k < m.cidx (col+1); k++) \ - { \ - octave_quit (); \ - octave_idx_type row = m.ridx (k); \ - if (w[row] < i + 1) \ - { \ - w[row] = i + 1; \ - retval.xridx (ii++) = row;\ - Xcol[row] = tmpval * m.data (k); \ - } \ - else \ - Xcol[row] += tmpval * m.data (k); \ - } \ - } \ + { \ + octave_idx_type col = a.ridx (j); \ + EL_TYPE tmpval = a.data (j); \ + for (octave_idx_type k = m.cidx (col) ; \ + k < m.cidx (col+1); k++) \ + { \ + octave_quit (); \ + octave_idx_type row = m.ridx (k); \ + if (w[row] < i + 1) \ + { \ + w[row] = i + 1; \ + retval.xridx (ii++) = row; \ + Xcol[row] = tmpval * m.data (k); \ + } \ + else \ + Xcol[row] += tmpval * m.data (k); \ + } \ + } \ sort.sort (ri + retval.xcidx (i), ii - retval.xcidx (i)); \ for (octave_idx_type k = retval.xcidx (i); k < ii; k++) \ - retval.xdata (k) = Xcol[retval.xridx (k)]; \ - } \ - } \ - retval.maybe_compress (true);\ - return retval; \ - } \ + retval.xdata (k) = Xcol[retval.xridx (k)]; \ + } \ + } \ + retval.maybe_compress (true); \ + return retval; \ + } \ } -#define SPARSE_FULL_MUL(RET_TYPE, EL_TYPE, ZERO) \ - octave_idx_type nr = m.rows (); \ - octave_idx_type nc = m.cols (); \ - \ - octave_idx_type a_nr = a.rows (); \ - octave_idx_type a_nc = a.cols (); \ - \ - if (nr == 1 && nc == 1) \ - { \ - RET_TYPE retval = m.elem (0,0) * a; \ - return retval; \ - } \ - else if (nc != a_nr) \ - err_nonconformant ("operator *", nr, nc, a_nr, a_nc); \ - else \ - { \ - RET_TYPE retval (nr, a_nc, ZERO); \ - \ - for (octave_idx_type i = 0; i < a_nc ; i++) \ - { \ - for (octave_idx_type j = 0; j < a_nr; j++) \ - { \ - octave_quit (); \ - \ - EL_TYPE tmpval = a.elem (j,i); \ +#define SPARSE_FULL_MUL(RET_TYPE, EL_TYPE, ZERO) \ + octave_idx_type nr = m.rows (); \ + octave_idx_type nc = m.cols (); \ + \ + octave_idx_type a_nr = a.rows (); \ + octave_idx_type a_nc = a.cols (); \ + \ + if (nr == 1 && nc == 1) \ + { \ + RET_TYPE retval = m.elem (0,0) * a; \ + return retval; \ + } \ + else if (nc != a_nr) \ + err_nonconformant 
("operator *", nr, nc, a_nr, a_nc); \ + else \ + { \ + RET_TYPE retval (nr, a_nc, ZERO); \ + \ + for (octave_idx_type i = 0; i < a_nc ; i++) \ + { \ + for (octave_idx_type j = 0; j < a_nr; j++) \ + { \ + octave_quit (); \ + \ + EL_TYPE tmpval = a.elem (j,i); \ for (octave_idx_type k = m.cidx (j) ; k < m.cidx (j+1); k++) \ - retval.elem (m.ridx (k),i) += tmpval * m.data (k); \ - } \ - } \ - return retval; \ + retval.elem (m.ridx (k),i) += tmpval * m.data (k); \ + } \ + } \ + return retval; \ } -#define SPARSE_FULL_TRANS_MUL(RET_TYPE, EL_TYPE, ZERO, CONJ_OP) \ - octave_idx_type nr = m.rows (); \ - octave_idx_type nc = m.cols (); \ - \ - octave_idx_type a_nr = a.rows (); \ - octave_idx_type a_nc = a.cols (); \ - \ - if (nr == 1 && nc == 1) \ - { \ - RET_TYPE retval = CONJ_OP (m.elem (0,0)) * a; \ - return retval; \ - } \ - else if (nr != a_nr) \ - err_nonconformant ("operator *", nc, nr, a_nr, a_nc); \ - else \ - { \ - RET_TYPE retval (nc, a_nc); \ - \ - for (octave_idx_type i = 0; i < a_nc ; i++) \ - { \ - for (octave_idx_type j = 0; j < nc; j++) \ - { \ - octave_quit (); \ - \ - EL_TYPE acc = ZERO; \ +#define SPARSE_FULL_TRANS_MUL(RET_TYPE, EL_TYPE, ZERO, CONJ_OP) \ + octave_idx_type nr = m.rows (); \ + octave_idx_type nc = m.cols (); \ + \ + octave_idx_type a_nr = a.rows (); \ + octave_idx_type a_nc = a.cols (); \ + \ + if (nr == 1 && nc == 1) \ + { \ + RET_TYPE retval = CONJ_OP (m.elem (0,0)) * a; \ + return retval; \ + } \ + else if (nr != a_nr) \ + err_nonconformant ("operator *", nc, nr, a_nr, a_nc); \ + else \ + { \ + RET_TYPE retval (nc, a_nc); \ + \ + for (octave_idx_type i = 0; i < a_nc ; i++) \ + { \ + for (octave_idx_type j = 0; j < nc; j++) \ + { \ + octave_quit (); \ + \ + EL_TYPE acc = ZERO; \ for (octave_idx_type k = m.cidx (j) ; k < m.cidx (j+1); k++) \ - acc += a.elem (m.ridx (k),i) * CONJ_OP (m.data (k)); \ - retval.xelem (j,i) = acc; \ - } \ - } \ - return retval; \ + acc += a.elem (m.ridx (k),i) * CONJ_OP (m.data (k)); \ + retval.xelem (j,i) = acc; \ + } \ + } \ + return retval; \ } -#define FULL_SPARSE_MUL(RET_TYPE, EL_TYPE, ZERO) \ - octave_idx_type nr = m.rows (); \ - octave_idx_type nc = m.cols (); \ - \ - octave_idx_type a_nr = a.rows (); \ - octave_idx_type a_nc = a.cols (); \ - \ - if (a_nr == 1 && a_nc == 1) \ - { \ - RET_TYPE retval = m * a.elem (0,0); \ - return retval; \ - } \ - else if (nc != a_nr) \ - err_nonconformant ("operator *", nr, nc, a_nr, a_nc); \ - else \ - { \ - RET_TYPE retval (nr, a_nc, ZERO); \ - \ - for (octave_idx_type i = 0; i < a_nc ; i++) \ - { \ - octave_quit (); \ - for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++) \ - { \ - octave_idx_type col = a.ridx (j); \ - EL_TYPE tmpval = a.data (j); \ - \ - for (octave_idx_type k = 0 ; k < nr; k++) \ - retval.xelem (k,i) += tmpval * m.elem (k,col); \ - } \ - } \ - return retval; \ +#define FULL_SPARSE_MUL(RET_TYPE, EL_TYPE, ZERO) \ + octave_idx_type nr = m.rows (); \ + octave_idx_type nc = m.cols (); \ + \ + octave_idx_type a_nr = a.rows (); \ + octave_idx_type a_nc = a.cols (); \ + \ + if (a_nr == 1 && a_nc == 1) \ + { \ + RET_TYPE retval = m * a.elem (0,0); \ + return retval; \ + } \ + else if (nc != a_nr) \ + err_nonconformant ("operator *", nr, nc, a_nr, a_nc); \ + else \ + { \ + RET_TYPE retval (nr, a_nc, ZERO); \ + \ + for (octave_idx_type i = 0; i < a_nc ; i++) \ + { \ + octave_quit (); \ + for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++) \ + { \ + octave_idx_type col = a.ridx (j); \ + EL_TYPE tmpval = a.data (j); \ + \ + for (octave_idx_type k = 0 ; k < nr; k++) \ + 
retval.xelem (k,i) += tmpval * m.elem (k,col); \ + } \ + } \ + return retval; \ } -#define FULL_SPARSE_MUL_TRANS(RET_TYPE, EL_TYPE, ZERO, CONJ_OP) \ - octave_idx_type nr = m.rows (); \ - octave_idx_type nc = m.cols (); \ - \ - octave_idx_type a_nr = a.rows (); \ - octave_idx_type a_nc = a.cols (); \ - \ - if (a_nr == 1 && a_nc == 1) \ - { \ - RET_TYPE retval = m * CONJ_OP (a.elem (0,0)); \ - return retval; \ - } \ - else if (nc != a_nc) \ - err_nonconformant ("operator *", nr, nc, a_nc, a_nr); \ - else \ - { \ - RET_TYPE retval (nr, a_nr, ZERO); \ - \ - for (octave_idx_type i = 0; i < a_nc ; i++) \ - { \ - octave_quit (); \ - for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++) \ - { \ - octave_idx_type col = a.ridx (j); \ - EL_TYPE tmpval = CONJ_OP (a.data (j)); \ - for (octave_idx_type k = 0 ; k < nr; k++) \ - retval.xelem (k,col) += tmpval * m.elem (k,i); \ - } \ - } \ - return retval; \ +#define FULL_SPARSE_MUL_TRANS(RET_TYPE, EL_TYPE, ZERO, CONJ_OP) \ + octave_idx_type nr = m.rows (); \ + octave_idx_type nc = m.cols (); \ + \ + octave_idx_type a_nr = a.rows (); \ + octave_idx_type a_nc = a.cols (); \ + \ + if (a_nr == 1 && a_nc == 1) \ + { \ + RET_TYPE retval = m * CONJ_OP (a.elem (0,0)); \ + return retval; \ + } \ + else if (nc != a_nc) \ + err_nonconformant ("operator *", nr, nc, a_nc, a_nr); \ + else \ + { \ + RET_TYPE retval (nr, a_nr, ZERO); \ + \ + for (octave_idx_type i = 0; i < a_nc ; i++) \ + { \ + octave_quit (); \ + for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++) \ + { \ + octave_idx_type col = a.ridx (j); \ + EL_TYPE tmpval = CONJ_OP (a.data (j)); \ + for (octave_idx_type k = 0 ; k < nr; k++) \ + retval.xelem (k,col) += tmpval * m.elem (k,i); \ + } \ + } \ + return retval; \ } #endif diff -r dd992fd74fce -r e43d83253e28 liboctave/operators/mx-inlines.cc --- a/liboctave/operators/mx-inlines.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/operators/mx-inlines.cc Mon Aug 01 12:40:18 2016 -0400 @@ -48,69 +48,106 @@ template inline void mx_inline_fill (size_t n, R *r, S s) throw () -{ for (size_t i = 0; i < n; i++) r[i] = s; } +{ + for (size_t i = 0; i < n; i++) + r[i] = s; +} -#define DEFMXUNOP(F, OP) \ -template \ -inline void F (size_t n, R *r, const X *x) throw () \ -{ for (size_t i = 0; i < n; i++) r[i] = OP x[i]; } +#define DEFMXUNOP(F, OP) \ + template \ + inline void F (size_t n, R *r, const X *x) throw () \ + { \ + for (size_t i = 0; i < n; i++) \ + r[i] = OP x[i]; \ + } DEFMXUNOP (mx_inline_uminus, -) -#define DEFMXUNOPEQ(F, OP) \ -template \ -inline void F (size_t n, R *r) throw () \ -{ for (size_t i = 0; i < n; i++) r[i] = OP r[i]; } +#define DEFMXUNOPEQ(F, OP) \ + template \ + inline void F (size_t n, R *r) throw () \ + { \ + for (size_t i = 0; i < n; i++) \ + r[i] = OP r[i]; \ + } DEFMXUNOPEQ (mx_inline_uminus2, -) -#define DEFMXUNBOOLOP(F, OP) \ -template \ -inline void F (size_t n, bool *r, const X *x) throw () \ -{ const X zero = X (); for (size_t i = 0; i < n; i++) r[i] = x[i] OP zero; } +#define DEFMXUNBOOLOP(F, OP) \ + template \ + inline void F (size_t n, bool *r, const X *x) throw () \ + { \ + const X zero = X (); \ + for (size_t i = 0; i < n; i++) \ + r[i] = x[i] OP zero; \ + } DEFMXUNBOOLOP (mx_inline_iszero, ==) DEFMXUNBOOLOP (mx_inline_notzero, !=) -#define DEFMXBINOP(F, OP) \ -template \ -inline void F (size_t n, R *r, const X *x, const Y *y) throw () \ -{ for (size_t i = 0; i < n; i++) r[i] = x[i] OP y[i]; } \ -template \ -inline void F (size_t n, R *r, const X *x, Y y) throw () \ -{ for (size_t i = 0; i < n; i++) r[i] = x[i] OP 
y; } \ -template \ -inline void F (size_t n, R *r, X x, const Y *y) throw () \ -{ for (size_t i = 0; i < n; i++) r[i] = x OP y[i]; } +#define DEFMXBINOP(F, OP) \ + template \ + inline void F (size_t n, R *r, const X *x, const Y *y) throw () \ + { \ + for (size_t i = 0; i < n; i++) \ + r[i] = x[i] OP y[i]; \ + } \ + template \ + inline void F (size_t n, R *r, const X *x, Y y) throw () \ + { \ + for (size_t i = 0; i < n; i++) \ + r[i] = x[i] OP y; \ + } \ + template \ + inline void F (size_t n, R *r, X x, const Y *y) throw () \ + { \ + for (size_t i = 0; i < n; i++) \ + r[i] = x OP y[i]; \ + } DEFMXBINOP (mx_inline_add, +) DEFMXBINOP (mx_inline_sub, -) DEFMXBINOP (mx_inline_mul, *) DEFMXBINOP (mx_inline_div, /) -#define DEFMXBINOPEQ(F, OP) \ -template \ -inline void F (size_t n, R *r, const X *x) throw () \ -{ for (size_t i = 0; i < n; i++) r[i] OP x[i]; } \ -template \ -inline void F (size_t n, R *r, X x) throw () \ -{ for (size_t i = 0; i < n; i++) r[i] OP x; } +#define DEFMXBINOPEQ(F, OP) \ + template \ + inline void F (size_t n, R *r, const X *x) throw () \ + { \ + for (size_t i = 0; i < n; i++) \ + r[i] OP x[i]; \ + } \ + template \ + inline void F (size_t n, R *r, X x) throw () \ + { \ + for (size_t i = 0; i < n; i++) \ + r[i] OP x; \ + } DEFMXBINOPEQ (mx_inline_add2, +=) DEFMXBINOPEQ (mx_inline_sub2, -=) DEFMXBINOPEQ (mx_inline_mul2, *=) DEFMXBINOPEQ (mx_inline_div2, /=) -#define DEFMXCMPOP(F, OP) \ -template \ -inline void F (size_t n, bool *r, const X *x, const Y *y) throw () \ -{ for (size_t i = 0; i < n; i++) r[i] = x[i] OP y[i]; } \ -template \ -inline void F (size_t n, bool *r, const X *x, Y y) throw () \ -{ for (size_t i = 0; i < n; i++) r[i] = x[i] OP y; } \ -template \ -inline void F (size_t n, bool *r, X x, const Y *y) throw () \ -{ for (size_t i = 0; i < n; i++) r[i] = x OP y[i]; } +#define DEFMXCMPOP(F, OP) \ + template \ + inline void F (size_t n, bool *r, const X *x, const Y *y) throw () \ + { \ + for (size_t i = 0; i < n; i++) \ + r[i] = x[i] OP y[i]; \ + } \ + template \ + inline void F (size_t n, bool *r, const X *x, Y y) throw () \ + { \ + for (size_t i = 0; i < n; i++) \ + r[i] = x[i] OP y; \ + } \ + template \ + inline void F (size_t n, bool *r, X x, const Y *y) throw () \ + { \ + for (size_t i = 0; i < n; i++) \ + r[i] = x OP y[i]; \ + } DEFMXCMPOP (mx_inline_lt, <) DEFMXCMPOP (mx_inline_le, <=) @@ -120,11 +157,26 @@ DEFMXCMPOP (mx_inline_ne, !=) // Convert to logical value, for logical op purposes. -template inline bool logical_value (T x) { return x; } -template inline bool logical_value (const std::complex& x) -{ return x.real () != 0 || x.imag () != 0; } -template inline bool logical_value (const octave_int& x) -{ return x.value (); } +template +inline bool +logical_value (T x) +{ + return x; +} + +template +inline bool +logical_value (const std::complex& x) +{ + return x.real () != 0 || x.imag () != 0; +} + +template +inline bool +logical_value (const octave_int& x) +{ + return x.value (); +} template void mx_inline_not (size_t n, bool *r, const X* x) throw () @@ -135,30 +187,32 @@ inline void mx_inline_not2 (size_t n, bool *r) throw () { - for (size_t i = 0; i < n; i++) r[i] = ! r[i]; + for (size_t i = 0; i < n; i++) + r[i] = ! 
r[i]; } -#define DEFMXBOOLOP(F, NOT1, OP, NOT2) \ -template \ -inline void F (size_t n, bool *r, const X *x, const Y *y) throw () \ -{ \ - for (size_t i = 0; i < n; i++) \ - r[i] = (NOT1 logical_value (x[i])) OP (NOT2 logical_value (y[i])); \ -} \ -template \ -inline void F (size_t n, bool *r, const X *x, Y y) throw () \ -{ \ - const bool yy = (NOT2 logical_value (y)); \ - for (size_t i = 0; i < n; i++) \ - r[i] = (NOT1 logical_value (x[i])) OP yy; \ -} \ -template \ -inline void F (size_t n, bool *r, X x, const Y *y) throw () \ -{ \ - const bool xx = (NOT1 logical_value (x)); \ - for (size_t i = 0; i < n; i++) \ - r[i] = xx OP (NOT2 logical_value (y[i])); \ -} +#define DEFMXBOOLOP(F, NOT1, OP, NOT2) \ + template \ + inline void F (size_t n, bool *r, const X *x, const Y *y) throw () \ + { \ + for (size_t i = 0; i < n; i++) \ + r[i] = ((NOT1 logical_value (x[i])) \ + OP (NOT2 logical_value (y[i]))); \ + } \ + template \ + inline void F (size_t n, bool *r, const X *x, Y y) throw () \ + { \ + const bool yy = (NOT2 logical_value (y)); \ + for (size_t i = 0; i < n; i++) \ + r[i] = (NOT1 logical_value (x[i])) OP yy; \ + } \ + template \ + inline void F (size_t n, bool *r, X x, const Y *y) throw () \ + { \ + const bool xx = (NOT1 logical_value (x)); \ + for (size_t i = 0; i < n; i++) \ + r[i] = xx OP (NOT2 logical_value (y[i])); \ + } DEFMXBOOLOP (mx_inline_and, , &, ) DEFMXBOOLOP (mx_inline_or, , |, ) @@ -167,16 +221,19 @@ DEFMXBOOLOP (mx_inline_and_not, , &, !) DEFMXBOOLOP (mx_inline_or_not, , |, !) -#define DEFMXBOOLOPEQ(F, OP) \ -template \ -inline void F (size_t n, bool *r, const X *x) throw () \ -{ \ - for (size_t i = 0; i < n; i++) \ - r[i] OP logical_value (x[i]); \ -} \ -template \ -inline void F (size_t n, bool *r, X x) throw () \ -{ for (size_t i = 0; i < n; i++) r[i] OP x; } +#define DEFMXBOOLOPEQ(F, OP) \ + template \ + inline void F (size_t n, bool *r, const X *x) throw () \ + { \ + for (size_t i = 0; i < n; i++) \ + r[i] OP logical_value (x[i]); \ + } \ + template \ + inline void F (size_t n, bool *r, X x) throw () \ + { \ + for (size_t i = 0; i < n; i++) \ + r[i] OP x; \ + } DEFMXBOOLOPEQ (mx_inline_and2, &=) DEFMXBOOLOPEQ (mx_inline_or2, |=) @@ -246,51 +303,72 @@ return true; } -#define DEFMXMAPPER(F, FUN) \ -template \ -inline void F (size_t n, T *r, const T *x) throw () \ -{ for (size_t i = 0; i < n; i++) r[i] = FUN (x[i]); } +#define DEFMXMAPPER(F, FUN) \ + template \ + inline void F (size_t n, T *r, const T *x) throw () \ + { \ + for (size_t i = 0; i < n; i++) \ + r[i] = FUN (x[i]); \ + } template inline void mx_inline_real (size_t n, T *r, const std::complex* x) throw () -{ for (size_t i = 0; i < n; i++) r[i] = x[i].real (); } +{ + for (size_t i = 0; i < n; i++) + r[i] = x[i].real (); +} + template inline void mx_inline_imag (size_t n, T *r, const std::complex* x) throw () -{ for (size_t i = 0; i < n; i++) r[i] = x[i].imag (); } +{ + for (size_t i = 0; i < n; i++) + r[i] = x[i].imag (); +} // Pairwise minimums/maximums -#define DEFMXMAPPER2(F, FUN) \ -template \ -inline void F (size_t n, T *r, const T *x, const T *y) throw () \ -{ for (size_t i = 0; i < n; i++) r[i] = FUN (x[i], y[i]); } \ -template \ -inline void F (size_t n, T *r, const T *x, T y) throw () \ -{ for (size_t i = 0; i < n; i++) r[i] = FUN (x[i], y); } \ -template \ -inline void F (size_t n, T *r, T x, const T *y) throw () \ -{ for (size_t i = 0; i < n; i++) r[i] = FUN (x, y[i]); } +#define DEFMXMAPPER2(F, FUN) \ + template \ + inline void F (size_t n, T *r, const T *x, const T *y) throw () \ + { \ + for 
(size_t i = 0; i < n; i++) \ + r[i] = FUN (x[i], y[i]); \ + } \ + template \ + inline void F (size_t n, T *r, const T *x, T y) throw () \ + { \ + for (size_t i = 0; i < n; i++) \ + r[i] = FUN (x[i], y); \ + } \ + template \ + inline void F (size_t n, T *r, T x, const T *y) throw () \ + { \ + for (size_t i = 0; i < n; i++) \ + r[i] = FUN (x, y[i]); \ + } DEFMXMAPPER2 (mx_inline_xmin, octave::math::min) DEFMXMAPPER2 (mx_inline_xmax, octave::math::max) // Specialize array-scalar max/min -#define DEFMINMAXSPEC(T, F, OP) \ -template <> \ -inline void F (size_t n, T *r, const T *x, T y) throw () \ -{ \ - if (octave::math::isnan (y)) \ - std::memcpy (r, x, n * sizeof (T)); \ - else \ - for (size_t i = 0; i < n; i++) r[i] = (x[i] OP y) ? x[i] : y; \ -} \ -template <> \ -inline void F (size_t n, T *r, T x, const T *y) throw () \ -{ \ - if (octave::math::isnan (x)) \ - std::memcpy (r, y, n * sizeof (T)); \ - else \ - for (size_t i = 0; i < n; i++) r[i] = (y[i] OP x) ? y[i] : x; \ -} +#define DEFMINMAXSPEC(T, F, OP) \ + template <> \ + inline void F (size_t n, T *r, const T *x, T y) throw () \ + { \ + if (octave::math::isnan (y)) \ + std::memcpy (r, x, n * sizeof (T)); \ + else \ + for (size_t i = 0; i < n; i++) \ + r[i] = (x[i] OP y) ? x[i] : y; \ + } \ + template <> \ + inline void F (size_t n, T *r, T x, const T *y) throw () \ + { \ + if (octave::math::isnan (x)) \ + std::memcpy (r, y, n * sizeof (T)); \ + else \ + for (size_t i = 0; i < n; i++) \ + r[i] = (y[i] OP x) ? y[i] : x; \ + } DEFMINMAXSPEC (double, mx_inline_xmin, <=) DEFMINMAXSPEC (double, mx_inline_xmax, >=) @@ -298,16 +376,25 @@ DEFMINMAXSPEC (float, mx_inline_xmax, >=) // Pairwise power -#define DEFMXMAPPER2X(F, FUN) \ -template \ -inline void F (size_t n, R *r, const X *x, const Y *y) throw () \ -{ for (size_t i = 0; i < n; i++) r[i] = FUN (x[i], y[i]); } \ -template \ -inline void F (size_t n, R *r, const X *x, Y y) throw () \ -{ for (size_t i = 0; i < n; i++) r[i] = FUN (x[i], y); } \ -template \ -inline void F (size_t n, R *r, X x, const Y *y) throw () \ -{ for (size_t i = 0; i < n; i++) r[i] = FUN (x, y[i]); } +#define DEFMXMAPPER2X(F, FUN) \ + template \ + inline void F (size_t n, R *r, const X *x, const Y *y) throw () \ + { \ + for (size_t i = 0; i < n; i++) \ + r[i] = FUN (x[i], y[i]); \ + } \ + template \ + inline void F (size_t n, R *r, const X *x, Y y) throw () \ + { \ + for (size_t i = 0; i < n; i++) \ + r[i] = FUN (x[i], y); \ + } \ + template \ + inline void F (size_t n, R *r, X x, const Y *y) throw () \ + { \ + for (size_t i = 0; i < n; i++) \ + r[i] = FUN (x, y[i]); \ + } // Let the compiler decide which pow to use, whichever best matches the // arguments provided. @@ -318,11 +405,17 @@ // The function is a template parameter to enable inlining. template inline void mx_inline_map (size_t n, R *r, const X *x) throw () -{ for (size_t i = 0; i < n; i++) r[i] = fun (x[i]); } +{ + for (size_t i = 0; i < n; i++) + r[i] = fun (x[i]); +} template inline void mx_inline_map (size_t n, R *r, const X *x) throw () -{ for (size_t i = 0; i < n; i++) r[i] = fun (x[i]); } +{ + for (size_t i = 0; i < n; i++) + r[i] = fun (x[i]); +} // Appliers. Since these call the operation just once, we pass it as // a pointer, to allow the compiler reduce number of instances. @@ -456,66 +549,166 @@ // magic to avoid underflows, which we don't need here. template inline T cabsq (const std::complex& c) -{ return c.real () * c.real () + c.imag () * c.imag (); } +{ return c.real () * c.real () + c.imag () * c.imag (); +} // default. 
works for integers and bool. template -inline bool xis_true (T x) { return x; } +inline bool +xis_true (T x) +{ + return x; +} + template -inline bool xis_false (T x) { return ! x; } +inline bool +xis_false (T x) +{ + return ! x; +} + // for octave_ints template -inline bool xis_true (const octave_int& x) { return x.value (); } +inline bool +xis_true (const octave_int& x) +{ + return x.value (); +} + template -inline bool xis_false (const octave_int& x) { return ! x.value (); } +inline bool +xis_false (const octave_int& x) +{ + return ! x.value (); +} + // for reals, we want to ignore NaNs. -inline bool xis_true (double x) { return ! octave::math::isnan (x) && x != 0.0; } -inline bool xis_false (double x) { return x == 0.0; } -inline bool xis_true (float x) { return ! octave::math::isnan (x) && x != 0.0f; } -inline bool xis_false (float x) { return x == 0.0f; } +inline bool +xis_true (double x) +{ + return ! octave::math::isnan (x) && x != 0.0; +} + +inline bool +xis_false (double x) +{ + return x == 0.0; +} + +inline bool +xis_true (float x) +{ + return ! octave::math::isnan (x) && x != 0.0f; +} + +inline bool +xis_false (float x) +{ + return x == 0.0f; +} + // Ditto for complex. -inline bool xis_true (const Complex& x) { return ! octave::math::isnan (x) && x != 0.0; } -inline bool xis_false (const Complex& x) { return x == 0.0; } -inline bool xis_true (const FloatComplex& x) { return ! octave::math::isnan (x) && x != 0.0f; } -inline bool xis_false (const FloatComplex& x) { return x == 0.0f; } +inline bool +xis_true (const Complex& x) +{ + return ! octave::math::isnan (x) && x != 0.0; +} + +inline bool +xis_false (const Complex& x) +{ + return x == 0.0; +} + +inline bool +xis_true (const FloatComplex& x) +{ + return ! octave::math::isnan (x) && x != 0.0f; +} + +inline bool +xis_false (const FloatComplex& x) +{ + return x == 0.0f; +} #define OP_RED_SUM(ac, el) ac += el #define OP_RED_PROD(ac, el) ac *= el #define OP_RED_SUMSQ(ac, el) ac += el*el #define OP_RED_SUMSQC(ac, el) ac += cabsq (el) -inline void op_dble_prod (double& ac, float el) -{ ac *= el; } -inline void op_dble_prod (Complex& ac, const FloatComplex& el) -{ ac *= el; } // FIXME: guaranteed? +inline void +op_dble_prod (double& ac, float el) +{ + ac *= el; +} + +// FIXME: guaranteed? +inline void +op_dble_prod (Complex& ac, const FloatComplex& el) +{ + ac *= el; +} + template -inline void op_dble_prod (double& ac, const octave_int& el) -{ ac *= el.double_value (); } +inline void +op_dble_prod (double& ac, const octave_int& el) +{ + ac *= el.double_value (); +} -inline void op_dble_sum (double& ac, float el) -{ ac += el; } -inline void op_dble_sum (Complex& ac, const FloatComplex& el) -{ ac += el; } // FIXME: guaranteed? +inline void +op_dble_sum (double& ac, float el) +{ + ac += el; +} + +// FIXME: guaranteed? +inline void +op_dble_sum (Complex& ac, const FloatComplex& el) +{ + ac += el; +} + template -inline void op_dble_sum (double& ac, const octave_int& el) -{ ac += el.double_value (); } +inline void +op_dble_sum (double& ac, const octave_int& el) +{ + ac += el.double_value (); +} // The following two implement a simple short-circuiting. 
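
(Editor's note, not part of this changeset: an illustrative sketch of the short-circuiting idea behind the ANYC/ALLC reducers shown in the hunk below. The names sketch_any and sketch_all are hypothetical; the sketch assumes plain double input and a simplified zero test, whereas the real reducers are generated by OP_RED_FCN and use the NaN-aware xis_true / xis_false predicates defined above.)

  #include <cstddef>

  // "any" reduction: stop at the first element that tests true
  // (here, nonzero), mirroring OP_RED_ANYC's break.
  inline bool
  sketch_any (const double *v, std::size_t n)
  {
    bool ac = false;              // initial value, like ZERO in OP_RED_FCN
    for (std::size_t i = 0; i < n; i++)
      if (v[i] != 0.0)
        {
          ac = true;
          break;                  // short-circuit: result is decided
        }
    return ac;
  }

  // "all" reduction: stop at the first element that tests false
  // (here, zero), mirroring OP_RED_ALLC's break.
  inline bool
  sketch_all (const double *v, std::size_t n)
  {
    bool ac = true;
    for (std::size_t i = 0; i < n; i++)
      if (v[i] == 0.0)
        {
          ac = false;
          break;                  // short-circuit: result is decided
        }
    return ac;
  }

Because the macro versions are expanded through OP_RED_FCN, the same early-exit pattern is stamped out for every element type without repeating the loop by hand.
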
-#define OP_RED_ANYC(ac, el) if (xis_true (el)) { ac = true; break; } else continue -#define OP_RED_ALLC(ac, el) if (xis_false (el)) { ac = false; break; } else continue +#define OP_RED_ANYC(ac, el) \ + if (xis_true (el)) \ + { \ + ac = true; \ + break; \ + } \ + else \ + continue -#define OP_RED_FCN(F, TSRC, TRES, OP, ZERO) \ -template \ -inline TRES \ -F (const TSRC* v, octave_idx_type n) \ -{ \ - TRES ac = ZERO; \ - for (octave_idx_type i = 0; i < n; i++) \ - OP(ac, v[i]); \ - return ac; \ -} +#define OP_RED_ALLC(ac, el) \ + if (xis_false (el)) \ + { \ + ac = false; \ + break; \ + } \ + else \ + continue -#define PROMOTE_DOUBLE(T) typename subst_template_param::type +#define OP_RED_FCN(F, TSRC, TRES, OP, ZERO) \ + template \ + inline TRES \ + F (const TSRC* v, octave_idx_type n) \ + { \ + TRES ac = ZERO; \ + for (octave_idx_type i = 0; i < n; i++) \ + OP(ac, v[i]); \ + return ac; \ + } + +#define PROMOTE_DOUBLE(T) \ + typename subst_template_param::type OP_RED_FCN (mx_inline_sum, T, T, OP_RED_SUM, 0) OP_RED_FCN (mx_inline_dsum, T, PROMOTE_DOUBLE(T), op_dble_sum, 0.0) @@ -527,20 +720,20 @@ OP_RED_FCN (mx_inline_any, T, bool, OP_RED_ANYC, false) OP_RED_FCN (mx_inline_all, T, bool, OP_RED_ALLC, true) -#define OP_RED_FCN2(F, TSRC, TRES, OP, ZERO) \ -template \ -inline void \ -F (const TSRC* v, TRES *r, octave_idx_type m, octave_idx_type n) \ -{ \ - for (octave_idx_type i = 0; i < m; i++) \ - r[i] = ZERO; \ - for (octave_idx_type j = 0; j < n; j++) \ - { \ - for (octave_idx_type i = 0; i < m; i++) \ - OP(r[i], v[i]); \ - v += m; \ - } \ -} +#define OP_RED_FCN2(F, TSRC, TRES, OP, ZERO) \ + template \ + inline void \ + F (const TSRC* v, TRES *r, octave_idx_type m, octave_idx_type n) \ + { \ + for (octave_idx_type i = 0; i < m; i++) \ + r[i] = ZERO; \ + for (octave_idx_type j = 0; j < n; j++) \ + { \ + for (octave_idx_type i = 0; i < m; i++) \ + OP(r[i], v[i]); \ + v += m; \ + } \ + } OP_RED_FCN2 (mx_inline_sum, T, T, OP_RED_SUM, 0) OP_RED_FCN2 (mx_inline_dsum, T, PROMOTE_DOUBLE(T), op_dble_sum, 0.0) @@ -561,61 +754,61 @@ // algorithm will achieve both, at the cost of a temporary octave_idx_type // array. -#define OP_ROW_SHORT_CIRCUIT(F, PRED, ZERO) \ -template \ -inline void \ -F (const T* v, bool *r, octave_idx_type m, octave_idx_type n) \ -{ \ - if (n <= 8) \ - return F ## _r (v, r, m, n); \ - \ - /* FIXME: it may be sub-optimal to allocate the buffer here. */ \ - OCTAVE_LOCAL_BUFFER (octave_idx_type, iact, m); \ - for (octave_idx_type i = 0; i < m; i++) iact[i] = i; \ - octave_idx_type nact = m; \ - for (octave_idx_type j = 0; j < n; j++) \ - { \ - octave_idx_type k = 0; \ - for (octave_idx_type i = 0; i < nact; i++) \ - { \ - octave_idx_type ia = iact[i]; \ - if (! PRED (v[ia])) \ - iact[k++] = ia; \ - } \ - nact = k; \ - v += m; \ - } \ - for (octave_idx_type i = 0; i < m; i++) r[i] = ! ZERO; \ - for (octave_idx_type i = 0; i < nact; i++) r[iact[i]] = ZERO; \ -} +#define OP_ROW_SHORT_CIRCUIT(F, PRED, ZERO) \ + template \ + inline void \ + F (const T* v, bool *r, octave_idx_type m, octave_idx_type n) \ + { \ + if (n <= 8) \ + return F ## _r (v, r, m, n); \ + \ + /* FIXME: it may be sub-optimal to allocate the buffer here. */ \ + OCTAVE_LOCAL_BUFFER (octave_idx_type, iact, m); \ + for (octave_idx_type i = 0; i < m; i++) iact[i] = i; \ + octave_idx_type nact = m; \ + for (octave_idx_type j = 0; j < n; j++) \ + { \ + octave_idx_type k = 0; \ + for (octave_idx_type i = 0; i < nact; i++) \ + { \ + octave_idx_type ia = iact[i]; \ + if (! 
PRED (v[ia])) \ + iact[k++] = ia; \ + } \ + nact = k; \ + v += m; \ + } \ + for (octave_idx_type i = 0; i < m; i++) r[i] = ! ZERO; \ + for (octave_idx_type i = 0; i < nact; i++) r[iact[i]] = ZERO; \ + } OP_ROW_SHORT_CIRCUIT (mx_inline_any, xis_true, false) OP_ROW_SHORT_CIRCUIT (mx_inline_all, xis_false, true) -#define OP_RED_FCNN(F, TSRC, TRES) \ -template \ -inline void \ -F (const TSRC *v, TRES *r, octave_idx_type l, \ - octave_idx_type n, octave_idx_type u) \ -{ \ - if (l == 1) \ - { \ - for (octave_idx_type i = 0; i < u; i++) \ - { \ - r[i] = F (v, n); \ - v += n; \ - } \ - } \ - else \ - { \ - for (octave_idx_type i = 0; i < u; i++) \ - { \ - F (v, r, l, n); \ - v += l*n; \ - r += l; \ - } \ - } \ -} +#define OP_RED_FCNN(F, TSRC, TRES) \ + template \ + inline void \ + F (const TSRC *v, TRES *r, octave_idx_type l, \ + octave_idx_type n, octave_idx_type u) \ + { \ + if (l == 1) \ + { \ + for (octave_idx_type i = 0; i < u; i++) \ + { \ + r[i] = F (v, n); \ + v += n; \ + } \ + } \ + else \ + { \ + for (octave_idx_type i = 0; i < u; i++) \ + { \ + F (v, r, l, n); \ + v += l*n; \ + r += l; \ + } \ + } \ + } OP_RED_FCNN (mx_inline_sum, T, T) OP_RED_FCNN (mx_inline_dsum, T, PROMOTE_DOUBLE(T)) @@ -627,109 +820,122 @@ OP_RED_FCNN (mx_inline_any, T, bool) OP_RED_FCNN (mx_inline_all, T, bool) -#define OP_CUM_FCN(F, TSRC, TRES, OP) \ -template \ -inline void \ -F (const TSRC *v, TRES *r, octave_idx_type n) \ -{ \ - if (n) \ - { \ - TRES t = r[0] = v[0]; \ - for (octave_idx_type i = 1; i < n; i++) \ - r[i] = t = t OP v[i]; \ - } \ -} +#define OP_CUM_FCN(F, TSRC, TRES, OP) \ + template \ + inline void \ + F (const TSRC *v, TRES *r, octave_idx_type n) \ + { \ + if (n) \ + { \ + TRES t = r[0] = v[0]; \ + for (octave_idx_type i = 1; i < n; i++) \ + r[i] = t = t OP v[i]; \ + } \ + } OP_CUM_FCN (mx_inline_cumsum, T, T, +) OP_CUM_FCN (mx_inline_cumprod, T, T, *) OP_CUM_FCN (mx_inline_cumcount, bool, T, +) -#define OP_CUM_FCN2(F, TSRC, TRES, OP) \ -template \ -inline void \ -F (const TSRC *v, TRES *r, octave_idx_type m, octave_idx_type n) \ -{ \ - if (n) \ - { \ - for (octave_idx_type i = 0; i < m; i++) \ - r[i] = v[i]; \ - const T *r0 = r; \ - for (octave_idx_type j = 1; j < n; j++) \ - { \ - r += m; v += m; \ - for (octave_idx_type i = 0; i < m; i++) \ - r[i] = r0[i] OP v[i]; \ - r0 += m; \ - } \ - } \ -} +#define OP_CUM_FCN2(F, TSRC, TRES, OP) \ + template \ + inline void \ + F (const TSRC *v, TRES *r, octave_idx_type m, octave_idx_type n) \ + { \ + if (n) \ + { \ + for (octave_idx_type i = 0; i < m; i++) \ + r[i] = v[i]; \ + const T *r0 = r; \ + for (octave_idx_type j = 1; j < n; j++) \ + { \ + r += m; v += m; \ + for (octave_idx_type i = 0; i < m; i++) \ + r[i] = r0[i] OP v[i]; \ + r0 += m; \ + } \ + } \ + } OP_CUM_FCN2 (mx_inline_cumsum, T, T, +) OP_CUM_FCN2 (mx_inline_cumprod, T, T, *) OP_CUM_FCN2 (mx_inline_cumcount, bool, T, +) -#define OP_CUM_FCNN(F, TSRC, TRES) \ -template \ -inline void \ -F (const TSRC *v, TRES *r, octave_idx_type l, \ - octave_idx_type n, octave_idx_type u) \ -{ \ - if (l == 1) \ - { \ - for (octave_idx_type i = 0; i < u; i++) \ - { \ - F (v, r, n); \ - v += n; r += n; \ - } \ - } \ - else \ - { \ - for (octave_idx_type i = 0; i < u; i++) \ - { \ - F (v, r, l, n); \ - v += l*n; \ - r += l*n; \ - } \ - } \ -} +#define OP_CUM_FCNN(F, TSRC, TRES) \ + template \ + inline void \ + F (const TSRC *v, TRES *r, octave_idx_type l, \ + octave_idx_type n, octave_idx_type u) \ + { \ + if (l == 1) \ + { \ + for (octave_idx_type i = 0; i < u; i++) \ + { \ + F (v, r, n); \ + v += n; \ + r += 
n; \ + } \ + } \ + else \ + { \ + for (octave_idx_type i = 0; i < u; i++) \ + { \ + F (v, r, l, n); \ + v += l*n; \ + r += l*n; \ + } \ + } \ + } OP_CUM_FCNN (mx_inline_cumsum, T, T) OP_CUM_FCNN (mx_inline_cumprod, T, T) OP_CUM_FCNN (mx_inline_cumcount, bool, T) -#define OP_MINMAX_FCN(F, OP) \ -template \ -void F (const T *v, T *r, octave_idx_type n) \ -{ \ - if (! n) return; \ - T tmp = v[0]; \ - octave_idx_type i = 1; \ - if (octave::math::isnan (tmp)) \ - { \ - for (; i < n && octave::math::isnan (v[i]); i++) ; \ - if (i < n) tmp = v[i]; \ - } \ - for (; i < n; i++) \ - if (v[i] OP tmp) tmp = v[i]; \ - *r = tmp; \ -} \ -template \ -void F (const T *v, T *r, octave_idx_type *ri, octave_idx_type n) \ -{ \ - if (! n) return; \ - T tmp = v[0]; \ - octave_idx_type tmpi = 0; \ - octave_idx_type i = 1; \ - if (octave::math::isnan (tmp)) \ - { \ - for (; i < n && octave::math::isnan (v[i]); i++) ; \ - if (i < n) { tmp = v[i]; tmpi = i; } \ - } \ - for (; i < n; i++) \ - if (v[i] OP tmp) { tmp = v[i]; tmpi = i; }\ - *r = tmp; \ - *ri = tmpi; \ -} +#define OP_MINMAX_FCN(F, OP) \ + template \ + void F (const T *v, T *r, octave_idx_type n) \ + { \ + if (! n) \ + return; \ + T tmp = v[0]; \ + octave_idx_type i = 1; \ + if (octave::math::isnan (tmp)) \ + { \ + for (; i < n && octave::math::isnan (v[i]); i++) ; \ + if (i < n) \ + tmp = v[i]; \ + } \ + for (; i < n; i++) \ + if (v[i] OP tmp) \ + tmp = v[i]; \ + *r = tmp; \ + } \ + template \ + void F (const T *v, T *r, octave_idx_type *ri, octave_idx_type n) \ + { \ + if (! n) \ + return; \ + T tmp = v[0]; \ + octave_idx_type tmpi = 0; \ + octave_idx_type i = 1; \ + if (octave::math::isnan (tmp)) \ + { \ + for (; i < n && octave::math::isnan (v[i]); i++) ; \ + if (i < n) \ + { \ + tmp = v[i]; \ + tmpi = i; \ + } \ + } \ + for (; i < n; i++) \ + if (v[i] OP tmp) \ + { \ + tmp = v[i]; \ + tmpi = i; \ + } \ + *r = tmp; \ + *ri = tmpi; \ + } OP_MINMAX_FCN (mx_inline_min, <) OP_MINMAX_FCN (mx_inline_max, >) @@ -738,173 +944,220 @@ // for NaNs until we detect that no row will yield a NaN, in which case we // proceed to a faster code. -#define OP_MINMAX_FCN2(F, OP) \ -template \ -inline void \ -F (const T *v, T *r, octave_idx_type m, octave_idx_type n) \ -{ \ - if (! n) return; \ - bool nan = false; \ - octave_idx_type j = 0; \ - for (octave_idx_type i = 0; i < m; i++) \ - { \ - r[i] = v[i]; \ - if (octave::math::isnan (v[i])) nan = true; \ - } \ - j++; v += m; \ - while (nan && j < n) \ - { \ - nan = false; \ - for (octave_idx_type i = 0; i < m; i++) \ - { \ - if (octave::math::isnan (v[i])) \ - nan = true; \ - else if (octave::math::isnan (r[i]) || v[i] OP r[i]) \ - r[i] = v[i]; \ - } \ - j++; v += m; \ - } \ - while (j < n) \ - { \ - for (octave_idx_type i = 0; i < m; i++) \ - if (v[i] OP r[i]) r[i] = v[i]; \ - j++; v += m; \ - } \ -} \ -template \ -inline void \ -F (const T *v, T *r, octave_idx_type *ri, \ - octave_idx_type m, octave_idx_type n) \ -{ \ - if (! 
n) return; \ - bool nan = false; \ - octave_idx_type j = 0; \ - for (octave_idx_type i = 0; i < m; i++) \ - { \ - r[i] = v[i]; ri[i] = j; \ - if (octave::math::isnan (v[i])) nan = true; \ - } \ - j++; v += m; \ - while (nan && j < n) \ - { \ - nan = false; \ - for (octave_idx_type i = 0; i < m; i++) \ - { \ - if (octave::math::isnan (v[i])) \ - nan = true; \ - else if (octave::math::isnan (r[i]) || v[i] OP r[i]) \ - { r[i] = v[i]; ri[i] = j; } \ - } \ - j++; v += m; \ - } \ - while (j < n) \ - { \ - for (octave_idx_type i = 0; i < m; i++) \ - if (v[i] OP r[i]) \ - { r[i] = v[i]; ri[i] = j; } \ - j++; v += m; \ - } \ -} +#define OP_MINMAX_FCN2(F, OP) \ + template \ + inline void \ + F (const T *v, T *r, octave_idx_type m, octave_idx_type n) \ + { \ + if (! n) \ + return; \ + bool nan = false; \ + octave_idx_type j = 0; \ + for (octave_idx_type i = 0; i < m; i++) \ + { \ + r[i] = v[i]; \ + if (octave::math::isnan (v[i])) \ + nan = true; \ + } \ + j++; \ + v += m; \ + while (nan && j < n) \ + { \ + nan = false; \ + for (octave_idx_type i = 0; i < m; i++) \ + { \ + if (octave::math::isnan (v[i])) \ + nan = true; \ + else if (octave::math::isnan (r[i]) || v[i] OP r[i]) \ + r[i] = v[i]; \ + } \ + j++; \ + v += m; \ + } \ + while (j < n) \ + { \ + for (octave_idx_type i = 0; i < m; i++) \ + if (v[i] OP r[i]) \ + r[i] = v[i]; \ + j++; \ + v += m; \ + } \ + } \ + template \ + inline void \ + F (const T *v, T *r, octave_idx_type *ri, \ + octave_idx_type m, octave_idx_type n) \ + { \ + if (! n) \ + return; \ + bool nan = false; \ + octave_idx_type j = 0; \ + for (octave_idx_type i = 0; i < m; i++) \ + { \ + r[i] = v[i]; \ + ri[i] = j; \ + if (octave::math::isnan (v[i])) \ + nan = true; \ + } \ + j++; \ + v += m; \ + while (nan && j < n) \ + { \ + nan = false; \ + for (octave_idx_type i = 0; i < m; i++) \ + { \ + if (octave::math::isnan (v[i])) \ + nan = true; \ + else if (octave::math::isnan (r[i]) || v[i] OP r[i]) \ + { \ + r[i] = v[i]; \ + ri[i] = j; \ + } \ + } \ + j++; \ + v += m; \ + } \ + while (j < n) \ + { \ + for (octave_idx_type i = 0; i < m; i++) \ + if (v[i] OP r[i]) \ + { \ + r[i] = v[i]; \ + ri[i] = j; \ + } \ + j++; \ + v += m; \ + } \ + } OP_MINMAX_FCN2 (mx_inline_min, <) OP_MINMAX_FCN2 (mx_inline_max, >) -#define OP_MINMAX_FCNN(F) \ -template \ -inline void \ -F (const T *v, T *r, octave_idx_type l, \ - octave_idx_type n, octave_idx_type u) \ -{ \ - if (! n) return; \ - if (l == 1) \ - { \ - for (octave_idx_type i = 0; i < u; i++) \ - { \ - F (v, r, n); \ - v += n; r++; \ - } \ - } \ - else \ - { \ - for (octave_idx_type i = 0; i < u; i++) \ - { \ - F (v, r, l, n); \ - v += l*n; \ - r += l; \ - } \ - } \ -} \ -template \ -inline void \ -F (const T *v, T *r, octave_idx_type *ri, \ - octave_idx_type l, octave_idx_type n, octave_idx_type u) \ -{ \ - if (! n) return; \ - if (l == 1) \ - { \ - for (octave_idx_type i = 0; i < u; i++) \ - { \ - F (v, r, ri, n); \ - v += n; r++; ri++; \ - } \ - } \ - else \ - { \ - for (octave_idx_type i = 0; i < u; i++) \ - { \ - F (v, r, ri, l, n); \ - v += l*n; \ - r += l; ri += l; \ - } \ - } \ -} +#define OP_MINMAX_FCNN(F) \ + template \ + inline void \ + F (const T *v, T *r, octave_idx_type l, \ + octave_idx_type n, octave_idx_type u) \ + { \ + if (! 
n) \ + return; \ + if (l == 1) \ + { \ + for (octave_idx_type i = 0; i < u; i++) \ + { \ + F (v, r, n); \ + v += n; \ + r++; \ + } \ + } \ + else \ + { \ + for (octave_idx_type i = 0; i < u; i++) \ + { \ + F (v, r, l, n); \ + v += l*n; \ + r += l; \ + } \ + } \ + } \ + template \ + inline void \ + F (const T *v, T *r, octave_idx_type *ri, \ + octave_idx_type l, octave_idx_type n, octave_idx_type u) \ + { \ + if (! n) return; \ + if (l == 1) \ + { \ + for (octave_idx_type i = 0; i < u; i++) \ + { \ + F (v, r, ri, n); \ + v += n; \ + r++; \ + ri++; \ + } \ + } \ + else \ + { \ + for (octave_idx_type i = 0; i < u; i++) \ + { \ + F (v, r, ri, l, n); \ + v += l*n; \ + r += l; \ + ri += l; \ + } \ + } \ + } OP_MINMAX_FCNN (mx_inline_min) OP_MINMAX_FCNN (mx_inline_max) -#define OP_CUMMINMAX_FCN(F, OP) \ -template \ -void F (const T *v, T *r, octave_idx_type n) \ -{ \ - if (! n) return; \ - T tmp = v[0]; \ - octave_idx_type i = 1; \ - octave_idx_type j = 0; \ - if (octave::math::isnan (tmp)) \ - { \ - for (; i < n && octave::math::isnan (v[i]); i++) ; \ - for (; j < i; j++) r[j] = tmp; \ - if (i < n) tmp = v[i]; \ - } \ - for (; i < n; i++) \ - if (v[i] OP tmp) \ - { \ - for (; j < i; j++) r[j] = tmp; \ - tmp = v[i]; \ - } \ - for (; j < i; j++) r[j] = tmp; \ -} \ -template \ -void F (const T *v, T *r, octave_idx_type *ri, octave_idx_type n) \ -{ \ - if (! n) return; \ - T tmp = v[0]; octave_idx_type tmpi = 0; \ - octave_idx_type i = 1; \ - octave_idx_type j = 0; \ - if (octave::math::isnan (tmp)) \ - { \ - for (; i < n && octave::math::isnan (v[i]); i++) ; \ - for (; j < i; j++) { r[j] = tmp; ri[j] = tmpi; } \ - if (i < n) { tmp = v[i]; tmpi = i; } \ - } \ - for (; i < n; i++) \ - if (v[i] OP tmp) \ - { \ - for (; j < i; j++) { r[j] = tmp; ri[j] = tmpi; } \ - tmp = v[i]; tmpi = i; \ - } \ - for (; j < i; j++) { r[j] = tmp; ri[j] = tmpi; } \ -} +#define OP_CUMMINMAX_FCN(F, OP) \ + template \ + void F (const T *v, T *r, octave_idx_type n) \ + { \ + if (! n) \ + return; \ + T tmp = v[0]; \ + octave_idx_type i = 1; \ + octave_idx_type j = 0; \ + if (octave::math::isnan (tmp)) \ + { \ + for (; i < n && octave::math::isnan (v[i]); i++) ; \ + for (; j < i; j++) \ + r[j] = tmp; \ + if (i < n) \ + tmp = v[i]; \ + } \ + for (; i < n; i++) \ + if (v[i] OP tmp) \ + { \ + for (; j < i; j++) \ + r[j] = tmp; \ + tmp = v[i]; \ + } \ + for (; j < i; j++) \ + r[j] = tmp; \ + } \ + template \ + void F (const T *v, T *r, octave_idx_type *ri, octave_idx_type n) \ + { \ + if (! n) \ + return; \ + T tmp = v[0]; \ + octave_idx_type tmpi = 0; \ + octave_idx_type i = 1; \ + octave_idx_type j = 0; \ + if (octave::math::isnan (tmp)) \ + { \ + for (; i < n && octave::math::isnan (v[i]); i++) ; \ + for (; j < i; j++) \ + { \ + r[j] = tmp; \ + ri[j] = tmpi; \ + } \ + if (i < n) \ + { \ + tmp = v[i]; \ + tmpi = i; \ + } \ + } \ + for (; i < n; i++) \ + if (v[i] OP tmp) \ + { \ + for (; j < i; j++) \ + { \ + r[j] = tmp; \ + ri[j] = tmpi; \ + } \ + tmp = v[i]; \ + tmpi = i; \ + } \ + for (; j < i; j++) \ + { \ + r[j] = tmp; \ + ri[j] = tmpi; \ + } \ + } OP_CUMMINMAX_FCN (mx_inline_cummin, <) OP_CUMMINMAX_FCN (mx_inline_cummax, >) @@ -913,137 +1166,191 @@ // for NaNs until we detect that no row will yield a NaN, in which case we // proceed to a faster code. -#define OP_CUMMINMAX_FCN2(F, OP) \ -template \ -inline void \ -F (const T *v, T *r, octave_idx_type m, octave_idx_type n) \ -{ \ - if (! 
n) return; \ - bool nan = false; \ - const T *r0; \ - octave_idx_type j = 0; \ - for (octave_idx_type i = 0; i < m; i++) \ - { \ - r[i] = v[i]; \ - if (octave::math::isnan (v[i])) nan = true; \ - } \ - j++; v += m; r0 = r; r += m; \ - while (nan && j < n) \ - { \ - nan = false; \ - for (octave_idx_type i = 0; i < m; i++) \ - { \ - if (octave::math::isnan (v[i])) \ - { r[i] = r0[i]; nan = true; } \ - else if (octave::math::isnan (r0[i]) || v[i] OP r0[i]) \ - r[i] = v[i]; \ - else \ - r[i] = r0[i]; \ - } \ - j++; v += m; r0 = r; r += m; \ - } \ - while (j < n) \ - { \ - for (octave_idx_type i = 0; i < m; i++) \ - if (v[i] OP r0[i]) \ - r[i] = v[i]; \ - else \ - r[i] = r0[i]; \ - j++; v += m; r0 = r; r += m; \ - } \ -} \ -template \ -inline void \ -F (const T *v, T *r, octave_idx_type *ri, \ - octave_idx_type m, octave_idx_type n) \ -{ \ - if (! n) return; \ - bool nan = false; \ - const T *r0; const octave_idx_type *r0i; \ - octave_idx_type j = 0; \ - for (octave_idx_type i = 0; i < m; i++) \ - { \ - r[i] = v[i]; ri[i] = 0; \ - if (octave::math::isnan (v[i])) nan = true; \ - } \ - j++; v += m; r0 = r; r += m; r0i = ri; ri += m; \ - while (nan && j < n) \ - { \ - nan = false; \ - for (octave_idx_type i = 0; i < m; i++) \ - { \ - if (octave::math::isnan (v[i])) \ - { r[i] = r0[i]; ri[i] = r0i[i]; nan = true; } \ - else if (octave::math::isnan (r0[i]) || v[i] OP r0[i]) \ - { r[i] = v[i]; ri[i] = j; }\ - else \ - { r[i] = r0[i]; ri[i] = r0i[i]; }\ - } \ - j++; v += m; r0 = r; r += m; r0i = ri; ri += m; \ - } \ - while (j < n) \ - { \ - for (octave_idx_type i = 0; i < m; i++) \ - if (v[i] OP r0[i]) \ - { r[i] = v[i]; ri[i] = j; } \ - else \ - { r[i] = r0[i]; ri[i] = r0i[i]; } \ - j++; v += m; r0 = r; r += m; r0i = ri; ri += m; \ - } \ -} +#define OP_CUMMINMAX_FCN2(F, OP) \ + template \ + inline void \ + F (const T *v, T *r, octave_idx_type m, octave_idx_type n) \ + { \ + if (! n) \ + return; \ + bool nan = false; \ + const T *r0; \ + octave_idx_type j = 0; \ + for (octave_idx_type i = 0; i < m; i++) \ + { \ + r[i] = v[i]; \ + if (octave::math::isnan (v[i])) \ + nan = true; \ + } \ + j++; \ + v += m; \ + r0 = r; \ + r += m; \ + while (nan && j < n) \ + { \ + nan = false; \ + for (octave_idx_type i = 0; i < m; i++) \ + { \ + if (octave::math::isnan (v[i])) \ + { \ + r[i] = r0[i]; \ + nan = true; \ + } \ + else if (octave::math::isnan (r0[i]) || v[i] OP r0[i]) \ + r[i] = v[i]; \ + else \ + r[i] = r0[i]; \ + } \ + j++; \ + v += m; \ + r0 = r; \ + r += m; \ + } \ + while (j < n) \ + { \ + for (octave_idx_type i = 0; i < m; i++) \ + if (v[i] OP r0[i]) \ + r[i] = v[i]; \ + else \ + r[i] = r0[i]; \ + j++; \ + v += m; \ + r0 = r; \ + r += m; \ + } \ + } \ + template \ + inline void \ + F (const T *v, T *r, octave_idx_type *ri, \ + octave_idx_type m, octave_idx_type n) \ + { \ + if (! 
n) \ + return; \ + bool nan = false; \ + const T *r0; \ + const octave_idx_type *r0i; \ + octave_idx_type j = 0; \ + for (octave_idx_type i = 0; i < m; i++) \ + { \ + r[i] = v[i]; ri[i] = 0; \ + if (octave::math::isnan (v[i])) \ + nan = true; \ + } \ + j++; \ + v += m; \ + r0 = r; \ + r += m; \ + r0i = ri; \ + ri += m; \ + while (nan && j < n) \ + { \ + nan = false; \ + for (octave_idx_type i = 0; i < m; i++) \ + { \ + if (octave::math::isnan (v[i])) \ + { \ + r[i] = r0[i]; \ + ri[i] = r0i[i]; \ + nan = true; \ + } \ + else if (octave::math::isnan (r0[i]) || v[i] OP r0[i]) \ + { \ + r[i] = v[i]; \ + ri[i] = j; \ + } \ + else \ + { \ + r[i] = r0[i]; \ + ri[i] = r0i[i]; \ + } \ + } \ + j++; \ + v += m; \ + r0 = r; \ + r += m; \ + r0i = ri; \ + ri += m; \ + } \ + while (j < n) \ + { \ + for (octave_idx_type i = 0; i < m; i++) \ + if (v[i] OP r0[i]) \ + { \ + r[i] = v[i]; \ + ri[i] = j; \ + } \ + else \ + { \ + r[i] = r0[i]; \ + ri[i] = r0i[i]; \ + } \ + j++; \ + v += m; \ + r0 = r; \ + r += m; \ + r0i = ri; \ + ri += m; \ + } \ + } OP_CUMMINMAX_FCN2 (mx_inline_cummin, <) OP_CUMMINMAX_FCN2 (mx_inline_cummax, >) -#define OP_CUMMINMAX_FCNN(F) \ -template \ -inline void \ -F (const T *v, T *r, octave_idx_type l, \ - octave_idx_type n, octave_idx_type u) \ -{ \ - if (! n) return; \ - if (l == 1) \ - { \ - for (octave_idx_type i = 0; i < u; i++) \ - { \ - F (v, r, n); \ - v += n; r += n; \ - } \ - } \ - else \ - { \ - for (octave_idx_type i = 0; i < u; i++) \ - { \ - F (v, r, l, n); \ - v += l*n; \ - r += l*n; \ - } \ - } \ -} \ -template \ -inline void \ -F (const T *v, T *r, octave_idx_type *ri, \ - octave_idx_type l, octave_idx_type n, octave_idx_type u) \ -{ \ - if (! n) return; \ - if (l == 1) \ - { \ - for (octave_idx_type i = 0; i < u; i++) \ - { \ - F (v, r, ri, n); \ - v += n; r += n; ri += n; \ - } \ - } \ - else \ - { \ - for (octave_idx_type i = 0; i < u; i++) \ - { \ - F (v, r, ri, l, n); \ - v += l*n; \ - r += l*n; ri += l*n; \ - } \ - } \ -} +#define OP_CUMMINMAX_FCNN(F) \ + template \ + inline void \ + F (const T *v, T *r, octave_idx_type l, \ + octave_idx_type n, octave_idx_type u) \ + { \ + if (! n) \ + return; \ + if (l == 1) \ + { \ + for (octave_idx_type i = 0; i < u; i++) \ + { \ + F (v, r, n); \ + v += n; \ + r += n; \ + } \ + } \ + else \ + { \ + for (octave_idx_type i = 0; i < u; i++) \ + { \ + F (v, r, l, n); \ + v += l*n; \ + r += l*n; \ + } \ + } \ + } \ + template \ + inline void \ + F (const T *v, T *r, octave_idx_type *ri, \ + octave_idx_type l, octave_idx_type n, octave_idx_type u) \ + { \ + if (! 
n) \ + return; \ + if (l == 1) \ + { \ + for (octave_idx_type i = 0; i < u; i++) \ + { \ + F (v, r, ri, n); \ + v += n; \ + r += n; \ + ri += n; \ + } \ + } \ + else \ + { \ + for (octave_idx_type i = 0; i < u; i++) \ + { \ + F (v, r, ri, l, n); \ + v += l*n; \ + r += l*n; \ + ri += l*n; \ + } \ + } \ + } OP_CUMMINMAX_FCNN (mx_inline_cummin) OP_CUMMINMAX_FCNN (mx_inline_cummax) @@ -1310,8 +1617,8 @@ inline Array do_mx_diff_op (const Array& src, int dim, octave_idx_type order, void (*mx_diff_op) (const R *, R *, - octave_idx_type, octave_idx_type, octave_idx_type, - octave_idx_type)) + octave_idx_type, octave_idx_type, + octave_idx_type, octave_idx_type)) { octave_idx_type l, n, u; if (order <= 0) diff -r dd992fd74fce -r e43d83253e28 liboctave/operators/mx-op-decl.h --- a/liboctave/operators/mx-op-decl.h Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/operators/mx-op-decl.h Mon Aug 01 12:40:18 2016 -0400 @@ -27,278 +27,278 @@ #include "octave-config.h" -#define BIN_OP_DECL(R, OP, X, Y, API) \ +#define BIN_OP_DECL(R, OP, X, Y, API) \ extern API R OP (const X&, const Y&) class boolMatrix; class boolNDArray; -#define CMP_OP_DECL(OP, X, Y, API) \ +#define CMP_OP_DECL(OP, X, Y, API) \ extern API boolMatrix OP (const X&, const Y&) -#define NDCMP_OP_DECL(OP, X, Y, API) \ +#define NDCMP_OP_DECL(OP, X, Y, API) \ extern API boolNDArray OP (const X&, const Y&) -#define BOOL_OP_DECL(OP, X, Y, API) \ +#define BOOL_OP_DECL(OP, X, Y, API) \ extern API boolMatrix OP (const X&, const Y&) -#define NDBOOL_OP_DECL(OP, X, Y, API) \ +#define NDBOOL_OP_DECL(OP, X, Y, API) \ extern API boolNDArray OP (const X&, const Y&) // vector by scalar operations. -#define VS_BIN_OP_DECLS(R, V, S, API) \ - BIN_OP_DECL (R, operator +, V, S, API); \ - BIN_OP_DECL (R, operator -, V, S, API); \ - BIN_OP_DECL (R, operator *, V, S, API); \ +#define VS_BIN_OP_DECLS(R, V, S, API) \ + BIN_OP_DECL (R, operator +, V, S, API); \ + BIN_OP_DECL (R, operator -, V, S, API); \ + BIN_OP_DECL (R, operator *, V, S, API); \ BIN_OP_DECL (R, operator /, V, S, API); -#define VS_OP_DECLS(R, V, S, API) \ +#define VS_OP_DECLS(R, V, S, API) \ VS_BIN_OP_DECLS(R, V, S, API) // scalar by vector by operations. -#define SV_BIN_OP_DECLS(R, S, V, API) \ - BIN_OP_DECL (R, operator +, S, V, API); \ - BIN_OP_DECL (R, operator -, S, V, API); \ - BIN_OP_DECL (R, operator *, S, V, API); \ +#define SV_BIN_OP_DECLS(R, S, V, API) \ + BIN_OP_DECL (R, operator +, S, V, API); \ + BIN_OP_DECL (R, operator -, S, V, API); \ + BIN_OP_DECL (R, operator *, S, V, API); \ BIN_OP_DECL (R, operator /, S, V, API); -#define SV_OP_DECLS(R, S, V, API) \ +#define SV_OP_DECLS(R, S, V, API) \ SV_BIN_OP_DECLS(R, S, V, API) // vector by vector operations. -#define VV_BIN_OP_DECLS(R, V1, V2, API) \ - BIN_OP_DECL (R, operator +, V1, V2, API); \ - BIN_OP_DECL (R, operator -, V1, V2, API); \ - BIN_OP_DECL (R, product, V1, V2, API); \ +#define VV_BIN_OP_DECLS(R, V1, V2, API) \ + BIN_OP_DECL (R, operator +, V1, V2, API); \ + BIN_OP_DECL (R, operator -, V1, V2, API); \ + BIN_OP_DECL (R, product, V1, V2, API); \ BIN_OP_DECL (R, quotient, V1, V2, API); -#define VV_OP_DECLS(R, V1, V2, API) \ +#define VV_OP_DECLS(R, V1, V2, API) \ VV_BIN_OP_DECLS(R, V1, V2, API) // matrix by scalar operations. 
-#define MS_BIN_OP_DECLS(R, M, S, API) \ - BIN_OP_DECL (R, operator +, M, S, API); \ - BIN_OP_DECL (R, operator -, M, S, API); \ - BIN_OP_DECL (R, operator *, M, S, API); \ +#define MS_BIN_OP_DECLS(R, M, S, API) \ + BIN_OP_DECL (R, operator +, M, S, API); \ + BIN_OP_DECL (R, operator -, M, S, API); \ + BIN_OP_DECL (R, operator *, M, S, API); \ BIN_OP_DECL (R, operator /, M, S, API); -#define MS_CMP_OP_DECLS(M, S, API) \ - CMP_OP_DECL (mx_el_lt, M, S, API); \ - CMP_OP_DECL (mx_el_le, M, S, API); \ - CMP_OP_DECL (mx_el_ge, M, S, API); \ - CMP_OP_DECL (mx_el_gt, M, S, API); \ - CMP_OP_DECL (mx_el_eq, M, S, API); \ +#define MS_CMP_OP_DECLS(M, S, API) \ + CMP_OP_DECL (mx_el_lt, M, S, API); \ + CMP_OP_DECL (mx_el_le, M, S, API); \ + CMP_OP_DECL (mx_el_ge, M, S, API); \ + CMP_OP_DECL (mx_el_gt, M, S, API); \ + CMP_OP_DECL (mx_el_eq, M, S, API); \ CMP_OP_DECL (mx_el_ne, M, S, API); -#define MS_BOOL_OP_DECLS(M, S, API) \ - BOOL_OP_DECL (mx_el_and, M, S, API); \ - BOOL_OP_DECL (mx_el_or, M, S, API); \ +#define MS_BOOL_OP_DECLS(M, S, API) \ + BOOL_OP_DECL (mx_el_and, M, S, API); \ + BOOL_OP_DECL (mx_el_or, M, S, API); \ -#define MS_OP_DECLS(R, M, S, API) \ - MS_BIN_OP_DECLS (R, M, S, API) \ - MS_CMP_OP_DECLS (M, S, API) \ - MS_BOOL_OP_DECLS (M, S, API) \ +#define MS_OP_DECLS(R, M, S, API) \ + MS_BIN_OP_DECLS (R, M, S, API) \ + MS_CMP_OP_DECLS (M, S, API) \ + MS_BOOL_OP_DECLS (M, S, API) \ // scalar by matrix operations. -#define SM_BIN_OP_DECLS(R, S, M, API) \ - BIN_OP_DECL (R, operator +, S, M, API); \ - BIN_OP_DECL (R, operator -, S, M, API); \ - BIN_OP_DECL (R, operator *, S, M, API); \ +#define SM_BIN_OP_DECLS(R, S, M, API) \ + BIN_OP_DECL (R, operator +, S, M, API); \ + BIN_OP_DECL (R, operator -, S, M, API); \ + BIN_OP_DECL (R, operator *, S, M, API); \ BIN_OP_DECL (R, operator /, S, M, API); -#define SM_CMP_OP_DECLS(S, M, API) \ - CMP_OP_DECL (mx_el_lt, S, M, API); \ - CMP_OP_DECL (mx_el_le, S, M, API); \ - CMP_OP_DECL (mx_el_ge, S, M, API); \ - CMP_OP_DECL (mx_el_gt, S, M, API); \ - CMP_OP_DECL (mx_el_eq, S, M, API); \ +#define SM_CMP_OP_DECLS(S, M, API) \ + CMP_OP_DECL (mx_el_lt, S, M, API); \ + CMP_OP_DECL (mx_el_le, S, M, API); \ + CMP_OP_DECL (mx_el_ge, S, M, API); \ + CMP_OP_DECL (mx_el_gt, S, M, API); \ + CMP_OP_DECL (mx_el_eq, S, M, API); \ CMP_OP_DECL (mx_el_ne, S, M, API); -#define SM_BOOL_OP_DECLS(S, M, API) \ - BOOL_OP_DECL (mx_el_and, S, M, API); \ - BOOL_OP_DECL (mx_el_or, S, M, API); \ +#define SM_BOOL_OP_DECLS(S, M, API) \ + BOOL_OP_DECL (mx_el_and, S, M, API); \ + BOOL_OP_DECL (mx_el_or, S, M, API); \ -#define SM_OP_DECLS(R, S, M, API) \ - SM_BIN_OP_DECLS (R, S, M, API) \ - SM_CMP_OP_DECLS (S, M, API) \ - SM_BOOL_OP_DECLS (S, M, API) \ +#define SM_OP_DECLS(R, S, M, API) \ + SM_BIN_OP_DECLS (R, S, M, API) \ + SM_CMP_OP_DECLS (S, M, API) \ + SM_BOOL_OP_DECLS (S, M, API) \ // matrix by matrix operations. 
-#define MM_BIN_OP_DECLS(R, M1, M2, API) \ - BIN_OP_DECL (R, operator +, M1, M2, API); \ - BIN_OP_DECL (R, operator -, M1, M2, API); \ - BIN_OP_DECL (R, product, M1, M2, API); \ +#define MM_BIN_OP_DECLS(R, M1, M2, API) \ + BIN_OP_DECL (R, operator +, M1, M2, API); \ + BIN_OP_DECL (R, operator -, M1, M2, API); \ + BIN_OP_DECL (R, product, M1, M2, API); \ BIN_OP_DECL (R, quotient, M1, M2, API); -#define MM_CMP_OP_DECLS(M1, M2, API) \ - CMP_OP_DECL (mx_el_lt, M1, M2, API); \ - CMP_OP_DECL (mx_el_le, M1, M2, API); \ - CMP_OP_DECL (mx_el_ge, M1, M2, API); \ - CMP_OP_DECL (mx_el_gt, M1, M2, API); \ - CMP_OP_DECL (mx_el_eq, M1, M2, API); \ +#define MM_CMP_OP_DECLS(M1, M2, API) \ + CMP_OP_DECL (mx_el_lt, M1, M2, API); \ + CMP_OP_DECL (mx_el_le, M1, M2, API); \ + CMP_OP_DECL (mx_el_ge, M1, M2, API); \ + CMP_OP_DECL (mx_el_gt, M1, M2, API); \ + CMP_OP_DECL (mx_el_eq, M1, M2, API); \ CMP_OP_DECL (mx_el_ne, M1, M2, API); -#define MM_BOOL_OP_DECLS(M1, M2, API) \ - BOOL_OP_DECL (mx_el_and, M1, M2, API); \ +#define MM_BOOL_OP_DECLS(M1, M2, API) \ + BOOL_OP_DECL (mx_el_and, M1, M2, API); \ BOOL_OP_DECL (mx_el_or, M1, M2, API); -#define MM_OP_DECLS(R, M1, M2, API) \ - MM_BIN_OP_DECLS (R, M1, M2, API) \ - MM_CMP_OP_DECLS (M1, M2, API) \ +#define MM_OP_DECLS(R, M1, M2, API) \ + MM_BIN_OP_DECLS (R, M1, M2, API) \ + MM_CMP_OP_DECLS (M1, M2, API) \ MM_BOOL_OP_DECLS (M1, M2, API) // N-D matrix by scalar operations. -#define NDS_BIN_OP_DECLS(R, ND, S, API) \ - BIN_OP_DECL (R, operator +, ND, S, API); \ - BIN_OP_DECL (R, operator -, ND, S, API); \ - BIN_OP_DECL (R, operator *, ND, S, API); \ +#define NDS_BIN_OP_DECLS(R, ND, S, API) \ + BIN_OP_DECL (R, operator +, ND, S, API); \ + BIN_OP_DECL (R, operator -, ND, S, API); \ + BIN_OP_DECL (R, operator *, ND, S, API); \ BIN_OP_DECL (R, operator /, ND, S, API); -#define NDS_CMP_OP_DECLS(ND, S, API) \ - NDCMP_OP_DECL (mx_el_lt, ND, S, API); \ - NDCMP_OP_DECL (mx_el_le, ND, S, API); \ - NDCMP_OP_DECL (mx_el_ge, ND, S, API); \ - NDCMP_OP_DECL (mx_el_gt, ND, S, API); \ - NDCMP_OP_DECL (mx_el_eq, ND, S, API); \ +#define NDS_CMP_OP_DECLS(ND, S, API) \ + NDCMP_OP_DECL (mx_el_lt, ND, S, API); \ + NDCMP_OP_DECL (mx_el_le, ND, S, API); \ + NDCMP_OP_DECL (mx_el_ge, ND, S, API); \ + NDCMP_OP_DECL (mx_el_gt, ND, S, API); \ + NDCMP_OP_DECL (mx_el_eq, ND, S, API); \ NDCMP_OP_DECL (mx_el_ne, ND, S, API); -#define NDS_BOOL_OP_DECLS(ND, S, API) \ - NDBOOL_OP_DECL (mx_el_and, ND, S, API); \ - NDBOOL_OP_DECL (mx_el_or, ND, S, API); \ - NDBOOL_OP_DECL (mx_el_not_and, ND, S, API); \ +#define NDS_BOOL_OP_DECLS(ND, S, API) \ + NDBOOL_OP_DECL (mx_el_and, ND, S, API); \ + NDBOOL_OP_DECL (mx_el_or, ND, S, API); \ + NDBOOL_OP_DECL (mx_el_not_and, ND, S, API); \ NDBOOL_OP_DECL (mx_el_not_or, ND, S, API); -#define NDS_OP_DECLS(R, ND, S, API) \ - NDS_BIN_OP_DECLS (R, ND, S, API) \ - NDS_CMP_OP_DECLS (ND, S, API) \ +#define NDS_OP_DECLS(R, ND, S, API) \ + NDS_BIN_OP_DECLS (R, ND, S, API) \ + NDS_CMP_OP_DECLS (ND, S, API) \ NDS_BOOL_OP_DECLS (ND, S, API) // scalar by N-D matrix operations. 
-#define SND_BIN_OP_DECLS(R, S, ND, API) \ - BIN_OP_DECL (R, operator +, S, ND, API); \ - BIN_OP_DECL (R, operator -, S, ND, API); \ - BIN_OP_DECL (R, operator *, S, ND, API); \ +#define SND_BIN_OP_DECLS(R, S, ND, API) \ + BIN_OP_DECL (R, operator +, S, ND, API); \ + BIN_OP_DECL (R, operator -, S, ND, API); \ + BIN_OP_DECL (R, operator *, S, ND, API); \ BIN_OP_DECL (R, operator /, S, ND, API); -#define SND_CMP_OP_DECLS(S, ND, API) \ - NDCMP_OP_DECL (mx_el_lt, S, ND, API); \ - NDCMP_OP_DECL (mx_el_le, S, ND, API); \ - NDCMP_OP_DECL (mx_el_ge, S, ND, API); \ - NDCMP_OP_DECL (mx_el_gt, S, ND, API); \ - NDCMP_OP_DECL (mx_el_eq, S, ND, API); \ +#define SND_CMP_OP_DECLS(S, ND, API) \ + NDCMP_OP_DECL (mx_el_lt, S, ND, API); \ + NDCMP_OP_DECL (mx_el_le, S, ND, API); \ + NDCMP_OP_DECL (mx_el_ge, S, ND, API); \ + NDCMP_OP_DECL (mx_el_gt, S, ND, API); \ + NDCMP_OP_DECL (mx_el_eq, S, ND, API); \ NDCMP_OP_DECL (mx_el_ne, S, ND, API); -#define SND_BOOL_OP_DECLS(S, ND, API) \ - NDBOOL_OP_DECL (mx_el_and, S, ND, API); \ - NDBOOL_OP_DECL (mx_el_or, S, ND, API); \ - NDBOOL_OP_DECL (mx_el_and_not, S, ND, API); \ +#define SND_BOOL_OP_DECLS(S, ND, API) \ + NDBOOL_OP_DECL (mx_el_and, S, ND, API); \ + NDBOOL_OP_DECL (mx_el_or, S, ND, API); \ + NDBOOL_OP_DECL (mx_el_and_not, S, ND, API); \ NDBOOL_OP_DECL (mx_el_or_not, S, ND, API); -#define SND_OP_DECLS(R, S, ND, API) \ - SND_BIN_OP_DECLS (R, S, ND, API) \ - SND_CMP_OP_DECLS (S, ND, API) \ +#define SND_OP_DECLS(R, S, ND, API) \ + SND_BIN_OP_DECLS (R, S, ND, API) \ + SND_CMP_OP_DECLS (S, ND, API) \ SND_BOOL_OP_DECLS (S, ND, API) // N-D matrix by N-D matrix operations. -#define NDND_BIN_OP_DECLS(R, ND1, ND2, API) \ - BIN_OP_DECL (R, operator +, ND1, ND2, API); \ - BIN_OP_DECL (R, operator -, ND1, ND2, API); \ - BIN_OP_DECL (R, product, ND1, ND2, API); \ +#define NDND_BIN_OP_DECLS(R, ND1, ND2, API) \ + BIN_OP_DECL (R, operator +, ND1, ND2, API); \ + BIN_OP_DECL (R, operator -, ND1, ND2, API); \ + BIN_OP_DECL (R, product, ND1, ND2, API); \ BIN_OP_DECL (R, quotient, ND1, ND2, API); -#define NDND_CMP_OP_DECLS(ND1, ND2, API) \ - NDCMP_OP_DECL (mx_el_lt, ND1, ND2, API); \ - NDCMP_OP_DECL (mx_el_le, ND1, ND2, API); \ - NDCMP_OP_DECL (mx_el_ge, ND1, ND2, API); \ - NDCMP_OP_DECL (mx_el_gt, ND1, ND2, API); \ - NDCMP_OP_DECL (mx_el_eq, ND1, ND2, API); \ +#define NDND_CMP_OP_DECLS(ND1, ND2, API) \ + NDCMP_OP_DECL (mx_el_lt, ND1, ND2, API); \ + NDCMP_OP_DECL (mx_el_le, ND1, ND2, API); \ + NDCMP_OP_DECL (mx_el_ge, ND1, ND2, API); \ + NDCMP_OP_DECL (mx_el_gt, ND1, ND2, API); \ + NDCMP_OP_DECL (mx_el_eq, ND1, ND2, API); \ NDCMP_OP_DECL (mx_el_ne, ND1, ND2, API); -#define NDND_BOOL_OP_DECLS(ND1, ND2, API) \ - NDBOOL_OP_DECL (mx_el_and, ND1, ND2, API); \ - NDBOOL_OP_DECL (mx_el_or, ND1, ND2, API); \ - NDBOOL_OP_DECL (mx_el_and_not, ND1, ND2, API); \ - NDBOOL_OP_DECL (mx_el_or_not, ND1, ND2, API); \ - NDBOOL_OP_DECL (mx_el_not_and, ND1, ND2, API); \ +#define NDND_BOOL_OP_DECLS(ND1, ND2, API) \ + NDBOOL_OP_DECL (mx_el_and, ND1, ND2, API); \ + NDBOOL_OP_DECL (mx_el_or, ND1, ND2, API); \ + NDBOOL_OP_DECL (mx_el_and_not, ND1, ND2, API); \ + NDBOOL_OP_DECL (mx_el_or_not, ND1, ND2, API); \ + NDBOOL_OP_DECL (mx_el_not_and, ND1, ND2, API); \ NDBOOL_OP_DECL (mx_el_not_or, ND1, ND2, API); -#define NDND_OP_DECLS(R, ND1, ND2, API) \ - NDND_BIN_OP_DECLS (R, ND1, ND2, API) \ - NDND_CMP_OP_DECLS (ND1, ND2, API) \ +#define NDND_OP_DECLS(R, ND1, ND2, API) \ + NDND_BIN_OP_DECLS (R, ND1, ND2, API) \ + NDND_CMP_OP_DECLS (ND1, ND2, API) \ NDND_BOOL_OP_DECLS (ND1, ND2, API) // scalar by diagonal matrix 
operations. -#define SDM_BIN_OP_DECLS(R, S, DM, API) \ - BIN_OP_DECL (R, operator *, S, DM, API); \ +#define SDM_BIN_OP_DECLS(R, S, DM, API) \ + BIN_OP_DECL (R, operator *, S, DM, API); \ -#define SDM_OP_DECLS(R, S, DM, API) \ +#define SDM_OP_DECLS(R, S, DM, API) \ SDM_BIN_OP_DECLS(R, S, DM, API) // diagonal matrix by scalar operations. -#define DMS_BIN_OP_DECLS(R, DM, S, API) \ - BIN_OP_DECL (R, operator *, DM, S, API); \ +#define DMS_BIN_OP_DECLS(R, DM, S, API) \ + BIN_OP_DECL (R, operator *, DM, S, API); \ BIN_OP_DECL (R, operator /, DM, S, API); -#define DMS_OP_DECLS(R, DM, S, API) \ +#define DMS_OP_DECLS(R, DM, S, API) \ DMS_BIN_OP_DECLS(R, DM, S, API) // matrix by diagonal matrix operations. -#define MDM_BIN_OP_DECLS(R, M, DM, API) \ - BIN_OP_DECL (R, operator +, M, DM, API); \ - BIN_OP_DECL (R, operator -, M, DM, API); \ +#define MDM_BIN_OP_DECLS(R, M, DM, API) \ + BIN_OP_DECL (R, operator +, M, DM, API); \ + BIN_OP_DECL (R, operator -, M, DM, API); \ BIN_OP_DECL (R, operator *, M, DM, API); -#define MDM_OP_DECLS(R, M, DM, API) \ +#define MDM_OP_DECLS(R, M, DM, API) \ MDM_BIN_OP_DECLS(R, M, DM, API) // diagonal matrix by matrix operations. -#define DMM_BIN_OP_DECLS(R, DM, M, API) \ - BIN_OP_DECL (R, operator +, DM, M, API); \ - BIN_OP_DECL (R, operator -, DM, M, API); \ +#define DMM_BIN_OP_DECLS(R, DM, M, API) \ + BIN_OP_DECL (R, operator +, DM, M, API); \ + BIN_OP_DECL (R, operator -, DM, M, API); \ BIN_OP_DECL (R, operator *, DM, M, API); -#define DMM_OP_DECLS(R, DM, M, API) \ +#define DMM_OP_DECLS(R, DM, M, API) \ DMM_BIN_OP_DECLS(R, DM, M, API) // diagonal matrix by diagonal matrix operations. -#define DMDM_BIN_OP_DECLS(R, DM1, DM2, API) \ - BIN_OP_DECL (R, operator +, DM1, DM2, API); \ - BIN_OP_DECL (R, operator -, DM1, DM2, API); \ +#define DMDM_BIN_OP_DECLS(R, DM1, DM2, API) \ + BIN_OP_DECL (R, operator +, DM1, DM2, API); \ + BIN_OP_DECL (R, operator -, DM1, DM2, API); \ BIN_OP_DECL (R, product, DM1, DM2, API); -#define DMDM_OP_DECLS(R, DM1, DM2, API) \ +#define DMDM_OP_DECLS(R, DM1, DM2, API) \ DMDM_BIN_OP_DECLS (R, DM1, DM2, API) // scalar by N-D array min/max ops -#define MINMAX_DECLS(T, S, API) \ - extern API T min (S d, const T& m); \ - extern API T min (const T& m, S d); \ - extern API T min (const T& a, const T& b); \ - extern API T max (S d, const T& m); \ - extern API T max (const T& m, S d); \ +#define MINMAX_DECLS(T, S, API) \ + extern API T min (S d, const T& m); \ + extern API T min (const T& m, S d); \ + extern API T min (const T& a, const T& b); \ + extern API T max (S d, const T& m); \ + extern API T max (const T& m, S d); \ extern API T max (const T& a, const T& b); // permutation matrix by matrix ops and vice versa -#define PMM_BIN_OP_DECLS(R, PM, M, API) \ +#define PMM_BIN_OP_DECLS(R, PM, M, API) \ BIN_OP_DECL (R, operator *, PM, M, API); -#define MPM_BIN_OP_DECLS(R, M, PM, API) \ +#define MPM_BIN_OP_DECLS(R, M, PM, API) \ BIN_OP_DECL (R, operator *, M, PM, API); #endif diff -r dd992fd74fce -r e43d83253e28 liboctave/operators/mx-op-defs.h --- a/liboctave/operators/mx-op-defs.h Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/operators/mx-op-defs.h Mon Aug 01 12:40:18 2016 -0400 @@ -31,228 +31,228 @@ #include "mx-op-decl.h" #include "mx-inlines.cc" -#define SNANCHK(s) \ - if (octave::math::isnan (s)) \ +#define SNANCHK(s) \ + if (octave::math::isnan (s)) \ err_nan_to_logical_conversion () -#define MNANCHK(m, MT) \ - if (do_mx_check (m, mx_inline_any_nan)) \ +#define MNANCHK(m, MT) \ + if (do_mx_check (m, mx_inline_any_nan)) \ err_nan_to_logical_conversion () 
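// The SNANCHK/MNANCHK guards above reject NaN operands before an element-wise
// logical operation; MNANCHK delegates the matrix check to do_mx_check with
// mx_inline_any_nan, as in the hunk just shown.  The following standalone
// sketch illustrates the same pattern with assumed stand-ins: std::isnan
// replaces octave::math::isnan and a local helper replaces
// err_nan_to_logical_conversion so the snippet compiles outside the Octave
// tree, and mx_el_and_sketch only mirrors the shape of an MS_BOOL_OP
// expansion (guard the scalar, then apply the element-wise op).

#include <cmath>
#include <cstddef>
#include <stdexcept>
#include <vector>

static void
err_nan_to_logical_sketch (void)
{
  throw std::domain_error ("NaN can't be converted to a logical value");
}

#define SNANCHK_SKETCH(s)                       \
  if (std::isnan (s))                           \
    err_nan_to_logical_sketch ()

static std::vector<bool>
mx_el_and_sketch (const std::vector<double>& m, double s)
{
  // Guard first, exactly as MS_BOOL_OP does before do_ms_binary_op.
  SNANCHK_SKETCH (s);

  std::vector<bool> r (m.size ());
  for (std::size_t i = 0; i < m.size (); i++)
    r[i] = (m[i] != 0.0) && (s != 0.0);

  return r;
}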
// vector by scalar operations. -#define VS_BIN_OP(R, F, OP, V, S) \ - R \ - F (const V& v, const S& s) \ - { \ +#define VS_BIN_OP(R, F, OP, V, S) \ + R \ + F (const V& v, const S& s) \ + { \ return do_ms_binary_op (v, s, OP); \ } -#define VS_BIN_OPS(R, V, S) \ - VS_BIN_OP (R, operator +, mx_inline_add, V, S) \ - VS_BIN_OP (R, operator -, mx_inline_sub, V, S) \ - VS_BIN_OP (R, operator *, mx_inline_mul, V, S) \ +#define VS_BIN_OPS(R, V, S) \ + VS_BIN_OP (R, operator +, mx_inline_add, V, S) \ + VS_BIN_OP (R, operator -, mx_inline_sub, V, S) \ + VS_BIN_OP (R, operator *, mx_inline_mul, V, S) \ VS_BIN_OP (R, operator /, mx_inline_div, V, S) // scalar by vector by operations. -#define SV_BIN_OP(R, F, OP, S, V) \ - R \ - F (const S& s, const V& v) \ - { \ +#define SV_BIN_OP(R, F, OP, S, V) \ + R \ + F (const S& s, const V& v) \ + { \ return do_sm_binary_op (s, v, OP); \ } -#define SV_BIN_OPS(R, S, V) \ - SV_BIN_OP (R, operator +, mx_inline_add, S, V) \ - SV_BIN_OP (R, operator -, mx_inline_sub, S, V) \ - SV_BIN_OP (R, operator *, mx_inline_mul, S, V) \ +#define SV_BIN_OPS(R, S, V) \ + SV_BIN_OP (R, operator +, mx_inline_add, S, V) \ + SV_BIN_OP (R, operator -, mx_inline_sub, S, V) \ + SV_BIN_OP (R, operator *, mx_inline_mul, S, V) \ SV_BIN_OP (R, operator /, mx_inline_div, S, V) // vector by vector operations. -#define VV_BIN_OP(R, F, OP, V1, V2) \ - R \ - F (const V1& v1, const V2& v2) \ - { \ +#define VV_BIN_OP(R, F, OP, V1, V2) \ + R \ + F (const V1& v1, const V2& v2) \ + { \ return do_mm_binary_op (v1, v2, OP, OP, OP, #F); \ } -#define VV_BIN_OPS(R, V1, V2) \ - VV_BIN_OP (R, operator +, mx_inline_add, V1, V2) \ - VV_BIN_OP (R, operator -, mx_inline_sub, V1, V2) \ - VV_BIN_OP (R, product, mx_inline_mul, V1, V2) \ +#define VV_BIN_OPS(R, V1, V2) \ + VV_BIN_OP (R, operator +, mx_inline_add, V1, V2) \ + VV_BIN_OP (R, operator -, mx_inline_sub, V1, V2) \ + VV_BIN_OP (R, product, mx_inline_mul, V1, V2) \ VV_BIN_OP (R, quotient, mx_inline_div, V1, V2) // matrix by scalar operations. 
-#define MS_BIN_OP(R, OP, M, S, F) \ - R \ - OP (const M& m, const S& s) \ - { \ +#define MS_BIN_OP(R, OP, M, S, F) \ + R \ + OP (const M& m, const S& s) \ + { \ return do_ms_binary_op (m, s, F); \ } -#define MS_BIN_OPS(R, M, S) \ - MS_BIN_OP (R, operator +, M, S, mx_inline_add) \ - MS_BIN_OP (R, operator -, M, S, mx_inline_sub) \ - MS_BIN_OP (R, operator *, M, S, mx_inline_mul) \ +#define MS_BIN_OPS(R, M, S) \ + MS_BIN_OP (R, operator +, M, S, mx_inline_add) \ + MS_BIN_OP (R, operator -, M, S, mx_inline_sub) \ + MS_BIN_OP (R, operator *, M, S, mx_inline_mul) \ MS_BIN_OP (R, operator /, M, S, mx_inline_div) -#define MS_CMP_OP(F, OP, M, S) \ - boolMatrix \ - F (const M& m, const S& s) \ - { \ - return do_ms_binary_op (m, s, OP); \ +#define MS_CMP_OP(F, OP, M, S) \ + boolMatrix \ + F (const M& m, const S& s) \ + { \ + return do_ms_binary_op (m, s, OP); \ } -#define MS_CMP_OPS(M, S) \ - MS_CMP_OP (mx_el_lt, mx_inline_lt, M, S) \ - MS_CMP_OP (mx_el_le, mx_inline_le, M, S) \ - MS_CMP_OP (mx_el_ge, mx_inline_ge, M, S) \ - MS_CMP_OP (mx_el_gt, mx_inline_gt, M, S) \ - MS_CMP_OP (mx_el_eq, mx_inline_eq, M, S) \ +#define MS_CMP_OPS(M, S) \ + MS_CMP_OP (mx_el_lt, mx_inline_lt, M, S) \ + MS_CMP_OP (mx_el_le, mx_inline_le, M, S) \ + MS_CMP_OP (mx_el_ge, mx_inline_ge, M, S) \ + MS_CMP_OP (mx_el_gt, mx_inline_gt, M, S) \ + MS_CMP_OP (mx_el_eq, mx_inline_eq, M, S) \ MS_CMP_OP (mx_el_ne, mx_inline_ne, M, S) -#define MS_BOOL_OP(F, OP, M, S) \ - boolMatrix \ - F (const M& m, const S& s) \ - { \ - MNANCHK (m, M::element_type); \ - SNANCHK (s); \ - return do_ms_binary_op (m, s, OP); \ +#define MS_BOOL_OP(F, OP, M, S) \ + boolMatrix \ + F (const M& m, const S& s) \ + { \ + MNANCHK (m, M::element_type); \ + SNANCHK (s); \ + return do_ms_binary_op (m, s, OP); \ } -#define MS_BOOL_OPS(M, S) \ - MS_BOOL_OP (mx_el_and, mx_inline_and, M, S) \ +#define MS_BOOL_OPS(M, S) \ + MS_BOOL_OP (mx_el_and, mx_inline_and, M, S) \ MS_BOOL_OP (mx_el_or, mx_inline_or, M, S) // scalar by matrix operations. 
-#define SM_BIN_OP(R, OP, S, M, F) \ - R \ - OP (const S& s, const M& m) \ - { \ +#define SM_BIN_OP(R, OP, S, M, F) \ + R \ + OP (const S& s, const M& m) \ + { \ return do_sm_binary_op (s, m, F); \ } -#define SM_BIN_OPS(R, S, M) \ - SM_BIN_OP (R, operator +, S, M, mx_inline_add) \ - SM_BIN_OP (R, operator -, S, M, mx_inline_sub) \ - SM_BIN_OP (R, operator *, S, M, mx_inline_mul) \ +#define SM_BIN_OPS(R, S, M) \ + SM_BIN_OP (R, operator +, S, M, mx_inline_add) \ + SM_BIN_OP (R, operator -, S, M, mx_inline_sub) \ + SM_BIN_OP (R, operator *, S, M, mx_inline_mul) \ SM_BIN_OP (R, operator /, S, M, mx_inline_div) -#define SM_CMP_OP(F, OP, S, M) \ - boolMatrix \ - F (const S& s, const M& m) \ - { \ - return do_sm_binary_op (s, m, OP); \ +#define SM_CMP_OP(F, OP, S, M) \ + boolMatrix \ + F (const S& s, const M& m) \ + { \ + return do_sm_binary_op (s, m, OP); \ } -#define SM_CMP_OPS(S, M) \ - SM_CMP_OP (mx_el_lt, mx_inline_lt, S, M) \ - SM_CMP_OP (mx_el_le, mx_inline_le, S, M) \ - SM_CMP_OP (mx_el_ge, mx_inline_ge, S, M) \ - SM_CMP_OP (mx_el_gt, mx_inline_gt, S, M) \ - SM_CMP_OP (mx_el_eq, mx_inline_eq, S, M) \ +#define SM_CMP_OPS(S, M) \ + SM_CMP_OP (mx_el_lt, mx_inline_lt, S, M) \ + SM_CMP_OP (mx_el_le, mx_inline_le, S, M) \ + SM_CMP_OP (mx_el_ge, mx_inline_ge, S, M) \ + SM_CMP_OP (mx_el_gt, mx_inline_gt, S, M) \ + SM_CMP_OP (mx_el_eq, mx_inline_eq, S, M) \ SM_CMP_OP (mx_el_ne, mx_inline_ne, S, M) -#define SM_BOOL_OP(F, OP, S, M) \ - boolMatrix \ - F (const S& s, const M& m) \ - { \ - SNANCHK (s); \ - MNANCHK (m, M::element_type); \ - return do_sm_binary_op (s, m, OP); \ +#define SM_BOOL_OP(F, OP, S, M) \ + boolMatrix \ + F (const S& s, const M& m) \ + { \ + SNANCHK (s); \ + MNANCHK (m, M::element_type); \ + return do_sm_binary_op (s, m, OP); \ } -#define SM_BOOL_OPS(S, M) \ - SM_BOOL_OP (mx_el_and, mx_inline_and, S, M) \ +#define SM_BOOL_OPS(S, M) \ + SM_BOOL_OP (mx_el_and, mx_inline_and, S, M) \ SM_BOOL_OP (mx_el_or, mx_inline_or, S, M) // matrix by matrix operations. 
-#define MM_BIN_OP(R, OP, M1, M2, F) \ - R \ - OP (const M1& m1, const M2& m2) \ - { \ +#define MM_BIN_OP(R, OP, M1, M2, F) \ + R \ + OP (const M1& m1, const M2& m2) \ + { \ return do_mm_binary_op (m1, m2, F, F, F, #OP); \ } -#define MM_BIN_OPS(R, M1, M2) \ - MM_BIN_OP (R, operator +, M1, M2, mx_inline_add) \ - MM_BIN_OP (R, operator -, M1, M2, mx_inline_sub) \ - MM_BIN_OP (R, product, M1, M2, mx_inline_mul) \ +#define MM_BIN_OPS(R, M1, M2) \ + MM_BIN_OP (R, operator +, M1, M2, mx_inline_add) \ + MM_BIN_OP (R, operator -, M1, M2, mx_inline_sub) \ + MM_BIN_OP (R, product, M1, M2, mx_inline_mul) \ MM_BIN_OP (R, quotient, M1, M2, mx_inline_div) -#define MM_CMP_OP(F, OP, M1, M2) \ - boolMatrix \ - F (const M1& m1, const M2& m2) \ - { \ +#define MM_CMP_OP(F, OP, M1, M2) \ + boolMatrix \ + F (const M1& m1, const M2& m2) \ + { \ return do_mm_binary_op (m1, m2, OP, OP, OP, #F); \ } -#define MM_CMP_OPS(M1, M2) \ - MM_CMP_OP (mx_el_lt, mx_inline_lt, M1, M2) \ - MM_CMP_OP (mx_el_le, mx_inline_le, M1, M2) \ - MM_CMP_OP (mx_el_ge, mx_inline_ge, M1, M2) \ - MM_CMP_OP (mx_el_gt, mx_inline_gt, M1, M2) \ - MM_CMP_OP (mx_el_eq, mx_inline_eq, M1, M2) \ +#define MM_CMP_OPS(M1, M2) \ + MM_CMP_OP (mx_el_lt, mx_inline_lt, M1, M2) \ + MM_CMP_OP (mx_el_le, mx_inline_le, M1, M2) \ + MM_CMP_OP (mx_el_ge, mx_inline_ge, M1, M2) \ + MM_CMP_OP (mx_el_gt, mx_inline_gt, M1, M2) \ + MM_CMP_OP (mx_el_eq, mx_inline_eq, M1, M2) \ MM_CMP_OP (mx_el_ne, mx_inline_ne, M1, M2) -#define MM_BOOL_OP(F, OP, M1, M2) \ - boolMatrix \ - F (const M1& m1, const M2& m2) \ - { \ - MNANCHK (m1, M1::element_type); \ - MNANCHK (m2, M2::element_type); \ +#define MM_BOOL_OP(F, OP, M1, M2) \ + boolMatrix \ + F (const M1& m1, const M2& m2) \ + { \ + MNANCHK (m1, M1::element_type); \ + MNANCHK (m2, M2::element_type); \ return do_mm_binary_op (m1, m2, OP, OP, OP, #F); \ } -#define MM_BOOL_OPS(M1, M2) \ +#define MM_BOOL_OPS(M1, M2) \ MM_BOOL_OP (mx_el_and, mx_inline_and, M1, M2) \ MM_BOOL_OP (mx_el_or, mx_inline_or, M1, M2) // N-D matrix by scalar operations. 
-#define NDS_BIN_OP(R, OP, ND, S, F) \ - R \ - OP (const ND& m, const S& s) \ - { \ +#define NDS_BIN_OP(R, OP, ND, S, F) \ + R \ + OP (const ND& m, const S& s) \ + { \ return do_ms_binary_op (m, s, F); \ } -#define NDS_BIN_OPS(R, ND, S) \ - NDS_BIN_OP (R, operator +, ND, S, mx_inline_add) \ - NDS_BIN_OP (R, operator -, ND, S, mx_inline_sub) \ - NDS_BIN_OP (R, operator *, ND, S, mx_inline_mul) \ +#define NDS_BIN_OPS(R, ND, S) \ + NDS_BIN_OP (R, operator +, ND, S, mx_inline_add) \ + NDS_BIN_OP (R, operator -, ND, S, mx_inline_sub) \ + NDS_BIN_OP (R, operator *, ND, S, mx_inline_mul) \ NDS_BIN_OP (R, operator /, ND, S, mx_inline_div) -#define NDS_CMP_OP(F, OP, ND, S) \ - boolNDArray \ - F (const ND& m, const S& s) \ - { \ - return do_ms_binary_op (m, s, OP); \ +#define NDS_CMP_OP(F, OP, ND, S) \ + boolNDArray \ + F (const ND& m, const S& s) \ + { \ + return do_ms_binary_op (m, s, OP); \ } -#define NDS_CMP_OPS(ND, S) \ - NDS_CMP_OP (mx_el_lt, mx_inline_lt, ND, S) \ - NDS_CMP_OP (mx_el_le, mx_inline_le, ND, S) \ - NDS_CMP_OP (mx_el_ge, mx_inline_ge, ND, S) \ - NDS_CMP_OP (mx_el_gt, mx_inline_gt, ND, S) \ - NDS_CMP_OP (mx_el_eq, mx_inline_eq, ND, S) \ +#define NDS_CMP_OPS(ND, S) \ + NDS_CMP_OP (mx_el_lt, mx_inline_lt, ND, S) \ + NDS_CMP_OP (mx_el_le, mx_inline_le, ND, S) \ + NDS_CMP_OP (mx_el_ge, mx_inline_ge, ND, S) \ + NDS_CMP_OP (mx_el_gt, mx_inline_gt, ND, S) \ + NDS_CMP_OP (mx_el_eq, mx_inline_eq, ND, S) \ NDS_CMP_OP (mx_el_ne, mx_inline_ne, ND, S) -#define NDS_BOOL_OP(F, OP, ND, S) \ - boolNDArray \ - F (const ND& m, const S& s) \ - { \ - MNANCHK (m, ND::element_type); \ - SNANCHK (s); \ - return do_ms_binary_op (m, s, OP); \ +#define NDS_BOOL_OP(F, OP, ND, S) \ + boolNDArray \ + F (const ND& m, const S& s) \ + { \ + MNANCHK (m, ND::element_type); \ + SNANCHK (s); \ + return do_ms_binary_op (m, s, OP); \ } -#define NDS_BOOL_OPS(ND, S) \ +#define NDS_BOOL_OPS(ND, S) \ NDS_BOOL_OP (mx_el_and, mx_inline_and, ND, S) \ NDS_BOOL_OP (mx_el_or, mx_inline_or, ND, S) \ NDS_BOOL_OP (mx_el_not_and, mx_inline_not_and, ND, S) \ @@ -262,44 +262,44 @@ // scalar by N-D matrix operations. 
-#define SND_BIN_OP(R, OP, S, ND, F) \ - R \ - OP (const S& s, const ND& m) \ - { \ +#define SND_BIN_OP(R, OP, S, ND, F) \ + R \ + OP (const S& s, const ND& m) \ + { \ return do_sm_binary_op (s, m, F); \ } -#define SND_BIN_OPS(R, S, ND) \ - SND_BIN_OP (R, operator +, S, ND, mx_inline_add) \ - SND_BIN_OP (R, operator -, S, ND, mx_inline_sub) \ - SND_BIN_OP (R, operator *, S, ND, mx_inline_mul) \ +#define SND_BIN_OPS(R, S, ND) \ + SND_BIN_OP (R, operator +, S, ND, mx_inline_add) \ + SND_BIN_OP (R, operator -, S, ND, mx_inline_sub) \ + SND_BIN_OP (R, operator *, S, ND, mx_inline_mul) \ SND_BIN_OP (R, operator /, S, ND, mx_inline_div) -#define SND_CMP_OP(F, OP, S, ND) \ - boolNDArray \ - F (const S& s, const ND& m) \ - { \ - return do_sm_binary_op (s, m, OP); \ +#define SND_CMP_OP(F, OP, S, ND) \ + boolNDArray \ + F (const S& s, const ND& m) \ + { \ + return do_sm_binary_op (s, m, OP); \ } -#define SND_CMP_OPS(S, ND) \ - SND_CMP_OP (mx_el_lt, mx_inline_lt, S, ND) \ - SND_CMP_OP (mx_el_le, mx_inline_le, S, ND) \ - SND_CMP_OP (mx_el_ge, mx_inline_ge, S, ND) \ - SND_CMP_OP (mx_el_gt, mx_inline_gt, S, ND) \ - SND_CMP_OP (mx_el_eq, mx_inline_eq, S, ND) \ +#define SND_CMP_OPS(S, ND) \ + SND_CMP_OP (mx_el_lt, mx_inline_lt, S, ND) \ + SND_CMP_OP (mx_el_le, mx_inline_le, S, ND) \ + SND_CMP_OP (mx_el_ge, mx_inline_ge, S, ND) \ + SND_CMP_OP (mx_el_gt, mx_inline_gt, S, ND) \ + SND_CMP_OP (mx_el_eq, mx_inline_eq, S, ND) \ SND_CMP_OP (mx_el_ne, mx_inline_ne, S, ND) -#define SND_BOOL_OP(F, OP, S, ND) \ - boolNDArray \ - F (const S& s, const ND& m) \ - { \ - SNANCHK (s); \ - MNANCHK (m, ND::element_type); \ - return do_sm_binary_op (s, m, OP); \ +#define SND_BOOL_OP(F, OP, S, ND) \ + boolNDArray \ + F (const S& s, const ND& m) \ + { \ + SNANCHK (s); \ + MNANCHK (m, ND::element_type); \ + return do_sm_binary_op (s, m, OP); \ } -#define SND_BOOL_OPS(S, ND) \ +#define SND_BOOL_OPS(S, ND) \ SND_BOOL_OP (mx_el_and, mx_inline_and, S, ND) \ SND_BOOL_OP (mx_el_or, mx_inline_or, S, ND) \ SND_BOOL_OP (mx_el_not_and, mx_inline_not_and, S, ND) \ @@ -309,330 +309,330 @@ // N-D matrix by N-D matrix operations. 
-#define NDND_BIN_OP(R, OP, ND1, ND2, F) \ - R \ - OP (const ND1& m1, const ND2& m2) \ - { \ +#define NDND_BIN_OP(R, OP, ND1, ND2, F) \ + R \ + OP (const ND1& m1, const ND2& m2) \ + { \ return do_mm_binary_op (m1, m2, F, F, F, #OP); \ } -#define NDND_BIN_OPS(R, ND1, ND2) \ - NDND_BIN_OP (R, operator +, ND1, ND2, mx_inline_add) \ - NDND_BIN_OP (R, operator -, ND1, ND2, mx_inline_sub) \ - NDND_BIN_OP (R, product, ND1, ND2, mx_inline_mul) \ +#define NDND_BIN_OPS(R, ND1, ND2) \ + NDND_BIN_OP (R, operator +, ND1, ND2, mx_inline_add) \ + NDND_BIN_OP (R, operator -, ND1, ND2, mx_inline_sub) \ + NDND_BIN_OP (R, product, ND1, ND2, mx_inline_mul) \ NDND_BIN_OP (R, quotient, ND1, ND2, mx_inline_div) -#define NDND_CMP_OP(F, OP, ND1, ND2) \ - boolNDArray \ - F (const ND1& m1, const ND2& m2) \ - { \ +#define NDND_CMP_OP(F, OP, ND1, ND2) \ + boolNDArray \ + F (const ND1& m1, const ND2& m2) \ + { \ return do_mm_binary_op (m1, m2, OP, OP, OP, #F); \ } -#define NDND_CMP_OPS(ND1, ND2) \ - NDND_CMP_OP (mx_el_lt, mx_inline_lt, ND1, ND2) \ - NDND_CMP_OP (mx_el_le, mx_inline_le, ND1, ND2) \ - NDND_CMP_OP (mx_el_ge, mx_inline_ge, ND1, ND2) \ - NDND_CMP_OP (mx_el_gt, mx_inline_gt, ND1, ND2) \ - NDND_CMP_OP (mx_el_eq, mx_inline_eq, ND1, ND2) \ +#define NDND_CMP_OPS(ND1, ND2) \ + NDND_CMP_OP (mx_el_lt, mx_inline_lt, ND1, ND2) \ + NDND_CMP_OP (mx_el_le, mx_inline_le, ND1, ND2) \ + NDND_CMP_OP (mx_el_ge, mx_inline_ge, ND1, ND2) \ + NDND_CMP_OP (mx_el_gt, mx_inline_gt, ND1, ND2) \ + NDND_CMP_OP (mx_el_eq, mx_inline_eq, ND1, ND2) \ NDND_CMP_OP (mx_el_ne, mx_inline_ne, ND1, ND2) -#define NDND_BOOL_OP(F, OP, ND1, ND2) \ - boolNDArray \ - F (const ND1& m1, const ND2& m2) \ - { \ - MNANCHK (m1, ND1::element_type); \ - MNANCHK (m2, ND2::element_type); \ +#define NDND_BOOL_OP(F, OP, ND1, ND2) \ + boolNDArray \ + F (const ND1& m1, const ND2& m2) \ + { \ + MNANCHK (m1, ND1::element_type); \ + MNANCHK (m2, ND2::element_type); \ return do_mm_binary_op (m1, m2, OP, OP, OP, #F); \ } -#define NDND_BOOL_OPS(ND1, ND2) \ - NDND_BOOL_OP (mx_el_and, mx_inline_and, ND1, ND2) \ - NDND_BOOL_OP (mx_el_or, mx_inline_or, ND1, ND2) \ - NDND_BOOL_OP (mx_el_not_and, mx_inline_not_and, ND1, ND2) \ - NDND_BOOL_OP (mx_el_not_or, mx_inline_not_or, ND1, ND2) \ - NDND_BOOL_OP (mx_el_and_not, mx_inline_and_not, ND1, ND2) \ +#define NDND_BOOL_OPS(ND1, ND2) \ + NDND_BOOL_OP (mx_el_and, mx_inline_and, ND1, ND2) \ + NDND_BOOL_OP (mx_el_or, mx_inline_or, ND1, ND2) \ + NDND_BOOL_OP (mx_el_not_and, mx_inline_not_and, ND1, ND2) \ + NDND_BOOL_OP (mx_el_not_or, mx_inline_not_or, ND1, ND2) \ + NDND_BOOL_OP (mx_el_and_not, mx_inline_and_not, ND1, ND2) \ NDND_BOOL_OP (mx_el_or_not, mx_inline_or_not, ND1, ND2) // scalar by diagonal matrix operations. -#define SDM_BIN_OP(R, OP, S, DM) \ - R \ - operator OP (const S& s, const DM& dm) \ - { \ - R r (dm.rows (), dm.cols ()); \ - \ - for (octave_idx_type i = 0; i < dm.length (); i++) \ - r.dgxelem (i) = s OP dm.dgelem (i); \ - \ - return r; \ -} +#define SDM_BIN_OP(R, OP, S, DM) \ + R \ + operator OP (const S& s, const DM& dm) \ + { \ + R r (dm.rows (), dm.cols ()); \ + \ + for (octave_idx_type i = 0; i < dm.length (); i++) \ + r.dgxelem (i) = s OP dm.dgelem (i); \ + \ + return r; \ + } -#define SDM_BIN_OPS(R, S, DM) \ +#define SDM_BIN_OPS(R, S, DM) \ SDM_BIN_OP (R, *, S, DM) // diagonal matrix by scalar operations. 
-#define DMS_BIN_OP(R, OP, DM, S) \ - R \ - operator OP (const DM& dm, const S& s) \ - { \ - R r (dm.rows (), dm.cols ()); \ - \ - for (octave_idx_type i = 0; i < dm.length (); i++) \ - r.dgxelem (i) = dm.dgelem (i) OP s; \ - \ - return r; \ +#define DMS_BIN_OP(R, OP, DM, S) \ + R \ + operator OP (const DM& dm, const S& s) \ + { \ + R r (dm.rows (), dm.cols ()); \ + \ + for (octave_idx_type i = 0; i < dm.length (); i++) \ + r.dgxelem (i) = dm.dgelem (i) OP s; \ + \ + return r; \ } -#define DMS_BIN_OPS(R, DM, S) \ - DMS_BIN_OP (R, *, DM, S) \ +#define DMS_BIN_OPS(R, DM, S) \ + DMS_BIN_OP (R, *, DM, S) \ DMS_BIN_OP (R, /, DM, S) // matrix by diagonal matrix operations. -#define MDM_BIN_OP(R, OP, M, DM, OPEQ) \ -R \ -OP (const M& m, const DM& dm) \ -{ \ - R r; \ - \ - octave_idx_type m_nr = m.rows (); \ - octave_idx_type m_nc = m.cols (); \ - \ - octave_idx_type dm_nr = dm.rows (); \ - octave_idx_type dm_nc = dm.cols (); \ - \ - if (m_nr != dm_nr || m_nc != dm_nc) \ - err_nonconformant (#OP, m_nr, m_nc, dm_nr, dm_nc); \ - \ - r.resize (m_nr, m_nc); \ - \ - if (m_nr > 0 && m_nc > 0) \ - { \ - r = R (m); \ - \ - octave_idx_type len = dm.length (); \ - \ - for (octave_idx_type i = 0; i < len; i++) \ - r.elem (i, i) OPEQ dm.elem (i, i); \ - } \ - \ - return r; \ -} +#define MDM_BIN_OP(R, OP, M, DM, OPEQ) \ + R \ + OP (const M& m, const DM& dm) \ + { \ + R r; \ + \ + octave_idx_type m_nr = m.rows (); \ + octave_idx_type m_nc = m.cols (); \ + \ + octave_idx_type dm_nr = dm.rows (); \ + octave_idx_type dm_nc = dm.cols (); \ + \ + if (m_nr != dm_nr || m_nc != dm_nc) \ + err_nonconformant (#OP, m_nr, m_nc, dm_nr, dm_nc); \ + \ + r.resize (m_nr, m_nc); \ + \ + if (m_nr > 0 && m_nc > 0) \ + { \ + r = R (m); \ + \ + octave_idx_type len = dm.length (); \ + \ + for (octave_idx_type i = 0; i < len; i++) \ + r.elem (i, i) OPEQ dm.elem (i, i); \ + } \ + \ + return r; \ + } -#define MDM_MULTIPLY_OP(R, M, DM, R_ZERO) \ -R \ -operator * (const M& m, const DM& dm) \ -{ \ - R r; \ - \ - octave_idx_type m_nr = m.rows (); \ - octave_idx_type m_nc = m.cols (); \ - \ - octave_idx_type dm_nr = dm.rows (); \ - octave_idx_type dm_nc = dm.cols (); \ - \ - if (m_nc != dm_nr) \ - err_nonconformant ("operator *", m_nr, m_nc, dm_nr, dm_nc); \ - \ - r = R (m_nr, dm_nc); \ - R::element_type *rd = r.fortran_vec (); \ - const M::element_type *md = m.data (); \ - const DM::element_type *dd = dm.data (); \ - \ - octave_idx_type len = dm.length (); \ - for (octave_idx_type i = 0; i < len; i++) \ - { \ - mx_inline_mul (m_nr, rd, md, dd[i]); \ - rd += m_nr; md += m_nr; \ - } \ - mx_inline_fill (m_nr * (dm_nc - len), rd, R_ZERO); \ - \ - return r; \ -} +#define MDM_MULTIPLY_OP(R, M, DM, R_ZERO) \ + R \ + operator * (const M& m, const DM& dm) \ + { \ + R r; \ + \ + octave_idx_type m_nr = m.rows (); \ + octave_idx_type m_nc = m.cols (); \ + \ + octave_idx_type dm_nr = dm.rows (); \ + octave_idx_type dm_nc = dm.cols (); \ + \ + if (m_nc != dm_nr) \ + err_nonconformant ("operator *", m_nr, m_nc, dm_nr, dm_nc); \ + \ + r = R (m_nr, dm_nc); \ + R::element_type *rd = r.fortran_vec (); \ + const M::element_type *md = m.data (); \ + const DM::element_type *dd = dm.data (); \ + \ + octave_idx_type len = dm.length (); \ + for (octave_idx_type i = 0; i < len; i++) \ + { \ + mx_inline_mul (m_nr, rd, md, dd[i]); \ + rd += m_nr; md += m_nr; \ + } \ + mx_inline_fill (m_nr * (dm_nc - len), rd, R_ZERO); \ + \ + return r; \ + } -#define MDM_BIN_OPS(R, M, DM, R_ZERO) \ - MDM_BIN_OP (R, operator +, M, DM, +=) \ - MDM_BIN_OP (R, operator -, M, DM, -=) \ 
+#define MDM_BIN_OPS(R, M, DM, R_ZERO) \ + MDM_BIN_OP (R, operator +, M, DM, +=) \ + MDM_BIN_OP (R, operator -, M, DM, -=) \ MDM_MULTIPLY_OP (R, M, DM, R_ZERO) // diagonal matrix by matrix operations. -#define DMM_BIN_OP(R, OP, DM, M, OPEQ, PREOP) \ -R \ -OP (const DM& dm, const M& m) \ -{ \ - R r; \ - \ - octave_idx_type dm_nr = dm.rows (); \ - octave_idx_type dm_nc = dm.cols (); \ - \ - octave_idx_type m_nr = m.rows (); \ - octave_idx_type m_nc = m.cols (); \ - \ - if (dm_nr != m_nr || dm_nc != m_nc) \ - err_nonconformant (#OP, dm_nr, dm_nc, m_nr, m_nc); \ - else \ - { \ - if (m_nr > 0 && m_nc > 0) \ - { \ - r = R (PREOP m); \ - \ - octave_idx_type len = dm.length (); \ - \ - for (octave_idx_type i = 0; i < len; i++) \ - r.elem (i, i) OPEQ dm.elem (i, i); \ - } \ - else \ - r.resize (m_nr, m_nc); \ - } \ - \ - return r; \ -} +#define DMM_BIN_OP(R, OP, DM, M, OPEQ, PREOP) \ + R \ + OP (const DM& dm, const M& m) \ + { \ + R r; \ + \ + octave_idx_type dm_nr = dm.rows (); \ + octave_idx_type dm_nc = dm.cols (); \ + \ + octave_idx_type m_nr = m.rows (); \ + octave_idx_type m_nc = m.cols (); \ + \ + if (dm_nr != m_nr || dm_nc != m_nc) \ + err_nonconformant (#OP, dm_nr, dm_nc, m_nr, m_nc); \ + else \ + { \ + if (m_nr > 0 && m_nc > 0) \ + { \ + r = R (PREOP m); \ + \ + octave_idx_type len = dm.length (); \ + \ + for (octave_idx_type i = 0; i < len; i++) \ + r.elem (i, i) OPEQ dm.elem (i, i); \ + } \ + else \ + r.resize (m_nr, m_nc); \ + } \ + \ + return r; \ + } -#define DMM_MULTIPLY_OP(R, DM, M, R_ZERO) \ -R \ -operator * (const DM& dm, const M& m) \ -{ \ - R r; \ - \ - octave_idx_type dm_nr = dm.rows (); \ - octave_idx_type dm_nc = dm.cols (); \ - \ - octave_idx_type m_nr = m.rows (); \ - octave_idx_type m_nc = m.cols (); \ - \ - if (dm_nc != m_nr) \ - err_nonconformant ("operator *", dm_nr, dm_nc, m_nr, m_nc); \ - \ - r = R (dm_nr, m_nc); \ - R::element_type *rd = r.fortran_vec (); \ - const M::element_type *md = m.data (); \ - const DM::element_type *dd = dm.data (); \ - \ - octave_idx_type len = dm.length (); \ - for (octave_idx_type i = 0; i < m_nc; i++) \ - { \ - mx_inline_mul (len, rd, md, dd); \ - rd += len; md += m_nr; \ - mx_inline_fill (dm_nr - len, rd, R_ZERO); \ - rd += dm_nr - len; \ - } \ - \ - return r; \ -} +#define DMM_MULTIPLY_OP(R, DM, M, R_ZERO) \ + R \ + operator * (const DM& dm, const M& m) \ + { \ + R r; \ + \ + octave_idx_type dm_nr = dm.rows (); \ + octave_idx_type dm_nc = dm.cols (); \ + \ + octave_idx_type m_nr = m.rows (); \ + octave_idx_type m_nc = m.cols (); \ + \ + if (dm_nc != m_nr) \ + err_nonconformant ("operator *", dm_nr, dm_nc, m_nr, m_nc); \ + \ + r = R (dm_nr, m_nc); \ + R::element_type *rd = r.fortran_vec (); \ + const M::element_type *md = m.data (); \ + const DM::element_type *dd = dm.data (); \ + \ + octave_idx_type len = dm.length (); \ + for (octave_idx_type i = 0; i < m_nc; i++) \ + { \ + mx_inline_mul (len, rd, md, dd); \ + rd += len; md += m_nr; \ + mx_inline_fill (dm_nr - len, rd, R_ZERO); \ + rd += dm_nr - len; \ + } \ + \ + return r; \ + } -#define DMM_BIN_OPS(R, DM, M, R_ZERO) \ - DMM_BIN_OP (R, operator +, DM, M, +=, ) \ - DMM_BIN_OP (R, operator -, DM, M, +=, -) \ +#define DMM_BIN_OPS(R, DM, M, R_ZERO) \ + DMM_BIN_OP (R, operator +, DM, M, +=, ) \ + DMM_BIN_OP (R, operator -, DM, M, +=, -) \ DMM_MULTIPLY_OP (R, DM, M, R_ZERO) // diagonal matrix by diagonal matrix operations. 
-#define DMDM_BIN_OP(R, OP, DM1, DM2, F) \ - R \ - OP (const DM1& dm1, const DM2& dm2) \ - { \ - R r; \ - \ - octave_idx_type dm1_nr = dm1.rows (); \ - octave_idx_type dm1_nc = dm1.cols (); \ - \ - octave_idx_type dm2_nr = dm2.rows (); \ - octave_idx_type dm2_nc = dm2.cols (); \ - \ - if (dm1_nr != dm2_nr || dm1_nc != dm2_nc) \ - err_nonconformant (#OP, dm1_nr, dm1_nc, dm2_nr, dm2_nc); \ - \ - r.resize (dm1_nr, dm1_nc); \ - \ - if (dm1_nr > 0 && dm1_nc > 0) \ - F (dm1.length (), r.fortran_vec (), dm1.data (), dm2.data ()); \ - \ - return r; \ +#define DMDM_BIN_OP(R, OP, DM1, DM2, F) \ + R \ + OP (const DM1& dm1, const DM2& dm2) \ + { \ + R r; \ + \ + octave_idx_type dm1_nr = dm1.rows (); \ + octave_idx_type dm1_nc = dm1.cols (); \ + \ + octave_idx_type dm2_nr = dm2.rows (); \ + octave_idx_type dm2_nc = dm2.cols (); \ + \ + if (dm1_nr != dm2_nr || dm1_nc != dm2_nc) \ + err_nonconformant (#OP, dm1_nr, dm1_nc, dm2_nr, dm2_nc); \ + \ + r.resize (dm1_nr, dm1_nc); \ + \ + if (dm1_nr > 0 && dm1_nc > 0) \ + F (dm1.length (), r.fortran_vec (), dm1.data (), dm2.data ()); \ + \ + return r; \ } -#define DMDM_BIN_OPS(R, DM1, DM2) \ - DMDM_BIN_OP (R, operator +, DM1, DM2, mx_inline_add) \ - DMDM_BIN_OP (R, operator -, DM1, DM2, mx_inline_sub) \ +#define DMDM_BIN_OPS(R, DM1, DM2) \ + DMDM_BIN_OP (R, operator +, DM1, DM2, mx_inline_add) \ + DMDM_BIN_OP (R, operator -, DM1, DM2, mx_inline_sub) \ DMDM_BIN_OP (R, product, DM1, DM2, mx_inline_mul) // scalar by N-D array min/max ops -#define SND_MINMAX_FCN(FCN, OP, T, S) \ -T \ -FCN (S d, const T& m) \ -{ \ - return do_sm_binary_op (d, m, mx_inline_x##FCN); \ -} +#define SND_MINMAX_FCN(FCN, OP, T, S) \ + T \ + FCN (S d, const T& m) \ + { \ + return do_sm_binary_op (d, m, mx_inline_x##FCN); \ + } -#define NDS_MINMAX_FCN(FCN, OP, T, S) \ -T \ -FCN (const T& m, S d) \ -{ \ - return do_ms_binary_op (m, d, mx_inline_x##FCN); \ -} +#define NDS_MINMAX_FCN(FCN, OP, T, S) \ + T \ + FCN (const T& m, S d) \ + { \ + return do_ms_binary_op (m, d, mx_inline_x##FCN); \ + } -#define NDND_MINMAX_FCN(FCN, OP, T, S) \ -T \ -FCN (const T& a, const T& b) \ -{ \ - return do_mm_binary_op (a, b, mx_inline_x##FCN, mx_inline_x##FCN, mx_inline_x##FCN, #FCN); \ -} +#define NDND_MINMAX_FCN(FCN, OP, T, S) \ + T \ + FCN (const T& a, const T& b) \ + { \ + return do_mm_binary_op (a, b, mx_inline_x##FCN, mx_inline_x##FCN, mx_inline_x##FCN, #FCN); \ + } -#define MINMAX_FCNS(T, S) \ - SND_MINMAX_FCN (min, <, T, S) \ - NDS_MINMAX_FCN (min, <, T, S) \ - NDND_MINMAX_FCN (min, <, T, S) \ - SND_MINMAX_FCN (max, >, T, S) \ - NDS_MINMAX_FCN (max, >, T, S) \ +#define MINMAX_FCNS(T, S) \ + SND_MINMAX_FCN (min, <, T, S) \ + NDS_MINMAX_FCN (min, <, T, S) \ + NDND_MINMAX_FCN (min, <, T, S) \ + SND_MINMAX_FCN (max, >, T, S) \ + NDS_MINMAX_FCN (max, >, T, S) \ NDND_MINMAX_FCN (max, >, T, S) // permutation matrix by matrix ops and vice versa -#define PMM_MULTIPLY_OP(PM, M) \ -M operator * (const PM& p, const M& x) \ -{ \ - octave_idx_type nr = x.rows (); \ - octave_idx_type nc = x.columns (); \ - M result; \ - if (p.columns () != nr) \ - err_nonconformant ("operator *", p.rows (), p.columns (), nr, nc); \ - else \ - { \ - result = M (nr, nc); \ - result.assign (p.col_perm_vec (), idx_vector::colon, x); \ - } \ - \ - return result; \ -} +#define PMM_MULTIPLY_OP(PM, M) \ + M operator * (const PM& p, const M& x) \ + { \ + octave_idx_type nr = x.rows (); \ + octave_idx_type nc = x.columns (); \ + M result; \ + if (p.columns () != nr) \ + err_nonconformant ("operator *", p.rows (), p.columns (), nr, nc); \ + else \ 
+ { \ + result = M (nr, nc); \ + result.assign (p.col_perm_vec (), idx_vector::colon, x); \ + } \ + \ + return result; \ + } -#define MPM_MULTIPLY_OP(M, PM) \ -M operator * (const M& x, const PM& p) \ -{ \ - octave_idx_type nr = x.rows (); \ - octave_idx_type nc = x.columns (); \ - M result; \ - if (p.rows () != nc) \ - err_nonconformant ("operator *", nr, nc, p.rows (), p.columns ()); \ - \ - result = x.index (idx_vector::colon, p.col_perm_vec ()); \ - \ - return result; \ -} +#define MPM_MULTIPLY_OP(M, PM) \ + M operator * (const M& x, const PM& p) \ + { \ + octave_idx_type nr = x.rows (); \ + octave_idx_type nc = x.columns (); \ + M result; \ + if (p.rows () != nc) \ + err_nonconformant ("operator *", nr, nc, p.rows (), p.columns ()); \ + \ + result = x.index (idx_vector::colon, p.col_perm_vec ()); \ + \ + return result; \ + } -#define PMM_BIN_OPS(R, PM, M) \ +#define PMM_BIN_OPS(R, PM, M) \ PMM_MULTIPLY_OP(PM, M); -#define MPM_BIN_OPS(R, M, PM) \ +#define MPM_BIN_OPS(R, M, PM) \ MPM_MULTIPLY_OP(M, PM); -#define NDND_MAPPER_BODY(R, NAME) \ - R retval (dims ()); \ - octave_idx_type n = numel (); \ - for (octave_idx_type i = 0; i < n; i++) \ - retval.xelem (i) = NAME (elem (i)); \ +#define NDND_MAPPER_BODY(R, NAME) \ + R retval (dims ()); \ + octave_idx_type n = numel (); \ + for (octave_idx_type i = 0; i < n; i++) \ + retval.xelem (i) = NAME (elem (i)); \ return retval; #endif diff -r dd992fd74fce -r e43d83253e28 liboctave/system/mach-info.cc --- a/liboctave/system/mach-info.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/system/mach-info.cc Mon Aug 01 12:40:18 2016 -0400 @@ -51,16 +51,16 @@ equiv fp_par[4]; }; - #define INIT_FLT_PAR(fp, fmt, sm1, sm2, lrg1, lrg2, rt1, rt2, dv1, dv2) \ - do \ - { \ - fp.fp_fmt = (fmt); \ - fp.fp_par[0].i[0] = (sm1); fp.fp_par[0].i[1] = (sm2); \ - fp.fp_par[1].i[0] = (lrg1); fp.fp_par[1].i[1] = (lrg2); \ - fp.fp_par[2].i[0] = (rt1); fp.fp_par[2].i[1] = (rt2); \ - fp.fp_par[3].i[0] = (dv1); fp.fp_par[3].i[1] = (dv2); \ - } \ - while (0) +#define INIT_FLT_PAR(fp, fmt, sm1, sm2, lrg1, lrg2, rt1, rt2, dv1, dv2) \ + do \ + { \ + fp.fp_fmt = (fmt); \ + fp.fp_par[0].i[0] = (sm1); fp.fp_par[0].i[1] = (sm2); \ + fp.fp_par[1].i[0] = (lrg1); fp.fp_par[1].i[1] = (lrg2); \ + fp.fp_par[2].i[0] = (rt1); fp.fp_par[2].i[1] = (rt2); \ + fp.fp_par[3].i[0] = (dv1); fp.fp_par[3].i[1] = (dv2); \ + } \ + while (0) static int equiv_compare (const equiv *std, const equiv *v, int len) diff -r dd992fd74fce -r e43d83253e28 liboctave/system/oct-group.cc --- a/liboctave/system/oct-group.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/system/oct-group.cc Mon Aug 01 12:40:18 2016 -0400 @@ -34,7 +34,7 @@ #include "oct-group.h" #include "str-vec.h" -#define NOT_SUPPORTED(nm) \ +#define NOT_SUPPORTED(nm) \ nm ": not supported on this system" OCTAVE_NORETURN static diff -r dd992fd74fce -r e43d83253e28 liboctave/system/oct-passwd.cc --- a/liboctave/system/oct-passwd.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/system/oct-passwd.cc Mon Aug 01 12:40:18 2016 -0400 @@ -33,7 +33,7 @@ #include "lo-error.h" #include "oct-passwd.h" -#define NOT_SUPPORTED(nm) \ +#define NOT_SUPPORTED(nm) \ nm ": not supported on this system" OCTAVE_NORETURN static diff -r dd992fd74fce -r e43d83253e28 liboctave/system/oct-syscalls.cc --- a/liboctave/system/oct-syscalls.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/system/oct-syscalls.cc Mon Aug 01 12:40:18 2016 -0400 @@ -39,7 +39,7 @@ #include "unistd-wrappers.h" #include "wait-wrappers.h" -#define NOT_SUPPORTED(nm) \ +#define NOT_SUPPORTED(nm) \ nm ": 
not supported on this system" namespace octave diff -r dd992fd74fce -r e43d83253e28 liboctave/system/oct-time.cc --- a/liboctave/system/oct-time.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/system/oct-time.cc Mon Aug 01 12:40:18 2016 -0400 @@ -108,10 +108,10 @@ // So, we no longer check limits here. #define DEFINE_SET_FIELD_FCN(type, f, lo, hi) \ - base_tm& \ - base_tm::f (type v) \ + base_tm& \ + base_tm::f (type v) \ { \ - m_ ## f = v; \ + m_ ## f = v; \ \ return *this; \ } diff -r dd992fd74fce -r e43d83253e28 liboctave/util/data-conv.cc --- a/liboctave/util/data-conv.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/util/data-conv.cc Mon Aug 01 12:40:18 2016 -0400 @@ -38,54 +38,54 @@ #include "oct-locbuf.h" #if defined (OCTAVE_HAVE_LONG_LONG_INT) -# define FIND_SIZED_INT_TYPE(VAL, BITS, TQ, Q) \ - do \ - { \ - int sz = BITS / std::numeric_limits::digits; \ - if (sizeof (TQ char) == sz) \ - VAL = oct_data_conv::dt_ ## Q ## char; \ - else if (sizeof (TQ short) == sz) \ - VAL = oct_data_conv::dt_ ## Q ## short; \ - else if (sizeof (TQ int) == sz) \ - VAL = oct_data_conv::dt_ ## Q ## int; \ - else if (sizeof (TQ long) == sz) \ - VAL = oct_data_conv::dt_ ## Q ## long; \ - else if (sizeof (TQ long long) == sz) \ - VAL = oct_data_conv::dt_ ## Q ## longlong; \ - else \ - VAL = oct_data_conv::dt_unknown; \ - } \ - while (0) +# define FIND_SIZED_INT_TYPE(VAL, BITS, TQ, Q) \ + do \ + { \ + int sz = BITS / std::numeric_limits::digits; \ + if (sizeof (TQ char) == sz) \ + VAL = oct_data_conv::dt_ ## Q ## char; \ + else if (sizeof (TQ short) == sz) \ + VAL = oct_data_conv::dt_ ## Q ## short; \ + else if (sizeof (TQ int) == sz) \ + VAL = oct_data_conv::dt_ ## Q ## int; \ + else if (sizeof (TQ long) == sz) \ + VAL = oct_data_conv::dt_ ## Q ## long; \ + else if (sizeof (TQ long long) == sz) \ + VAL = oct_data_conv::dt_ ## Q ## longlong; \ + else \ + VAL = oct_data_conv::dt_unknown; \ + } \ + while (0) #else -# define FIND_SIZED_INT_TYPE(VAL, BITS, TQ, Q) \ - do \ - { \ - int sz = BITS / std::numeric_limits::digits; \ - if (sizeof (TQ char) == sz) \ - VAL = oct_data_conv::dt_ ## Q ## char; \ - else if (sizeof (TQ short) == sz) \ - VAL = oct_data_conv::dt_ ## Q ## short; \ - else if (sizeof (TQ int) == sz) \ - VAL = oct_data_conv::dt_ ## Q ## int; \ - else if (sizeof (TQ long) == sz) \ - VAL = oct_data_conv::dt_ ## Q ## long; \ - else \ - VAL = oct_data_conv::dt_unknown; \ - } \ - while (0) +# define FIND_SIZED_INT_TYPE(VAL, BITS, TQ, Q) \ + do \ + { \ + int sz = BITS / std::numeric_limits::digits; \ + if (sizeof (TQ char) == sz) \ + VAL = oct_data_conv::dt_ ## Q ## char; \ + else if (sizeof (TQ short) == sz) \ + VAL = oct_data_conv::dt_ ## Q ## short; \ + else if (sizeof (TQ int) == sz) \ + VAL = oct_data_conv::dt_ ## Q ## int; \ + else if (sizeof (TQ long) == sz) \ + VAL = oct_data_conv::dt_ ## Q ## long; \ + else \ + VAL = oct_data_conv::dt_unknown; \ + } \ + while (0) #endif -#define FIND_SIZED_FLOAT_TYPE(VAL, BITS) \ - do \ - { \ - int sz = BITS / std::numeric_limits::digits; \ - if (sizeof (float) == sz) \ - VAL = oct_data_conv::dt_float; \ - else if (sizeof (double) == sz) \ - VAL = oct_data_conv::dt_double; \ - else \ - VAL = oct_data_conv::dt_unknown; \ - } \ +#define FIND_SIZED_FLOAT_TYPE(VAL, BITS) \ + do \ + { \ + int sz = BITS / std::numeric_limits::digits; \ + if (sizeof (float) == sz) \ + VAL = oct_data_conv::dt_float; \ + else if (sizeof (double) == sz) \ + VAL = oct_data_conv::dt_double; \ + else \ + VAL = oct_data_conv::dt_unknown; \ + } \ while (0) // I'm not sure it is worth the 
trouble, but let's use a lookup table @@ -146,32 +146,32 @@ return s; } -#define GET_SIZED_INT_TYPE(T, U) \ - do \ - { \ - switch (sizeof (T)) \ - { \ - case 1: \ - retval = dt_ ## U ## int8; \ - break; \ - \ - case 2: \ - retval = dt_ ## U ## int16; \ - break; \ - \ - case 4: \ - retval = dt_ ## U ## int32; \ - break; \ - \ - case 8: \ - retval = dt_ ## U ## int64; \ - break; \ - \ - default: \ - retval = dt_unknown; \ - break; \ - } \ - } \ +#define GET_SIZED_INT_TYPE(T, U) \ + do \ + { \ + switch (sizeof (T)) \ + { \ + case 1: \ + retval = dt_ ## U ## int8; \ + break; \ + \ + case 2: \ + retval = dt_ ## U ## int16; \ + break; \ + \ + case 4: \ + retval = dt_ ## U ## int32; \ + break; \ + \ + case 8: \ + retval = dt_ ## U ## int64; \ + break; \ + \ + default: \ + retval = dt_unknown; \ + break; \ + } \ + } \ while (0) size_t @@ -572,39 +572,39 @@ return retval; } -#define LS_DO_READ(TYPE, swap, data, size, len, stream) \ - do \ - { \ - if (len > 0) \ - { \ - OCTAVE_LOCAL_BUFFER (TYPE, ptr, len); \ +#define LS_DO_READ(TYPE, swap, data, size, len, stream) \ + do \ + { \ + if (len > 0) \ + { \ + OCTAVE_LOCAL_BUFFER (TYPE, ptr, len); \ std::streamsize n_bytes = size * static_cast (len); \ - stream.read (reinterpret_cast (ptr), n_bytes); \ - if (swap) \ - swap_bytes< size > (ptr, len); \ - for (octave_idx_type i = 0; i < len; i++) \ - data[i] = ptr[i]; \ - } \ - } \ + stream.read (reinterpret_cast (ptr), n_bytes); \ + if (swap) \ + swap_bytes< size > (ptr, len); \ + for (octave_idx_type i = 0; i < len; i++) \ + data[i] = ptr[i]; \ + } \ + } \ while (0) // Have to use copy here to avoid writing over data accessed via // Matrix::data (). -#define LS_DO_WRITE(TYPE, data, size, len, stream) \ - do \ - { \ - if (len > 0) \ - { \ - char tmp_type = type; \ - stream.write (&tmp_type, 1); \ - OCTAVE_LOCAL_BUFFER (TYPE, ptr, len); \ - for (octave_idx_type i = 0; i < len; i++) \ - ptr[i] = static_cast (data[i]); \ - std::streamsize n_bytes = size * static_cast (len); \ - stream.write (reinterpret_cast (ptr), n_bytes); \ - } \ - } \ +#define LS_DO_WRITE(TYPE, data, size, len, stream) \ + do \ + { \ + if (len > 0) \ + { \ + char tmp_type = type; \ + stream.write (&tmp_type, 1); \ + OCTAVE_LOCAL_BUFFER (TYPE, ptr, len); \ + for (octave_idx_type i = 0; i < len; i++) \ + ptr[i] = static_cast (data[i]); \ + std::streamsize n_bytes = size * static_cast (len); \ + stream.write (reinterpret_cast (ptr), n_bytes); \ + } \ + } \ while (0) // Loading variables from files. diff -r dd992fd74fce -r e43d83253e28 liboctave/util/kpse.cc --- a/liboctave/util/kpse.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/util/kpse.cc Mon Aug 01 12:40:18 2016 -0400 @@ -58,7 +58,7 @@ // environment variable paths. #define IS_DEVICE_SEP(ch) octave::sys::file_ops::is_dev_sep (ch) -#define NAME_BEGINS_WITH_DEVICE(name) \ +#define NAME_BEGINS_WITH_DEVICE(name) \ (name.length () > 0 && IS_DEVICE_SEP ((name)[1])) #define DIR_SEP_STRING octave::sys::file_ops::dir_sep_str () diff -r dd992fd74fce -r e43d83253e28 liboctave/util/lo-ieee.h --- a/liboctave/util/lo-ieee.h Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/util/lo-ieee.h Mon Aug 01 12:40:18 2016 -0400 @@ -100,19 +100,29 @@ } #endif -#define lo_ieee_isnan(x) (sizeof (x) == sizeof (float) ? \ - __lo_ieee_float_isnan (x) : __lo_ieee_isnan (x)) -#define lo_ieee_finite(x) (sizeof (x) == sizeof (float) ? \ - __lo_ieee_float_finite (x) : __lo_ieee_finite (x)) -#define lo_ieee_isinf(x) (sizeof (x) == sizeof (float) ? 
\ - __lo_ieee_float_isinf (x) : __lo_ieee_isinf (x)) +#define lo_ieee_isnan(x) \ + (sizeof (x) == sizeof (float) \ + ? __lo_ieee_float_isnan (x) : __lo_ieee_isnan (x)) + +#define lo_ieee_finite(x) \ + (sizeof (x) == sizeof (float) \ + ? __lo_ieee_float_finite (x) : __lo_ieee_finite (x)) + +#define lo_ieee_isinf(x) \ + (sizeof (x) == sizeof (float) \ + ? __lo_ieee_float_isinf (x) : __lo_ieee_isinf (x)) -#define lo_ieee_is_NA(x) (sizeof (x) == sizeof (float) ? \ - __lo_ieee_float_is_NA (x) : __lo_ieee_is_NA (x)) -#define lo_ieee_is_NaN_or_NA(x) (sizeof (x) == sizeof (float) ? \ - __lo_ieee_float_is_NaN_or_NA (x) : __lo_ieee_is_NaN_or_NA (x)) -#define lo_ieee_signbit(x) (sizeof (x) == sizeof (float) ? \ - __lo_ieee_float_signbit (x) : __lo_ieee_signbit (x)) +#define lo_ieee_is_NA(x) \ + (sizeof (x) == sizeof (float) \ + ? __lo_ieee_float_is_NA (x) : __lo_ieee_is_NA (x)) + +#define lo_ieee_is_NaN_or_NA(x) \ + (sizeof (x) == sizeof (float) \ + ? __lo_ieee_float_is_NaN_or_NA (x) : __lo_ieee_is_NaN_or_NA (x)) + +#define lo_ieee_signbit(x) \ + (sizeof (x) == sizeof (float) \ + ? __lo_ieee_float_signbit (x) : __lo_ieee_signbit (x)) #if defined (__cplusplus) diff -r dd992fd74fce -r e43d83253e28 liboctave/util/lo-macros.h --- a/liboctave/util/lo-macros.h Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/util/lo-macros.h Mon Aug 01 12:40:18 2016 -0400 @@ -28,29 +28,30 @@ // Core macros. Iteration is supported up to count 10. #define OCT_ITERATE_PARAM_MACRO0(MACRO, PARAM) -#define OCT_ITERATE_PARAM_MACRO1(MACRO, PARAM) \ -MACRO(0, PARAM) -#define OCT_ITERATE_PARAM_MACRO2(MACRO, PARAM) \ -MACRO(0, PARAM) MACRO(1, PARAM) -#define OCT_ITERATE_PARAM_MACRO3(MACRO, PARAM) \ -MACRO(0, PARAM) MACRO(1, PARAM) MACRO(2, PARAM) -#define OCT_ITERATE_PARAM_MACRO4(MACRO, PARAM) \ -MACRO(0, PARAM) MACRO(1, PARAM) MACRO(2, PARAM) MACRO(3, PARAM) -#define OCT_ITERATE_PARAM_MACRO5(MACRO, PARAM) \ -MACRO(0, PARAM) MACRO(1, PARAM) MACRO(2, PARAM) MACRO(3, PARAM) MACRO(4, PARAM) -#define OCT_ITERATE_PARAM_MACRO6(MACRO, PARAM) \ -OCT_ITERATE_PARAM_MACRO5(MACRO, PARAM) MACRO(5, PARAM) -#define OCT_ITERATE_PARAM_MACRO7(MACRO, PARAM) \ -OCT_ITERATE_PARAM_MACRO5(MACRO, PARAM) MACRO(5, PARAM) MACRO(6, PARAM) -#define OCT_ITERATE_PARAM_MACRO8(MACRO, PARAM) \ -OCT_ITERATE_PARAM_MACRO5(MACRO, PARAM) MACRO(5, PARAM) MACRO(6, PARAM) MACRO(7, PARAM) -#define OCT_ITERATE_PARAM_MACRO9(MACRO, PARAM) \ -OCT_ITERATE_PARAM_MACRO8(MACRO, PARAM) MACRO(8, PARAM) -#define OCT_ITERATE_PARAM_MACRO10(MACRO, PARAM) \ -OCT_ITERATE_PARAM_MACRO8(MACRO, PARAM) MACRO(8, PARAM) MACRO(9, PARAM) +#define OCT_ITERATE_PARAM_MACRO1(MACRO, PARAM) \ + MACRO(0, PARAM) +#define OCT_ITERATE_PARAM_MACRO2(MACRO, PARAM) \ + MACRO(0, PARAM) MACRO(1, PARAM) +#define OCT_ITERATE_PARAM_MACRO3(MACRO, PARAM) \ + MACRO(0, PARAM) MACRO(1, PARAM) MACRO(2, PARAM) +#define OCT_ITERATE_PARAM_MACRO4(MACRO, PARAM) \ + MACRO(0, PARAM) MACRO(1, PARAM) MACRO(2, PARAM) MACRO(3, PARAM) +#define OCT_ITERATE_PARAM_MACRO5(MACRO, PARAM) \ + MACRO(0, PARAM) MACRO(1, PARAM) MACRO(2, PARAM) MACRO(3, PARAM) MACRO(4, PARAM) +#define OCT_ITERATE_PARAM_MACRO6(MACRO, PARAM) \ + OCT_ITERATE_PARAM_MACRO5(MACRO, PARAM) MACRO(5, PARAM) +#define OCT_ITERATE_PARAM_MACRO7(MACRO, PARAM) \ + OCT_ITERATE_PARAM_MACRO5(MACRO, PARAM) MACRO(5, PARAM) MACRO(6, PARAM) +#define OCT_ITERATE_PARAM_MACRO8(MACRO, PARAM) \ + OCT_ITERATE_PARAM_MACRO5(MACRO, PARAM) MACRO(5, PARAM) MACRO(6, PARAM) MACRO(7, PARAM) +#define OCT_ITERATE_PARAM_MACRO9(MACRO, PARAM) \ + OCT_ITERATE_PARAM_MACRO8(MACRO, PARAM) MACRO(8, PARAM) 
+#define OCT_ITERATE_PARAM_MACRO10(MACRO, PARAM) \ + OCT_ITERATE_PARAM_MACRO8(MACRO, PARAM) MACRO(8, PARAM) MACRO(9, PARAM) // expands to MACRO(0, PARAM) MACRO(1, PARAM) ... MACRO(NUM-1, PARAM) -#define OCT_ITERATE_PARAM_MACRO(MACRO, PARAM, NUM) OCT_ITERATE_PARAM_MACRO##NUM(MACRO,PARAM) +#define OCT_ITERATE_PARAM_MACRO(MACRO, PARAM, NUM) \ + OCT_ITERATE_PARAM_MACRO##NUM(MACRO,PARAM) #define OCT_IF_PARAM0(MACRO, PARAM) #define OCT_IF_PARAM1(MACRO, PARAM) MACRO(PARAM) @@ -87,15 +88,15 @@ #define OCT_MAKE_LIST(MACRO, NUM) OCT_ITERATE_PARAM_MACRO(OCT_MAKE_LIST_HELPER, MACRO, NUM) -#define OCT_MAKE_DECL_LIST_HELPER(NUM, PREFIX) \ +#define OCT_MAKE_DECL_LIST_HELPER(NUM, PREFIX) \ OCT_IF_PARAM(NUM,OCT_MAKE_LIST_HELPER1,) OCT_CONCAT2(PREFIX, NUM) // expands to TYPE PREFIX0, TYPE PREFIX1, ..., TYPE PREFIX ## (NUM-1) -#define OCT_MAKE_DECL_LIST(TYPE, PREFIX, NUM) \ +#define OCT_MAKE_DECL_LIST(TYPE, PREFIX, NUM) \ OCT_ITERATE_PARAM_MACRO(OCT_MAKE_DECL_LIST_HELPER, TYPE PREFIX, NUM) // expands to PREFIX0, PREFIX1, ..., PREFIX ## (NUM-1) -#define OCT_MAKE_ARG_LIST(PREFIX, NUM) \ +#define OCT_MAKE_ARG_LIST(PREFIX, NUM) \ OCT_ITERATE_PARAM_MACRO(OCT_MAKE_DECL_LIST_HELPER, PREFIX, NUM) #endif diff -r dd992fd74fce -r e43d83253e28 liboctave/util/oct-cmplx.h --- a/liboctave/util/oct-cmplx.h Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/util/oct-cmplx.h Mon Aug 01 12:40:18 2016 -0400 @@ -42,60 +42,60 @@ // non-unique representation for numbers along the negative real axis branch // cut. Change this to principal value (-pi,pi] by mapping -pi to pi. -#define DEF_COMPLEXR_COMP(OP, OPS) \ -template \ -inline bool operator OP (const std::complex& a, const std::complex& b) \ -{ \ - OCTAVE_FLOAT_TRUNCATE const T ax = std::abs (a); \ - OCTAVE_FLOAT_TRUNCATE const T bx = std::abs (b); \ - if (ax == bx) \ - { \ - OCTAVE_FLOAT_TRUNCATE const T ay = std::arg (a); \ - OCTAVE_FLOAT_TRUNCATE const T by = std::arg (b); \ - if (ay == static_cast (-M_PI)) \ - { \ - if (by != static_cast (-M_PI)) \ - return static_cast (M_PI) OP by; \ - } \ - else if (by == static_cast (-M_PI)) \ - { \ - return ay OP static_cast (M_PI); \ - } \ - return ay OP by; \ - } \ - else \ - return ax OPS bx; \ -} \ -template \ -inline bool operator OP (const std::complex& a, T b) \ -{ \ - OCTAVE_FLOAT_TRUNCATE const T ax = std::abs (a); \ - OCTAVE_FLOAT_TRUNCATE const T bx = std::abs (b); \ - if (ax == bx) \ - { \ - OCTAVE_FLOAT_TRUNCATE const T ay = std::arg (a); \ - if (ay == static_cast (-M_PI)) \ - return static_cast (M_PI) OP 0; \ - return ay OP 0; \ - } \ - else \ - return ax OPS bx; \ -} \ -template \ -inline bool operator OP (T a, const std::complex& b) \ -{ \ - OCTAVE_FLOAT_TRUNCATE const T ax = std::abs (a); \ - OCTAVE_FLOAT_TRUNCATE const T bx = std::abs (b); \ - if (ax == bx) \ - { \ - OCTAVE_FLOAT_TRUNCATE const T by = std::arg (b); \ - if (by == static_cast (-M_PI)) \ - return 0 OP static_cast (M_PI); \ - return 0 OP by; \ - } \ - else \ - return ax OPS bx; \ -} +#define DEF_COMPLEXR_COMP(OP, OPS) \ + template \ + inline bool operator OP (const std::complex& a, const std::complex& b) \ + { \ + OCTAVE_FLOAT_TRUNCATE const T ax = std::abs (a); \ + OCTAVE_FLOAT_TRUNCATE const T bx = std::abs (b); \ + if (ax == bx) \ + { \ + OCTAVE_FLOAT_TRUNCATE const T ay = std::arg (a); \ + OCTAVE_FLOAT_TRUNCATE const T by = std::arg (b); \ + if (ay == static_cast (-M_PI)) \ + { \ + if (by != static_cast (-M_PI)) \ + return static_cast (M_PI) OP by; \ + } \ + else if (by == static_cast (-M_PI)) \ + { \ + return ay OP static_cast (M_PI); \ + } \ + return ay 
OP by; \ + } \ + else \ + return ax OPS bx; \ + } \ + template \ + inline bool operator OP (const std::complex& a, T b) \ + { \ + OCTAVE_FLOAT_TRUNCATE const T ax = std::abs (a); \ + OCTAVE_FLOAT_TRUNCATE const T bx = std::abs (b); \ + if (ax == bx) \ + { \ + OCTAVE_FLOAT_TRUNCATE const T ay = std::arg (a); \ + if (ay == static_cast (-M_PI)) \ + return static_cast (M_PI) OP 0; \ + return ay OP 0; \ + } \ + else \ + return ax OPS bx; \ + } \ + template \ + inline bool operator OP (T a, const std::complex& b) \ + { \ + OCTAVE_FLOAT_TRUNCATE const T ax = std::abs (a); \ + OCTAVE_FLOAT_TRUNCATE const T bx = std::abs (b); \ + if (ax == bx) \ + { \ + OCTAVE_FLOAT_TRUNCATE const T by = std::arg (b); \ + if (by == static_cast (-M_PI)) \ + return 0 OP static_cast (M_PI); \ + return 0 OP by; \ + } \ + else \ + return ax OPS bx; \ + } DEF_COMPLEXR_COMP (>, >) DEF_COMPLEXR_COMP (<, <) diff -r dd992fd74fce -r e43d83253e28 liboctave/util/oct-glob.cc --- a/liboctave/util/oct-glob.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/util/oct-glob.cc Mon Aug 01 12:40:18 2016 -0400 @@ -87,7 +87,7 @@ if (! xpat.empty ()) { -#if (defined (OCTAVE_HAVE_WINDOWS_FILESYSTEM) \ +#if (defined (OCTAVE_HAVE_WINDOWS_FILESYSTEM) \ && ! defined (OCTAVE_HAVE_POSIX_FILESYSTEM)) std::replace_if (xpat.begin (), xpat.end (), std::bind2nd (std::equal_to (), '\\'), @@ -168,10 +168,10 @@ for (size_t j = 0; j < xpat.length (); j++) { -#if (defined (OCTAVE_HAVE_WINDOWS_FILESYSTEM) \ +#if (defined (OCTAVE_HAVE_WINDOWS_FILESYSTEM) \ && ! defined (OCTAVE_HAVE_POSIX_FILESYSTEM)) if (xpat[j] == '\\') - escaped += '/'; + escaped += '/'; else #endif { @@ -218,10 +218,10 @@ for (size_t m = 0; m < tmp.length (); m++) { -#if (defined (OCTAVE_HAVE_WINDOWS_FILESYSTEM) \ +#if (defined (OCTAVE_HAVE_WINDOWS_FILESYSTEM) \ && ! defined (OCTAVE_HAVE_POSIX_FILESYSTEM)) if (tmp[m] == '/') - unescaped += '\\'; + unescaped += '\\'; else #endif { diff -r dd992fd74fce -r e43d83253e28 liboctave/util/oct-inttypes.cc --- a/liboctave/util/oct-inttypes.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/util/oct-inttypes.cc Mon Aug 01 12:40:18 2016 -0400 @@ -36,9 +36,9 @@ const octave_int octave_int::one (static_cast (1)); // define type names. 
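// For example, the invocation DECLARE_OCTAVE_INT_TYPENAME (int8_t, "int8")
// shown just below expands, roughly (with the template argument spelled
// out), to
//
//   template <>
//   OCTAVE_API const char *
//   octave_int<int8_t>::type_name () { return "int8"; }
//
// i.e. each use provides the printable name for one integer type.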
-#define DECLARE_OCTAVE_INT_TYPENAME(TYPE, TYPENAME) \ - template <> \ - OCTAVE_API const char * \ +#define DECLARE_OCTAVE_INT_TYPENAME(TYPE, TYPENAME) \ + template <> \ + OCTAVE_API const char * \ octave_int::type_name () { return TYPENAME; } DECLARE_OCTAVE_INT_TYPENAME (int8_t, "int8") @@ -79,20 +79,20 @@ } } -#define INSTANTIATE_CONVERT_REAL_1(T, S) \ - template \ - OCTAVE_API \ - T \ +#define INSTANTIATE_CONVERT_REAL_1(T, S) \ + template \ + OCTAVE_API \ + T \ octave_int_base::convert_real (const S&) -#define INSTANTIATE_CONVERT_REAL(S) \ - INSTANTIATE_CONVERT_REAL_1 (int8_t, S); \ - INSTANTIATE_CONVERT_REAL_1 (uint8_t, S); \ - INSTANTIATE_CONVERT_REAL_1 (int16_t, S); \ - INSTANTIATE_CONVERT_REAL_1 (uint16_t, S); \ - INSTANTIATE_CONVERT_REAL_1 (int32_t, S); \ - INSTANTIATE_CONVERT_REAL_1 (uint32_t, S); \ - INSTANTIATE_CONVERT_REAL_1 (int64_t, S); \ +#define INSTANTIATE_CONVERT_REAL(S) \ + INSTANTIATE_CONVERT_REAL_1 (int8_t, S); \ + INSTANTIATE_CONVERT_REAL_1 (uint8_t, S); \ + INSTANTIATE_CONVERT_REAL_1 (int16_t, S); \ + INSTANTIATE_CONVERT_REAL_1 (uint16_t, S); \ + INSTANTIATE_CONVERT_REAL_1 (int32_t, S); \ + INSTANTIATE_CONVERT_REAL_1 (uint32_t, S); \ + INSTANTIATE_CONVERT_REAL_1 (int64_t, S); \ INSTANTIATE_CONVERT_REAL_1 (uint64_t, S) INSTANTIATE_CONVERT_REAL (double); @@ -105,50 +105,50 @@ #if defined (OCTAVE_ENSURE_LONG_DOUBLE_OPERATIONS_ARE_NOT_TRUNCATED) -#define DEFINE_OCTAVE_LONG_DOUBLE_CMP_OP_TEMPLATES(T) \ - template \ - bool \ - octave_int_cmp_op::external_mop (double x, T y) \ - { \ - unsigned int oldcw = octave_begin_long_double_rounding (); \ - \ - bool retval = xop::op (static_cast (x), \ - static_cast (y)); \ - \ - octave_end_long_double_rounding (oldcw); \ - \ - return retval; \ - } \ - \ - template \ - bool \ - octave_int_cmp_op::external_mop (T x, double y) \ - { \ - unsigned int oldcw = octave_begin_long_double_rounding (); \ - \ - bool retval = xop::op (static_cast (x), \ - static_cast (y)); \ - \ - octave_end_long_double_rounding (oldcw); \ - \ - return retval; \ +#define DEFINE_OCTAVE_LONG_DOUBLE_CMP_OP_TEMPLATES(T) \ + template \ + bool \ + octave_int_cmp_op::external_mop (double x, T y) \ + { \ + unsigned int oldcw = octave_begin_long_double_rounding (); \ + \ + bool retval = xop::op (static_cast (x), \ + static_cast (y)); \ + \ + octave_end_long_double_rounding (oldcw); \ + \ + return retval; \ + } \ + \ + template \ + bool \ + octave_int_cmp_op::external_mop (T x, double y) \ + { \ + unsigned int oldcw = octave_begin_long_double_rounding (); \ + \ + bool retval = xop::op (static_cast (x), \ + static_cast (y)); \ + \ + octave_end_long_double_rounding (oldcw); \ + \ + return retval; \ } DEFINE_OCTAVE_LONG_DOUBLE_CMP_OP_TEMPLATES (int64_t) DEFINE_OCTAVE_LONG_DOUBLE_CMP_OP_TEMPLATES (uint64_t) -#define INSTANTIATE_LONG_DOUBLE_LONG_DOUBLE_CMP_OP(OP, T) \ - template OCTAVE_API bool \ - octave_int_cmp_op::external_mop (double, T); \ - template OCTAVE_API bool \ +#define INSTANTIATE_LONG_DOUBLE_LONG_DOUBLE_CMP_OP(OP, T) \ + template OCTAVE_API bool \ + octave_int_cmp_op::external_mop (double, T); \ + template OCTAVE_API bool \ octave_int_cmp_op::external_mop (T, double) -#define INSTANTIATE_LONG_DOUBLE_LONG_DOUBLE_CMP_OPS(T) \ - INSTANTIATE_LONG_DOUBLE_LONG_DOUBLE_CMP_OP (lt, T); \ - INSTANTIATE_LONG_DOUBLE_LONG_DOUBLE_CMP_OP (le, T); \ - INSTANTIATE_LONG_DOUBLE_LONG_DOUBLE_CMP_OP (gt, T); \ - INSTANTIATE_LONG_DOUBLE_LONG_DOUBLE_CMP_OP (ge, T); \ - INSTANTIATE_LONG_DOUBLE_LONG_DOUBLE_CMP_OP (eq, T); \ +#define INSTANTIATE_LONG_DOUBLE_LONG_DOUBLE_CMP_OPS(T) \ + 
INSTANTIATE_LONG_DOUBLE_LONG_DOUBLE_CMP_OP (lt, T); \ + INSTANTIATE_LONG_DOUBLE_LONG_DOUBLE_CMP_OP (le, T); \ + INSTANTIATE_LONG_DOUBLE_LONG_DOUBLE_CMP_OP (gt, T); \ + INSTANTIATE_LONG_DOUBLE_LONG_DOUBLE_CMP_OP (ge, T); \ + INSTANTIATE_LONG_DOUBLE_LONG_DOUBLE_CMP_OP (eq, T); \ INSTANTIATE_LONG_DOUBLE_LONG_DOUBLE_CMP_OP (ne, T) INSTANTIATE_LONG_DOUBLE_LONG_DOUBLE_CMP_OPS (int64_t); @@ -186,35 +186,35 @@ // Similarly, the conversion from the 64-bit integer type to long double // must also occur in long double rounding mode. -#define OCTAVE_LONG_DOUBLE_OP(T, OP, NAME) \ - T \ - external_double_ ## T ## _ ## NAME (double x, T y) \ - { \ - unsigned int oldcw = octave_begin_long_double_rounding (); \ - \ - T retval = T (x OP static_cast (y.value ())); \ - \ - octave_end_long_double_rounding (oldcw); \ - \ - return retval; \ - } \ - \ - T \ - external_ ## T ## _double_ ## NAME (T x, double y) \ - { \ - unsigned int oldcw = octave_begin_long_double_rounding (); \ - \ - T retval = T (static_cast (x.value ()) OP y); \ - \ - octave_end_long_double_rounding (oldcw); \ - \ - return retval; \ +#define OCTAVE_LONG_DOUBLE_OP(T, OP, NAME) \ + T \ + external_double_ ## T ## _ ## NAME (double x, T y) \ + { \ + unsigned int oldcw = octave_begin_long_double_rounding (); \ + \ + T retval = T (x OP static_cast (y.value ())); \ + \ + octave_end_long_double_rounding (oldcw); \ + \ + return retval; \ + } \ + \ + T \ + external_ ## T ## _double_ ## NAME (T x, double y) \ + { \ + unsigned int oldcw = octave_begin_long_double_rounding (); \ + \ + T retval = T (static_cast (x.value ()) OP y); \ + \ + octave_end_long_double_rounding (oldcw); \ + \ + return retval; \ } -#define OCTAVE_LONG_DOUBLE_OPS(T) \ - OCTAVE_LONG_DOUBLE_OP (T, +, add); \ - OCTAVE_LONG_DOUBLE_OP (T, -, sub); \ - OCTAVE_LONG_DOUBLE_OP (T, *, mul); \ +#define OCTAVE_LONG_DOUBLE_OPS(T) \ + OCTAVE_LONG_DOUBLE_OP (T, +, add); \ + OCTAVE_LONG_DOUBLE_OP (T, -, sub); \ + OCTAVE_LONG_DOUBLE_OP (T, *, mul); \ OCTAVE_LONG_DOUBLE_OP (T, /, div) OCTAVE_LONG_DOUBLE_OPS(octave_int64); @@ -280,12 +280,12 @@ typedef xop op; }; -#define DEFINE_REVERTED_OPERATOR(OP1,OP2) \ - template <> \ - class rev_op \ - { \ - public: \ - typedef octave_int_cmp_op::OP2 op; \ +#define DEFINE_REVERTED_OPERATOR(OP1,OP2) \ + template <> \ + class rev_op \ + { \ + public: \ + typedef octave_int_cmp_op::OP2 op; \ } DEFINE_REVERTED_OPERATOR(lt,gt); @@ -441,14 +441,14 @@ } -#define INT_DOUBLE_BINOP_DECL(OP,SUFFIX) \ - template <> \ - OCTAVE_API octave_ ## SUFFIX \ +#define INT_DOUBLE_BINOP_DECL(OP,SUFFIX) \ + template <> \ + OCTAVE_API octave_ ## SUFFIX \ operator OP (const octave_ ## SUFFIX & x, const double& y) -#define DOUBLE_INT_BINOP_DECL(OP,SUFFIX) \ - template <> \ - OCTAVE_API octave_ ## SUFFIX \ +#define DOUBLE_INT_BINOP_DECL(OP,SUFFIX) \ + template <> \ + OCTAVE_API octave_ ## SUFFIX \ operator OP (const double& x, const octave_ ## SUFFIX & y) INT_DOUBLE_BINOP_DECL (+, uint64) @@ -679,14 +679,14 @@ return x * (1.0/y); } -#define INSTANTIATE_INT64_DOUBLE_CMP_OP0(OP,T1,T2) \ - template OCTAVE_API bool \ +#define INSTANTIATE_INT64_DOUBLE_CMP_OP0(OP,T1,T2) \ + template OCTAVE_API bool \ octave_int_cmp_op::emulate_mop (T1 x, T2 y) -#define INSTANTIATE_INT64_DOUBLE_CMP_OP(OP) \ - INSTANTIATE_INT64_DOUBLE_CMP_OP0(OP, double, int64_t); \ - INSTANTIATE_INT64_DOUBLE_CMP_OP0(OP, double, uint64_t); \ - INSTANTIATE_INT64_DOUBLE_CMP_OP0(OP, int64_t, double); \ +#define INSTANTIATE_INT64_DOUBLE_CMP_OP(OP) \ + INSTANTIATE_INT64_DOUBLE_CMP_OP0(OP, double, int64_t); \ + 
INSTANTIATE_INT64_DOUBLE_CMP_OP0(OP, double, uint64_t); \ + INSTANTIATE_INT64_DOUBLE_CMP_OP0(OP, int64_t, double); \ INSTANTIATE_INT64_DOUBLE_CMP_OP0(OP, uint64_t, double) INSTANTIATE_INT64_DOUBLE_CMP_OP(lt); @@ -785,16 +785,16 @@ : octave_int (pow (a.double_value (), static_cast (b)))); } -#define INSTANTIATE_INTTYPE(T) \ - template class OCTAVE_API octave_int; \ +#define INSTANTIATE_INTTYPE(T) \ + template class OCTAVE_API octave_int; \ template OCTAVE_API octave_int pow (const octave_int&, const octave_int&); \ template OCTAVE_API octave_int pow (const double&, const octave_int&); \ template OCTAVE_API octave_int pow (const octave_int&, const double&); \ - template OCTAVE_API octave_int pow (const float&, const octave_int&); \ - template OCTAVE_API octave_int pow (const octave_int&, const float&); \ + template OCTAVE_API octave_int pow (const float&, const octave_int&); \ + template OCTAVE_API octave_int pow (const octave_int&, const float&); \ template OCTAVE_API octave_int powf (const float&, const octave_int&); \ template OCTAVE_API octave_int powf (const octave_int&, const float&); \ - template OCTAVE_API octave_int \ + template OCTAVE_API octave_int \ bitshift (const octave_int&, int, const octave_int&); INSTANTIATE_INTTYPE (int8_t); diff -r dd992fd74fce -r e43d83253e28 liboctave/util/oct-inttypes.h --- a/liboctave/util/oct-inttypes.h Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/util/oct-inttypes.h Mon Aug 01 12:40:18 2016 -0400 @@ -87,14 +87,14 @@ // attempt to use it in computations. }; -#define REGISTER_INT_TYPE(TYPE) \ -template <> \ -class query_integer_type::is_signed> \ -{ \ -public: \ - static const bool registered = true; \ - typedef TYPE type; \ -} +#define REGISTER_INT_TYPE(TYPE) \ + template <> \ + class query_integer_type::is_signed> \ + { \ + public: \ + static const bool registered = true; \ + typedef TYPE type; \ + } // No two registered integers can share sizeof and signedness. REGISTER_INT_TYPE (int8_t); @@ -109,27 +109,27 @@ // Rationale: Comparators have a single static method, rel(), that returns the // result of the binary relation. They also have two static boolean fields: // ltval, gtval determine the value of x OP y if x < y, x > y, respectively. -#define REGISTER_OCTAVE_CMP_OP(NM,OP) \ - class NM \ - { \ - public: \ - static const bool ltval = (0 OP 1); \ - static const bool gtval = (1 OP 0); \ - template \ - static bool op (T x, T y) { return x OP y; } \ - } +#define REGISTER_OCTAVE_CMP_OP(NM,OP) \ + class NM \ + { \ + public: \ + static const bool ltval = (0 OP 1); \ + static const bool gtval = (1 OP 0); \ + template \ + static bool op (T x, T y) { return x OP y; } \ + } // We also provide two special relations: ct, yielding always true, and cf, // yielding always false. -#define REGISTER_OCTAVE_CONST_OP(NM,value) \ - class NM \ - { \ - public: \ - static const bool ltval = value; \ - static const bool gtval = value; \ - template \ - static bool op (T, T) { return value; } \ - } +#define REGISTER_OCTAVE_CONST_OP(NM,value) \ + class NM \ + { \ + public: \ + static const bool ltval = value; \ + static const bool gtval = value; \ + template \ + static bool op (T, T) { return value; } \ + } // Handles non-homogeneous integer comparisons. Avoids doing useless tests. 
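// For illustration, an invocation such as REGISTER_OCTAVE_CMP_OP (lt, <)
// produces, schematically, a comparator class of the form
//
//   class lt
//   {
//   public:
//     static const bool ltval = (0 < 1);   // true
//     static const bool gtval = (1 < 0);   // false
//     template <typename T>
//     static bool op (T x, T y) { return x < y; }
//   };
//
// and the dispatcher below then selects such a comparator as its xop
// template parameter.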
class octave_int_cmp_op @@ -210,14 +210,14 @@ { return xop::op (x, static_cast (y)); } #if defined (OCTAVE_ENSURE_LONG_DOUBLE_OPERATIONS_ARE_NOT_TRUNCATED) -# define DECLARE_EXTERNAL_LONG_DOUBLE_CMP_OPS(T) \ - template static OCTAVE_API bool \ - external_mop (double, T); \ - template static OCTAVE_API bool \ - external_mop (T, double) +# define DECLARE_EXTERNAL_LONG_DOUBLE_CMP_OPS(T) \ + template static OCTAVE_API bool \ + external_mop (double, T); \ + template static OCTAVE_API bool \ + external_mop (T, double) - DECLARE_EXTERNAL_LONG_DOUBLE_CMP_OPS (int64_t); - DECLARE_EXTERNAL_LONG_DOUBLE_CMP_OPS (uint64_t); + DECLARE_EXTERNAL_LONG_DOUBLE_CMP_OPS (int64_t); + DECLARE_EXTERNAL_LONG_DOUBLE_CMP_OPS (uint64_t); #endif // Typecasting to doubles won't work properly for 64-bit integers -- @@ -225,35 +225,35 @@ // If we have long doubles, use them... #if defined (OCTAVE_INT_USE_LONG_DOUBLE) # if defined (OCTAVE_ENSURE_LONG_DOUBLE_OPERATIONS_ARE_NOT_TRUNCATED) -# define DEFINE_LONG_DOUBLE_CMP_OP(T) \ - template \ - static bool \ - mop (double x, T y) \ - { \ - return external_mop (x, y); \ - } \ - template \ - static bool \ - mop (T x, double y) \ - { \ - return external_mop (x, y); \ - } +# define DEFINE_LONG_DOUBLE_CMP_OP(T) \ + template \ + static bool \ + mop (double x, T y) \ + { \ + return external_mop (x, y); \ + } \ + template \ + static bool \ + mop (T x, double y) \ + { \ + return external_mop (x, y); \ + } # else -# define DEFINE_LONG_DOUBLE_CMP_OP(T) \ - template \ - static bool \ - mop (double x, T y) \ - { \ - return xop::op (static_cast (x), \ - static_cast (y)); \ - } \ - template \ - static bool \ - mop (T x, double y) \ - { \ - return xop::op (static_cast (x), \ - static_cast (y)); \ - } +# define DEFINE_LONG_DOUBLE_CMP_OP(T) \ + template \ + static bool \ + mop (double x, T y) \ + { \ + return xop::op (static_cast (x), \ + static_cast (y)); \ + } \ + template \ + static bool \ + mop (T x, double y) \ + { \ + return xop::op (static_cast (x), \ + static_cast (y)); \ + } # endif #else // ... otherwise, use external handlers @@ -261,23 +261,23 @@ // FIXME: We could declare directly the mop methods as external, // but we can't do this because bugs in gcc (<= 4.3) prevent // explicit instantiations later in that case. -# define DEFINE_LONG_DOUBLE_CMP_OP(T) \ - template static OCTAVE_API bool \ - emulate_mop (double, T); \ - template \ - static bool \ - mop (double x, T y) \ - { \ - return emulate_mop (x, y); \ - } \ - template static OCTAVE_API bool \ - emulate_mop (T, double); \ - template \ - static bool \ - mop (T x, double y) \ - { \ - return emulate_mop (x, y); \ - } +# define DEFINE_LONG_DOUBLE_CMP_OP(T) \ + template static OCTAVE_API bool \ + emulate_mop (double, T); \ + template \ + static bool \ + mop (double x, T y) \ + { \ + return emulate_mop (x, y); \ + } \ + template static OCTAVE_API bool \ + emulate_mop (T, double); \ + template \ + static bool \ + mop (T x, double y) \ + { \ + return emulate_mop (x, y); \ + } #endif DEFINE_LONG_DOUBLE_CMP_OP(int64_t) @@ -913,10 +913,12 @@ { return *this; } // unary operators & mappers -#define OCTAVE_INT_UN_OP(OPNAME,NAME) \ - inline octave_int \ - OPNAME () const \ - { return octave_int_arith::NAME (ival); } +#define OCTAVE_INT_UN_OP(OPNAME,NAME) \ + inline octave_int \ + OPNAME () const \ + { \ + return octave_int_arith::NAME (ival); \ + } OCTAVE_INT_UN_OP(operator -, minus) OCTAVE_INT_UN_OP(abs, abs) @@ -925,15 +927,17 @@ #undef OCTAVE_INT_UN_OP // Homogeneous binary integer operations. 
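// For example, with OP = +, NAME = add, and ARGT = octave_int, the macro
// below generates, approximately, the pair
//
//   inline octave_int operator + (const octave_int& y) const
//   {
//     return octave_int_arith<T>::add (ival, y);
//   }
//
//   inline octave_int& operator += (const octave_int& y)
//   {
//     ival = octave_int_arith<T>::add (ival, y);
//     return *this;
//   }
//
// so each operator simply forwards to the corresponding octave_int_arith
// function.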
-#define OCTAVE_INT_BIN_OP(OP, NAME, ARGT) \ - inline octave_int \ - operator OP (const ARGT& y) const \ - { return octave_int_arith::NAME (ival, y); } \ - inline octave_int& \ - operator OP##= (const ARGT& y) \ - { \ +#define OCTAVE_INT_BIN_OP(OP, NAME, ARGT) \ + inline octave_int \ + operator OP (const ARGT& y) const \ + { \ + return octave_int_arith::NAME (ival, y); \ + } \ + inline octave_int& \ + operator OP##= (const ARGT& y) \ + { \ ival = octave_int_arith::NAME (ival, y); \ - return *this; \ + return *this; \ } OCTAVE_INT_BIN_OP(+, add, octave_int) @@ -970,12 +974,16 @@ template inline octave_int rem (const octave_int& x, const octave_int& y) -{ return octave_int_arith::rem (x.value (), y.value ()); } +{ + return octave_int_arith::rem (x.value (), y.value ()); +} template inline octave_int mod (const octave_int& x, const octave_int& y) -{ return octave_int_arith::mod (x.value (), y.value ()); } +{ + return octave_int_arith::mod (x.value (), y.value ()); +} // No mixed integer binary operations! @@ -1039,12 +1047,13 @@ // Binary relations -#define OCTAVE_INT_CMP_OP(OP, NAME) \ - template \ - inline bool \ - operator OP (const octave_int& x, const octave_int& y) \ - { return octave_int_cmp_op::op \ - (x.value (), y.value ()); } +#define OCTAVE_INT_CMP_OP(OP, NAME) \ + template \ + inline bool \ + operator OP (const octave_int& x, const octave_int& y) \ + { \ + return octave_int_cmp_op::op (x.value (), y.value ()); \ + } OCTAVE_INT_CMP_OP (<, lt) OCTAVE_INT_CMP_OP (<=, le) @@ -1117,11 +1126,13 @@ // Bitwise operations -#define OCTAVE_INT_BITCMP_OP(OP) \ - template \ - octave_int \ - operator OP (const octave_int& x, const octave_int& y) \ - { return x.value () OP y.value (); } +#define OCTAVE_INT_BITCMP_OP(OP) \ + template \ + octave_int \ + operator OP (const octave_int& x, const octave_int& y) \ + { \ + return x.value () OP y.value (); \ + } OCTAVE_INT_BITCMP_OP (&) OCTAVE_INT_BITCMP_OP (|) @@ -1145,16 +1156,16 @@ #if defined (OCTAVE_ENSURE_LONG_DOUBLE_OPERATIONS_ARE_NOT_TRUNCATED) -#define DECLARE_EXTERNAL_LONG_DOUBLE_OP(T, OP) \ - extern OCTAVE_API T \ - external_double_ ## T ## _ ## OP (double x, T y); \ - extern OCTAVE_API T \ +#define DECLARE_EXTERNAL_LONG_DOUBLE_OP(T, OP) \ + extern OCTAVE_API T \ + external_double_ ## T ## _ ## OP (double x, T y); \ + extern OCTAVE_API T \ external_ ## T ## _double_ ## OP (T x, double y) -#define DECLARE_EXTERNAL_LONG_DOUBLE_OPS(T) \ - DECLARE_EXTERNAL_LONG_DOUBLE_OP (T, add); \ - DECLARE_EXTERNAL_LONG_DOUBLE_OP (T, sub); \ - DECLARE_EXTERNAL_LONG_DOUBLE_OP (T, mul); \ +#define DECLARE_EXTERNAL_LONG_DOUBLE_OPS(T) \ + DECLARE_EXTERNAL_LONG_DOUBLE_OP (T, add); \ + DECLARE_EXTERNAL_LONG_DOUBLE_OP (T, sub); \ + DECLARE_EXTERNAL_LONG_DOUBLE_OP (T, mul); \ DECLARE_EXTERNAL_LONG_DOUBLE_OP (T, div) DECLARE_EXTERNAL_LONG_DOUBLE_OPS (octave_int64); @@ -1165,85 +1176,89 @@ #define OCTAVE_INT_DOUBLE_BIN_OP0(OP) \ template \ inline octave_int \ - operator OP (const octave_int& x, const double& y) \ - { return octave_int (static_cast (x) OP y); } \ - template \ - inline octave_int \ - operator OP (const double& x, const octave_int& y) \ - { return octave_int (x OP static_cast (y)); } + operator OP (const octave_int& x, const double& y) \ + { \ + return octave_int (static_cast (x) OP y); \ + } \ + template \ + inline octave_int \ + operator OP (const double& x, const octave_int& y) \ + { \ + return octave_int (x OP static_cast (y)); \ + } #if defined (OCTAVE_INT_USE_LONG_DOUBLE) // Handle mixed op using long double #if defined 
(OCTAVE_ENSURE_LONG_DOUBLE_OPERATIONS_ARE_NOT_TRUNCATED) -# define OCTAVE_INT_DOUBLE_BIN_OP(OP, NAME) \ - OCTAVE_INT_DOUBLE_BIN_OP0(OP) \ - template <> \ - inline octave_int64 \ - operator OP (const double& x, const octave_int64& y) \ - { \ - return external_double_octave_int64_ ## NAME (x, y); \ - } \ - template <> \ - inline octave_uint64 \ - operator OP (const double& x, const octave_uint64& y) \ - { \ - return external_double_octave_uint64_ ## NAME (x, y); \ - } \ - template <> \ - inline octave_int64 \ - operator OP (const octave_int64& x, const double& y) \ - { \ - return external_octave_int64_double_ ## NAME (x, y); \ - } \ - template <> \ - inline octave_uint64 \ - operator OP (const octave_uint64& x, const double& y) \ - { \ - return external_octave_uint64_double_ ## NAME (x, y); \ +# define OCTAVE_INT_DOUBLE_BIN_OP(OP, NAME) \ + OCTAVE_INT_DOUBLE_BIN_OP0(OP) \ + template <> \ + inline octave_int64 \ + operator OP (const double& x, const octave_int64& y) \ + { \ + return external_double_octave_int64_ ## NAME (x, y); \ + } \ + template <> \ + inline octave_uint64 \ + operator OP (const double& x, const octave_uint64& y) \ + { \ + return external_double_octave_uint64_ ## NAME (x, y); \ + } \ + template <> \ + inline octave_int64 \ + operator OP (const octave_int64& x, const double& y) \ + { \ + return external_octave_int64_double_ ## NAME (x, y); \ + } \ + template <> \ + inline octave_uint64 \ + operator OP (const octave_uint64& x, const double& y) \ + { \ + return external_octave_uint64_double_ ## NAME (x, y); \ } #else # define OCTAVE_INT_DOUBLE_BIN_OP(OP, NAME) \ - OCTAVE_INT_DOUBLE_BIN_OP0(OP) \ - template <> \ - inline octave_int64 \ - operator OP (const double& x, const octave_int64& y) \ - { \ - return octave_int64 (x OP static_cast (y.value ())); \ - } \ - template <> \ - inline octave_uint64 \ - operator OP (const double& x, const octave_uint64& y) \ - { \ - return octave_uint64 (x OP static_cast (y.value ())); \ - } \ - template <> \ - inline octave_int64 \ - operator OP (const octave_int64& x, const double& y) \ - { \ + OCTAVE_INT_DOUBLE_BIN_OP0(OP) \ + template <> \ + inline octave_int64 \ + operator OP (const double& x, const octave_int64& y) \ + { \ + return octave_int64 (x OP static_cast (y.value ())); \ + } \ + template <> \ + inline octave_uint64 \ + operator OP (const double& x, const octave_uint64& y) \ + { \ + return octave_uint64 (x OP static_cast (y.value ())); \ + } \ + template <> \ + inline octave_int64 \ + operator OP (const octave_int64& x, const double& y) \ + { \ return octave_int64 (static_cast (x.value ()) OP y); \ - } \ - template <> \ - inline octave_uint64 \ - operator OP (const octave_uint64& x, const double& y) \ - { \ - return octave_uint64 (static_cast (x.value ()) OP y); \ + } \ + template <> \ + inline octave_uint64 \ + operator OP (const octave_uint64& x, const double& y) \ + { \ + return octave_uint64 (static_cast (x.value ()) OP y); \ } #endif #else // external handlers #define OCTAVE_INT_DOUBLE_BIN_OP(OP, NAME) \ - OCTAVE_INT_DOUBLE_BIN_OP0(OP) \ - template <> \ - OCTAVE_API octave_int64 \ - operator OP (const double&, const octave_int64&); \ - template <> \ - OCTAVE_API octave_uint64 \ - operator OP (const double&, const octave_uint64&); \ - template <> \ - OCTAVE_API octave_int64 \ - operator OP (const octave_int64&, const double&); \ - template <> \ - OCTAVE_API octave_uint64 \ + OCTAVE_INT_DOUBLE_BIN_OP0(OP) \ + template <> \ + OCTAVE_API octave_int64 \ + operator OP (const double&, const octave_int64&); \ + template <> \ + OCTAVE_API 
octave_uint64 \ + operator OP (const double&, const octave_uint64&); \ + template <> \ + OCTAVE_API octave_int64 \ + operator OP (const octave_int64&, const double&); \ + template <> \ + OCTAVE_API octave_uint64 \ operator OP (const octave_uint64&, const double&); #endif @@ -1258,15 +1273,19 @@ #undef DECLARE_EXTERNAL_LONG_DOUBLE_OP #undef DECLARE_EXTERNAL_LONG_DOUBLE_OPS -#define OCTAVE_INT_DOUBLE_CMP_OP(OP,NAME) \ - template \ - inline bool \ - operator OP (const octave_int& x, const double& y) \ - { return octave_int_cmp_op::mop (x.value (), y); } \ - template \ - inline bool \ - operator OP (const double& x, const octave_int& y) \ - { return octave_int_cmp_op::mop (x, y.value ()); } +#define OCTAVE_INT_DOUBLE_CMP_OP(OP,NAME) \ + template \ + inline bool \ + operator OP (const octave_int& x, const double& y) \ + { \ + return octave_int_cmp_op::mop (x.value (), y); \ + } \ + template \ + inline bool \ + operator OP (const double& x, const octave_int& y) \ + { \ + return octave_int_cmp_op::mop (x, y.value ()); \ + } OCTAVE_INT_DOUBLE_CMP_OP (<, lt) OCTAVE_INT_DOUBLE_CMP_OP (<=, le) @@ -1279,15 +1298,19 @@ // Floats are handled by simply converting to doubles. -#define OCTAVE_INT_FLOAT_BIN_OP(OP) \ - template \ - inline octave_int \ +#define OCTAVE_INT_FLOAT_BIN_OP(OP) \ + template \ + inline octave_int \ operator OP (const octave_int& x, float y) \ - { return x OP static_cast (y); } \ - template \ - inline octave_int \ + { \ + return x OP static_cast (y); \ + } \ + template \ + inline octave_int \ operator OP (float x, const octave_int& y) \ - { return static_cast (x) OP y; } + { \ + return static_cast (x) OP y; \ + } OCTAVE_INT_FLOAT_BIN_OP (+) OCTAVE_INT_FLOAT_BIN_OP (-) @@ -1297,14 +1320,18 @@ #undef OCTAVE_INT_FLOAT_BIN_OP #define OCTAVE_INT_FLOAT_CMP_OP(OP) \ - template \ - inline bool \ - operator OP (const octave_int& x, const float& y) \ - { return x OP static_cast (y); } \ - template \ - bool \ - operator OP (const float& x, const octave_int& y) \ - { return static_cast (x) OP y; } + template \ + inline bool \ + operator OP (const octave_int& x, const float& y) \ + { \ + return x OP static_cast (y); \ + } \ + template \ + bool \ + operator OP (const float& x, const octave_int& y) \ + { \ + return static_cast (x) OP y; \ + } OCTAVE_INT_FLOAT_CMP_OP (<) OCTAVE_INT_FLOAT_CMP_OP (<=) diff -r dd992fd74fce -r e43d83253e28 liboctave/util/oct-locbuf.h --- a/liboctave/util/oct-locbuf.h Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/util/oct-locbuf.h Mon Aug 01 12:40:18 2016 -0400 @@ -111,19 +111,19 @@ // This specializes octave_local_buffer to use the chunked buffer // mechanism for POD types. -#define SPECIALIZE_POD_BUFFER(TYPE) \ -template <> \ -class octave_local_buffer : private octave_chunk_buffer \ -{ \ -public: \ - octave_local_buffer (size_t size) \ - : octave_chunk_buffer (size * sizeof (TYPE)) { } \ - \ - operator TYPE *() const \ - { \ - return reinterpret_cast (this->data ()); \ - } \ -} +#define SPECIALIZE_POD_BUFFER(TYPE) \ + template <> \ + class octave_local_buffer : private octave_chunk_buffer \ + { \ + public: \ + octave_local_buffer (size_t size) \ + : octave_chunk_buffer (size * sizeof (TYPE)) { } \ + \ + operator TYPE *() const \ + { \ + return reinterpret_cast (this->data ()); \ + } \ + } SPECIALIZE_POD_BUFFER (bool); SPECIALIZE_POD_BUFFER (char); @@ -183,23 +183,22 @@ // stack array and the octave_local_buffer object, but only one of // them will be nonempty. 
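// In either configuration the macro is used the same way; roughly, for a
// hypothetical helper f:
//
//   void f (octave_idx_type n)
//   {
//     OCTAVE_LOCAL_BUFFER (double, tmp, n);
//     for (octave_idx_type i = 0; i < n; i++)
//       tmp[i] = 0.0;   // tmp behaves like a double* of length n
//   }
//
// with the storage released automatically when the enclosing scope ends.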
-#define OCTAVE_LOCAL_BUFFER(T, buf, size) \ - const size_t _bufsize_ ## buf = size; \ - const bool _lbufaut_ ## buf = _bufsize_ ## buf * sizeof (T) \ - <= OCTAVE_LOCAL_BUFFER_MAX_STACK_SIZE; \ - T _bufaut_ ## buf [_lbufaut_ ## buf ? _bufsize_ ## buf : 0]; \ - octave_local_buffer _bufheap_ ## buf \ - (! _lbufaut_ ## buf ? _bufsize_ ## buf : 0); \ - T *buf = _lbufaut_ ## buf \ - ? _bufaut_ ## buf : static_cast (_bufheap_ ## buf) +#define OCTAVE_LOCAL_BUFFER(T, buf, size) \ + const size_t _bufsize_ ## buf = size; \ + const bool _lbufaut_ ## buf = _bufsize_ ## buf * sizeof (T) \ + <= OCTAVE_LOCAL_BUFFER_MAX_STACK_SIZE; \ + T _bufaut_ ## buf [_lbufaut_ ## buf ? _bufsize_ ## buf : 0]; \ + octave_local_buffer _bufheap_ ## buf (! _lbufaut_ ## buf ? _bufsize_ ## buf : 0); \ + T *buf = (_lbufaut_ ## buf \ + ? _bufaut_ ## buf : static_cast (_bufheap_ ## buf)) #else // If we don't have automatic arrays, we simply always use // octave_local_buffer. -#define OCTAVE_LOCAL_BUFFER(T, buf, size) \ - octave_local_buffer _buffer_ ## buf (size); \ +#define OCTAVE_LOCAL_BUFFER(T, buf, size) \ + octave_local_buffer _buffer_ ## buf (size); \ T *buf = _buffer_ ## buf #endif @@ -207,10 +206,10 @@ // Note: we use weird variables in the for loop to avoid warnings // about shadowed parameters. -#define OCTAVE_LOCAL_BUFFER_INIT(T, buf, size, value) \ - OCTAVE_LOCAL_BUFFER (T, buf, size); \ - for (size_t _buf_iter = 0, _buf_size = size; \ - _buf_iter < _buf_size; _buf_iter++) \ +#define OCTAVE_LOCAL_BUFFER_INIT(T, buf, size, value) \ + OCTAVE_LOCAL_BUFFER (T, buf, size); \ + for (size_t _buf_iter = 0, _buf_size = size; \ + _buf_iter < _buf_size; _buf_iter++) \ buf[_buf_iter] = value #endif diff -r dd992fd74fce -r e43d83253e28 liboctave/util/oct-sparse.h --- a/liboctave/util/oct-sparse.h Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/util/oct-sparse.h Mon Aug 01 12:40:18 2016 -0400 @@ -85,9 +85,9 @@ # include #endif -#if (defined (HAVE_SUITESPARSE_CHOLMOD_H) \ - || defined (HAVE_UFSPARSE_CHOLMOD_H) \ - || defined (HAVE_CHOLMOD_CHOLMOD_H) \ +#if (defined (HAVE_SUITESPARSE_CHOLMOD_H) \ + || defined (HAVE_UFSPARSE_CHOLMOD_H) \ + || defined (HAVE_CHOLMOD_CHOLMOD_H) \ || defined (HAVE_CHOLMOD_H)) # if defined (OCTAVE_ENABLE_64) # define CHOLMOD_NAME(name) cholmod_l_ ## name diff -r dd992fd74fce -r e43d83253e28 liboctave/util/url-transfer.cc --- a/liboctave/util/url-transfer.cc Tue Jul 12 14:28:07 2016 -0400 +++ b/liboctave/util/url-transfer.cc Mon Aug 01 12:40:18 2016 -0400 @@ -260,17 +260,17 @@ while (0) // Same as above but with a return value. -#define SETOPTR(option, parameter) \ - do \ - { \ - CURLcode res = curl_easy_setopt (curl, option, parameter); \ - if (res != CURLE_OK) \ - { \ - ok = false; \ - errmsg = curl_easy_strerror (res); \ - return retval; \ -} \ -} \ +#define SETOPTR(option, parameter) \ + do \ + { \ + CURLcode res = curl_easy_setopt (curl, option, parameter); \ + if (res != CURLE_OK) \ + { \ + ok = false; \ + errmsg = curl_easy_strerror (res); \ + return retval; \ + } \ + } \ while (0) class curl_transfer : public base_url_transfer diff -r dd992fd74fce -r e43d83253e28 oct-conf-post.in.h --- a/oct-conf-post.in.h Tue Jul 12 14:28:07 2016 -0400 +++ b/oct-conf-post.in.h Mon Aug 01 12:40:18 2016 -0400 @@ -114,7 +114,7 @@ FIXME: Maybe substitute this by a more precise check in the future? 
*/ #if (SIZEOF_LONG_DOUBLE >= 10) && defined (HAVE_ROUNDL) # define OCTAVE_INT_USE_LONG_DOUBLE -# if (SIZEOF_LONG_DOUBLE < 16 \ +# if (SIZEOF_LONG_DOUBLE < 16 \ && (defined __i386__ || defined __x86_64__) && defined __GNUC__) # define OCTAVE_ENSURE_LONG_DOUBLE_OPERATIONS_ARE_NOT_TRUNCATED 1 # endif diff -r dd992fd74fce -r e43d83253e28 src/shared-fcns.h --- a/src/shared-fcns.h Tue Jul 12 14:28:07 2016 -0400 +++ b/src/shared-fcns.h Mon Aug 01 12:40:18 2016 -0400 @@ -91,7 +91,7 @@ // Find the directory where the octave binary is supposed to be // installed. -#if (defined (OCTAVE_HAVE_WINDOWS_FILESYSTEM) \ +#if (defined (OCTAVE_HAVE_WINDOWS_FILESYSTEM) \ && ! defined (OCTAVE_HAVE_POSIX_FILESYSTEM)) static const char dir_sep_char = '\\'; #else