changeset 6872:6a68ee213a70 octave-forge

(none)
author rikcorradini
date Fri, 12 Mar 2010 15:07:24 +0000
parents c1b2936d351d
children 1a97b88df317
files extra/openmpi_ext/COPYING extra/openmpi_ext/DESCRIPTION extra/openmpi_ext/INDEX extra/openmpi_ext/doc/README extra/openmpi_ext/inst/Pi.m extra/openmpi_ext/inst/allnodes extra/openmpi_ext/inst/hello2dimmat.m extra/openmpi_ext/inst/hellocell.m extra/openmpi_ext/inst/hellosparsemat.m extra/openmpi_ext/inst/hellostruct.m extra/openmpi_ext/inst/helloworld.m extra/openmpi_ext/src/MPI_Barrier.cc extra/openmpi_ext/src/MPI_Comm_Load.cc extra/openmpi_ext/src/MPI_Comm_Test.cc extra/openmpi_ext/src/MPI_Comm_rank.cc extra/openmpi_ext/src/MPI_Comm_size.cc extra/openmpi_ext/src/MPI_Finalize.cc extra/openmpi_ext/src/MPI_Finalized.cc extra/openmpi_ext/src/MPI_Init.cc extra/openmpi_ext/src/MPI_Initialized.cc extra/openmpi_ext/src/MPI_Iprobe.cc extra/openmpi_ext/src/MPI_Op_Load.cc extra/openmpi_ext/src/MPI_Op_Test.cc extra/openmpi_ext/src/MPI_Probe.cc extra/openmpi_ext/src/MPI_Recv.cc extra/openmpi_ext/src/MPI_Send.cc extra/openmpi_ext/src/Makefile extra/openmpi_ext/src/Makefile.OPENMPI1.3.3 extra/openmpi_ext/src/simple.h extra/openmpi_ext/src/simpleop.h
diffstat 30 files changed, 0 insertions(+), 3691 deletions(-)
--- a/extra/openmpi_ext/COPYING	Fri Mar 12 14:12:12 2010 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,337 +0,0 @@
-                    GNU GENERAL PUBLIC LICENSE
-                       Version 2, June 1991
-
- Copyright (C) 1989, 1991 Free Software Foundation, Inc. <http://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-                            Preamble
-
-  The licenses for most software are designed to take away your
-freedom to share and change it.  By contrast, the GNU General Public
-License is intended to guarantee your freedom to share and change free
-software--to make sure the software is free for all its users.  This
-General Public License applies to most of the Free Software
-Foundation's software and to any other program whose authors commit to
-using it.  (Some other Free Software Foundation software is covered by
-the GNU Library General Public License instead.)  You can apply it to
-your programs, too.
-
-  When we speak of free software, we are referring to freedom, not
-price.  Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-this service if you wish), that you receive source code or can get it
-if you want it, that you can change the software or use pieces of it
-in new free programs; and that you know you can do these things.
-
-  To protect your rights, we need to make restrictions that forbid
-anyone to deny you these rights or to ask you to surrender the rights.
-These restrictions translate to certain responsibilities for you if you
-distribute copies of the software, or if you modify it.
-
-  For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must give the recipients all the rights that
-you have.  You must make sure that they, too, receive or can get the
-source code.  And you must show them these terms so they know their
-rights.
-
-  We protect your rights with two steps: (1) copyright the software, and
-(2) offer you this license which gives you legal permission to copy,
-distribute and/or modify the software.
-
-  Also, for each author's protection and ours, we want to make certain
-that everyone understands that there is no warranty for this free
-software.  If the software is modified by someone else and passed on, we
-want its recipients to know that what they have is not the original, so
-that any problems introduced by others will not reflect on the original
-authors' reputations.
-
-  Finally, any free program is threatened constantly by software
-patents.  We wish to avoid the danger that redistributors of a free
-program will individually obtain patent licenses, in effect making the
-program proprietary.  To prevent this, we have made it clear that any
-patent must be licensed for everyone's free use or not licensed at all.
-
-  The precise terms and conditions for copying, distribution and
-modification follow.
-
-                    GNU GENERAL PUBLIC LICENSE
-   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
-  0. This License applies to any program or other work which contains
-a notice placed by the copyright holder saying it may be distributed
-under the terms of this General Public License.  The "Program", below,
-refers to any such program or work, and a "work based on the Program"
-means either the Program or any derivative work under copyright law:
-that is to say, a work containing the Program or a portion of it,
-either verbatim or with modifications and/or translated into another
-language.  (Hereinafter, translation is included without limitation in
-the term "modification".)  Each licensee is addressed as "you".
-
-Activities other than copying, distribution and modification are not
-covered by this License; they are outside its scope.  The act of
-running the Program is not restricted, and the output from the Program
-is covered only if its contents constitute a work based on the
-Program (independent of having been made by running the Program).
-Whether that is true depends on what the Program does.
-
-  1. You may copy and distribute verbatim copies of the Program's
-source code as you receive it, in any medium, provided that you
-conspicuously and appropriately publish on each copy an appropriate
-copyright notice and disclaimer of warranty; keep intact all the
-notices that refer to this License and to the absence of any warranty;
-and give any other recipients of the Program a copy of this License
-along with the Program.
-
-You may charge a fee for the physical act of transferring a copy, and
-you may at your option offer warranty protection in exchange for a fee.
-
-  2. You may modify your copy or copies of the Program or any portion
-of it, thus forming a work based on the Program, and copy and
-distribute such modifications or work under the terms of Section 1
-above, provided that you also meet all of these conditions:
-
-    a) You must cause the modified files to carry prominent notices
-    stating that you changed the files and the date of any change.
-
-    b) You must cause any work that you distribute or publish, that in
-    whole or in part contains or is derived from the Program or any
-    part thereof, to be licensed as a whole at no charge to all third
-    parties under the terms of this License.
-
-    c) If the modified program normally reads commands interactively
-    when run, you must cause it, when started running for such
-    interactive use in the most ordinary way, to print or display an
-    announcement including an appropriate copyright notice and a
-    notice that there is no warranty (or else, saying that you provide
-    a warranty) and that users may redistribute the program under
-    these conditions, and telling the user how to view a copy of this
-    License.  (Exception: if the Program itself is interactive but
-    does not normally print such an announcement, your work based on
-    the Program is not required to print an announcement.)
-
-These requirements apply to the modified work as a whole.  If
-identifiable sections of that work are not derived from the Program,
-and can be reasonably considered independent and separate works in
-themselves, then this License, and its terms, do not apply to those
-sections when you distribute them as separate works.  But when you
-distribute the same sections as part of a whole which is a work based
-on the Program, the distribution of the whole must be on the terms of
-this License, whose permissions for other licensees extend to the
-entire whole, and thus to each and every part regardless of who wrote it.
-
-Thus, it is not the intent of this section to claim rights or contest
-your rights to work written entirely by you; rather, the intent is to
-exercise the right to control the distribution of derivative or
-collective works based on the Program.
-
-In addition, mere aggregation of another work not based on the Program
-with the Program (or with a work based on the Program) on a volume of
-a storage or distribution medium does not bring the other work under
-the scope of this License.
-
-  3. You may copy and distribute the Program (or a work based on it,
-under Section 2) in object code or executable form under the terms of
-Sections 1 and 2 above provided that you also do one of the following:
-
-    a) Accompany it with the complete corresponding machine-readable
-    source code, which must be distributed under the terms of Sections
-    1 and 2 above on a medium customarily used for software interchange; or,
-
-    b) Accompany it with a written offer, valid for at least three
-    years, to give any third party, for a charge no more than your
-    cost of physically performing source distribution, a complete
-    machine-readable copy of the corresponding source code, to be
-    distributed under the terms of Sections 1 and 2 above on a medium
-    customarily used for software interchange; or,
-
-    c) Accompany it with the information you received as to the offer
-    to distribute corresponding source code.  (This alternative is
-    allowed only for noncommercial distribution and only if you
-    received the program in object code or executable form with such
-    an offer, in accord with Subsection b above.)
-
-The source code for a work means the preferred form of the work for
-making modifications to it.  For an executable work, complete source
-code means all the source code for all modules it contains, plus any
-associated interface definition files, plus the scripts used to
-control compilation and installation of the executable.  However, as a
-special exception, the source code distributed need not include
-anything that is normally distributed (in either source or binary
-form) with the major components (compiler, kernel, and so on) of the
-operating system on which the executable runs, unless that component
-itself accompanies the executable.
-
-If distribution of executable or object code is made by offering
-access to copy from a designated place, then offering equivalent
-access to copy the source code from the same place counts as
-distribution of the source code, even though third parties are not
-compelled to copy the source along with the object code.
-
-  4. You may not copy, modify, sublicense, or distribute the Program
-except as expressly provided under this License.  Any attempt
-otherwise to copy, modify, sublicense or distribute the Program is
-void, and will automatically terminate your rights under this License.
-However, parties who have received copies, or rights, from you under
-this License will not have their licenses terminated so long as such
-parties remain in full compliance.
-
-  5. You are not required to accept this License, since you have not
-signed it.  However, nothing else grants you permission to modify or
-distribute the Program or its derivative works.  These actions are
-prohibited by law if you do not accept this License.  Therefore, by
-modifying or distributing the Program (or any work based on the
-Program), you indicate your acceptance of this License to do so, and
-all its terms and conditions for copying, distributing or modifying
-the Program or works based on it.
-
-  6. Each time you redistribute the Program (or any work based on the
-Program), the recipient automatically receives a license from the
-original licensor to copy, distribute or modify the Program subject to
-these terms and conditions.  You may not impose any further
-restrictions on the recipients' exercise of the rights granted herein.
-You are not responsible for enforcing compliance by third parties to
-this License.
-
-  7. If, as a consequence of a court judgment or allegation of patent
-infringement or for any other reason (not limited to patent issues),
-conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License.  If you cannot
-distribute so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you
-may not distribute the Program at all.  For example, if a patent
-license would not permit royalty-free redistribution of the Program by
-all those who receive copies directly or indirectly through you, then
-the only way you could satisfy both it and this License would be to
-refrain entirely from distribution of the Program.
-
-If any portion of this section is held invalid or unenforceable under
-any particular circumstance, the balance of the section is intended to
-apply and the section as a whole is intended to apply in other
-circumstances.
-
-It is not the purpose of this section to induce you to infringe any
-patents or other property right claims or to contest validity of any
-such claims; this section has the sole purpose of protecting the
-integrity of the free software distribution system, which is
-implemented by public license practices.  Many people have made
-generous contributions to the wide range of software distributed
-through that system in reliance on consistent application of that
-system; it is up to the author/donor to decide if he or she is willing
-to distribute software through any other system and a licensee cannot
-impose that choice.
-
-This section is intended to make thoroughly clear what is believed to
-be a consequence of the rest of this License.
-
-  8. If the distribution and/or use of the Program is restricted in
-certain countries either by patents or by copyrighted interfaces, the
-original copyright holder who places the Program under this License
-may add an explicit geographical distribution limitation excluding
-those countries, so that distribution is permitted only in or among
-countries not thus excluded.  In such case, this License incorporates
-the limitation as if written in the body of this License.
-
-  9. The Free Software Foundation may publish revised and/or new versions
-of the General Public License from time to time.  Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
-Each version is given a distinguishing version number.  If the Program
-specifies a version number of this License which applies to it and "any
-later version", you have the option of following the terms and conditions
-either of that version or of any later version published by the Free
-Software Foundation.  If the Program does not specify a version number of
-this License, you may choose any version ever published by the Free Software
-Foundation.
-
-  10. If you wish to incorporate parts of the Program into other free
-programs whose distribution conditions are different, write to the author
-to ask for permission.  For software which is copyrighted by the Free
-Software Foundation, write to the Free Software Foundation; we sometimes
-make exceptions for this.  Our decision will be guided by the two goals
-of preserving the free status of all derivatives of our free software and
-of promoting the sharing and reuse of software generally.
-
-                            NO WARRANTY
-
-  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
-FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
-OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
-PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
-OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
-TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
-PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
-REPAIR OR CORRECTION.
-
-  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
-REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
-INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
-OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
-TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
-YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
-PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGES.
-
-                     END OF TERMS AND CONDITIONS
-
-            How to Apply These Terms to Your New Programs
-
-  If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
-  To do so, attach the following notices to the program.  It is safest
-to attach them to the start of each source file to most effectively
-convey the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-    <one line to give the program's name and a brief idea of what it does.>
-    Copyright (C) <year>  <name of author>
-
-    This program is free software; you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation; either version 2 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, see <http://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
-If the program is interactive, make it output a short notice like this
-when it starts in an interactive mode:
-
-    Gnomovision version 69, Copyright (C) year name of author
-    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
-    This is free software, and you are welcome to redistribute it
-    under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License.  Of course, the commands you use may
-be called something other than `show w' and `show c'; they could even be
-mouse-clicks or menu items--whatever suits your program.
-
-You should also get your employer (if you work as a programmer) or your
-school, if any, to sign a "copyright disclaimer" for the program, if
-necessary.  Here is a sample; alter the names:
-
-  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
-  `Gnomovision' (which makes passes at compilers) written by James Hacker.
-
-  <signature of Ty Coon>, 1 April 1989
-  Ty Coon, President of Vice
-
-This General Public License does not permit incorporating your program into
-proprietary programs.  If your program is a subroutine library, you may
-consider it more useful to permit linking proprietary applications with the
-library.  If this is what you want to do, use the GNU Library General
-Public License instead of this License.
--- a/extra/openmpi_ext/DESCRIPTION	Fri Mar 12 14:12:12 2010 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,11 +0,0 @@
-Name: openmpi_ext
-Version: 1.0.0
-Date: 2009-11-16
-Author: Riccardo Corradini <riccardocorradini@yahoo.it>
-Maintainer: Riccardo Corradini <riccardocorradini@yahoo.it>
-Title: openmpi_ext
-Description: MPI functions for parallel computing using simple MPI Derived Datatypes.
-Depends: octave (>= 3.2.3)
-Autoload: yes
-License: GPL version 2 or later
-Url: http://octave.sf.net
--- a/extra/openmpi_ext/INDEX	Fri Mar 12 14:12:12 2010 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1 +0,0 @@
-To be filled
--- a/extra/openmpi_ext/doc/README	Fri Mar 12 14:12:12 2010 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,44 +0,0 @@
-The code is general-purpose, but I would like to use it for econometrics.
-So the first step is to install the following tarball from
-http://www.open-mpi.org/software/ompi/v1.3/downloads/openmpi-1.3.3.tar.bz2
-preferably on a multi-core computer, so that the simple examples can be run,
-and configure it, for instance, in the following way (/home/user is your $HOME):
-./configure --enable-mpirun-prefix-by-default --enable-heterogeneous --prefix=/home/user/openmpi-1.3.3/ --enable-static
-
-and modify .bashrc in your home (here $ompi_info is assumed to point to the ompi_info
-binary of this installation, for instance /home/user/openmpi-1.3.3/bin/ompi_info):
- OMPIBIN=`$ompi_info -path     bindir  -parsable | cut -d: -f3`
- OMPILIB=`$ompi_info -path     libdir  -parsable | cut -d: -f3`
- OMPISCD=`$ompi_info -path sysconfdir  -parsable | cut -d: -f3`
-
-
-
-export            PATH=$OMPIBIN:$PATH
-
-export LD_LIBRARY_PATH=:$OMPILIB:$LD_LIBRARY_PATH
-
-unset  ompi_info OMPIBIN OMPILIB OMPISCD 
-
-If you want to install it on a simple toy network, just assign a static IP address to every Linux computer, set up 
-a passwordless ssh connection (see for instance http://linuxproblem.org/art_9.html ), and then install openmpi and octave on every machine, always with the same versions and with the same .bashrc settings for the same user.
-
-After this, type in a terminal: mpiCC --showme
-In my case the output is something like
-
-g++ -I/home/user/openmpi-1.3.3/include -pthread -L/home/user/openmpi-1.3.3/lib -lmpi_cxx -lmpi -lopen-rte -lopen-pal -ldl -Wl,--export-dynamic -lnsl -lutil -lm -ldl
-
-These are the flags that mkoctfile needs; for instance, for MPI_Init.cc we shall have
-mkoctfile -I/home/user/openmpi-1.3.3/include -lpthread -L/home/user/openmpi-1.3.3/lib -lmpi_cxx -lmpi -lopen-rte -lopen-pal -ldl -lnsl -lutil -lm -ldl MPI_Init.cc
-(a sketch that compiles all the sources this way is given after this README)
-
-
-
-The m files just contain some very simple examples.
-More complex examples will be provided in the near future.
-See also
-http://static.msi.umn.edu/tutorial/scicomp/general/MPI/content6.html
-to understand the logic of MPI Derived Datatypes and how they can easily be handled by the openmpi_ext package.
-Best regards
-Riccardo Corradini
-
-
-
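[Editorial note: a minimal build sketch, not part of the original README. It assumes the install prefix and flags reported by mpiCC --showme above and that it is run from extra/openmpi_ext/src inside Octave; it compiles every wrapper with mkoctfile through system().]

  # Hedged sketch: prefix and flags below are assumptions copied from the README above.
  flags = ["-I/home/user/openmpi-1.3.3/include -lpthread -L/home/user/openmpi-1.3.3/lib ", ...
           "-lmpi_cxx -lmpi -lopen-rte -lopen-pal -ldl -lnsl -lutil -lm -ldl"];
  sources = glob ("*.cc");                  # all wrappers in extra/openmpi_ext/src
  for k = 1:numel (sources)
    cmd = sprintf ("mkoctfile %s %s", flags, sources{k});
    printf ("compiling: %s\n", sources{k});
    system (cmd);                           # produces the corresponding .oct file
  endfor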
--- a/extra/openmpi_ext/inst/Pi.m	Fri Mar 12 14:12:12 2010 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,113 +0,0 @@
-## Copyright (C) 2004-2007 Javier Fernández Baldomero, Mancia Anguita López
-## This code has been adjusted for octave3.2.3 and octave 3.3.50+ in 
-## 2009 by  Riccardo Corradini <riccardocorradini@yahoo.it>
-##
-## This program is free software; you can redistribute it and/or modify
-## it under the terms of the GNU General Public License as published by
-## the Free Software Foundation; either version 2 of the License, or
-## (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; If not, see <http://www.gnu.org/licenses/>.
-
-
-# Please add the openmpi_ext folder containing the compiled .oct files to the path,
-# for instance addpath("../src");
-# Run with, e.g.: mpirun -np 5 octave -q --eval "Pi(2E7,'s')"
-
-function Pi(N,mod)
-addpath("../src");
-# Pi:	Classic PI computation by numeric integration of arctan'(x) in [0..1]
-#
-#	Pi [ ( N [ ,mod ] ) ]
-#
-#  N	[1E7]	#subdivisions of the [0, 1] interval
-#  mod	['s']	communication modality:  (s)end (r)educe
-#
-#  printed results struct contains
-#	pi	estimated pi value
-#	err	error
-#	time	from argument xmit to pi computed
-#
-	
-
-##########
-# ArgChk #
-##########
-if nargin<1,	N=1E7;	end
-if nargin<2,  mod='s';	end
-if nargin>2,	usage("Pi(N,mod)"); end		# let all ranks complain
-flag=0;						# code much simpler
-flag=flag || ~isscalar(N) || ~isnumeric(N);
-flag=flag  |   fix(N)~=N   |           N<1;
-		   mod=lower(mod); mods='sr';
-flag=flag  | isempty(findstr(mod,  mods));	# let them all error out
-if flag,	usage("Pi( <int> N>0, <char> mod=='s|r' )"); end
-
-##################
-# Results struct #
-##################
-results.pi   =0;
-results.err  =0;
-results.time =0;
-
-
-############
-# PARALLEL # initialization, include MPI_Init time in measurement
-############
-  T=clock; #
-############
-   MPI_ANY_SOURCE = -1;
-   MPI_Init();	
-   MPI_COMM_WORLD = MPI_Comm_Load("NEWORLD");		
-   rnk   =	MPI_Comm_rank (MPI_COMM_WORLD);	# let it abort if it fails
-   siz   =	MPI_Comm_size (MPI_COMM_WORLD);
-
-    SLV = logical(rnk);			# handy shortcuts, master is rank 0
-    MST = ~ SLV;			# slaves are all other
-
-############
-# PARALLEL # computation (depends on rank/size)
-############			# vectorized code, equivalent to
-  width=1/N; lsum=0;		# for i=rnk:siz:N-1
-  i=rnk:siz:N-1;		#   x=(i+0.5)*width;
-  x=(i+0.5)*width;		#   lsum=lsum+4/(1+x^2);
-  lsum=sum(4./(1+x.^2));	# end
-
-############
-# PARALLEL # reduction and finish
-############
-switch mod
-  case 's',			TAG=7;	# Any tag would do
-    if SLV				# All slaves send result back
-	MPI_Send(lsum,             0,TAG,MPI_COMM_WORLD);
-    else				# Here at master
-	    Sum =lsum;			# save local result
-      for slv=1:siz-1			# collect in any order
-	    lsum = MPI_Recv(MPI_ANY_SOURCE,TAG,MPI_COMM_WORLD);
-	    Sum+=lsum;			# and accumulate
-      end				# order: slv or MPI_ANY_SOURCE
-    end
-  case 'r',
-        disp("not yet implemented");
-#	Sum=0;		
-# reduction master = rank 0 @ WORLD
-#       MPI_Reduce(lsum,Sum, MPI_SUM,  0,MPI_COMM_WORLD);
-end
-
-MPI_Finalize();
-
-if MST
-    Sum      = Sum/N ; 			# better at end: don't lose resolution
-#################################	# stopwatch measurement
-results.time = etime(clock,T);  #	# but only at master after PI computed
-#################################	# all of them started T=clock;
-results.err  = Sum-pi;
-results.pi   = Sum # ;
-
-end 
\ No newline at end of file
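[Editorial note: for reference, the quantity Pi.m approximates is the standard integral identity, evaluated with the midpoint rule on N subintervals that the ranks share round-robin (rank rnk takes i = rnk, rnk+siz, rnk+2*siz, ...):]

  \pi = \int_0^1 \frac{4}{1+x^2}\,dx
      \approx \frac{1}{N} \sum_{i=0}^{N-1} \frac{4}{1+x_i^2},
  \qquad x_i = \frac{i+0.5}{N}

[This is exactly the vectorized lsum computation above, followed by the division Sum/N at the master.]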
--- a/extra/openmpi_ext/inst/allnodes	Fri Mar 12 14:12:12 2010 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,7 +0,0 @@
-#!/bin/bash
-# This script runs an Octave script under MPI with a given hostfile and a given
-# number of ranks.  Edit it to set the hostfile and the number of nodes, then use
-# it as follows: allnodes <your_script_name> (the argument is passed to octave --eval,
-# so give the script name without the .m extension)
-HOSTFILE="/home/user/tmp/bhosts"
-NUMBER_OF_NODES="33"
-mpirun --hostfile "$HOSTFILE" -np "$NUMBER_OF_NODES" octave -q --eval "$1"
--- a/extra/openmpi_ext/inst/hello2dimmat.m	Fri Mar 12 14:12:12 2010 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,36 +0,0 @@
-# Please add the openmpi_ext folder containing the compiled .oct files to the path,
-# for instance 
-  addpath("../src");
-  MPI_SUCCESS =0;
-  MPI_Init();
-
-  # the string NEWORLD is just a label; it could be whatever you want
-  CW = MPI_Comm_Load("NEWORLD");
-  my_rank = MPI_Comm_rank(CW);
-  p = MPI_Comm_size(CW);
-  mytag = 48;
-
-
- 
-  if (my_rank != 0)
-#        Generate a random matrix
-       message=rand(90,90);
-#        load message
-#       rankvect is the vector containing the list of destination ranks
-     rankvect = 0;
-     [info] = MPI_Send(message,rankvect,mytag,CW);
-  else
-        for source = 1:p-1
-          disp("We are at rank 0 that is master etc..");
-          [messager, info] = MPI_Recv(source,mytag,CW);
-          
-#	You could also save each result and make comparisons if you don't trust MPI
-          disp("Rank 0 is the master receiving ... :");
-            if (info == MPI_SUCCESS)
-              disp('OK!');
-          endif
-          endfor
-  end   
-
-
-   MPI_Finalize();
--- a/extra/openmpi_ext/inst/hellocell.m	Fri Mar 12 14:12:12 2010 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,55 +0,0 @@
-## Copyright (C) 2009  Riccardo Corradini <riccardocorradini@yahoo.it>
-## under the terms of the GNU General Public License.
-##
-## This program is free software; you can redistribute it and/or modify
-## it under the terms of the GNU General Public License as published by
-## the Free Software Foundation; either version 2 of the License, or
-## (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; If not, see <http://www.gnu.org/licenses/>.
-
-
-# Please add the openmpi_ext folder containing the compiled .oct files to the path,
-# for instance 
-addpath("../src");
-# If you have 4 cores, or a network of 4 computers with passwordless ssh and the same openmpi 1.3.3 installation,
-# type at the terminal: mpirun -np 4 octave --eval hellocell
-
-
-   MPI_Init();
-   # the string NEWORLD is just a label; it could be whatever you want
-   CW = MPI_Comm_Load("NEWORLD");
-
-   
-
-  my_rank = MPI_Comm_rank(CW);
-  p = MPI_Comm_size(CW);
-  # TAG is very important to identify the message
-  TAG = 1;
-
-
-  message="";
-  if (my_rank != 0)
-     message = {magic(3) 17 'fred'; ...
-     'AliceBettyCarolDianeEllen' 'yp' 42; ...
-     {1} 2 3};
-     rankvect = 0;
-     [info] = MPI_Send(message,rankvect,TAG,CW);
-  else
-        for source = 1:p-1
-          disp("We are at rank 0 that is master etc..");
-          [messager, info] = MPI_Recv(source,TAG,CW);
-	  info
-          messager
-        endfor
-  end   
-
-
-  MPI_Finalize();
-
--- a/extra/openmpi_ext/inst/hellosparsemat.m	Fri Mar 12 14:12:12 2010 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,50 +0,0 @@
-# Please add the openmpi_ext folder containing the compiled .oct files to the path,
-# for instance 
-  addpath("../src");
-  MPI_Init();
-  # the string NEWORLD is just a label; it could be whatever you want
-  CW = MPI_Comm_Load("NEWORLD");
-  my_rank = MPI_Comm_rank(CW);
-  p = MPI_Comm_size(CW);
-# tag[0] ----> type of octave_value
-# tag[1] ----> array of three elements 1) num of rows 2) number of columns 3) number of non zero elements
-# tag[2] ---->  vector of rowindex
-# tag[3] ---->  vector of columnindex
-# tag[4] ---->  vector of  non zero elements
-# These tags are derived from mytag by MPI_Send and MPI_Recv (see the source code)
-
-  mytag = 48;
-
-
-
-
-# This is just to fill the sparse matrix
-  M=5;
-  N=5;
-  D=0.9;
-    message = sprand (M, N, D);
-#  load message
- 
-
- 
-  if (my_rank != 0)
-      dest = 0;
-#       rankvect is the vector containing the list of destination ranks
-     rankvect(1,1) = 0;
-     [info] = MPI_Send(message,rankvect,mytag,CW);
-     disp("This is the info flag returned by sending the message --")
-     info
-  else
-        for source = 1:p-1
-          messager='';
-          disp("We are at rank 0 that is master etc..");
-          [messager, info] = MPI_Recv(source,mytag,CW);
-          disp("Rank 0 is the master receiving ... :");
-          if (isequal (messager, message))
-                disp('OK!');
-          endif
-      messager
-          endfor
-  end   
-
-   MPI_Finalize();
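[Editorial note: to make the tag scheme in hellosparsemat.m concrete, here is a hedged Octave sketch (an illustration, not part of the example) of the pieces that those tags carry, built with standard Octave functions; the receiver reassembles the matrix from them.]

  msg = sprand (5, 5, 0.9);
  [ridx, cidx, vals] = find (msg);                        # tag[2], tag[3], tag[4]
  dims = [rows(msg) columns(msg) nnz(msg)];               # tag[1]: rows, columns, nonzeros
  rebuilt = sparse (ridx, cidx, vals, dims(1), dims(2));  # what the receiver reconstructs
  # tag[0] carries the octave_value type, so the receiver knows it is a sparse matrix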
--- a/extra/openmpi_ext/inst/hellostruct.m	Fri Mar 12 14:12:12 2010 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,51 +0,0 @@
-## Copyright (C) 2009  Riccardo Corradini <riccardocorradini@yahoo.it>
-## under the terms of the GNU General Public License.
-##
-## This program is free software; you can redistribute it and/or modify
-## it under the terms of the GNU General Public License as published by
-## the Free Software Foundation; either version 2 of the License, or
-## (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; If not, see <http://www.gnu.org/licenses/>.
-
-
-# Please add the openmpi_ext folder containing the compiled .oct files to the path,
-# for instance 
-addpath("../src");
-# If you have 4 cores, or a network of 4 computers with passwordless ssh and the same openmpi 1.3.3 installation,
-# type at the terminal: mpirun -np 4 octave --eval hellostruct
-
-
-  MPI_Init();
-  # the string NEWORLD is just a label; it could be whatever you want
-  CW = MPI_Comm_Load("NEWORLD");
-
-   
-
-  my_rank = MPI_Comm_rank(CW);
-  p = MPI_Comm_size(CW);
-  # TAG is very important to identify the message
-  TAG = 1;
-
-
-  message="";
-  if (my_rank != 0)
-     message = struct('f1', {1 3; 2 4}, 'f2', 25);
-     # Could be a vector containing the list of rank identifiers; 
-     rankvect = 0;
-     [info] = MPI_Send(message,rankvect,TAG,CW);
-  else
-        for source = 1:p-1
-          disp("We are at rank 0 that is master etc..");
-          [message, info] = MPI_Recv(source,TAG,CW);
-          message
-        endfor
-  end   
-  MPI_Finalize();
-
--- a/extra/openmpi_ext/inst/helloworld.m	Fri Mar 12 14:12:12 2010 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,52 +0,0 @@
-## Copyright (C) 2009  Riccardo Corradini <riccardocorradini@yahoo.it>
-## under the terms of the GNU General Public License.
-##
-## This program is free software; you can redistribute it and/or modify
-## it under the terms of the GNU General Public License as published by
-## the Free Software Foundation; either version 2 of the License, or
-## (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; If not, see <http://www.gnu.org/licenses/>.
-
-
-# Please add the openmpi_ext folder containing the compiled .oct files to the path,
-# for instance 
-addpath("../src");
-# If you have 4 cores, or a network of 4 computers with passwordless ssh and the same openmpi 1.3.3 installation,
-# type at the terminal: mpirun -np 4 octave --eval helloworld
-
-
-   MPI_Init();
-   # the string NEWORLD is just a label; it could be whatever you want
-   CW = MPI_Comm_Load("NEWORLD");
-
-   
-
-  my_rank = MPI_Comm_rank(CW);
-  p = MPI_Comm_size(CW);
-  # Could be any number
-  TAG=1;
-
-
-  message="";
-  if (my_rank != 0)
-      message = sprintf('Greetings from process: %d!',my_rank);
-      # rankvect is the vector containing the list of destination ranks
-      rankvect = 0;
-      [info] = MPI_Send(message,rankvect,TAG,CW);
-  else
-      for source = 1:p-1
-          disp("We are at rank 0 that is master etc..");
-          [message, info] = MPI_Recv(source,TAG,CW);
-          printf('%s\n', message);
-      endfor
-  end   
-
-  MPI_Finalize();
-
--- a/extra/openmpi_ext/src/MPI_Barrier.cc	Fri Mar 12 14:12:12 2010 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,85 +0,0 @@
-// Copyright (C) 2004-2007 Javier Fernández Baldomero, Mancia Anguita López
-// This code has been adjusted for octave3.2.3 and more in 
-// 2009 by  Riccardo Corradini <riccardocorradini@yahoo.it>
-// Copyright (C) 2009 VZLU Prague
-
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation; either version 2 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program; If not, see <http://www.gnu.org/licenses/>.
-
-#define   NAME  MPI_Barrier
-/*
- * ----------------------------------------------------
- * Blocks until all processes in the communicator have reached this routine
- * [info ] = MPI_Barrier (comm)
- * ----------------------------------------------------
- */
-
-
-#include "simple.h"    
-DEFUN_DLD(NAME, args, ,"-*- texinfo -*-\n\
-@deftypefn {Loadable Function} {} @var{INFO} = MPI_Barrier (@var{COMM})\n\
-Blocks until all processes in the communicator have reached this routine.\n\
-If @var{COMM}, an Octave communicator object loaded with MPI_Comm_Load, is omitted, \n\
-an error is returned. \n\
- @example\n\
- @group\n\
-    @var{INFO} (int) return code\n\
-       0 MPI_SUCCESS    No error\n\
-       5 MPI_ERR_COMM   Invalid communicator (NULL?)\n\
-      13 MPI_ERR_ARG    Invalid argument (typically a NULL pointer?)\n\
-@end group\n\
-@end example\n\
-@end deftypefn")
-{
-
-    octave_value results;
-    int nargin = args.length ();
-   if (nargin != 1)
-     {
-       error ("expecting  1 input argument");
-       return results;
-     }
-
-  if (!simple_type_loaded)
-    {
-      simple::register_type ();
-      simple_type_loaded = true;
-      mlock ();
-    }
-
-	if((args.length() != 1 )
-	   || args(0).type_id()!=simple::static_type_id()){
-		
-		error ("Please enter an Octave communicator object!");
-		return octave_value(-1);
-	}
-
-	const octave_base_value& rep = args(0).get_rep();
-        const simple& B = ((const simple &)rep);
-        MPI_Comm comm = ((const simple&) B).comunicator_value ();
-        if (! error_state)
-          {
-            int info = MPI_Barrier (comm);
-
-            results = info;
-	  }
-    else
-      print_usage ();
-   comm= NULL;
-    /* [info] = MPI_Barrier (comm) */
-   
-    return results;
-}
-
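[Editorial note: a minimal usage sketch (assuming the compiled .oct files are on the path), showing the synchronization point that MPI_Barrier provides.]

  MPI_Init ();
  CW = MPI_Comm_Load ("NEWORLD");
  # ... every rank does some independent work here ...
  info = MPI_Barrier (CW);   # returns 0 (MPI_SUCCESS) once all ranks have arrived
  MPI_Finalize ();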
--- a/extra/openmpi_ext/src/MPI_Comm_Load.cc	Fri Mar 12 14:12:12 2010 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,56 +0,0 @@
-// Copyright (C) 2009 Riccardo Corradini <riccardocorradini@yahoo.it>
-// under the terms of the GNU General Public License.
-// Copyright (C) 2009 VZLU Prague
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation; either version 2 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program; If not, see <http://www.gnu.org/licenses/>.
-#include "simple.h"
-
-DEFUN_DLD(MPI_Comm_Load, args, ,"-*- texinfo -*-\n\
-@deftypefn {Loadable Function} {} @var{COMM} = MPI_Comm_Load (@var{DESCRIPTION})\n\
-Return @var{COMM}, an MPI communicator object whose description is the string @var{DESCRIPTION}.\n\
-The underlying communicator is always MPI_COMM_WORLD. \n\
-@var{DESCRIPTION} is required: if it is omitted or is not a string, \n\
-an error is returned.\n\
-For\n\
-example,\n\
-\n\
-@example\n\
-@group\n\
-MPI_Init();\n\
-X = MPI_Comm_Load(\"description\"); \n\
-whos X\n\
-MPI_Finalize();\n\
-@end group\n\
-@end example\n\
-@end deftypefn")
-{
-  if (!simple_type_loaded)
-    {
-      simple::register_type ();
-      simple_type_loaded = true;
-      mlock ();
-    }
-
-  octave_value retval;
-  if (args.length () != 1 || !args (0).is_string ())
-    {
-      error ("MPI_Comm_Load: first argument must be a string");
-      return retval;
-    }
-   
-  const std::string name = args (0).string_value ();
-  retval = new simple (name,MPI_COMM_WORLD);
- 
-  return retval;
-}
\ No newline at end of file
--- a/extra/openmpi_ext/src/MPI_Comm_Test.cc	Fri Mar 12 14:12:12 2010 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-#include "simple.h"
-DEFUN_DLD(MPI_Comm_Test, args, ,"-*- texinfo -*-\n\
-@deftypefn {Loadable Function} {} @var{DESCRIPTION} = MPI_Comm_Test (@var{COMM})\n\
-Return @var{DESCRIPTION}, the string description of the MPI communicator object @var{COMM}.\n\
-For\n\
-example,\n\
-\n\
-@example\n\
-@group\n\
-MPI_Init();\n\
-X = MPI_Comm_Load(\"description\"); \n\
-whos X\n\
-MPI_Comm_Test(X) \n\
-@result{} \"description\"\n\
-MPI_Finalize();\n\
-@end group\n\
-@end example\n\
-@end deftypefn")
-{
-  if (!simple_type_loaded)
-    {
-      simple::register_type ();
-      simple_type_loaded = true;
-      mlock ();
-    }
-
-  octave_value retval;
-	if(args.length() != 1 
-	   || args(0).type_id()!=simple::static_type_id()){
-		
-		error("usage: MPI_Comm_Test(octave_comunicator_object)");
-		return octave_value(-1);
-	}
-	const octave_base_value& rep = args(0).get_rep();
-	const simple& b = ((const simple &)rep);
-        octave_stdout << "MPI_Comm_Test: the communicator description is " << b.name_value()  << ".\n";
-       MPI_Comm res = b.comunicator_value();
-       retval = b.name_value();
-  return retval;
-}
\ No newline at end of file
--- a/extra/openmpi_ext/src/MPI_Comm_rank.cc	Fri Mar 12 14:12:12 2010 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,88 +0,0 @@
-// Copyright (C) 2004-2007 Javier Fernández Baldomero, Mancia Anguita López
-// This code has been adjusted for octave3.2.3 and more in 
-// 2009 by  Riccardo Corradini <riccardocorradini@yahoo.it>
-// Copyright (C) 2009 VZLU Prague
-
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation; either version 2 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program; If not, see <http://www.gnu.org/licenses/>.
-
-
-#define   NAME  MPI_Comm_rank
-/*
- * ----------------------------------------------------
- * Determines the rank of the calling process in the communicator
- * [rank info] = MPI_Comm_rank (comm)
- * ----------------------------------------------------
- */
-
-#include "simple.h"       
-DEFUN_DLD(NAME, args,nargout ,"-*- texinfo -*-\n\
-@deftypefn {Loadable Function} {} [@var{RANK} @var{INFO}] = MPI_Comm_rank (@var{COMM})\n\
-Determines rank of calling process in communicator.\n\
-If @var{COMM}, an Octave communicator object loaded with MPI_Comm_Load, is omitted, \n\
-an error is returned. \n\
- @example\n\
- @group\n\
-    @var{RANK} rank of the calling process in group of communicator\n\
-    @var{INFO} (int) return code\n\
-       0 MPI_SUCCESS    No error\n\
-       5 MPI_ERR_COMM   Invalid communicator (NULL?)\n\
-      13 MPI_ERR_ARG    Invalid argument (typically a NULL pointer?)\n\
-SEE ALSO: MPI_Comm_size\n\
-@end group\n\
-@end example\n\
-@end deftypefn")
-
-{
-    octave_value_list results;
-    int nargin = args.length ();
-   if (nargin != 1)
-     {
-       error ("expecting  1 input argument");
-       return results;
-     }
-
-  if (!simple_type_loaded)
-    {
-      simple::register_type ();
-      simple_type_loaded = true;
-      mlock ();
-    }
-
-	if((args.length() != 1 )
-	   || args(0).type_id()!=simple::static_type_id()){
-		
-		error ("Please enter an Octave communicator object!");
-		return octave_value(-1);
-	}
-
-	const octave_base_value& rep = args(0).get_rep();
-        const simple& B = ((const simple &)rep);
-        MPI_Comm comm = ((const simple&) B).comunicator_value ();	
-        if (! error_state)
-          {
-            int my_rank;
-            int info = MPI_Comm_rank (comm, &my_rank);
-            if (nargout > 1)
-              results(1) = info;
-            results(0) = my_rank;
-          }
-    else
-      print_usage ();
-    comm= NULL;
-    /* [rank info] = MPI_Comm_rank (comm) */
-   
-    return results;
-
-}
--- a/extra/openmpi_ext/src/MPI_Comm_size.cc	Fri Mar 12 14:12:12 2010 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,89 +0,0 @@
-// Copyright (C) 2004-2007 Javier Fernández Baldomero, Mancia Anguita López
-// This code has been adjusted for octave3.2.3 and more in 
-// 2009 by  Riccardo Corradini <riccardocorradini@yahoo.it>
-// Copyright (C) 2009 VZLU Prague
-
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation; either version 2 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program; If not, see <http://www.gnu.org/licenses/>.
-
-#define   NAME  MPI_Comm_size
-/*
- * ----------------------------------------------------
- * Determines the number of processes in the communicator's group
- * [size info] = MPI_Comm_size (comm)
- * ----------------------------------------------------
- */
-
-#include "simple.h"  
-DEFUN_DLD(NAME, args,nargout ,"-*- texinfo -*-\n\
-@deftypefn {Loadable Function} {} [@var{SIZE} @var{INFO}] = MPI_Comm_size (@var{COMM})\n\
-Determines the number of processes in the group of the communicator.\n\
-If @var{COMM}, an Octave communicator object loaded with MPI_Comm_Load, is omitted, \n\
-an error is returned. \n\
- @example\n\
- @group\n\
-    @var{SIZE} number of processes in the group of the communicator\n\
-    @var{INFO} (int) return code\n\
-       0 MPI_SUCCESS    No error\n\
-       5 MPI_ERR_COMM   Invalid communicator (NULL?)\n\
-      13 MPI_ERR_ARG    Invalid argument (typically a NULL pointer?)\n\
-SEE ALSO: MPI_Comm_rank\n\
-@end group\n\
-@end example\n\
-@end deftypefn")
-
-{
-    octave_value_list results;
-    int nargin = args.length ();
-   if (nargin != 1)
-     {
-       error ("expecting  1 input argument");
-       return results;
-     }
-
-  if (!simple_type_loaded)
-    {
-      simple::register_type ();
-      simple_type_loaded = true;
-      mlock ();
-    }
-
-	if((args.length() != 1 )
-	   || args(0).type_id()!=simple::static_type_id()){
-		
-		error ("Please enter an Octave communicator object!");
-		return octave_value(-1);
-	}
-
-	const octave_base_value& rep = args(0).get_rep();
-        const simple& B = ((const simple &)rep);
-        MPI_Comm comm = ((const simple&) B).comunicator_value ();
-
-
-        if (! error_state)
-          {
-            int my_size;
-            int info = MPI_Comm_size (comm, &my_size);
-            if (nargout > 1)
-              results(1) = info;
-            results(0) = my_size;
-          }
-    else
-      print_usage ();
-    comm= NULL;
-    /* [size info] = MPI_Comm_size (comm) */
-   
-    return results;
-}
-
--- a/extra/openmpi_ext/src/MPI_Finalize.cc	Fri Mar 12 14:12:12 2010 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,48 +0,0 @@
-// Copyright (C) 2004-2007 Javier Fernández Baldomero, Mancia Anguita López
-// This code has been adjusted for octave3.2.3 and more in 
-// 2009 by  Riccardo Corradini <riccardocorradini@yahoo.it>
-
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation; either version 2 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program; If not, see <http://www.gnu.org/licenses/>.
-
-#define   NAME    MPI_Finalize
-/*
- * ----------------------------------------------------
- * Terminates MPI execution environment
- * info = MPI_Finalize
- * ----------------------------------------------------
- */
-#include "mpi.h"       
-#include <octave/oct.h>
-
-DEFUN_DLD(NAME, args, nargout,"-*- texinfo -*-\n\
-@deftypefn {Loadable Function} {} @var{INFO} = MPI_Finalize()\n\
-           Terminates MPI execution environment\n\
-\n\
- @example\n\
- @group\n\
-    @var{INFO} (int) return code\n\
-       0 MPI_SUCCESS    No error\n\
-       5 MPI_ERR_COMM   Invalid communicator (NULL?)\n\
-      13 MPI_ERR_ARG    Invalid argument (typically a NULL pointer?)\n\
-SEE ALSO: MPI_Init\n\
-@end group\n\
-@end example\n\
-@end deftypefn")
-{
-
-    int info = MPI_Finalize();
-   
-    return octave_value(info);
-}
--- a/extra/openmpi_ext/src/MPI_Finalized.cc	Fri Mar 12 14:12:12 2010 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,55 +0,0 @@
-// Copyright (C) 2004-2007 Javier Fernández Baldomero, Mancia Anguita López
-// This code has been adjusted for octave3.2.3 and more in 
-// 2009 by  Riccardo Corradini <riccardocorradini@yahoo.it>
-
-// under the terms of the GNU General Public License.
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation; either version 2 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program; If not, see <http://www.gnu.org/licenses/>.
-
-#define   NAME  MPI_Finalized
-/*
- * ----------------------------------------------------
- * Indicates whether MPI_Finalize has completed
- * [flag info] = MPI_Finalized
- * ----------------------------------------------------
- */
-#include "mpi.h"       
-#include <octave/oct.h>
-DEFUN_DLD(NAME, args, nargout,"-*- texinfo -*-\n\
-@deftypefn {Loadable Function} {} [@var{FLAG} @var{INFO}] = MPI_Finalized\n\
-           Indicates whether MPI_Finalize has completed\n\
-\n\
- @example\n\
- @group\n\
-    @var{FLAG} (int) return code\n\
-	    0 false\n\
-            1 true\n\
-    @var{INFO} (int) return code\n\
-       0 MPI_SUCCESS    This function always returns MPI_SUCCESS\n\
-SEE ALSO: MPI_Init, MPI_Finalize\n\
-@end group\n\
-@end example\n\
-@end deftypefn")
-{
-   octave_value_list results;
-   int flag;           
-
-    int info = MPI_Finalized(&flag);
-    if (nargout > 1)
-      results(1) = info;
-    results(0) = flag != 0;
-    return results;
-
-    /* [flag info] = MPI_Finalized */
-}
--- a/extra/openmpi_ext/src/MPI_Init.cc	Fri Mar 12 14:12:12 2010 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,69 +0,0 @@
-// Copyright (C) 2004-2007 Javier Fernández Baldomero, Mancia Anguita López
-// This code has been adjusted for octave3.2.3 and more in 
-// 2009 by  Riccardo Corradini <riccardocorradini@yahoo.it>
-
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation; either version 2 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program; If not, see <http://www.gnu.org/licenses/>.
-
-
-
-
-
-
-/*
- * ----------------------------------------------------
- * Initialize the MPI execution environment
- * info = MPI_Init [ ( 'arg' [, 'arg']... ) ]
- * ----------------------------------------------------
- */
-
-#define   NAME  MPI_Init
-
-#include "mpi.h"        // mpi.h, oct.h
-#include <octave/oct.h>
-DEFUN_DLD(NAME, args, nargout,"-*- texinfo -*-\n\
-@deftypefn {Loadable Function} {} @var{INFO} = MPI_Init()\n\
-           Initialize the MPI execution environment\n\
-\n\
- @example\n\
- @group\n\
-    @var{INFO} (int) return code\n\
-       0 MPI_SUCCESS    No error\n\
-      16 MPI_ERR_OTHER  Attempt was made to call MPI_Init a  second  time\n\
-                       MPI_Init may only be called once in a program\n\
-                       \n\
-SEE ALSO: MPI_Finalize, MPI_Initialized, MPI_Finalized\n\
-@end group\n\
-@end example\n\
-@end deftypefn")
-{
-    int nargin = args.length();            
-    for (int i=0; i<nargin; i++){
-    if( ! args(i).is_string() ) {
-        error("MPI_Init: args must be strings");
-        return octave_value (MPI_ERR_ARG);    // error returns nothing
-    }
-    }
-
-    string_vector argvec = args.make_argv("MPI_Init");
-    char **argve= argvec.c_str_vec();
-    char **argv =&argve[1];
-
-//  printf("args: "); for (int i=0; i<nargin; i++) printf("%s ",argv[i]);
-//  printf("\n");
-
-    int info = MPI_Init(&nargin, &argv);
-    free(argve);
-    return octave_value (info);
-}
- 
\ No newline at end of file
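[Editorial note: a minimal lifecycle sketch using this package's own wrappers; return values are as documented in the corresponding files of this diff, and the second output of MPI_Initialized/MPI_Finalized is optional.]

  info = MPI_Init ();                 # 0 on success; a second call yields MPI_ERR_OTHER
  [flag, info] = MPI_Initialized ();  # flag is now true
  # ... parallel work with MPI_Comm_Load, MPI_Send, MPI_Recv ...
  MPI_Finalize ();
  [flag, info] = MPI_Finalized ();    # flag is now true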
--- a/extra/openmpi_ext/src/MPI_Initialized.cc	Fri Mar 12 14:12:12 2010 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,55 +0,0 @@
-// Copyright (C) 2004-2007 Javier Fernández Baldomero, Mancia Anguita López
-// This code has been adjusted for octave3.2.3 and more in
-// 2009 by  Riccardo Corradini <riccardocorradini@yahoo.it>
-
-// under the terms of the GNU General Public License.
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation; either version 2 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program; If not, see <http://www.gnu.org/licenses/>.
-
-#define   NAME  MPI_Initialized
-/*
- * ----------------------------------------------------
- * Indicates whether MPI_Init has been called
- * [flag info] = MPI_Initialized
- * ----------------------------------------------------
- */
-#include "mpi.h"
-#include <octave/oct.h>
-DEFUN_DLD(NAME, args, nargout,"-*- texinfo -*-\n\
-@deftypefn {Loadable Function} {} [@var{FLAG} @var{INFO}] = MPI_Initialized\n\
-           Indicates whether MPI_Init has been called\n\
-\n\
- @example\n\
- @group\n\
-    @var{FLAG} (int) return code\n\
-	    0 false\n\
-            1 true\n\
-    @var{INFO} (int) return code\n\
-       0 MPI_SUCCESS    This function always returns MPI_SUCCESS\n\
-SEE ALSO: MPI_Init, MPI_Finalize\n\
-@end group\n\
-@end example\n\
-@end deftypefn")
-{
-  octave_value_list results;
-   int flag;
-
-   int info = MPI_Initialized(&flag);
-    if (nargout > 1)
-      results(1) = info;
-    results(0) = flag != 0;
-   return results;
-
-   /* [flag info] = MPI_Initialized */
-}
--- a/extra/openmpi_ext/src/MPI_Iprobe.cc	Fri Mar 12 14:12:12 2010 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,129 +0,0 @@
-// Copyright (C) 2004-2007 Javier Fernández Baldomero, Mancia Anguita López
-// This code has been adjusted for octave3.2.3 and more in 
-// 2009 by  Riccardo Corradini <riccardocorradini@yahoo.it>
-
-// under the terms of the GNU General Public License.
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation; either version 2 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program; If not, see <http://www.gnu.org/licenses/>.
-#define   NAME  MPI_Iprobe
-/*
- * ----------------------------------------------------
- * Nonblocking test for a message
- * [flag stat info] = MPI_Iprobe (src, tag, comm)
- * ----------------------------------------------------
- */
-#include "simple.h"	
-#include <octave/ov-struct.h>
-
-Octave_map put_MPI_Stat (const MPI_Status &stat){
-/*---------------------------------------------*/
-    Octave_map map;
-    octave_value tmp = stat.MPI_SOURCE;
-    map.assign("src", tmp);
-    tmp = stat.MPI_TAG;
-    map.assign("tag", tmp );
-    tmp = stat.MPI_ERROR;
-    map.assign("err", tmp );
-    tmp = stat._count;
-    map.assign("cnt", tmp);
-    tmp = stat._cancelled;
-    map.assign("can", tmp);
-
-    return map;
-}
-
-
-
-DEFUN_DLD(NAME, args, nargout,"-*- texinfo -*-\n\
-@deftypefn {Loadable Function} {} [@var{FLAG} @var{STAT} @var{INFO}] = MPI_Iprobe(@var{SRCRANK}, @var{TAG}, @var{COMM})\n\
-           Nonblocking test for a message\n\
- @example\n\
- @group\n\
- \n\
-     @var{FLAG} (int)\n\
-           1 if a message is ready to be received\n\
-           0 if it is not\n\
-     @var{STAT} struct object\n\
-       src (int)       source rank for the accepted message\n\
-       tag (int)       message tag for the accepted message\n\
-       err (int)       error code\n\
-       cnt (int)       count\n\
-       can (int)       cancelled flag\n\
-    @var{INFO} (int) return code\n\
-      0 MPI_SUCCESS    No error\n\
-     13 MPI_ERR_ARG    Invalid argument\n\
-      5 MPI_ERR_COMM   Invalid communicator (null?)\n\
-      4 MPI_ERR_TAG    Invalid tag argument (MPI_ANY_TAG, 0..MPI_TAG_UB attr)\n\
-      6 MPI_ERR_RANK   Invalid src/dst rank (MPI_ANY_SOURCE, 0..Comm_size-1)\n\
- @end group\n\
- @end example\n\
- \n\
-  SEE ALSO: MPI_Probe, MPI_Recv, MPI documentation for examples\n\
-@end deftypefn")
-{
-   octave_value_list results;
-   int nargin = args.length ();
-   if (nargin != 3)
-     {
-       error ("expecting 3 input arguments");
-       return results;
-     }
-
-
-
-
-  if (!simple_type_loaded)
-    {
-      simple::register_type ();
-      simple_type_loaded = true;
-      mlock ();
-    }
-
-	if( args(2).type_id()!=simple::static_type_id()){
-		
-		error("Please enter an Octave communicator object!");
-		return octave_value(-1);
-	}
-
-	const octave_base_value& rep = args(2).get_rep();
-        const simple& B = ((const simple &)rep);
-        MPI_Comm comm = ((const simple&) B).comunicator_value ();
-   if (error_state)
-     return results;
-
-    int src = args(0).int_value();    
-  if (error_state)
-    {
-      error ("expecting first argument to be an integer");
-      return results;
-    }
-
-    int tag = args(1).int_value();    
-  if (error_state)
-    {
-      error ("expecting second argument to be an integer");
-      return results;
-    }	
-    int flag;
-    MPI_Status stat = {0,0,0,0};    
-    int info = MPI_Iprobe(src,tag,comm,&flag,&stat);
-    comm= NULL;
-    results(0) = flag;
-    results(1) = put_MPI_Stat(stat);
-    results(2) = info;
-    return results;
-	/* [flag stat info] = MPI_Iprobe (src, tag, comm) */
-}
-
-	
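A hedged usage sketch for the nonblocking probe removed above, in Octave; the communicator label, source rank and tag values are illustrative, and MPI_Comm_Load is assumed to take a string label as elsewhere in the package.

    % poll until a message with tag 7 from rank 0 is pending, then receive it
    comm = MPI_Comm_Load ("acomm");
    do
      [flag, stat, info] = MPI_Iprobe (0, 7, comm);
    until (flag)
    [value, info] = MPI_Recv (stat.src, stat.tag, comm);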
--- a/extra/openmpi_ext/src/MPI_Op_Load.cc	Fri Mar 12 14:12:12 2010 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,24 +0,0 @@
-#include "simpleop.h"
-
-DEFUN_DLD(MPI_Op_Load, args, ,"")
-{
-  if (!simpleop_type_loaded)
-    {
-      simpleop::register_type ();
-      simpleop_type_loaded = true;
-      mlock ();
-    }
-
-  octave_value retval;
-  if (args.length () != 1 || !args (0).is_string ())
-    {
-      error ("MPI_Op_Load: first argument must be a string");
-      return retval;
-    }
-   
-  const std::string name = args (0).string_value ();
-  MPI_Op OP;
-  retval = new simpleop (name,OP);
- 
-  return retval;
-}
\ No newline at end of file
--- a/extra/openmpi_ext/src/MPI_Op_Test.cc	Fri Mar 12 14:12:12 2010 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,25 +0,0 @@
-#include "simpleop.h"
-
-DEFUN_DLD(MPI_Op_Test, args, ,"")
-{
-  if (!simpleop_type_loaded)
-    {
-      simpleop::register_type ();
-      simpleop_type_loaded = true;
-      mlock ();
-    }
-
-  octave_value retval;
-	if(args.length() != 1 
-	   || args(0).type_id()!=simpleop::static_type_id()){
-		
-		error("usage: simpleoptest(simpleopobject)");
-		return octave_value(-1);
-	}
-	const octave_base_value& rep = args(0).get_rep();
-	const simpleop& b = ((const simpleop &)rep);
-        octave_stdout << "simpleoptest: operator name is " << b.name_value() << "\n";
-       MPI_Op res = b.operator_value();
-   
-  return retval;
-}
\ No newline at end of file
--- a/extra/openmpi_ext/src/MPI_Probe.cc	Fri Mar 12 14:12:12 2010 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,129 +0,0 @@
-// Copyright (C) 2004-2007 Javier Fernández Baldomero, Mancia Anguita López
-// This code has been adjusted for octave3.2.3 and more in 
-// 2009 by  Riccardo Corradini <riccardocorradini@yahoo.it>
-
-// under the terms of the GNU General Public License.
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation; either version 2 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program; If not, see <http://www.gnu.org/licenses/>.
-
-
-
-#define   NAME  MPI_Probe
-/*
- * ----------------------------------------------------
- * Blocking test for a message
- * [stat info] = MPI_Probe (src, tag, comm)
- * ----------------------------------------------------
- */
-#include "simple.h"
-	
-#include <octave/ov-struct.h>
-
-
-
-Octave_map put_MPI_Stat (const MPI_Status &stat){
-/*---------------------------------------------*/
-    Octave_map map;
-    octave_value tmp = stat.MPI_SOURCE;
-    map.assign("src", tmp);
-    tmp = stat.MPI_TAG;
-    map.assign("tag", tmp );
-    tmp = stat.MPI_ERROR;
-    map.assign("err", tmp );
-    tmp = stat._count;
-    map.assign("cnt", tmp);
-    tmp = stat._cancelled;
-    map.assign("can", tmp);
-
-    return map;
-}
-
-DEFUN_DLD(NAME, args, nargout,"-*- texinfo -*-\n\
-@deftypefn {Loadable Function} {} [@var{STAT} @var{INFO}] = MPI_Probe(@var{SRCRANK}, @var{TAG}, @var{COMM})\n\
-           Blocking test for a message\n\
- @example\n\
- @group\n\
- \n\
-     @var{STAT} struct object\n\
-       src (int)       source rank for the accepted message\n\
-       tag (int)       message tag for the accepted message\n\
-       err (int)       error code\n\
-       cnt (int)       count\n\
-       can (int)       cancelled flag\n\
-    @var{INFO} (int) return code\n\
-      0 MPI_SUCCESS    No error\n\
-     13 MPI_ERR_ARG    Invalid argument\n\
-      5 MPI_ERR_COMM   Invalid communicator (null?)\n\
-      4 MPI_ERR_TAG    Invalid tag argument (MPI_ANY_TAG, 0..MPI_TAG_UB attr)\n\
-      6 MPI_ERR_RANK   Invalid src/dst rank (MPI_ANY_SOURCE, 0..Comm_size-1)\n\
- @end group\n\
- @end example\n\
- \n\
-  SEE ALSO: MPI_Iprobe, MPI_Recv, and MPI documentation for C examples\n\
-@end deftypefn")
-{
-   octave_value_list results;
-   int nargin = args.length ();
-   if (nargin != 3)
-     {
-       error ("expecting 3 input arguments");
-       return results;
-     }
-
-
-
-
-
-
-  if (!simple_type_loaded)
-    {
-      simple::register_type ();
-      simple_type_loaded = true;
-      mlock ();
-    }
-
-	if( args(2).type_id()!=simple::static_type_id()){
-		
-		error("Please enter an Octave communicator object!");
-		return octave_value(-1);
-	}
-
-	const octave_base_value& rep = args(2).get_rep();
-        const simple& B = ((const simple &)rep);
-        MPI_Comm comm = ((const simple&) B).comunicator_value ();
-    int src = args(0).int_value();    
-  if (error_state)
-    {
-      error ("expecting first argument to be an integer");
-      return results;
-    }
-
-    int tag = args(1).int_value();    
-  if (error_state)
-    {
-      error ("expecting second argument to be an integer");
-      return results;
-    }
-    MPI_Status stat = {0,0,0,0};
-    int info = MPI_Probe(src,tag,comm,&stat);
-    comm= NULL;
-    results(0) = put_MPI_Stat(stat);
-    results(1) = info;
-    return results;
-	/* [ stat info ] = MPI_Probe (src, tag, comm) */
-}
-
-
-
-
--- a/extra/openmpi_ext/src/MPI_Recv.cc	Fri Mar 12 14:12:12 2010 +0000
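For comparison with MPI_Iprobe, a short Octave sketch of the blocking probe removed above; rank, tag and the communicator label are illustrative values only.

    % block until a message with tag 7 from rank 1 is available
    comm = MPI_Comm_Load ("acomm");
    [stat, info] = MPI_Probe (1, 7, comm);
    % the status structure reports source and tag, which can be fed to MPI_Recv
    [value, info] = MPI_Recv (stat.src, stat.tag, comm);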
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,694 +0,0 @@
-// Copyright (C) 2009 Riccardo Corradini <riccardocorradini@yahoo.it>
-// under the terms of the GNU General Public License.
-// Copyright (C) 2009 VZLU Prague
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation; either version 2 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program; If not, see <http://www.gnu.org/licenses/>.
-
-#include "simple.h"
-#include <ov-cell.h>    // avoid errmsg "cell -- incomplete datatype"
-#include <oct-map.h>    // avoid errmsg "Oct.map -- invalid use undef type"
-
-
-
-
-/*----------------------------------*/        /* forward declaration */
-
-
-
-int recv_class( MPI_Comm comm, octave_value &ov,  int source, int mytag);        /* along the datatype */
-/*----------------------------------*/    /* to receive any octave_value */
- 
-int recv_cell(MPI_Comm comm,octave_value &ov, int source, int mytag);
-int recv_struct( MPI_Comm comm, octave_value &ov, int source, int mytag);
-int recv_string( MPI_Comm comm, octave_value &ov,int source, int mytag);
-int recv_range(MPI_Comm comm, Range &range,int source, int mytag);
-
-template<class AnyElem>
-int recv_vec(MPI_Comm comm, AnyElem &LBNDA, int nitem, MPI_Datatype TRCV ,int source, int mytag);
-
-int recv_matrix(bool is_complex,MPI_Datatype TRcv, MPI_Comm comm, octave_value &ov,int source, int mytag);
-int recv_sp_mat(bool is_complex,MPI_Datatype TRcv, MPI_Comm comm, octave_value &ov,int source, int mytag);
-
-template <class Any>
-int recv_scalar(MPI_Datatype TRcv, MPI_Comm comm, Any &d, int source, int mytag);
-template <class Any>
-int recv_scalar(MPI_Datatype TRcv, MPI_Comm comm, std::complex<Any> &d, int source, int mytag);
-
-
-
-
-int recv_range(MPI_Comm comm, Range &range,int source, int mytag){        /* put base,limit,incr,nelem */
-/*-------------------------------*/        /* just 3 doubles + 1 int */
-// octave_range (double base, double limit, double inc)
-  MPI_Status stat;
-  OCTAVE_LOCAL_BUFFER(int,tanktag,2);
-  tanktag[0] = mytag;
-  tanktag[1] = mytag+1;
-  OCTAVE_LOCAL_BUFFER(double,d,3);
-
-  int info = MPI_Recv(d, 3, MPI_DOUBLE,  source, tanktag[1] , comm,&stat);
-  if (info !=MPI_SUCCESS) return info;
-
-  // rebuild the Range from the three received doubles (base, limit, increment)
-  range = Range(d[0], d[1], d[2]);
-
-return(info);
-}
-
-// This will get the fortran_vec vector for Any type Octave can handle
-template<class AnyElem>
-int recv_vec(MPI_Comm comm, AnyElem &LBNDA, int nitem  ,MPI_Datatype TRCV ,int source, int mytag)
-{
-		      MPI_Datatype fortvec;
-		      MPI_Type_contiguous(nitem,TRCV, &fortvec);
-		      MPI_Type_commit(&fortvec);
-		      MPI_Status stat;
-		      int info = MPI_Recv((LBNDA), 1,fortvec, source, mytag , comm,&stat);
-  return(info);
-}
-
-
-
-// template specialization for complex case
-template <class Any>
-int recv_scalar(MPI_Datatype TRcv ,MPI_Comm comm, std::complex<Any> &d, int source, int mytag){        
-  int info;
-  MPI_Status stat;
-  OCTAVE_LOCAL_BUFFER(int,tanktag,2);
-  tanktag[0] = mytag;
-  tanktag[1] = mytag+1;
-  OCTAVE_LOCAL_BUFFER(Any,Deco,2);
-
-  info = MPI_Recv(Deco, 2,TRcv, source, tanktag[1] , comm,&stat);
-  if (info !=MPI_SUCCESS) return info;
-
-  // rebuild the complex scalar from the received real and imaginary parts
-  d = std::complex<Any>(Deco[0], Deco[1]);
-
-  return(info);
-}
-
-template <class Any>
-int recv_scalar(MPI_Datatype TRcv , MPI_Comm comm, Any &d, int source, int mytag){        /* directly MPI_Recv it, */
-/*-----------------------------*/        /* it's just a value */
-OCTAVE_LOCAL_BUFFER(int,tanktag,2);
-  tanktag[0]=mytag;
-  tanktag[1]=mytag+1;
-  int info;
-  MPI_Status stat;
-  info = MPI_Recv((&d), 1,TRcv, source, tanktag[1] , comm,&stat);
-  if (info !=MPI_SUCCESS) return info;
-   return(info);
-}
-int recv_string( MPI_Comm comm, octave_value &ov,int source, int mytag){        
-/*-----------------------------*/        /* it's just a  string value */
-std::string cpp_string;
-OCTAVE_LOCAL_BUFFER(int, tanktag, 3);
-tanktag[0]=mytag;
-tanktag[1]=mytag+1;
-tanktag[2]=mytag+2;
-
-  int info,nitem;
-  MPI_Status stat;
-  info = MPI_Recv((&nitem), 1,MPI_INT, source, tanktag[1] , comm,&stat);
-//   printf("I have received number of elements  %i \n",nitem);
-  OCTAVE_LOCAL_BUFFER(char,mess,nitem+1);
-  if (info !=MPI_SUCCESS) return info;
-  MPI_Datatype fortvec;
-  MPI_Type_contiguous(nitem+1,MPI_CHAR, &fortvec);
-  MPI_Type_commit(&fortvec);
-
-
-   info = MPI_Recv(mess, 1,fortvec, source, tanktag[2] , comm,&stat);
-//    printf("Flag for string received  %i \n",info);
-   
-   cpp_string = mess;
-   ov = cpp_string;
-   if (info !=MPI_SUCCESS) return info;
-   return(MPI_SUCCESS);
-}
-
-int recv_matrix( bool is_complex,MPI_Datatype TRCV,const MPI_Comm comm, octave_value &ov,  int source, int mytag){       
-
-OCTAVE_LOCAL_BUFFER(int, tanktag, 6);
-tanktag[0] = mytag;
-tanktag[1] = mytag+1;
-tanktag[2] = mytag+2;
-tanktag[3] = mytag+3;
-tanktag[4] = mytag+4;
-tanktag[5] = mytag+5;
-  int info;
-  int nitem,nd;
-  MPI_Status stat;
-  dim_vector dv;
- 
-          info = MPI_Recv((&nitem), 1,MPI_INT, source, tanktag[1] , comm,&stat);
-//  	  printf("info for nitem=%i\n",info);
-//  	  printf("nitem=%i\n",nitem);
-      if (info !=MPI_SUCCESS) return info;
-          info = MPI_Recv((&nd), 1,MPI_INT, source, tanktag[2] , comm,&stat);
-//  	  printf("info for nd=%i\n",info);
-//  	  printf("nd=%i\n",nd);
-      if (info !=MPI_SUCCESS) return info;
-
-//  Now create contiguous datatype for dim vector
-  dv.resize(nd);
-  OCTAVE_LOCAL_BUFFER(int,dimV,nd);
-  MPI_Datatype dimvec;
-  MPI_Type_contiguous(nd,MPI_INT, &dimvec);
-  MPI_Type_commit(&dimvec);
-
-          info = MPI_Recv((dimV), 1,dimvec, source, tanktag[3] , comm,&stat);
-//  	  printf("info for dim vector=%i\n",info);
-      if (info !=MPI_SUCCESS) return info;
-
-// Now reverse the content of int vector into dim vector
- for (octave_idx_type i=0; i<nd; i++)
- {
-   dv(i) = dimV[i] ;
- }
-		if (TRCV == MPI_DOUBLE and is_complex == false )
-		      {
-			NDArray myNDA(dv);
-		      OCTAVE_LOCAL_BUFFER(double, LBNDA,nitem);
-		      info = recv_vec(comm, LBNDA,nitem ,TRCV ,source, tanktag[4]);
-		      if (info !=MPI_SUCCESS) return info;
-		      
-		      for (octave_idx_type i=0; i<nitem; i++)
-			  {
-			      myNDA(i)=LBNDA[i];
-			  }
-			ov=myNDA;  
-		      } 
-		      
-		else if (TRCV == MPI_DOUBLE and is_complex== true )
-		      {
-		      OCTAVE_LOCAL_BUFFER(double,LBNDA1,nitem);
-		      info = recv_vec(comm, LBNDA1,nitem ,TRCV ,source, tanktag[4]);
-		      if (info !=MPI_SUCCESS) return info;
-		      ComplexNDArray myNDA(dv);  
-		      OCTAVE_LOCAL_BUFFER(double,LBNDA2,nitem);
-		      info = recv_vec(comm, LBNDA2,nitem ,TRCV ,source, tanktag[5]);
-		      if (info !=MPI_SUCCESS) return info;
-			for (octave_idx_type i=0; i<nitem; i++)
-			  {
-			      myNDA(i)=std::complex<double>(LBNDA1[i],LBNDA2[i]);
-			  }
-			  ov=myNDA;
-		      }  
-		else if (TRCV == MPI_INT)
-		      {
-		      OCTAVE_LOCAL_BUFFER(int,LBNDA,nitem);// int buffer to match MPI_INT; tested on Octave 3.2.4
-		      info = recv_vec(comm, LBNDA,nitem ,TRCV ,source, tanktag[4]);
-		      if (info !=MPI_SUCCESS) return info;
-		      int32NDArray   myNDA(dv);
-		      for (octave_idx_type i=0; i<nitem; i++)
-			  {
-			      myNDA(i)=LBNDA[i];
-			  }
-			  ov=myNDA;
-		      }	  
-		else if (TRCV == MPI_FLOAT and is_complex == false)
-		      {
-		      OCTAVE_LOCAL_BUFFER(float,LBNDA,nitem);
-		      info = recv_vec(comm, LBNDA,nitem ,TRCV ,source, tanktag[4]);
-		      if (info !=MPI_SUCCESS) return info;
-		      FloatNDArray   myNDA(dv);   
-		      for (octave_idx_type i=0; i<nitem; i++)
-			  {
-			      myNDA(i)=LBNDA[i];
-			  }
-			  ov=myNDA;
-		      } 	  
-		else if (TRCV == MPI_FLOAT and is_complex == true)
-		     {
-		      OCTAVE_LOCAL_BUFFER(float,LBNDA1,nitem);
-		      info = recv_vec(comm, LBNDA1,nitem ,TRCV ,source, tanktag[4]);
-		      if (info !=MPI_SUCCESS) return info;		  
-		      OCTAVE_LOCAL_BUFFER(float,LBNDA2,nitem);
-		      info = recv_vec(comm, LBNDA2,nitem ,TRCV ,source, tanktag[5]);
-		      if (info !=MPI_SUCCESS) return info;
-		      FloatComplexNDArray myNDA(dv); 
-			for (octave_idx_type i=0; i<nitem; i++)
-			  {
-			      myNDA(i)=std::complex<float>(LBNDA1[i],LBNDA2[i]);
-			  }
-			  ov=myNDA;
-		      }
-		else if  (TRCV == MPI_BYTE )   
-		      {  	
-			OCTAVE_LOCAL_BUFFER(octave_int8,LBNDA1,nitem);
-			info = recv_vec(comm, LBNDA1,nitem ,TRCV ,source, tanktag[4]);
-			if (info !=MPI_SUCCESS) return info;
-			int8NDArray myNDA(dv);
-			for (octave_idx_type i=0; i<nitem; i++)
-			    {
-				myNDA(i)=LBNDA1[i];
-			    }
-			    ov=myNDA;
-		      }
-		else if (TRCV == MPI_SHORT)  
-		{  	
-		      OCTAVE_LOCAL_BUFFER(octave_int16,LBNDA,nitem);
-		      info = recv_vec(comm, LBNDA,nitem ,TRCV ,source, tanktag[4]);
-		      if (info !=MPI_SUCCESS) return info;
-		      int16NDArray myNDA(dv);
-		      for (octave_idx_type i=0; i<nitem; i++)
-			  {
-			      myNDA(i)=LBNDA[i];
-			  }
-			  ov=myNDA;
-		} 		
-		
-		else if (TRCV == MPI_LONG_LONG)  
-		      {  	
-		      OCTAVE_LOCAL_BUFFER(octave_int64,LBNDA,nitem);
-		      info = recv_vec(comm, LBNDA,nitem ,TRCV ,source, tanktag[4]);
-		      int64NDArray myNDA(dv);
-		      if (info !=MPI_SUCCESS) return info;
-		      for (octave_idx_type i=0; i<nitem; i++)
-			  {
-			      myNDA(i)=LBNDA[i];
-			  }
-			  ov=myNDA;
-		      } 		
-		else if (TRCV == MPI_UNSIGNED_CHAR)  
-		      { 	
-		      OCTAVE_LOCAL_BUFFER(octave_uint8,LBNDA,nitem);
-		      info = recv_vec(comm, LBNDA,nitem ,TRCV ,source, tanktag[4]);
-		      if (info !=MPI_SUCCESS) return info;
-		      uint8NDArray myNDA(dv);
-		      for (octave_idx_type i=0; i<nitem; i++)
-			  {
-			      myNDA(i)=LBNDA[i];
-			  }
-			  ov=myNDA;
-		      } 		
-		else if (TRCV == MPI_UNSIGNED_SHORT) 
-		      {  	
-		      OCTAVE_LOCAL_BUFFER(octave_uint16,LBNDA,nitem);
-		      info = recv_vec(comm, LBNDA,nitem ,TRCV ,source, tanktag[4]);
-		      if (info !=MPI_SUCCESS) return info;
-		      uint16NDArray myNDA(dv);
-		      for (octave_idx_type i=0; i<nitem; i++)
-			  {
-			      myNDA(i)=LBNDA[i];
-			  }
-			  ov=myNDA;
-		      } 		
-		else if (TRCV == MPI_UNSIGNED) { 	
-		      OCTAVE_LOCAL_BUFFER(octave_uint32,LBNDA,nitem);
-		      info = recv_vec(comm, LBNDA,nitem ,TRCV ,source, tanktag[4]);
-		      if (info !=MPI_SUCCESS) return info;
-		      uint32NDArray myNDA(dv);
-		      for (octave_idx_type i=0; i<nitem; i++)
-			  {
-			      myNDA(i)=LBNDA[i];
-			  }
-			ov = myNDA;  
-		      } 		
-		else if (TRCV == MPI_UNSIGNED_LONG_LONG) 
-		      { 	
-		      OCTAVE_LOCAL_BUFFER(octave_uint64,LBNDA,nitem);
-		      info = recv_vec(comm, LBNDA,nitem ,TRCV ,source, tanktag[4]);
-		      if (info !=MPI_SUCCESS) return info;
-		      uint64NDArray myNDA(dv);
-		      for (octave_idx_type i=0; i<nitem; i++)
-			  {
-			      myNDA(i)=LBNDA[i];
-			  } 
-		      ov=myNDA;
-		      }
-return(info);
-
-}
-
-
-int recv_sp_mat(bool is_complex,MPI_Datatype TRcv, MPI_Comm comm, octave_value &ov,int source, int mytag){   
-int info;   
-                
-OCTAVE_LOCAL_BUFFER(int, tanktag,6);
-tanktag[0] = mytag;
-tanktag[1] = mytag+1;
-tanktag[2] = mytag+2;
-tanktag[3] = mytag+3;
-tanktag[4] = mytag+4;
-tanktag[5] = mytag+5;
-
-MPI_Status stat;
-
-OCTAVE_LOCAL_BUFFER(int,s,3);  
-
-// Create a contiguous derived datatype
-MPI_Datatype sintsparse;
-MPI_Type_contiguous(3,MPI_INT, &sintsparse);
-MPI_Type_commit(&sintsparse);
-
-
-
-
-// receive the sintsparse vector named s
-info = MPI_Recv(s, 1, sintsparse, source, tanktag[1], comm, &stat);
-// printf("This is info for sintsparse %i\n",info);
-if (info !=MPI_SUCCESS) return info;
-// MPI_Datatype datavect1;
-// MPI_Type_contiguous(s[2],TRcv, &datavect1);
-// MPI_Type_commit(&datavect1);
-// printf("This is info for sintsparse %i\n");
-// Create a contiguous derived datatype for row and column index
- 
-OCTAVE_LOCAL_BUFFER(int,sridx,s[2]); 
-MPI_Datatype rowindex;
-MPI_Type_contiguous(s[2],MPI_INT, &rowindex);
-MPI_Type_commit(&rowindex);
-
-OCTAVE_LOCAL_BUFFER(int,scidx,s[1]+1); 
-MPI_Datatype columnindex;
-MPI_Type_contiguous(s[1]+1,MPI_INT, &columnindex);
-MPI_Type_commit(&columnindex);
-
-
-      info =  MPI_Recv(sridx,1,rowindex,source,tanktag[2],comm,&stat);
-//        printf("Hope everything is fine here with ridx %i =\n",info);
-      if (info !=MPI_SUCCESS) return info;
-
-// receive the vector with column indexes
-      info =  MPI_Recv(scidx,1,columnindex,source,tanktag[3],comm, &stat);
-//      printf("Hope everything is fine here with scidx %i =\n",info);
-      if (info !=MPI_SUCCESS) return info;
-
-// Now we have a different vector of non zero elements according to datatype
-
-	      if (TRcv == MPI_INT)
-		{  
-		  SparseBoolMatrix m(s[0],s[1],s[2]);
-		  OCTAVE_LOCAL_BUFFER(bool,LBNDA,s[2]);
-		  //Now receive the vector of non zero elements
-		  info = recv_vec(comm, LBNDA,s[2] ,TRcv ,source, tanktag[4]);
-//  		  printf("This is info for vector of non zero elements %i\n",info);
-		  if (info !=MPI_SUCCESS) return info;
-		  for (octave_idx_type i = 0; i < s[1]+1; i++)
-		  {
-		    m.cidx(i) = scidx[i];
-		  }
-		  for (octave_idx_type i = 0; i < s[2]; i++)
-		  {
-		  m.ridx(i) = sridx[i];
-// 		  printf("LBNDA[i]= %f\n",LBNDA[i]);
-		  m.data(i) = LBNDA[i];
-		  }
-		  ov = m;
-		}
-		if (TRcv == MPI_DOUBLE  and is_complex==false)
-		{  
-		  SparseMatrix m(s[0],s[1],s[2]);
-		  OCTAVE_LOCAL_BUFFER(double,LBNDA,s[2]);
-		  //Now receive the vector of non zero elements
-		  info = recv_vec(comm, LBNDA,s[2] ,TRcv ,source, tanktag[4]);
-//  		  printf("This is info for receiving vector of non zero elements %i\n",info);
-		  if (info !=MPI_SUCCESS) return info;
-		  for (octave_idx_type i = 0; i < s[1]+1; i++)
-		  {
-		    m.cidx(i) = scidx[i];
-		  }
-		  for (octave_idx_type i = 0; i < s[2]; i++)
-		  {
-		  m.ridx(i) = sridx[i];
-// 		  printf("LBNDA[i]= %f\n",LBNDA[i]);
-		  m.data(i) = LBNDA[i];
-		  }
-		  ov = m;
-		}
-		if (TRcv == MPI_DOUBLE  and is_complex==true)
-		{  
-		  TRcv = MPI_DOUBLE;
-		  SparseComplexMatrix m(s[0],s[1],s[2]);
-		  OCTAVE_LOCAL_BUFFER(double,LBNDA1,s[2]);
-		  OCTAVE_LOCAL_BUFFER(double,LBNDA2,s[2]);
-		  info = recv_vec(comm, LBNDA1,s[2] ,TRcv ,source, tanktag[4]);
-		  if (info !=MPI_SUCCESS) return info;
-		  info = recv_vec(comm, LBNDA2,s[2] ,TRcv ,source, tanktag[5]);
-		  if (info !=MPI_SUCCESS) return info;		  
-		  for (octave_idx_type i = 0; i < s[1]+1; i++)
-		  {
-		    m.cidx(i) = scidx[i];
-		  }
-		  for (octave_idx_type i = 0; i < s[2]; i++)
-		  {
-		  m.ridx(i) = sridx[i];
-		  m.data(i) = std::complex<double>(LBNDA1[i],LBNDA2[i]);
-		  }
-		  ov = m;
-
-		}
-return(info);
-
-
-}
-int recv_cell(MPI_Comm comm,octave_value &ov, int source, int mytag){
-// Not tested yet
-OCTAVE_LOCAL_BUFFER(int, tanktag, 5);
-tanktag[0] = mytag;
-tanktag[1] = mytag+1;
-tanktag[2] = mytag+2;
-tanktag[3] = mytag+3;
-tanktag[4] = mytag+4;
-
-  int info;
-  int nitem,nd;
-  MPI_Status stat;
-  dim_vector dv;
- 
-//       nitem is the total number of elements 
-      info = MPI_Recv((&nitem), 1,MPI_INT, source, tanktag[1] , comm,&stat);
-//          printf("I have received number of elements  %i \n",nitem);
-      if (info !=MPI_SUCCESS) return info;
-//      ndims is number of dimensions
-          info = MPI_Recv((&nd), 1,MPI_INT, source, tanktag[2] , comm,&stat);
-//            printf("I have received number of dimensions %i \n",nd);
-      if (info !=MPI_SUCCESS) return info;
-//  Now create contiguous datatype for dim vector
-  dv.resize(nd);
-  OCTAVE_LOCAL_BUFFER(int,dimV,nd);
-  MPI_Datatype dimvec;
-  MPI_Type_contiguous(nd,MPI_INT, &dimvec);
-  MPI_Type_commit(&dimvec);
-
-          info = MPI_Recv((dimV), 1,dimvec, source, tanktag[3] , comm,&stat);
-//        printf("I have received number dimension vector and this is the flag .. %i \n",info);
-      if (info !=MPI_SUCCESS) return info;
-
-// Now reverse the content of int vector into dim vector
- for (octave_idx_type i=0; i<nd; i++)
- {
-   
-   dv(i) = dimV[i] ;
-//      printf("I am printing dimvector  %i \n",dimV[i]);
- }
-
-Cell	    oc (dv);
-// Now focus on every single octave_value
-int newtag = tanktag[4];
-int ocap;
-         for (octave_idx_type i=0; i<nitem; i++)
-	    {
-	      octave_value celem;				
-	      info = MPI_Recv((&ocap), 1,MPI_INT, source, newtag , comm,&stat);
-//                printf("I have received the identifier's TAG  of the specific  octave_value %i \n",ocap);
-	      if (info !=MPI_SUCCESS) return info;
-	      newtag = newtag+ocap;
-	      
-// 	       printf("I have received NEWTAG+1  = %i\n",newtag+1);
-	      info=recv_class(comm,celem,source,newtag);
-//                printf("This is info for the specific  octave_value %i \n",info);
-	      if (info !=MPI_SUCCESS) return info;
-	      oc.Array<octave_value>::elem(i)=celem;        
-	    }
-
-ov = oc;
-   if (info !=MPI_SUCCESS) return info;
-   return(MPI_SUCCESS);
-
-}
-
-int recv_struct( MPI_Comm comm, octave_value &ov, int source, int mytag){      
-Octave_map om;
-int n; // map.fields();
-
-OCTAVE_LOCAL_BUFFER(int, tanktag, 2);
-tanktag[0]=mytag; //t_id
-tanktag[1]=mytag+1; // n
-int tagcap = mytag+2;
-int   ntagkey = mytag+3; // string
-int   ctag = mytag + 4; // cell
-  int info;
-  MPI_Status stat;
-  info = MPI_Recv((&n), 1,MPI_INT, source, tanktag[1] , comm,&stat);
-//   printf("I have received n with info = % i \n",info);
-  int scap;  
-  for (int i=0; i<n; i++){			/* nkeys: foreach, get key */
-    octave_value ov_string;
-    ntagkey = ntagkey + 3;
-    info = recv_class(comm, ov_string,source,ntagkey);
-//     printf("I have received the string with info = % i \n",info);
-    std::string key = ov_string.string_value();
-    if( (info!=MPI_SUCCESS) )	return(info);
-    octave_value conts;				/* all elements on this fname */
-//     Receives capacity
-    info = MPI_Recv(&scap, 1,MPI_INT,source,tagcap, comm, &stat);
-//     printf("I have received capacity with info = % i \n",info);
-    tagcap = tagcap+1;
-    ctag = ctag + scap;
-    info = recv_class(comm, conts,source,ctag);
-//      printf("I have received cell with info = % i \n",info);
-    if (! conts.is_cell())			return(MPI_ERR_UNKNOWN);
-    om.assign (key, conts.cell_value());
-  }
-  if (n != om.nfields()){
-// 	  printf("MPI_Recv: inconsistent map length\n");return(MPI_ERR_UNKNOWN);
-  }
-
-  ov=om;
-  
-  return(MPI_SUCCESS);
-}
-
-
-int recv_class(MPI_Comm comm, octave_value &ov, int source, int mytag ){    /* varname-strlength 1st, dims[ndim] */
-/*----------------------------------*/    /* and then appropriate specific info */
-  int t_id;
-  MPI_Status status;
-//       printf("1-> source =%i\n",source);
-//       printf("2-> tag for id =%i\n",mytag);
-     
-  int info = MPI_Recv(&t_id,1, MPI_INT, source,mytag,comm,&status);
-//   printf("3-> t_id =%i\n",t_id);
-   
-   static string_vector pattern = octave_value_typeinfo::installed_type_names ();
-//          printf(" I have received t_id =%i\n",t_id);
-  const std::string tstring = pattern(t_id); 
-//   octave_stdout << "MPI_Recv has " << tstring  << " string argument.\n";
-    if (tstring == "cell")   return(recv_cell ( comm,  ov,source,mytag));
-    if (tstring == "struct") return(recv_struct(comm,  ov,source,mytag)); 
-    if (tstring == "scalar")  {double 	 d=0; MPI_Datatype TRcv = MPI_DOUBLE ;info =(recv_scalar (TRcv,comm, d,source,mytag));ov=d;return(info);};
-    if (tstring == "bool")    {bool 	 b; MPI_Datatype TRcv = MPI_INT;info = (recv_scalar (TRcv,comm, b,source,mytag));   ov=b ;return(info);};
-    if (tstring == "int8 scalar")       {octave_int8   d; MPI_Datatype TRcv = MPI_BYTE;   info = (recv_scalar (TRcv,comm, d,source,mytag)); ov=d ;return(info);};
-    if (tstring == "int16 scalar")        {octave_int16  d; MPI_Datatype TRcv = MPI_SHORT;  info = (recv_scalar (TRcv,comm, d,source,mytag));   ov=d ;return(info);};
-    if (tstring == "int32 scalar")        {octave_int32  d; MPI_Datatype TRcv = MPI_INT;  info = (recv_scalar (TRcv,comm, d,source,mytag));   ov=d ;return(info);};
-    if (tstring == "int64 scalar")        {octave_int64  d; MPI_Datatype TRcv = MPI_LONG_LONG;  info = (recv_scalar (TRcv,comm, d,source,mytag));   ov=d ;return(info);};
-    if (tstring == "uint8 scalar")        {octave_uint8  d; MPI_Datatype TRcv = MPI_UNSIGNED_CHAR;   info = (recv_scalar (TRcv,comm, d,source,mytag));   ov=d ;return(info);}; 
-    if (tstring == "uint16 scalar")       {octave_uint16 d; MPI_Datatype TRcv = MPI_UNSIGNED_SHORT;   info = (recv_scalar (TRcv,comm, d,source,mytag));   ov=d ;return(info);};
-    if (tstring == "uint32 scalar")       {octave_uint32 d; MPI_Datatype TRcv = MPI_UNSIGNED;   info = (recv_scalar (TRcv,comm, d,source,mytag));   ov=d ;return(info);};
-    if (tstring == "uint64 scalar")       {octave_uint64 d; MPI_Datatype TRcv = MPI_UNSIGNED_LONG_LONG;   info = (recv_scalar (TRcv,comm, d,source,mytag));   ov=d ;return(info);};
-    if (tstring == "float scalar")      {float 	 d; MPI_Datatype TRcv = MPI_FLOAT;   info =(recv_scalar (TRcv,comm, d,source,mytag)) ;  ov=d;return(info);};
-    if (tstring == "complex scalar")     {std::complex<double>   d; MPI_Datatype TRcv = MPI_DOUBLE;   info =(recv_scalar (TRcv,comm, d,source,mytag)) ;  ov=d;return(info);};
-    if (tstring == "float complex scalar") {std::complex<float> d; MPI_Datatype TRcv = MPI_FLOAT;   info =(recv_scalar (TRcv,comm, d,source,mytag)) ;  ov=d;return(info);};
-    if (tstring == "string")  return(recv_string (comm, ov,source,mytag));
-    if (tstring == "sq_string") return(recv_string (comm, ov,source,mytag));
-    if (tstring == "range")		 {Range 	       d;    info =(recv_range (comm, d,source,mytag));	ov=d;return(info);};
-    if (tstring == "matrix")    		{ bool is_complex = false;MPI_Datatype TRcv = MPI_DOUBLE; info = recv_matrix (is_complex,TRcv,comm,ov,source,mytag);return(info);}	
-    if (tstring == "complex matrix")		{ bool is_complex = true;MPI_Datatype TRcv = MPI_DOUBLE; info = recv_matrix (is_complex,TRcv,comm,ov,source,mytag);return(info); }
-    if (tstring == "bool matrix")		{ bool is_complex = false;MPI_Datatype TRcv = MPI_INT; info = recv_matrix (is_complex,TRcv,comm,ov,source,mytag);return(info);}  
-    if (tstring == "int8 matrix")  		{ bool is_complex = false;MPI_Datatype TRcv = MPI_BYTE; info = recv_matrix (is_complex,TRcv,comm,ov,source,mytag);return(info);}
-    if (tstring == "int16 matrix") 		{ bool is_complex = false;MPI_Datatype TRcv = MPI_SHORT; info = recv_matrix (is_complex,TRcv,comm,ov,source,mytag);return(info);}
-    if (tstring == "int32 matrix") 		{ bool is_complex = false;MPI_Datatype TRcv = MPI_INT; info = recv_matrix (is_complex,TRcv,comm,ov,source,mytag);return(info);}
-    if (tstring == "int64 matrix") 		{ bool is_complex = false;MPI_Datatype TRcv = MPI_LONG_LONG; info = recv_matrix (is_complex,TRcv,comm,ov,source,mytag);return(info);}
-    if (tstring == "uint8 matrix")		{ bool is_complex = false;MPI_Datatype TRcv = MPI_UNSIGNED_CHAR; info = recv_matrix (is_complex,TRcv,comm,ov,source,mytag);return(info);}
-    if (tstring == "uint16 matrix")		{ bool is_complex = false;MPI_Datatype TRcv = MPI_UNSIGNED_SHORT; info = recv_matrix (is_complex,TRcv,comm,ov,source,mytag);return(info);}
-    if (tstring == "uint32 matrix")		{ bool is_complex = false;MPI_Datatype TRcv = MPI_UNSIGNED; info = recv_matrix (is_complex,TRcv,comm,ov,source,mytag) ;return(info);}
-    if (tstring == "uint64 matrix")		{ bool is_complex = false;MPI_Datatype TRcv = MPI_UNSIGNED_LONG_LONG; info = recv_matrix (is_complex,TRcv,comm,ov,source,mytag) ;return(info);}
-    if (tstring == "float matrix")            	{ bool is_complex = false;MPI_Datatype TRcv = MPI_DOUBLE; info = recv_matrix (is_complex,TRcv,comm, ov,source,mytag) ;return(info);}
-    if (tstring == "float complex matrix")    	{ bool is_complex = true;MPI_Datatype TRcv = MPI_FLOAT; info = recv_matrix(is_complex,TRcv,comm, ov,source,mytag) ;return(info);}
-    if (tstring == "sparse matrix")		{ bool is_complex = false;MPI_Datatype TRcv = MPI_DOUBLE; info = recv_sp_mat(is_complex,TRcv,comm, ov,source,mytag) ;return(info);}
-    if (tstring == "sparse complex matrix")   	{ bool is_complex = true;MPI_Datatype TRcv = MPI_DOUBLE; info = recv_sp_mat(is_complex,TRcv,comm, ov,source,mytag) ;return(info);}			
-    if (tstring == "<unknown type>")    { printf("MPI_Recv: unknown class\n");
-            return(MPI_ERR_UNKNOWN );
-
-	    }
-
-    else    {    printf("MPI_Recv: unsupported class %s\n",
-                     tstring.c_str());
-             return(MPI_ERR_UNKNOWN );
- 	    }
-}
-
-
-
-DEFUN_DLD(MPI_Recv, args, nargout,"-*- texinfo -*-\n\
-@deftypefn {Loadable Function} {} [@var{VALUE} @var{INFO}]= MPI_Recv(@var{SOURCE},@var{TAG},@var{COMM})\n\
-MPI_Recv receives any Octave datatype into contiguous memory using the openmpi library, even over a heterogeneous cluster, i.e. 32-bit and 64-bit CPUs \n\
-Returns @var{VALUE}, the received Octave variable,\n\
-and an integer @var{INFO} indicating success or failure.\n\
- @example\n\
- @group\n\
-@var{SOURCE} must be an integer indicating the source process \n\
-@var{TAG} must be an integer message tag used by openmpi to identify the message \n\
-@var{COMM} must be an Octave communicator object created by the MPI_Comm_Load function \n\
-@end group\n\
-@end example\n\
-@seealso{MPI_Comm_Load,MPI_Init,MPI_Finalize,MPI_Send}\n\
-@end deftypefn")
-{
-     octave_value_list retval;
-  int nargin = args.length ();
-  if (nargin != 3)
-    {
-      error ("expecting 3 input arguments");
-      return retval;
-    }
-
-  if (error_state)
-    return retval;
-
-     int source = args(0).int_value();    
-  if (error_state)
-    {
-      error ("expecting first argument to be an integer");
-      return retval;
-    }
- int mytag = args(1).int_value();
-  if (error_state)
-    {
-      error ("expecting second argument to be an integer");
-      return retval;
-    }
-
-  if (!simple_type_loaded)
-    {
-      simple::register_type ();
-      simple_type_loaded = true;
-      mlock ();
-    }
-
-	if (args(2).type_id()!=simple::static_type_id()){
-		
-		error("Please enter an Octave communicator object!");
-		return octave_value(-1);
-	}
-
-	const octave_base_value& rep = args(2).get_rep();
-        const simple& B = ((const simple &)rep);
-        MPI_Comm comm = ((const simple&) B).comunicator_value ();
-
-
-     octave_value result;
-     int info = recv_class (comm, result,source, mytag );
-     comm= NULL;
-     retval(1) = info;
-     retval(0) = result;
-     return retval;
-   
-}
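To round off the receiver side removed above, a minimal Octave sketch of the documented MPI_Recv calling convention; source rank 0, tag 99 and the communicator label are illustrative, and the MPI_Send argument order shown in the last comment is an assumption, not a quote from the removed docstrings.

    % receiving rank: fetch whatever Octave value rank 0 sent with tag 99
    MPI_Init ();
    comm = MPI_Comm_Load ("acomm");
    [value, info] = MPI_Recv (0, 99, comm);
    if (info != 0)
      error ("MPI_Recv failed with code %d", info);
    endif
    MPI_Finalize ();
    % sending rank (assumed argument order): info = MPI_Send (value, 1, 99, comm);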
--- a/extra/openmpi_ext/src/MPI_Send.cc	Fri Mar 12 14:12:12 2010 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1209 +0,0 @@
-// Copyright (C) 2009 Riccardo Corradini <riccardocorradini@yahoo.it>
-// under the terms of the GNU General Public License.
-// Copyright (C) 2009 VZLU Prague
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation; either version 2 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program; If not, see <http://www.gnu.org/licenses/>.
-
-
-
-
-#include "simple.h"
-#include <ov-cell.h>    // avoid errmsg "cell -- incomplete datatype"
-#include <oct-map.h>    // avoid errmsg "Oct.map -- invalid use undef type"
-
-
-/*----------------------------------*/        /* forward declaration */
-
-
-
-int send_class( MPI_Comm comm, octave_value ov,  ColumnVector rankrec, int mytag);        /* along the datatype */
-
-int send_string(int t_id, MPI_Comm comm, std::string  oi8,ColumnVector rankrec, int mytag);
-
-int send_cell(int t_id, MPI_Comm comm, Cell cell, ColumnVector rankrec, int mytag);
-int send_struct(int t_id, MPI_Comm comm, Octave_map map,ColumnVector rankrec, int mytag);
-
-
-template <class Any>
-int send_scalar(int t_id, MPI_Datatype TSnd,MPI_Comm comm, std::complex<Any> d, ColumnVector rankrec, int mytag);
-
-
-template <class Any>
-int send_scalar(int t_id, MPI_Datatype TSnd, MPI_Comm comm, Any d, ColumnVector rankrec, int mytag);
-
-int send_range(int t_id, MPI_Comm comm, Range range,ColumnVector rankrec, int mytag);
-
-int send_matrix(int t_id,  MPI_Datatype TSnd,MPI_Comm comm, octave_value myOv ,ColumnVector rankrec, int mytag);
-
-
-int send_sp_mat(int t_id, MPI_Datatype TSnd ,MPI_Comm comm, octave_value MyOv ,ColumnVector rankrec, int mytag  );
-
-// template specialization for complex case
-
-template <class Any>
-int send_scalar(int t_id,MPI_Datatype TSnd ,MPI_Comm comm, std::complex<Any> d, ColumnVector rankrec, int mytag){        
-  int info;
-  OCTAVE_LOCAL_BUFFER(int,tanktag,2);
-  tanktag[0] = mytag;
-  tanktag[1] = mytag+1;
-  OCTAVE_LOCAL_BUFFER(Any,Deco,2);
-  Deco[0] = real(d);
-  Deco[1] = imag(d);
-
-      for (octave_idx_type  i = 0; i< rankrec.nelem(); i++)
-      {
-	  info = MPI_Send(&t_id, 1, MPI_INT,  rankrec(i), tanktag[0], comm);
-	  if (info !=MPI_SUCCESS) return info;
-	  info = MPI_Send(Deco, 2,TSnd, rankrec(i), tanktag[1], comm);
-	  if (info !=MPI_SUCCESS) return info;
-      }
-
-   return(info);
-}
-
-
-template <class Any>
-int send_scalar(int t_id, MPI_Datatype TSnd, MPI_Comm comm, Any d, ColumnVector rankrec, int mytag){        
-  int info;
-  OCTAVE_LOCAL_BUFFER(int,tanktag,2);
-  tanktag[0] = mytag;
-  tanktag[1] = mytag+1;
-  
-      for (octave_idx_type  i = 0; i< rankrec.nelem(); i++)
-      {
-	  info = MPI_Send(&t_id, 1, MPI_INT,  rankrec(i), tanktag[0], comm);
-	  if (info !=MPI_SUCCESS) return info;
-	  info = MPI_Send((&d), 1,TSnd, rankrec(i), tanktag[1], comm);
-	  if (info !=MPI_SUCCESS) return info;
-      }
-
-   return(info);
-}
-
-int send_range(int t_id, MPI_Comm comm, Range range,ColumnVector rankrec, int mytag){        /* put base,limit,incr,nelem */
-/*-------------------------------*/        /* just 3 doubles + 1 int */
-// octave_range (double base, double limit, double inc)
-  OCTAVE_LOCAL_BUFFER(int,tanktag,2);
-  tanktag[0] = mytag;
-  tanktag[1] = mytag+1;
-  OCTAVE_LOCAL_BUFFER(double,d,3);
-  d[0]= range.base();
-  d[1]= range.limit();
-  d[2]= range.inc();
-  int info;
-  for (octave_idx_type  i = 0; i< rankrec.nelem(); i++)
-  {
-    info = MPI_Send(&t_id, 1, MPI_INT, rankrec(i), tanktag[0], comm);
-    if (info !=MPI_SUCCESS) return info;
-    info = MPI_Send(d, 3, MPI_DOUBLE, rankrec(i), tanktag[1], comm);
-    if (info !=MPI_SUCCESS) return info;
-  }
-   
-return(MPI_SUCCESS);
-}
-
-
-int send_matrix(int t_id,  MPI_Datatype TSnd,MPI_Comm comm, octave_value myOv ,ColumnVector rankrec, int mytag){       
-  int info;
-  int nitem;
-  dim_vector dv;
-  OCTAVE_LOCAL_BUFFER(int,tanktag,6);
-  tanktag[0] = mytag;
-  tanktag[1] = mytag+1;
-  tanktag[2] = mytag+2;
-  tanktag[3] = mytag+3;
-  tanktag[4] = mytag+4;
-  tanktag[5] = mytag+5;
-  int nd;
-		if (TSnd == MPI_DOUBLE and myOv.is_real_type())
-		{  
-		NDArray myNDA = myOv.array_value();
-		nitem = myNDA.nelem();
-		dv = myNDA.dims();
-		nd = myNDA.ndims();
-		OCTAVE_LOCAL_BUFFER(int,dimV,nd);
-		  for (octave_idx_type i=0; i<nd; i++)
-		  {
-		    dimV[i] = dv(i) ;
-		  }
-		// Now create the contiguous derived datatype for the dim vector
-		MPI_Datatype dimvec;
-		MPI_Type_contiguous(nd,MPI_INT, &dimvec);
-		MPI_Type_commit(&dimvec);
-		
-		OCTAVE_LOCAL_BUFFER(double,LBNDA,nitem);
-    
-
-		for (octave_idx_type i=0; i<nitem; i++)
-		{
-		    LBNDA[i] = myNDA(i) ;
-		}
-
-		// Now create the contiguous derived datatype
-		MPI_Datatype fortvec;
-		MPI_Type_contiguous(nitem,TSnd, &fortvec);
-		MPI_Type_commit(&fortvec);
-		
-		  for (octave_idx_type  i = 0; i< rankrec.nelem(); i++)
-		  {  
-			  info = MPI_Send(&t_id, 1, MPI_INT, rankrec(i), tanktag[0], comm); 
-		      if (info !=MPI_SUCCESS) return info;
-			  info = MPI_Send(&nitem, 1, MPI_INT, rankrec(i), tanktag[1], comm);
-		      if (info !=MPI_SUCCESS) return info;
-			  info = MPI_Send(&nd, 1, MPI_INT, rankrec(i), tanktag[2], comm);
-		      if (info !=MPI_SUCCESS) return info;
-			  info = MPI_Send(dimV, 1, dimvec, rankrec(i), tanktag[3], comm);
-		      if (info !=MPI_SUCCESS) return info;
-		      info =  MPI_Send(LBNDA,1,fortvec,rankrec(i),tanktag[4],comm);
-		      if (info !=MPI_SUCCESS) return info;
-		  }		
-		}
-		else if (TSnd == MPI_DOUBLE and myOv.is_complex_type())
-		{  
-		ComplexNDArray myNDA = myOv.complex_array_value();
-		nitem = myNDA.nelem();
-		dv = myNDA.dims();
-		nd = myNDA.ndims();
-		OCTAVE_LOCAL_BUFFER(int,dimV,nd);
-		  for (octave_idx_type i=0; i<nd; i++)
-		  {
-		    dimV[i] = dv(i) ;
-		  }
-		// Now create the contiguous derived datatype for the dim vector
-		MPI_Datatype dimvec;
-		MPI_Type_contiguous(nd,MPI_INT, &dimvec);
-		MPI_Type_commit(&dimvec);
-		
-		OCTAVE_LOCAL_BUFFER(double,LBNDA1,nitem);
-		OCTAVE_LOCAL_BUFFER(double,LBNDA2,nitem);
-
-		  for (octave_idx_type i=0; i<nitem; i++)
-		  {
-		      LBNDA1[i] = real(myNDA(i));
-		      LBNDA2[i] = imag(myNDA(i));
-		  }
-
-		// Now create the contiguous derived datatype
-		MPI_Datatype fortvec;
-		MPI_Type_contiguous(nitem,TSnd, &fortvec);
-		MPI_Type_commit(&fortvec);
-		
-		  for (octave_idx_type  i = 0; i< rankrec.nelem(); i++)
-		  {  
-			  info = MPI_Send(&t_id, 1, MPI_INT, rankrec(i), tanktag[0], comm); 
-		      if (info !=MPI_SUCCESS) return info;
-			  info = MPI_Send(&nitem, 1, MPI_INT, rankrec(i), tanktag[1], comm);
-		      if (info !=MPI_SUCCESS) return info;
-			  info = MPI_Send(&nd, 1, MPI_INT, rankrec(i), tanktag[2], comm);
-		      if (info !=MPI_SUCCESS) return info;
-			  info = MPI_Send(dimV, 1, dimvec, rankrec(i), tanktag[3], comm);
-		      if (info !=MPI_SUCCESS) return info;
-		      info =  MPI_Send(LBNDA1,1,fortvec,rankrec(i),tanktag[4],comm);
-		      if (info !=MPI_SUCCESS) return info;
-		      info =  MPI_Send(LBNDA2,1,fortvec,rankrec(i),tanktag[5],comm);
-		      if (info !=MPI_SUCCESS) return info;
-
-		  }		
-
-		}
-		else if (TSnd == MPI_INT and myOv.is_bool_type())
-		{  
-		boolNDArray myNDA = myOv.bool_array_value();
-		nitem = myNDA.nelem();
-		dv = myNDA.dims();
-		nd = myNDA.ndims();
-		OCTAVE_LOCAL_BUFFER(int,dimV,nd);
-		  for (octave_idx_type i=0; i<nd; i++)
-		  {
-		    dimV[i] = dv(i) ;
-		  }
-		// Now create the contiguous derived datatype for the dim vector
-		MPI_Datatype dimvec;
-		MPI_Type_contiguous(nd,MPI_INT, &dimvec);
-		MPI_Type_commit(&dimvec);
-		
-		OCTAVE_LOCAL_BUFFER(int,LBNDA,nitem); // int buffer to match MPI_INT element size
-    
-
-		  for (octave_idx_type i=0; i<nitem; i++)
-		  {
-		      LBNDA[i] = myNDA(i) ;
-		  }
-
-		// Now create the contiguous derived datatype
-		MPI_Datatype fortvec;
-		MPI_Type_contiguous(nitem,TSnd, &fortvec);
-		MPI_Type_commit(&fortvec);
-		
-		  for (octave_idx_type  i = 0; i< rankrec.nelem(); i++)
-		  {  
-			  info = MPI_Send(&t_id, 1, MPI_INT, rankrec(i), tanktag[0], comm); 
-		      if (info !=MPI_SUCCESS) return info;
-			  info = MPI_Send(&nitem, 1, MPI_INT, rankrec(i), tanktag[1], comm);
-		      if (info !=MPI_SUCCESS) return info;
-			  info = MPI_Send(&nd, 1, MPI_INT, rankrec(i), tanktag[2], comm);
-		      if (info !=MPI_SUCCESS) return info;
-			  info = MPI_Send(dimV, 1, dimvec, rankrec(i), tanktag[3], comm);
-		      if (info !=MPI_SUCCESS) return info;
-		      info =  MPI_Send(LBNDA,1,fortvec,rankrec(i),tanktag[4],comm);
-		      if (info !=MPI_SUCCESS) return info;
-		  }		
-
-		}
-		else if(TSnd == MPI_FLOAT and myOv.is_float_type() and ! myOv.is_complex_type())
-		{  
-		  FloatNDArray myNDA = myOv.float_array_value();
-		  nitem = myNDA.nelem();
-		  dv = myNDA.dims();
-		  nd = myNDA.ndims();
-		  OCTAVE_LOCAL_BUFFER(int,dimV,nd);
-		  for (octave_idx_type i=0; i<nd; i++)
-		  {
-		    dimV[i] = dv(i) ;
-		  }
-		  // Now create the contiguous derived datatype for the dim vector
-		  MPI_Datatype dimvec;
-		  MPI_Type_contiguous(nd,MPI_INT, &dimvec);
-		  MPI_Type_commit(&dimvec);
-		  
-		  OCTAVE_LOCAL_BUFFER(float,LBNDA,nitem);
-      
-
-		  for (octave_idx_type i=0; i<nitem; i++)
-		  {
-		      LBNDA[i] = myNDA(i) ;
-		  }
-
-		  // Now create the contiguous derived datatype
-		  MPI_Datatype fortvec;
-		  MPI_Type_contiguous(nitem,TSnd, &fortvec);
-		  MPI_Type_commit(&fortvec);
-		  
-		    for (octave_idx_type  i = 0; i< rankrec.nelem(); i++)
-		    {  
-			    info = MPI_Send(&t_id, 1, MPI_INT, rankrec(i), tanktag[0], comm); 
-			if (info !=MPI_SUCCESS) return info;
-			    info = MPI_Send(&nitem, 1, MPI_INT, rankrec(i), tanktag[1], comm);
-			if (info !=MPI_SUCCESS) return info;
-			    info = MPI_Send(&nd, 1, MPI_INT, rankrec(i), tanktag[2], comm);
-			if (info !=MPI_SUCCESS) return info;
-			    info = MPI_Send(dimV, 1, dimvec, rankrec(i), tanktag[3], comm);
-			if (info !=MPI_SUCCESS) return info;
-			info =  MPI_Send(LBNDA,1,fortvec,rankrec(i),tanktag[4],comm);
-			if (info !=MPI_SUCCESS) return info;
-		    }		
-
-		
-		}
-		else if(TSnd == MPI_FLOAT and myOv.is_complex_type())
-		{  
-		  FloatComplexNDArray myNDA = myOv.float_complex_array_value();
-		  nitem = myNDA.nelem();
-		  dv = myNDA.dims();
-		  nd = myNDA.ndims();
-		  OCTAVE_LOCAL_BUFFER(int,dimV,nd);
-		  for (octave_idx_type i=0; i<nd; i++)
-		  {
-		    dimV[i] = dv(i) ;
-		  }
-		  // Now create the contiguous derived datatype for the dim vector
-		  MPI_Datatype dimvec;
-		  MPI_Type_contiguous(nd,MPI_INT, &dimvec);
-		  MPI_Type_commit(&dimvec);
-		  
-		  OCTAVE_LOCAL_BUFFER(float,LBNDA1,nitem);
-		  OCTAVE_LOCAL_BUFFER(float,LBNDA2,nitem);
-
-		  for (octave_idx_type i=0; i<nitem; i++)
-		  {
-		      LBNDA1[i] = real(myNDA(i));
-		      LBNDA2[i] = imag(myNDA(i));
-		  }
-
-		  // Now create the contiguous derived datatype
-		  MPI_Datatype fortvec;
-		  MPI_Type_contiguous(nitem,TSnd, &fortvec);
-		  MPI_Type_commit(&fortvec);
-		  
-		    for (octave_idx_type  i = 0; i< rankrec.nelem(); i++)
-		    {  
-			    info = MPI_Send(&t_id, 1, MPI_INT, rankrec(i), tanktag[0], comm); 
-			if (info !=MPI_SUCCESS) return info;
-			    info = MPI_Send(&nitem, 1, MPI_INT, rankrec(i), tanktag[1], comm);
-			if (info !=MPI_SUCCESS) return info;
-			    info = MPI_Send(&nd, 1, MPI_INT, rankrec(i), tanktag[2], comm);
-			if (info !=MPI_SUCCESS) return info;
-			    info = MPI_Send(dimV, 1, dimvec, rankrec(i), tanktag[3], comm);
-			if (info !=MPI_SUCCESS) return info;
-			info =  MPI_Send(LBNDA1,1,fortvec,rankrec(i),tanktag[4],comm);
-			if (info !=MPI_SUCCESS) return info;
-			info =  MPI_Send(LBNDA2,1,fortvec,rankrec(i),tanktag[5],comm);
-			if (info !=MPI_SUCCESS) return info;
-		    }		
-
-		}
-		else if(TSnd == MPI_BYTE and myOv.is_int8_type())
-		{   
-		int8NDArray myNDA = myOv.array_value();
-		nitem = myNDA.nelem();
-		dv = myNDA.dims();
-		nd = myNDA.ndims();
-		OCTAVE_LOCAL_BUFFER(int,dimV,nd);
-		for (octave_idx_type i=0; i<nd; i++)
-		{
-		  dimV[i] = dv(i) ;
-		}
-		// Now create the contiguous derived datatype for the dim vector
-		MPI_Datatype dimvec;
-		MPI_Type_contiguous(nd,MPI_INT, &dimvec);
-		MPI_Type_commit(&dimvec);
-		
-		OCTAVE_LOCAL_BUFFER(octave_int8,LBNDA,nitem);
-    
-
-		for (octave_idx_type i=0; i<nitem; i++)
-		{
-		    LBNDA[i] = myNDA(i) ;
-		}
-
-		// Now create the contiguous derived datatype
-		MPI_Datatype fortvec;
-		MPI_Type_contiguous(nitem,TSnd, &fortvec);
-		MPI_Type_commit(&fortvec);
-		
-		  for (octave_idx_type  i = 0; i< rankrec.nelem(); i++)
-		  {  
-			  info = MPI_Send(&t_id, 1, MPI_INT, rankrec(i), tanktag[0], comm); 
-		      if (info !=MPI_SUCCESS) return info;
-			  info = MPI_Send(&nitem, 1, MPI_INT, rankrec(i), tanktag[1], comm);
-		      if (info !=MPI_SUCCESS) return info;
-			  info = MPI_Send(&nd, 1, MPI_INT, rankrec(i), tanktag[2], comm);
-		      if (info !=MPI_SUCCESS) return info;
-			  info = MPI_Send(dimV, 1, dimvec, rankrec(i), tanktag[3], comm);
-		      if (info !=MPI_SUCCESS) return info;
-		      info =  MPI_Send(LBNDA,1,fortvec,rankrec(i),tanktag[4],comm);
-		      if (info !=MPI_SUCCESS) return info;
-		  }		
-		
-		}
-		else if(TSnd == MPI_SHORT and myOv.is_int16_type())
-		{  
-		int16NDArray myNDA = myOv.array_value();
-		nitem = myNDA.nelem();
-		dv = myNDA.dims();
-		nd = myNDA.ndims();
-		OCTAVE_LOCAL_BUFFER(int,dimV,nd);
-		for (octave_idx_type i=0; i<nd; i++)
-		{
-		  dimV[i] = dv(i) ;
-		}
-		// Now create the contiguous derived datatype for the dim vector
-		MPI_Datatype dimvec;
-		MPI_Type_contiguous(nd,MPI_INT, &dimvec);
-		MPI_Type_commit(&dimvec);
-		
-		OCTAVE_LOCAL_BUFFER(octave_int16,LBNDA,nitem);
-    
-
-		for (octave_idx_type i=0; i<nitem; i++)
-		{
-		    LBNDA[i] = myNDA(i) ;
-		}
-
-		// Now create the contiguous derived datatype
-		MPI_Datatype fortvec;
-		MPI_Type_contiguous(nitem,TSnd, &fortvec);
-		MPI_Type_commit(&fortvec);
-		
-		  for (octave_idx_type  i = 0; i< rankrec.nelem(); i++)
-		  {  
-			  info = MPI_Send(&t_id, 1, MPI_INT, rankrec(i), tanktag[0], comm); 
-		      if (info !=MPI_SUCCESS) return info;
-			  info = MPI_Send(&nitem, 1, MPI_INT, rankrec(i), tanktag[1], comm);
-		      if (info !=MPI_SUCCESS) return info;
-			  info = MPI_Send(&nd, 1, MPI_INT, rankrec(i), tanktag[2], comm);
-		      if (info !=MPI_SUCCESS) return info;
-			  info = MPI_Send(dimV, 1, dimvec, rankrec(i), tanktag[3], comm);
-		      if (info !=MPI_SUCCESS) return info;
-		      info =  MPI_Send(LBNDA,1,fortvec,rankrec(i),tanktag[4],comm);
-		      if (info !=MPI_SUCCESS) return info;
-		  }		
-
-		}
-		else if (TSnd == MPI_INT and myOv.is_int32_type())
-		{   
-		int32NDArray myNDA = myOv.array_value();
-		nitem = myNDA.nelem();
-		dv = myNDA.dims();
-		nd = myNDA.ndims();
-		OCTAVE_LOCAL_BUFFER(int,dimV,nd);
-		for (octave_idx_type i=0; i<nd; i++)
-		{
-		  dimV[i] = dv(i) ;
-		}
-		// Now create the contiguous derived datatype for the dim vector
-		MPI_Datatype dimvec;
-		MPI_Type_contiguous(nd,MPI_INT, &dimvec);
-		MPI_Type_commit(&dimvec);
-		
-		OCTAVE_LOCAL_BUFFER(octave_int32,LBNDA,nitem);
-    
-
-		for (octave_idx_type i=0; i<nitem; i++)
-		{
-		    LBNDA[i] = myNDA(i) ;
-		}
-
-		// Now create the contiguous derived datatype
-		MPI_Datatype fortvec;
-		MPI_Type_contiguous(nitem,TSnd, &fortvec);
-		MPI_Type_commit(&fortvec);
-		
-		  for (octave_idx_type  i = 0; i< rankrec.nelem(); i++)
-		  {  
-			  info = MPI_Send(&t_id, 1, MPI_INT, rankrec(i), tanktag[0], comm); 
-		      if (info !=MPI_SUCCESS) return info;
-			  info = MPI_Send(&nitem, 1, MPI_INT, rankrec(i), tanktag[1], comm);
-		      if (info !=MPI_SUCCESS) return info;
-			  info = MPI_Send(&nd, 1, MPI_INT, rankrec(i), tanktag[2], comm);
-		      if (info !=MPI_SUCCESS) return info;
-			  info = MPI_Send(dimV, 1, dimvec, rankrec(i), tanktag[3], comm);
-		      if (info !=MPI_SUCCESS) return info;
-		      info =  MPI_Send(LBNDA,1,fortvec,rankrec(i),tanktag[4],comm);
-		      if (info !=MPI_SUCCESS) return info;
-		  }		
-
-		}
-		else if(TSnd == MPI_LONG_LONG and myOv.is_int64_type())
-		{   
-		int64NDArray myNDA = myOv.array_value();
-		nitem = myNDA.nelem();
-		dv = myNDA.dims();
-		nd = myNDA.ndims();
-		OCTAVE_LOCAL_BUFFER(int,dimV,nd);
-		for (octave_idx_type i=0; i<nd; i++)
-		{
-		  dimV[i] = dv(i) ;
-		}
-		// Now create the contiguous derived datatype for the dim vector
-		MPI_Datatype dimvec;
-		MPI_Type_contiguous(nd,MPI_INT, &dimvec);
-		MPI_Type_commit(&dimvec);
-		
-		OCTAVE_LOCAL_BUFFER(octave_int64,LBNDA,nitem);
-    
-
-		for (octave_idx_type i=0; i<nitem; i++)
-		{
-		    LBNDA[i] = myNDA(i) ;
-		}
-
-		// Now create the contiguous derived datatype
-		MPI_Datatype fortvec;
-		MPI_Type_contiguous(nitem,TSnd, &fortvec);
-		MPI_Type_commit(&fortvec);
-		
-		  for (octave_idx_type  i = 0; i< rankrec.nelem(); i++)
-		  {  
-			  info = MPI_Send(&t_id, 1, MPI_INT, rankrec(i), tanktag[0], comm); 
-		      if (info !=MPI_SUCCESS) return info;
-			  info = MPI_Send(&nitem, 1, MPI_INT, rankrec(i), tanktag[1], comm);
-		      if (info !=MPI_SUCCESS) return info;
-			  info = MPI_Send(&nd, 1, MPI_INT, rankrec(i), tanktag[2], comm);
-		      if (info !=MPI_SUCCESS) return info;
-			  info = MPI_Send(dimV, 1, dimvec, rankrec(i), tanktag[3], comm);
-		      if (info !=MPI_SUCCESS) return info;
-		      info =  MPI_Send(LBNDA,1,fortvec,rankrec(i),tanktag[4],comm);
-		      if (info !=MPI_SUCCESS) return info;
-		  }		
-
-		}
-		else if(TSnd == MPI_UNSIGNED_CHAR and myOv.is_uint8_type()) 
-		{  
-		uint8NDArray myNDA = myOv.array_value();
-		nitem = myNDA.nelem();
-		dv = myNDA.dims();
-		nd = myNDA.ndims();
-		OCTAVE_LOCAL_BUFFER(int,dimV,nd);
-		for (octave_idx_type i=0; i<nd; i++)
-		{
-		  dimV[i] = dv(i) ;
-		}
-		// Now create the contiguous derived datatype for the dim vector
-		MPI_Datatype dimvec;
-		MPI_Type_contiguous(nd,MPI_INT, &dimvec);
-		MPI_Type_commit(&dimvec);
-		
-		OCTAVE_LOCAL_BUFFER(octave_uint8,LBNDA,nitem);
-    
-
-		for (octave_idx_type i=0; i<nitem; i++)
-		{
-		    LBNDA[i] = myNDA(i) ;
-		}
-
-		// Now create the contiguous derived datatype
-		MPI_Datatype fortvec;
-		MPI_Type_contiguous(nitem,TSnd, &fortvec);
-		MPI_Type_commit(&fortvec);
-		
-		  for (octave_idx_type  i = 0; i< rankrec.nelem(); i++)
-		  {  
-			  info = MPI_Send(&t_id, 1, MPI_INT, rankrec(i), tanktag[0], comm); 
-		      if (info !=MPI_SUCCESS) return info;
-			  info = MPI_Send(&nitem, 1, MPI_INT, rankrec(i), tanktag[1], comm);
-		      if (info !=MPI_SUCCESS) return info;
-			  info = MPI_Send(&nd, 1, MPI_INT, rankrec(i), tanktag[2], comm);
-		      if (info !=MPI_SUCCESS) return info;
-			  info = MPI_Send(dimV, 1, dimvec, rankrec(i), tanktag[3], comm);
-		      if (info !=MPI_SUCCESS) return info;
-		      info =  MPI_Send(LBNDA,1,fortvec,rankrec(i),tanktag[4],comm);
-		      if (info !=MPI_SUCCESS) return info;
-		  }		
-
-		}
-		else if(TSnd == MPI_UNSIGNED_SHORT and myOv.is_uint16_type())
-		{  
-		uint16NDArray myNDA = myOv.array_value();
-		nitem = myNDA.nelem();
-		dv = myNDA.dims();
-		nd = myNDA.ndims();
-		OCTAVE_LOCAL_BUFFER(int,dimV,nd);
-		for (octave_idx_type i=0; i<nd; i++)
-		{
-		  dimV[i] = dv(i) ;
-		}
-		// Now create the contiguous derived datatype for the dim vector
-		MPI_Datatype dimvec;
-		MPI_Type_contiguous(nd,MPI_INT, &dimvec);
-		MPI_Type_commit(&dimvec);
-		
-		OCTAVE_LOCAL_BUFFER(octave_uint16,LBNDA,nitem);
-    
-
-		for (octave_idx_type i=0; i<nitem; i++)
-		{
-		    LBNDA[i] = myNDA(i) ;
-		}
-
-		// Now create the contiguous derived datatype
-		MPI_Datatype fortvec;
-		MPI_Type_contiguous(nitem,TSnd, &fortvec);
-		MPI_Type_commit(&fortvec);
-		
-		  for (octave_idx_type  i = 0; i< rankrec.nelem(); i++)
-		  {  
-			  info = MPI_Send(&t_id, 1, MPI_INT, rankrec(i), tanktag[0], comm); 
-		      if (info !=MPI_SUCCESS) return info;
-			  info = MPI_Send(&nitem, 1, MPI_INT, rankrec(i), tanktag[1], comm);
-		      if (info !=MPI_SUCCESS) return info;
-			  info = MPI_Send(&nd, 1, MPI_INT, rankrec(i), tanktag[2], comm);
-		      if (info !=MPI_SUCCESS) return info;
-			  info = MPI_Send(dimV, 1, dimvec, rankrec(i), tanktag[3], comm);
-		      if (info !=MPI_SUCCESS) return info;
-		      info =  MPI_Send(LBNDA,1,fortvec,rankrec(i),tanktag[4],comm);
-		      if (info !=MPI_SUCCESS) return info;
-		  }		
-
-		}
-		else if(TSnd == MPI_UNSIGNED and myOv.is_uint32_type())
-		{   
-		uint32NDArray myNDA = myOv.array_value();
-		nitem = myNDA.nelem();
-		dv = myNDA.dims();
-		nd = myNDA.ndims();
-		OCTAVE_LOCAL_BUFFER(int,dimV,nd);
-		for (octave_idx_type i=0; i<nd; i++)
-		{
-		  dimV[i] = dv(i) ;
-		}
-		// Now create the contiguous derived datatype for the dim vector
-		MPI_Datatype dimvec;
-		MPI_Type_contiguous(nd,MPI_INT, &dimvec);
-		MPI_Type_commit(&dimvec);
-		
-		OCTAVE_LOCAL_BUFFER(octave_uint32,LBNDA,nitem);
-    
-
-		for (octave_idx_type i=0; i<nitem; i++)
-		{
-		    LBNDA[i] = myNDA(i) ;
-		}
-
-		// Now create the contiguous derived datatype
-		MPI_Datatype fortvec;
-		MPI_Type_contiguous(nitem,TSnd, &fortvec);
-		MPI_Type_commit(&fortvec);
-		
-		  for (octave_idx_type  i = 0; i< rankrec.nelem(); i++)
-		  {  
-			  info = MPI_Send(&t_id, 1, MPI_INT, rankrec(i), tanktag[0], comm); 
-		      if (info !=MPI_SUCCESS) return info;
-			  info = MPI_Send(&nitem, 1, MPI_INT, rankrec(i), tanktag[1], comm);
-		      if (info !=MPI_SUCCESS) return info;
-			  info = MPI_Send(&nd, 1, MPI_INT, rankrec(i), tanktag[2], comm);
-		      if (info !=MPI_SUCCESS) return info;
-			  info = MPI_Send(dimV, 1, dimvec, rankrec(i), tanktag[3], comm);
-		      if (info !=MPI_SUCCESS) return info;
-		      info =  MPI_Send(LBNDA,1,fortvec,rankrec(i),tanktag[4],comm);
-		      if (info !=MPI_SUCCESS) return info;
-		  }		
-
-		}
-		else if(TSnd== MPI_UNSIGNED_LONG_LONG and myOv.is_uint64_type())
-		{  
-		    uint64NDArray myNDA = myOv.uint64_array_value();
-		    nitem = myNDA.nelem();
-		    dv = myNDA.dims();
-		    nd = myNDA.ndims();
-		    OCTAVE_LOCAL_BUFFER(int,dimV,nd);
-		    for (octave_idx_type i=0; i<nd; i++)
-		    {
-		      dimV[i] = dv(i) ;
-		    }
-		    // Now create the contiguous derived datatype for the dim vector
-		    MPI_Datatype dimvec;
-		    MPI_Type_contiguous(nd,MPI_INT, &dimvec);
-		    MPI_Type_commit(&dimvec);
-		    
-		    OCTAVE_LOCAL_BUFFER(octave_uint64,LBNDA,nitem);
-	
-
-		    for (octave_idx_type i=0; i<nitem; i++)
-		    {
-			LBNDA[i] = myNDA(i) ;
-		    }
-
-		    // Now create the contiguous derived datatype
-		    MPI_Datatype fortvec;
-		    MPI_Type_contiguous(nitem,TSnd, &fortvec);
-		    MPI_Type_commit(&fortvec);
-		    
-		      for (octave_idx_type  i = 0; i< rankrec.nelem(); i++)
-		      {  
-			      info = MPI_Send(&t_id, 1, MPI_INT, rankrec(i), tanktag[0], comm); 
-			  if (info !=MPI_SUCCESS) return info;
-			      info = MPI_Send(&nitem, 1, MPI_INT, rankrec(i), tanktag[1], comm);
-			  if (info !=MPI_SUCCESS) return info;
-			      info = MPI_Send(&nd, 1, MPI_INT, rankrec(i), tanktag[2], comm);
-			  if (info !=MPI_SUCCESS) return info;
-			      info = MPI_Send(dimV, 1, dimvec, rankrec(i), tanktag[3], comm);
-			  if (info !=MPI_SUCCESS) return info;
-			  info =  MPI_Send(LBNDA,1,fortvec,rankrec(i),tanktag[4],comm);
-			  if (info !=MPI_SUCCESS) return info;
-		      }		
-
-                 }
-
- 
-return(info);
-}
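All of the branches above follow the same five-message protocol: type id, element
count, number of dimensions, the dimension vector, and finally the data as a
contiguous derived datatype.  A minimal receive-side sketch for the uint32 case is
shown below for illustration; it is not part of this file or of the package's
MPI_Recv.cc, and the source and mytag arguments are assumed to match the sending
rank and tag.

static int
recv_uint32_array (MPI_Comm comm, int source, int mytag, uint32NDArray &out)
{
  MPI_Status stat;
  int t_id, nitem, nd;

  // Same order and tags as send_matrix: t_id, nitem, nd, dim vector, data.
  int info = MPI_Recv (&t_id, 1, MPI_INT, source, mytag, comm, &stat);
  if (info != MPI_SUCCESS) return info;
  info = MPI_Recv (&nitem, 1, MPI_INT, source, mytag + 1, comm, &stat);
  if (info != MPI_SUCCESS) return info;
  info = MPI_Recv (&nd, 1, MPI_INT, source, mytag + 2, comm, &stat);
  if (info != MPI_SUCCESS) return info;

  OCTAVE_LOCAL_BUFFER (int, dimV, nd);
  info = MPI_Recv (dimV, nd, MPI_INT, source, mytag + 3, comm, &stat);
  if (info != MPI_SUCCESS) return info;

  dim_vector dv;
  dv.resize (nd);
  for (int k = 0; k < nd; k++)
    dv(k) = dimV[k];

  // Receiving nitem MPI_UNSIGNED elements matches the contiguous datatype
  // of nitem MPI_UNSIGNED committed on the sending side.
  OCTAVE_LOCAL_BUFFER (octave_uint32, LBNDA, nitem);
  info = MPI_Recv (LBNDA, nitem, MPI_UNSIGNED, source, mytag + 4, comm, &stat);
  if (info != MPI_SUCCESS) return info;

  out = uint32NDArray (dv);
  for (octave_idx_type k = 0; k < nitem; k++)
    out(k) = LBNDA[k];

  return info;
}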
-
-
-int send_sp_mat(int t_id, MPI_Datatype TSnd ,MPI_Comm comm, octave_value MyOv ,ColumnVector rankrec, int mytag  ){
-int info;
-OCTAVE_LOCAL_BUFFER(int,tanktag,6);
-tanktag[0] = mytag;
-tanktag[1] = mytag+1;
-tanktag[2] = mytag+2;
-tanktag[3] = mytag+3;
-tanktag[4] = mytag+4;
-tanktag[5] = mytag+5;
-
-// printf("I will send this t_id=%i\n",t_id);
-  
-		if(TSnd == MPI_INT and MyOv.is_bool_type())
-		 {  
-		  TSnd = MPI_INT;
-		  OCTAVE_LOCAL_BUFFER(int,s,3); 
-		  SparseBoolMatrix m = MyOv.sparse_bool_matrix_value();
-		  s[0]= m.rows();
-		  s[1]= m.cols();
-		  s[2]= m.capacity();
-
-		  // Create a contiguous derived datatype
-		  MPI_Datatype sintsparse;
-		  MPI_Type_contiguous(3,MPI_INT, &sintsparse);
-		  MPI_Type_commit(&sintsparse);
-
-
-		  MPI_Datatype rowindex;
-		  MPI_Type_contiguous(m.capacity(),MPI_INT, &rowindex);
-		  MPI_Type_commit(&rowindex);
-
-		  MPI_Datatype columnindex;
-		  MPI_Type_contiguous(m.cols()+1,MPI_INT, &columnindex);
-		  MPI_Type_commit(&columnindex);
-
-		  OCTAVE_LOCAL_BUFFER( int ,sridx,m.capacity());
-		  OCTAVE_LOCAL_BUFFER( int ,scidx,m.cols()+1);
-		  
-		  for (octave_idx_type ix = 0; ix < m.cols()+1; ix++)
-		  {
-		  scidx[ix]= m.cidx(ix);   
-		  }
-		  OCTAVE_LOCAL_BUFFER(bool ,sdata,m.capacity());
-		  // Fill them with their respective value
-		  for (octave_idx_type ix = 0; ix < m.capacity(); ix++)
-		  {
-		      sdata[ix]= m.data(ix);
-		      sridx[ix]= m.ridx(ix);
-		  }
-		  MPI_Datatype numnnz;
-		  MPI_Type_contiguous(m.capacity(),TSnd, &numnnz);
-		  MPI_Type_commit(&numnnz);
-
-		  for (octave_idx_type  i = 0; i< rankrec.nelem(); i++)
-		  {
-			  info = MPI_Send(&t_id, 1, MPI_INT, rankrec(i), tanktag[0], comm);
-		      if (info !=MPI_SUCCESS) return info;
-			  info = MPI_Send(s, 1, sintsparse, rankrec(i), tanktag[1], comm);
-		      if (info !=MPI_SUCCESS) return info;
-		      info =  MPI_Send(sridx,1,rowindex,rankrec(i),tanktag[2],comm);
-		      if (info !=MPI_SUCCESS) return info;
-		      info =  MPI_Send(scidx,1,columnindex,rankrec(i),tanktag[3],comm);
-		      if (info !=MPI_SUCCESS) return info;
-		      info =  MPI_Send(sdata,1,numnnz,rankrec(i),tanktag[4],comm);
-		      if (info !=MPI_SUCCESS) return info;
-		  }
-		 }
-		else if (TSnd == MPI_DOUBLE and MyOv.is_real_type())
-		 { 
-		  TSnd = MPI_DOUBLE;
-		  SparseMatrix m = MyOv.sparse_matrix_value();
-		  OCTAVE_LOCAL_BUFFER(int,s,3);  
-		  s[0]= m.rows();
-		  s[1]= m.cols();
-		  s[2]= m.capacity();
-
-		  // Create a contiguous derived datatype
-		  MPI_Datatype sintsparse;
-		  MPI_Type_contiguous(3,MPI_INT, &sintsparse);
-		  MPI_Type_commit(&sintsparse);
-
-
-		  MPI_Datatype rowindex;
-		  MPI_Type_contiguous(m.capacity(),MPI_INT, &rowindex);
-		  MPI_Type_commit(&rowindex);
-
-		  MPI_Datatype columnindex;
-		  MPI_Type_contiguous(m.cols()+1,MPI_INT, &columnindex);
-		  MPI_Type_commit(&columnindex);
-
-		  OCTAVE_LOCAL_BUFFER( int ,sridx,m.capacity());
-		  OCTAVE_LOCAL_BUFFER( int ,scidx,m.cols()+1);
-		  
-		  for (octave_idx_type ix = 0; ix < m.cols()+1; ix++)
-		  {
-		  scidx[ix]= m.cidx(ix);   
-		  }
-		  OCTAVE_LOCAL_BUFFER(double ,sdata,m.capacity());
-		  // Fill them with their respective value
-		  for (octave_idx_type ix = 0; ix < m.capacity(); ix++)
-		  {
-		      sdata[ix]= m.data(ix);
-		      sridx[ix]= m.ridx(ix);
-		  }
-		  MPI_Datatype numnnz;
-		  MPI_Type_contiguous(m.capacity(),TSnd, &numnnz);
-		  MPI_Type_commit(&numnnz);
-
-		  for (octave_idx_type  i = 0; i< rankrec.nelem(); i++)
-		  {
-			  info = MPI_Send(&t_id, 1, MPI_INT, rankrec(i), tanktag[0], comm);
-// 			  printf("This is info for sending t_id =%i\n",info);
-		      if (info !=MPI_SUCCESS) return info;
-			  info = MPI_Send(s, 1, sintsparse, rankrec(i), tanktag[1], comm);
-// 			  printf("This is info for sending s=%i\n",info);
-		      if (info !=MPI_SUCCESS) return info;
-		      info =  MPI_Send(sridx,1,rowindex,rankrec(i),tanktag[2],comm);
-// 		      printf("This is info for sending sridx=%i\n",info);
-		      if (info !=MPI_SUCCESS) return info;
-		      info =  MPI_Send(scidx,1,columnindex,rankrec(i),tanktag[3],comm);
-// 		      printf("This is info for sending scidx=%i\n",info);
-		      if (info !=MPI_SUCCESS) return info;
-		      info =  MPI_Send(sdata,1,numnnz,rankrec(i),tanktag[4],comm);
-// 		      printf("This is info for sending sdata=%i\n",info);
-		      if (info !=MPI_SUCCESS) return info;
-		  }		
-		 }
-		else if (TSnd == MPI_DOUBLE and MyOv.is_complex_type())
-		 { 
-		  SparseComplexMatrix m = MyOv.sparse_complex_matrix_value();
-		  OCTAVE_LOCAL_BUFFER(int,s,3);  
-		  s[0]= m.rows();
-		  s[1]= m.cols();
-		  s[2]= m.capacity();
-
-		  // Create a contiguous derived datatype
-		  MPI_Datatype sintsparse;
-		  MPI_Type_contiguous(3,MPI_INT, &sintsparse);
-		  MPI_Type_commit(&sintsparse);
-
-
-		  MPI_Datatype rowindex;
-		  MPI_Type_contiguous(m.capacity(),MPI_INT, &rowindex);
-		  MPI_Type_commit(&rowindex);
-
-		  MPI_Datatype columnindex;
-		  MPI_Type_contiguous(m.cols()+1,MPI_INT, &columnindex);
-		  MPI_Type_commit(&columnindex);
-
-		  OCTAVE_LOCAL_BUFFER( int ,sridx,m.capacity());
-		  OCTAVE_LOCAL_BUFFER( int ,scidx,m.cols()+1);
-		  
-		  for (octave_idx_type ix = 0; ix < m.cols()+1; ix++)
-		  {
-		  scidx[ix]= m.cidx(ix);   
-		  }
-		  OCTAVE_LOCAL_BUFFER(double ,sdata1,m.capacity());
-		  OCTAVE_LOCAL_BUFFER(double ,sdata2,m.capacity());
-		  // Fill them with their respective value
-		  for (octave_idx_type ix = 0; ix < m.capacity(); ix++)
-		  {
-		      sdata1[ix]= real(m.data(ix));
-		      sdata2[ix]= imag(m.data(ix));
-		      sridx[ix]= m.ridx(ix);
-		  }
-		  MPI_Datatype numnnz;
-		  MPI_Type_contiguous(m.capacity(),TSnd, &numnnz);
-		  MPI_Type_commit(&numnnz);
-
-		  for (octave_idx_type  i = 0; i< rankrec.nelem(); i++)
-		  {
-			  info = MPI_Send(&t_id, 1, MPI_INT, rankrec(i), tanktag[0], comm);
-		      if (info !=MPI_SUCCESS) return info;
-			  info = MPI_Send(s, 1, sintsparse, rankrec(i), tanktag[1], comm);
-		      if (info !=MPI_SUCCESS) return info;
-		      info =  MPI_Send(sridx,1,rowindex,rankrec(i),tanktag[2],comm);
-		      if (info !=MPI_SUCCESS) return info;
-		      info =  MPI_Send(scidx,1,columnindex,rankrec(i),tanktag[3],comm);
-		      if (info !=MPI_SUCCESS) return info;
-		      info =  MPI_Send(sdata1,1,numnnz,rankrec(i),tanktag[4],comm);
-		      if (info !=MPI_SUCCESS) return info;
-		      info =  MPI_Send(sdata2,1,numnnz,rankrec(i),tanktag[5],comm); // imaginary part
-		      if (info !=MPI_SUCCESS) return info;
-		  }
-		 }
-return(info);
-}
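The sparse branches share a common wire format: a three-integer header (rows,
columns, stored nonzeros), the row index vector, the column pointer vector of
cols+1 entries, and the nonzero values (real and imaginary parts as two separate
messages in the complex case).  A minimal receive-side sketch for the real double
case follows, for illustration only; it is not the package's MPI_Recv.cc, and
source and mytag are assumed to match the sender.

static int
recv_sparse_double (MPI_Comm comm, int source, int mytag, SparseMatrix &out)
{
  MPI_Status stat;
  int t_id;
  int s[3];   // {rows, cols, number of stored nonzeros}

  int info = MPI_Recv (&t_id, 1, MPI_INT, source, mytag, comm, &stat);
  if (info != MPI_SUCCESS) return info;
  info = MPI_Recv (s, 3, MPI_INT, source, mytag + 1, comm, &stat);
  if (info != MPI_SUCCESS) return info;

  // Allocate a sparse matrix with room for s[2] nonzeros, then receive the
  // index vectors and the data.
  out = SparseMatrix (s[0], s[1], s[2]);

  OCTAVE_LOCAL_BUFFER (int, sridx, s[2]);
  OCTAVE_LOCAL_BUFFER (int, scidx, s[1] + 1);

  info = MPI_Recv (sridx, s[2], MPI_INT, source, mytag + 2, comm, &stat);
  if (info != MPI_SUCCESS) return info;
  info = MPI_Recv (scidx, s[1] + 1, MPI_INT, source, mytag + 3, comm, &stat);
  if (info != MPI_SUCCESS) return info;
  info = MPI_Recv (out.xdata (), s[2], MPI_DOUBLE, source, mytag + 4, comm, &stat);
  if (info != MPI_SUCCESS) return info;

  for (octave_idx_type k = 0; k < s[2]; k++)
    out.xridx (k) = sridx[k];
  for (octave_idx_type k = 0; k < s[1] + 1; k++)
    out.xcidx (k) = scidx[k];

  return info;
}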
-int send_string(int t_id, MPI_Comm comm, std::string  oi8,ColumnVector rankrec, int mytag){       
-  int info;
-  int nitem = oi8.length();
-  int tanktag[3];
-  tanktag[0] = mytag;
-  tanktag[1] = mytag+1;
-  tanktag[2] = mytag+2;
-  // Use OCTAVE_LOCAL_BUFFER rather than a variable-length array
-  // (VLAs are not standard C++).
-  OCTAVE_LOCAL_BUFFER(char,i8,nitem+1);
-  strcpy(i8, oi8.c_str());
-
-// Here we declare a contiguous derived datatype
-// Create a contiguous datatype for the fortranvec
-MPI_Datatype fortvec;
-MPI_Type_contiguous(nitem+1,MPI_CHAR, &fortvec);
-MPI_Type_commit(&fortvec);
-
-
-
-  for (octave_idx_type  i = 0; i< rankrec.nelem(); i++)
-  {
-//       printf("I am sending to %d \n",rankrec(i));   
-//           printf("Sending block with tag to %i \n",mytag);
-          info = MPI_Send(&t_id, 1, MPI_INT, rankrec(i), mytag, comm);
-      if (info !=MPI_SUCCESS) return info;
-//       printf("Sending type of object  %i \n",t_id);   
-          info = MPI_Send(&nitem, 1, MPI_INT, rankrec(i), tanktag[1], comm);
-//           printf("Sending nitem  %i \n",nitem);   
-      if (info !=MPI_SUCCESS) return info;
-      info =  MPI_Send(i8,1,fortvec,rankrec(i),tanktag[2],comm);
-//           printf("Info for sending fortvec  %i \n",info);   
-      if (info !=MPI_SUCCESS) return info;
-  }
-
-    return(info);
-
-}
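send_string therefore transmits three messages: the type id, the string length,
and a NUL-terminated character buffer of length+1 bytes.  A matching receive might
look like the following sketch (illustrative only; source and mytag are assumed to
match the sender).

static int
recv_string (MPI_Comm comm, int source, int mytag, std::string &out)
{
  MPI_Status stat;
  int t_id, nitem;

  int info = MPI_Recv (&t_id, 1, MPI_INT, source, mytag, comm, &stat);
  if (info != MPI_SUCCESS) return info;
  info = MPI_Recv (&nitem, 1, MPI_INT, source, mytag + 1, comm, &stat);
  if (info != MPI_SUCCESS) return info;

  // The sender ships nitem characters plus the trailing NUL.
  OCTAVE_LOCAL_BUFFER (char, buf, nitem + 1);
  info = MPI_Recv (buf, nitem + 1, MPI_CHAR, source, mytag + 2, comm, &stat);
  if (info != MPI_SUCCESS) return info;

  out = std::string (buf, nitem);
  return info;
}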
-int send_cell(int t_id,MPI_Comm comm, Cell cell, ColumnVector rankrec, int mytag){
-/* First send the number of elements, then recursively the elements themselves. */
-
-// Lists of items to send
-// type_id to identify octave_value
-// n for the cell capacity
-// nd for number of dimensions
-// dimvec derived datatype
-// item of cell
-  int n = cell.capacity();
-  int info;
-  int tanktag[5];
-  tanktag[0] = mytag;
-  tanktag[1] = mytag+1;
-  tanktag[2] = mytag+2;
-  tanktag[3] = mytag+3;
-  tanktag[4] = mytag+4;
-  int newtag = tanktag[4];
-  dim_vector    vdim   = cell. dims();
-  int nd = cell.ndims();
-
-// Declare here the octave_local_buffers
-  OCTAVE_LOCAL_BUFFER(int,dimV,nd);
- for (octave_idx_type i=0; i<nd; i++)
- {
-  dimV[i] = vdim(i) ;
- }
-
-  // Now create the contiguous derived datatype
-  MPI_Datatype dimvec;
-  MPI_Type_contiguous(nd,MPI_INT, &dimvec);
-  MPI_Type_commit(&dimvec);
-
-
-// Now start the big loop
-
-  for (octave_idx_type  i = 0; i< rankrec.nelem(); i++)
-  {
-          info = MPI_Send(&t_id, 1, MPI_INT, rankrec(i), tanktag[0], comm);
-//   	  printf("I have sent the t_id of cell .. and this the flag =%i \n",info);
-      if (info !=MPI_SUCCESS) return info;
-// send cell capacity
-          info = MPI_Send(&n, 1, MPI_INT, rankrec(i), tanktag[1], comm);
-/*           printf("I have sent the capacity of the cell .. and this the flag =%i \n",info);
-  	  printf(".. and this the value of capacity =%i \n",n);*/
-      if (info !=MPI_SUCCESS) return info;
-          info = MPI_Send(&nd, 1, MPI_INT, rankrec(i), tanktag[2], comm);
-/*           printf("I have sent the capacity of the number of dimensions .. and this the flag =%i \n",info);
-           printf("I have sent the value of nd =%i \n",nd);*/
-      if (info !=MPI_SUCCESS) return info;
-// send the dim vector
-      info =  MPI_Send(dimV,1,dimvec,rankrec(i),tanktag[3],comm);
-//         printf("I have sent the dim_vector .. and this the flag =%i \n",info);
-      if (info !=MPI_SUCCESS) return info;
-  }
-
-int cap;
-// Now focus on every single octave_value
-         for (octave_idx_type i=0; i<n; i++){
-             octave_value ov = cell.data()[i];
-             cap = ov.capacity();
-             // Send the capacity of this element to every destination rank
-             // (the element index i must not be used to index rankrec),
-             // then send the element itself recursively via send_class.
-             for (octave_idx_type j = 0; j < rankrec.nelem(); j++)
-             {
-                 info = MPI_Send(&cap, 1, MPI_INT, rankrec(j), newtag, comm);
-                 if (info !=MPI_SUCCESS) return info;
-             }
-             newtag = newtag + cap;
-             info = send_class(comm,ov,rankrec,newtag);
-             if (info !=MPI_SUCCESS) return info;
-         }
-					    
-
-
-  return(info); 
-
-
-}
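send_cell first ships a header (type id, element count, number of dimensions,
dimension vector) and then, for every element, its capacity followed by the
element itself via send_class, advancing the tag by that capacity between
elements.  The sketch below mirrors the same bookkeeping on the receiving side;
it is illustrative only, and recv_class is a hypothetical counterpart of
send_class, not part of this package.

static int
recv_cell (MPI_Comm comm, int source, int mytag, Cell &out)
{
  MPI_Status stat;
  int t_id, n, nd;

  int info = MPI_Recv (&t_id, 1, MPI_INT, source, mytag, comm, &stat);
  if (info != MPI_SUCCESS) return info;
  info = MPI_Recv (&n, 1, MPI_INT, source, mytag + 1, comm, &stat);
  if (info != MPI_SUCCESS) return info;
  info = MPI_Recv (&nd, 1, MPI_INT, source, mytag + 2, comm, &stat);
  if (info != MPI_SUCCESS) return info;

  OCTAVE_LOCAL_BUFFER (int, dimV, nd);
  info = MPI_Recv (dimV, nd, MPI_INT, source, mytag + 3, comm, &stat);
  if (info != MPI_SUCCESS) return info;

  dim_vector dv;
  dv.resize (nd);
  for (int k = 0; k < nd; k++)
    dv(k) = dimV[k];
  out = Cell (dv);

  // Mirror the sender's tag bookkeeping: the capacity of each element is
  // received at the current tag, and the tag is advanced by that capacity
  // before the element itself is received.
  int newtag = mytag + 4;
  for (octave_idx_type k = 0; k < n; k++)
    {
      int cap;
      info = MPI_Recv (&cap, 1, MPI_INT, source, newtag, comm, &stat);
      if (info != MPI_SUCCESS) return info;
      newtag = newtag + cap;

      octave_value ov;
      info = recv_class (comm, ov, source, newtag);   // hypothetical counterpart of send_class
      if (info != MPI_SUCCESS) return info;
      out(k) = ov;
    }

  return info;
}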
-
-int send_struct(int t_id,MPI_Comm comm, Octave_map map,ColumnVector rankrec, int mytag){  /* send the number of keys, then each key name and its contents */
-
-int n = map.nfields(); 
-int info;
-OCTAVE_LOCAL_BUFFER(int,tanktag,2);
-  tanktag[0] = mytag; //t-id
-  tanktag[1] = mytag+1; // n
-int tagcap = mytag+2;
-int   ntagkey = mytag+3; // string
-
-// The dimensions of the struct array must match the dimensions of the
-// contents of every field; this is checked inside the loop below.
-
-  dim_vector struc_dims = map.dims();    // struct array dimensions (ND)
-  dim_vector conts_dims;        // each key stores ND field-values
-
-
-// Now we start the big loop
-   for (octave_idx_type  i = 0; i< rankrec.nelem(); i++)
-   {
-           info = MPI_Send(&t_id, 1, MPI_INT, rankrec(i), tanktag[0], comm);
-/*  	   printf("I have sent % i \n",t_id);
-  	   printf("with info = % i \n",info);*/
-	   if (info !=MPI_SUCCESS) return info;
-     info = MPI_Send(&n,1,MPI_INT,rankrec(i),tanktag[1],comm);
-     if (info !=MPI_SUCCESS) return info;/**/
-//       printf("I have sent n with info = % i \n",info);
-// // This is to avoid confusion between tags of strings and tags of Cells
-   int   ntagCell = ntagkey+1;
-    Octave_map::const_iterator p = map.begin();    // iterate through keys(fnames)
-   int scap;
-   for (; p!=map.end(); p++)    // do not shadow the outer rank index i used by rankrec(i)
-    {
-    std::string        key = map.key     (p);    // field name
-    Cell               conts = map.contents(p);    // Cell w/ND contents
-    conts_dims = conts.dims();        /* each elemt should have same ND */
-    if (struc_dims != conts_dims){
-        printf("MPI_Send: inconsistent map dims\n"); return(MPI_ERR_UNKNOWN);
-				}
-    // Sending capacity of octave_cell
-    scap = conts.capacity(); 
-    info = MPI_Send(&scap,1,MPI_INT,rankrec(i),tagcap,comm);
-//     printf("I have sent capacity of octave cell with info = % i \n",info);
-   if (info !=MPI_SUCCESS) return info;
-    tagcap = tagcap+1;
-    ntagkey = ntagkey + 3;
-    info =send_class(comm, key,rankrec,ntagkey);
-//     printf("I have sent class with info = % i \n",info);
-   
-    if (info !=MPI_SUCCESS) return info;
-    
-    // Sending Cell
-    ntagCell = ntagCell + conts.capacity();
-    info =send_class(comm, conts,rankrec,ntagCell);
-//     printf("I have sent Cell with info = % i \n",info);
-   if (info !=MPI_SUCCESS) return info;
-    }
-
-      if (n != map.nfields()){printf("MPI_Send: inconsistent map length\n");return(MPI_ERR_UNKNOWN);}
-
-
-    }
-
-return(info);
-}
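For each destination rank, send_struct sends the type id and the field count, and
then for every field its contents' capacity, the key name (via send_class, as a
string) and the contents (via send_class, as a cell), using three separate tag
streams (tagcap, ntagkey, ntagCell).  The sketch below mirrors that tag
bookkeeping on the receiving side; it is illustrative only, and recv_class is
again a hypothetical counterpart of send_class.

static int
recv_struct (MPI_Comm comm, int source, int mytag, Octave_map &out)
{
  MPI_Status stat;
  int t_id, n;

  int info = MPI_Recv (&t_id, 1, MPI_INT, source, mytag, comm, &stat);
  if (info != MPI_SUCCESS) return info;
  info = MPI_Recv (&n, 1, MPI_INT, source, mytag + 1, comm, &stat);
  if (info != MPI_SUCCESS) return info;

  // Same initial tag values as the sender.
  int tagcap = mytag + 2;
  int ntagkey = mytag + 3;
  int ntagCell = ntagkey + 1;

  for (octave_idx_type k = 0; k < n; k++)
    {
      int scap;
      info = MPI_Recv (&scap, 1, MPI_INT, source, tagcap, comm, &stat);
      if (info != MPI_SUCCESS) return info;
      tagcap = tagcap + 1;

      ntagkey = ntagkey + 3;
      octave_value ov_key;
      info = recv_class (comm, ov_key, source, ntagkey);   // hypothetical
      if (info != MPI_SUCCESS) return info;

      ntagCell = ntagCell + scap;
      octave_value ov_conts;
      info = recv_class (comm, ov_conts, source, ntagCell);   // hypothetical
      if (info != MPI_SUCCESS) return info;

      out.assign (ov_key.string_value (), ov_conts.cell_value ());
    }

  return info;
}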
-
-int send_class(MPI_Comm comm, octave_value ov, ColumnVector rankrec,int mytag){    /* varname-strlength 1st, dims[ndim] */
-/*----------------------------------*/    /* and then appropriate specific info */
-  int t_id = ov.type_id();
-  const std::string mystring = ov.type_name();
-  MPI_Datatype TSnd;
-      // range
-      if (mystring == "range")     		return(send_range  (t_id, comm, ov.range_value(),rankrec,mytag));
-      // scalar
-      if (mystring == "scalar")  {   TSnd = MPI_DOUBLE;	 	return(send_scalar (t_id, TSnd,comm, ov.scalar_value(),rankrec,mytag)); }
-      if (mystring == "int8 scalar")  { TSnd = MPI_BYTE;		return(send_scalar (t_id, TSnd,comm, ov.int8_scalar_value(),rankrec,mytag)); }
-      if (mystring == "int16 scalar")  { TSnd = MPI_SHORT; 		return(send_scalar (t_id,TSnd ,comm, ov.int16_scalar_value(),rankrec,mytag));}
-      if (mystring ==  "int32 scalar")  { TSnd = MPI_INT;		return(send_scalar (t_id, TSnd,comm, ov.int32_scalar_value(),rankrec,mytag));}
-      if (mystring ==  "int64 scalar")  { TSnd = MPI_LONG_LONG;		return(send_scalar (t_id,TSnd ,comm, ov.int64_scalar_value(),rankrec,mytag));}
-      if (mystring ==  "uint8 scalar")  {	TSnd = MPI_UNSIGNED_CHAR;	return(send_scalar (t_id,TSnd ,comm, ov.uint8_scalar_value(),rankrec,mytag));}
-      if (mystring ==  "uint16 scalar")  {	TSnd = MPI_UNSIGNED_SHORT;	return(send_scalar (t_id,TSnd , comm, ov.uint16_scalar_value(),rankrec,mytag));}
-      if (mystring ==  "uint32 scalar")  {	TSnd = MPI_UNSIGNED;	return(send_scalar (t_id,TSnd , comm, ov.uint32_scalar_value(),rankrec,mytag));}
-      if (mystring == "uint64 scalar")  {	TSnd = MPI_UNSIGNED_LONG_LONG;	return(send_scalar (t_id,TSnd , comm, ov.uint64_scalar_value(),rankrec,mytag));}
-      if (mystring ==  "bool")		{TSnd = MPI_INT;	return(send_scalar (t_id,TSnd , comm, ov.int_value(),rankrec,mytag));}
-      if (mystring ==  "float scalar")    	{ TSnd = MPI_FLOAT;	return(send_scalar (t_id,TSnd , comm, ov.float_value (),rankrec,mytag));}
-      if (mystring ==  "complex scalar") 	{ TSnd = MPI_DOUBLE;	return(send_scalar (t_id,TSnd , comm, ov.complex_value(),rankrec,mytag));}
-      if (mystring ==  "float complex scalar")	{ TSnd = MPI_FLOAT; 		return(send_scalar (t_id,TSnd , comm, ov.float_complex_value(),rankrec,mytag));}
-      // matrix
-      if (mystring ==  "matrix")    	 	{TSnd = MPI_DOUBLE; return(send_matrix(t_id,TSnd,comm, ov,rankrec,mytag));} 
-      if (mystring ==  "bool matrix")		{TSnd = MPI_INT;return(send_matrix(t_id,TSnd,comm, ov,rankrec,mytag));}
-      if (mystring ==  "int8 matrix")    	 	{TSnd = MPI_BYTE;return(send_matrix(t_id,TSnd,comm, ov,rankrec,mytag));}
-      if (mystring ==  "int16 matrix")    	 	{TSnd = MPI_SHORT;return(send_matrix(t_id,TSnd,comm, ov,rankrec,mytag));}        
-      if (mystring ==  "int32 matrix")    	 	{TSnd = MPI_INT;return(send_matrix(t_id,TSnd,comm, ov,rankrec,mytag));}
-      if (mystring ==  "int64 matrix")   	 	{TSnd = MPI_LONG_LONG;return(send_matrix(t_id,TSnd,comm, ov,rankrec,mytag));}
-      if (mystring ==  "uint8 matrix")    	 	{TSnd = MPI_UNSIGNED_CHAR;return(send_matrix(t_id,TSnd,comm, ov,rankrec,mytag));}
-      if (mystring ==  "uint16 matrix")    	{TSnd = MPI_UNSIGNED_SHORT;return(send_matrix(t_id,TSnd,comm, ov,rankrec,mytag));}
-      if (mystring ==  "uint32 matrix")    	{TSnd = MPI_UNSIGNED;return(send_matrix(t_id,TSnd,comm, ov,rankrec,mytag));}
-      if (mystring ==  "uint64 matrix")    	{TSnd = MPI_UNSIGNED_LONG_LONG;return(send_matrix(t_id,TSnd ,comm, ov,rankrec,mytag));}
-//       complex matrix
-      if (mystring ==  "complex matrix")           {TSnd = MPI_DOUBLE;return(send_matrix(t_id,TSnd,comm,ov,rankrec,mytag));}
-      if (mystring ==  "float complex matrix")     {TSnd = MPI_FLOAT;return(send_matrix(t_id,TSnd,comm,ov,rankrec,mytag));} 
-//       sparse matrix
-      if (mystring ==  "sparse bool matrix")		  {TSnd = MPI_INT;return(send_sp_mat(t_id,TSnd,comm,ov,rankrec,mytag));}
-      if (mystring ==  "sparse matrix")			  {TSnd = MPI_DOUBLE;return(send_sp_mat(t_id,TSnd,comm,ov,rankrec,mytag));}	
-      if (mystring ==  "sparse complex matrix")  		  {TSnd = MPI_DOUBLE;return(send_sp_mat(t_id,TSnd,comm,ov,rankrec,mytag));}	
-      
-      if (mystring == "string")    		return(send_string (t_id,comm, ov.string_value(),rankrec,mytag));
-      if (mystring == "sq_string")  		return(send_string (t_id,comm, ov.string_value(),rankrec,mytag));
-      
-      if (mystring ==  "struct")    		return(send_struct (t_id,comm, ov.map_value    (),rankrec,mytag));
-      if (mystring ==  "cell")    	 	 	return(send_cell   (t_id,comm, ov.cell_value   (),rankrec,mytag));
-      
-      if (mystring ==  "<unknown type>")  {  		printf("MPI_Send: unknown class\n");
-             return(MPI_ERR_UNKNOWN );
-	  } 
-      else{
-	    printf("MPI_Send: unsupported class %s\n",
-                    ov.type_name().c_str());
-            return(MPI_ERR_UNKNOWN );
-	   } 
-      
-}
-DEFUN_DLD(MPI_Send,args,nargout, 
-  "-*- texinfo -*-\n\
-@deftypefn {Loadable Function} {@var{INFO} =} MPI_Send(@var{VALUE},@var{RANKS},@var{TAG},@var{COMM})\n\
-MPI_Send sends any octave_value into contiguous memory using the Open MPI library,\n\
-even over a heterogeneous cluster, i.e. one mixing 32-bit and 64-bit CPUs.\n\
-Returns an integer @var{INFO} indicating the success or failure of the send operation.\n\
- @example\n\
- @group\n\
-@var{VALUE} must be an octave variable \n\
-@var{RANKS} must be a vector containing the ranks of the destination processes \n\
-@var{TAG} must be an integer used to identify the message \n\
-@var{COMM} must be an octave communicator object created by the MPI_Comm_Load function \n\
-@end group\n\
-@end example\n\
-\n\
-@seealso{MPI_Comm_Load,MPI_Init,MPI_Finalize,MPI_Recv}\n\
-@end deftypefn")
-{
-     octave_value retval;
-
-  int nargin = args.length ();
-  if (nargin != 4 )
-    {
-      error ("expecting 4 input arguments");
-      return retval;
-    }
-
-  if (error_state)
-    return retval;
-
-     ColumnVector tankrank = args(1).column_vector_value();    
-  
-  if (error_state)
-    {
-      error ("expecting second argument to be a column vector");
-      return retval;
-    }
-     int mytag = args(2).int_value();    
-  if (error_state)
-    {
-      error ("expecting third argument to be an integer value");
-      return retval;
-    }
-
-
-
-  if (!simple_type_loaded)
-    {
-      simple::register_type ();
-      simple_type_loaded = true;
-      mlock ();
-    }
-
-	if (args(3).type_id()!=simple::static_type_id()){
-		error("expecting fourth argument to be an octave communicator object");
-		return octave_value(-1);
-	}
-
-	const octave_base_value& rep = args(3).get_rep();
-	const simple& B = static_cast<const simple&> (rep);
-	MPI_Comm comm = B.comunicator_value ();
-     int info = send_class (comm, args(0), tankrank, mytag);
-     comm= NULL;
-     retval=info;
-     return retval;
-   
-}
-
--- a/extra/openmpi_ext/src/Makefile	Fri Mar 12 14:12:12 2010 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,26 +0,0 @@
-MPICC := mpiCC
-JUNK := $(shell $(MPICC) -showme:compile)
-MPIINC := $(shell echo $(JUNK) | sed -e "s/-pthread/-lpthread/g")
-JUNK := $(shell $(MPICC) -showme:link)
-MPILIBS := $(shell echo $(JUNK) | sed -e "s/-pthread/ /g")
-
-
-all: MPI_Init.oct \
-     MPI_Initialized.oct \
-     MPI_Comm_rank.oct \
-     MPI_Comm_size.oct \
-     MPI_Finalize.oct \
-     MPI_Finalized.oct \
-     MPI_Send.oct \
-     MPI_Recv.oct \
-     MPI_Iprobe.oct \
-     MPI_Probe.oct \
-     MPI_Barrier.oct \
-     MPI_Comm_Load.oct \
-     MPI_Comm_Test.oct \
-
-
-%.oct: %.cc
-	mkoctfile -s $(MPIINC) $(MPILIBS) $<
-clean:
-	-rm *.o  *.oct *~
--- a/extra/openmpi_ext/src/Makefile.OPENMPI1.3.3	Fri Mar 12 14:12:12 2010 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,28 +0,0 @@
-# Flags for a locally built Open MPI 1.3.3 installation (adjust MYHOME as needed):
-MYHOME = /home/corradin/
-MPIINC = -I$(MYHOME)openmpi-1.3.3/include
-MPILIBS = -lpthread -L$(MYHOME)openmpi-1.3.3/lib -lmpi_cxx -lmpi -lopen-rte -lopen-pal -ldl -lnsl -lutil -lm -ldl
-# Alternative flags for a system-wide Open MPI installation:
-# MPIINC = -I/usr/lib/openmpi/include -I/usr/lib/openmpi/include/openmpi
-# MPILIBS = -lpthread -L/usr/lib/openmpi/lib -lmpi_cxx -lmpi -lopen-rte -lopen-pal -ldl -Wl,--export-dynamic -lnsl -lutil -lm -ldl
-
-all: MPI_Init.oct \
-     MPI_Initialized.oct \
-     MPI_Comm_rank.oct \
-     MPI_Comm_size.oct \
-     MPI_Finalize.oct \
-     MPI_Finalized.oct \
-     MPI_Send.oct \
-     MPI_Recv.oct \
-     MPI_Iprobe.oct \
-     MPI_Probe.oct \
-     MPI_Barrier.oct \
-     MPI_Comm_Load.oct \
-     MPI_Comm_Test.oct \
-
-
-%.oct: %.cc
-	mkoctfile -s $(MPIINC)  $(MPILIBS)   $<
-clean:
-	-rm *.o  *.oct *~
--- a/extra/openmpi_ext/src/simple.h	Fri Mar 12 14:12:12 2010 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,34 +0,0 @@
-#include "mpi.h"
-#include <octave/oct.h>
-
-class simple : public octave_base_value
-{
-public:
-  // Constructor
-  simple (const std::string _name = "", MPI_Comm _Comm_Value = MPI_COMM_WORLD )
-    : octave_base_value (), name (_name), Comm_Value(_Comm_Value)
-    {
-    }
-
-  void print (std::ostream& os, bool pr_as_read_syntax = false) const
-    {
-      os << name << std::endl;
-    }
-   ~simple(void)
-    {
-      Comm_Value = MPI_COMM_NULL;
-    }
-  bool is_defined (void) const { return true; }
-  MPI_Comm comunicator_value (bool = false) const { return Comm_Value; }
-  const std::string name_value (bool = false) const { return name; }
-private:
-  const std::string name;
-  MPI_Comm Comm_Value;
-  DECLARE_OCTAVE_ALLOCATOR
-  DECLARE_OV_TYPEID_FUNCTIONS_AND_DATA
-};
-
-DEFINE_OCTAVE_ALLOCATOR (simple);
-DEFINE_OV_TYPEID_FUNCTIONS_AND_DATA (simple, "simple", "simple");
-
-static bool simple_type_loaded = false;
\ No newline at end of file
--- a/extra/openmpi_ext/src/simpleop.h	Fri Mar 12 14:12:12 2010 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,51 +0,0 @@
-#include "mpi.h"
-#include <octave/oct.h>
-
-class simpleop : public octave_base_value
-{
-public:
-  // Constructor
-  simpleop (const std::string _name = "", MPI_Op _Op_Value = MPI_OP_NULL )
-    : octave_base_value (), name (_name), Op_Value(_Op_Value)
-    {
-
-
-    }
-   void set ( MPI_Op _Op_Value)
-     {
-	  // Accept only the predefined MPI reduction operators.  A single
-	  // if/else-if chain is needed here: with independent if statements
-	  // the final else would reset any valid operator other than MPI_SUM
-	  // back to MPI_OP_NULL.
-	  if (_Op_Value == MPI_BAND || _Op_Value == MPI_BOR || _Op_Value == MPI_BXOR
-	      || _Op_Value == MPI_LAND || _Op_Value == MPI_LOR || _Op_Value == MPI_LXOR
-	      || _Op_Value == MPI_MAX || _Op_Value == MPI_MIN || _Op_Value == MPI_PROD
-	      || _Op_Value == MPI_REPLACE || _Op_Value == MPI_SUM || _Op_Value == MPI_OP_NULL)
-	    Op_Value = _Op_Value;
-	  else
-	    {
-	      printf("This is not an MPI Operator!\n");
-	      Op_Value = MPI_OP_NULL;
-	    }
-     }
-  void print (std::ostream& os, bool pr_as_read_syntax = false) const
-    {
-      os << name << std::endl;
-    }
-   ~simpleop(void)
-    {
-      Op_Value = MPI_OP_NULL;
-    }
-  bool is_defined (void) const { return true; }
-  MPI_Op operator_value (bool = false) const { return Op_Value; }
-  const std::string name_value (bool = false) const { return name; }
-private:
-  const std::string name;
-  MPI_Op Op_Value;
-  DECLARE_OCTAVE_ALLOCATOR
-  DECLARE_OV_TYPEID_FUNCTIONS_AND_DATA
-};
-
-DEFINE_OCTAVE_ALLOCATOR (simpleop);
-DEFINE_OV_TYPEID_FUNCTIONS_AND_DATA (simpleop, "simpleop", "simpleop");
-
-static bool simpleop_type_loaded = false;
\ No newline at end of file