changeset 10741:dc6d0e4f406f octave-forge

more changes for new release
author cdf
date Wed, 29 Aug 2012 16:28:49 +0000
parents a9fd789b543e
children f87e4428e955
files main/openmpi_ext/DESCRIPTION main/openmpi_ext/INDEX main/openmpi_ext/NEWS main/openmpi_ext/README main/openmpi_ext/doc/README main/openmpi_ext/inst/Pi.m main/openmpi_ext/inst/hello2dimmat.m main/openmpi_ext/inst/hellocell.m main/openmpi_ext/inst/hellosparsemat.m main/openmpi_ext/inst/hellostruct.m main/openmpi_ext/inst/helloworld.m main/openmpi_ext/inst/mc_example.m main/openmpi_ext/inst/montecarlo.m main/openmpi_ext/src/MPI_Barrier.cc main/openmpi_ext/src/MPI_Comm_Test.cc main/openmpi_ext/src/MPI_Comm_rank.cc main/openmpi_ext/src/MPI_Comm_size.cc main/openmpi_ext/src/MPI_Probe.cc main/openmpi_ext/src/MPI_Recv.cc main/openmpi_ext/src/MPI_Send.cc
diffstat 20 files changed, 630 insertions(+), 494 deletions(-) [+]
line wrap: on
line diff
--- a/main/openmpi_ext/DESCRIPTION	Wed Aug 29 02:56:16 2012 +0000
+++ b/main/openmpi_ext/DESCRIPTION	Wed Aug 29 16:28:49 2012 +0000
@@ -1,8 +1,8 @@
 Name: openmpi_ext
-Version: 1.0.2
-Date: 2010-6-17
-Author: Riccardo Corradini <riccardocorradini@yahoo.it> and the Octave Community
-Maintainer: Riccardo Corradini <riccardocorradini@yahoo.it>
+Version: 1.1.0
+Date: 2012-8-29
+Author: Riccardo Corradini <riccardocorradini@yahoo.it>, Jaroslav Hajek, Carlo de Falco
+Maintainer: the Octave Community
 Title: openmpi_ext
 Description: MPI functions for parallel computing using simple MPI Derived Datatypes.
 Depends: octave (>= 3.2.4)
--- a/main/openmpi_ext/INDEX	Wed Aug 29 02:56:16 2012 +0000
+++ b/main/openmpi_ext/INDEX	Wed Aug 29 16:28:49 2012 +0000
@@ -1,9 +1,19 @@
 openmpi_ext >> Openmpi_ext
-Openmpi_ext
- 
- MPI_Barrier    MPI_Comm_Test  MPI_Initialized  MPI_Probe
- MPI_Comm_Load  MPI_Finalize   MPI_Iprobe      MPI_Recv
- MPI_Comm_rank  MPI_Finalized  MPI_Op_Load     MPI_Send
- MPI_Comm_size  MPI_Init      MPI_Op_Test
- hello2dimmat  hellosparsemat  helloworld  montecarlo
- hellocell     hellostruct     mc_example  Pi
+Octave Wrappers for MPI functions
+ MPI_Barrier 	MPI_Comm_Load
+ MPI_Comm_Test 	MPI_Comm_rank
+ MPI_Comm_size 	MPI_Finalize
+ MPI_Finalized
+ MPI_Get_processor_name
+ MPI_Init 	MPI_Initialized
+ MPI_Iprobe 	MPI_Probe
+ MPI_Recv 	MPI_Send
+Examples 
+ Pi
+ hello2dimmat
+ hellocell
+ hellosparsemat
+ hellostruct
+ helloworld
+ mc_example
+ montecarlo
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/main/openmpi_ext/NEWS	Wed Aug 29 16:28:49 2012 +0000
@@ -0,0 +1,7 @@
+Summary of changes for openmpi_ext 1.1.0:
+-------------------------------------------------------------------
+
+ * Added help text and demos in all example scripts
+
+ * Code clean-up and standardization, and some bug-fixes
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/main/openmpi_ext/README	Wed Aug 29 16:28:49 2012 +0000
@@ -0,0 +1,77 @@
+CONTENTS:
+
+1) INSTALLATION INSTRUCTIONS
+
+2) ORIGINAL README
+
+--------------------
+
+1) INSTALLATION INSTRUCTIONS
+
+The included makefile derives all the information it needs for building the code 
+by running mpicc, so make sure that mpicc is in your path before starting Octave,
+or type 
+
+putenv ("PATH", ["/path/to/mpicc:" getenv("PATH")])
+
+from within Octave.
+Once this is done you should be able to install openmpi_ext from a locally
+downloaded tarball by doing:
+
+pkg install openmpi_ext-<version>.tar.gz
+
+or directly from Octave-Forge by doing:
+
+pkg install -forge openmpi_ext
+
+--------------------
+
+2) ORIGINAL README
+
+Below are the contents of the original README file included with the first release
+by R. Corradini. I am not sure all the information there still makes sense, but it
+is reported here just in case ...
+
+The code is general-purpose, but I would like to use it for econometrics.
+So the first step will be to install the following tarball from
+http://www.open-mpi.org/software/ompi/v1.3/downloads/openmpi-1.3.3.tar.bz2
+preferably on a multi-core computer to run my simple examples,
+and configure it for instance in the following way (/home/user is your $HOME):
+./configure --enable-mpirun-prefix-by-default --enable-heterogeneous --prefix=/home/user/openmpi-1.3.3/ --enable-static
+
+and modify .bashrc in your home
+ OMPIBIN=`$ompi_info -path     bindir  -parsable | cut -d: -f3`
+ OMPILIB=`$ompi_info -path     libdir  -parsable | cut -d: -f3`
+ OMPISCD=`$ompi_info -path sysconfdir  -parsable | cut -d: -f3`
+
+
+
+export            PATH=$OMPIBIN:$PATH
+
+export LD_LIBRARY_PATH=:$OMPILIB:$LD_LIBRARY_PATH
+
+unset  ompi_info OMPIBIN OMPILIB OMPISCD 
+
+If you want to install it on a simple toy network, just assign a static IP address to every Linux computer, set up 
+an ssh connection with no password (see for instance http://linuxproblem.org/art_9.html ), and then install the same versions of openmpi and octave on every machine, with the same settings in .bashrc for the same user.
+
+After this, type mpiCC --showme in a terminal.
+In my case this prints something like
+
+g++ -I/home/user/openmpi-1.3.3/include -pthread -L/home/user/openmpi-1.3.3/lib -lmpi_cxx -lmpi -lopen-rte -lopen-pal -ldl -Wl,--export-dynamic -lnsl -lutil -lm -ldl
+
+This will be useful for mkoctfile;
+for instance, for MPI_Init.cc we shall have
+mkoctfile -I/home/user/openmpi-1.3.3/include -lpthread -L/home/user/openmpi-1.3.3/lib -lmpi_cxx -lmpi -lopen-rte -lopen-pal -ldl -lnsl -lutil -lm -ldl MPI_Init.cc
+
+
+
+The m-files just contain some very simple examples.
+More complex examples will be provided in the near future.
+See also
+http://static.msi.umn.edu/tutorial/scicomp/general/MPI/content6.html
+to understand the logic of MPI Derived Datatypes and how they can easily be handled by the openmpi_ext package.
+Best regards,
+Riccardo Corradini
+
+
+
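A minimal sketch of a complete install-and-load session from within Octave, collecting the steps from section 1 above; the mpicc path and the tarball version are placeholders to adapt to the local setup:

  ## make mpicc visible to the build (adjust to your Open MPI installation)
  putenv ("PATH", ["/path/to/mpicc:" getenv("PATH")]);
  ## install from a locally downloaded tarball ...
  pkg install openmpi_ext-1.1.0.tar.gz
  ## ... or fetch and install directly from Octave-Forge
  pkg install -forge openmpi_ext
  pkg load openmpi_ext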
--- a/main/openmpi_ext/doc/README	Wed Aug 29 02:56:16 2012 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,44 +0,0 @@
-The code is general-purpose, but  I would like to use it for econometrics.
-So the first step will be to install the following tarball from
-http://www.open-mpi.org/software/ompi/v1.3/downloads/openmpi-1.3.3.tar.bz2
-possibly in a multi-core computer to run my simple examples
-and configure it for instance in the following way (/home/user is your $HOME)
-./configure --enable-mpirun-prefix-by-default --enable-heterogeneous --prefix=/home/user/openmpi-1.3.3/ --enable-static
-
-and modify .bashrc in your home
- OMPIBIN=`$ompi_info -path     bindir  -parsable | cut -d: -f3`
- OMPILIB=`$ompi_info -path     libdir  -parsable | cut -d: -f3`
- OMPISCD=`$ompi_info -path sysconfdir  -parsable | cut -d: -f3`
-
-
-
-export            PATH=$OMPIBIN:$PATH
-
-export LD_LIBRARY_PATH=:$OMPILIB:$LD_LIBRARY_PATH
-
-unset  ompi_info OMPIBIN OMPILIB OMPISCD 
-
-If you want to install it on a simple toy network, just assign a static ip address on every linux computer and set up 
-an ssh connection with no password (see for instance http://linuxproblem.org/art_9.html ) and then install openmpi and octave always with the same versions and with the same info on .bashrc for the same user.
-
-After this type in a terminal mpiCC --showme
-In my case I will have something like
-
-g++ -I/home/user/openmpi-1.3.3/include -pthread -L/home/user/openmpi-1.3.3/lib -lmpi_cxx -lmpi -lopen-rte -lopen-pal -ldl -Wl,--export-dynamic -lnsl -lutil -lm -ldl
-
-This will be useful for mkoctfile
-for instance for MPI_Init.cc we shall have
-mkoctfile -I/home/user/openmpi-1.3.3/include -lpthread -L/home/user/openmpi-1.3.3/lib -lmpi_cxx -lmpi -lopen-rte -lopen-pal -ldl -lnsl -lutil -lm -ldl MPI_Init.cc
-
-
-
-The m files just contain some very simple examples
-More complex examples will be provided in the next future.
-See also
-http://static.msi.umn.edu/tutorial/scicomp/general/MPI/content6.html
-to understand the logic of MPI Derived Datatypes and how could they be easily handled by openmpi_ext package.
-Bests regards
-Riccardo Corradini
-
-
-
--- a/main/openmpi_ext/inst/Pi.m	Wed Aug 29 02:56:16 2012 +0000
+++ b/main/openmpi_ext/inst/Pi.m	Wed Aug 29 16:28:49 2012 +0000
@@ -14,98 +14,113 @@
 ## You should have received a copy of the GNU General Public License along with
 ## this program; if not, see <http://www.gnu.org/licenses/>.
 
-# Please add the oct files openmpi_ext folder 
-# For instance addpath("../src");
-# mpirun -np 5 octave -q --eval "Pi(2E7,'s')"
-
-function results = Pi(N,mod)
-addpath("../src");
-# Pi:	Classic PI computation by numeric integration of arctan'(x) in [0..1]
-#
-#	Pi [ ( N [ ,mod ] ) ]
-#
-#  N	[1E7]	#subdivisions of the [0, 1] interval
-#  mod	['s']	communication modality:  (s)end (r)educe
-#
-#  printed results struct contains
-#	pi	estimated pi value
-#	err	error
-#	time	from argument xmit to pi computed
-#
-	
+## -*- texinfo -*-
+## @deftypefn {Function File} {@var{results} =} Pi (@var{N}, @var{mod})
+## Classical MPI example that computes pi by numerically integrating arctan'(x) = 4/(1+x^2) on [0,1].
+##  @var{N} [1e7] number of subdivisions of the [0, 1] interval.
+##  @var{mod} ['s'] communication modality: (s)end or (r)educe.
+##  The output @var{results} is a struct containing
+##
+##  @itemize @minus
+##      @item @var{pi}: estimated pi value
+##      @item @var{err}: error
+##      @item @var{time}: time from sending the arguments until pi is computed
+## @end itemize
+##
+## To run this example, set the variables HOSTFILE and NUMBER_OF_MPI_NODES to appropriate values, 
+## then type the following command in your shell:
+## @example
+## mpirun --hostfile $HOSTFILE -np $NUMBER_OF_MPI_NODES octave --eval 'pkg load openmpi_ext; Pi ()'
+## @end example
+## @seealso{hello2dimmat,helloworld,hellocell,hellosparsemat,mc_example,montecarlo,hellostruct} 
+## @end deftypefn
 
-##########
-# ArgChk #
-##########
-if nargin<1,	N=1E7;	end
-if nargin<2,  mod='s';	end
-if nargin>2,	usage("Pi(N,mod)"); end		# let all ranks complain
-flag=0;						# code much simpler
-flag=flag || ~isscalar(N) || ~isnumeric(N);
-flag=flag  |   fix(N)~=N   |           N<1;
-		   mod=lower(mod); mods='sr';
-flag=flag  | isempty(findstr(mod,  mods));	# let them all error out
-if flag,	usage("Pi( <int> N>0, <char> mod=='s|r' )"); end
+function results = Pi (N, mod)
+        
+  ############
+  ## ArgChk ##
+  ############
+  if (nargin < 1)
+    N = 1E7;
+  end
+  if (nargin < 2)
+    mod = 's';
+  end
+  if (nargin > 2)
+    print_usage (); # let all ranks complain
+  end 
+  flag = 0; # code much simpler
+  flag = flag || ~isscalar (N) || ~isnumeric (N);
+  flag = flag || fix(N) ~= N || N < 1;
+  mod = lower (mod); mods = 'sr';
+  flag = flag || isempty (findstr (mod, mods)); 
+  if (flag)
+    print_usage (); # let them all error out
+  end
 
-##################
-# Results struct #
-##################
-results.pi   =0;
-results.err  =0;
-results.time =0;
+  ####################
+  ## Results struct ##
+  ####################
+  results.pi = 0;
+  results.err = 0;
+  results.time = 0;
 
+  ####################################################################
+  ## PARALLEL: initialization, include MPI_Init time in measurement  ##
+  ####################################################################
 
-############
-# PARALLEL # initialization, include MPI_Init time in measurement
-############
   T=clock; #
-############
-   MPI_ANY_SOURCE = -1;
-   MPI_Init();	
-   MPI_COMM_WORLD = MPI_Comm_Load("NEWORLD");		
-   rnk   =	MPI_Comm_rank (MPI_COMM_WORLD);	# let it abort if it fails
-   siz   =	MPI_Comm_size (MPI_COMM_WORLD);
+  ############
+  MPI_ANY_SOURCE = -1;
+  MPI_Init ();  
+  MPI_COMM_WORLD = MPI_Comm_Load ("NEWORLD");           
+  rnk   = MPI_Comm_rank (MPI_COMM_WORLD); # let it abort if it fails
+  siz   = MPI_Comm_size (MPI_COMM_WORLD);
+
+  SLV = logical(rnk);                 # handy shortcuts, master is rank 0
+  MST = ~ SLV;                        # slaves are all other
 
-    SLV = logical(rnk);			# handy shortcuts, master is rank 0
-    MST = ~ SLV;			# slaves are all other
+  #####################################################
+  ## PARALLEL: computation (depends on rank/size)    ##
+  #####################################################
 
-############
-# PARALLEL # computation (depends on rank/size)
-############			# vectorized code, equivalent to
-  width=1/N; lsum=0;		# for i=rnk:siz:N-1
-  i=rnk:siz:N-1;		#   x=(i+0.5)*width;
-  x=(i+0.5)*width;		#   lsum=lsum+4/(1+x^2);
-  lsum=sum(4./(1+x.^2));	# end
+  width=1/N; lsum=0;            # for i=rnk:siz:N-1
+  i=rnk:siz:N-1;                #   x=(i+0.5)*width;
+  x=(i+0.5)*width;              #   lsum=lsum+4/(1+x^2);
+  lsum=sum(4./(1+x.^2));        # end
+
+  #####################################
+  ## PARALLEL: reduction and finish  ##
+  #####################################
 
-############
-# PARALLEL # reduction and finish
-############
-switch mod
-  case 's',			TAG=7;	# Any tag would do
-    if SLV				# All slaves send result back
-	MPI_Send(lsum,             0,TAG,MPI_COMM_WORLD);
-    else				# Here at master
-	    Sum =lsum;			# save local result
-      for slv=1:siz-1			# collect in any order
-	    lsum = MPI_Recv(MPI_ANY_SOURCE,TAG,MPI_COMM_WORLD);
-	    Sum+=lsum;			# and accumulate
-      end				# order: slv or MPI_ANY_SOURCE
-    end
-  case 'r',
-        disp("not yet implemented");
-#	Sum=0;		
-# reduction master = rank 0 @ WORLD
-#       MPI_Reduce(lsum,Sum, MPI_SUM,  0,MPI_COMM_WORLD);
-end
+  switch mod
+    case 's',                     
+      TAG=7;                              # Any tag would do
+      if SLV                              # All slaves send result back
+        MPI_Send (lsum, 0, TAG, MPI_COMM_WORLD);
+      else                                # Here at master
+        Sum =lsum;                        # save local result
+        for slv=1:siz-1                   # collect in any order
+          lsum = MPI_Recv (MPI_ANY_SOURCE, TAG, MPI_COMM_WORLD);
+          Sum += lsum;                    # and accumulate
+        endfor                            # order: slv or MPI_ANY_SOURCE
+      endif
+    case 'r',
+      disp ("not yet implemented");
+  endswitch
 
-MPI_Finalize();
+  MPI_Finalize ();
 
-if MST
-    Sum      = Sum/N ; 			# better at end: don't loose resolution
-#################################	# stopwatch measurement
-results.time = etime(clock,T);  #	# but only at master after PI computed
-#################################	# all them started T=clock;
-results.err  = Sum-pi;
-results.pi   = Sum # ;
+  if MST
+    Sum      = Sum/N ;                      # better at end: don't lose resolution
+    #################################       # stopwatch measurement
+    results.time = etime(clock,T);  #       # but only at master after PI computed
+    #################################       # all ranks started T=clock;
+    results.err  = Sum - pi;
+    results.pi   = Sum # ;
+  endif
 
-end 
+endfunction
+
+%!demo
+%! system ("mpirun --hostfile $HOSTFILE -np $NUMBER_OF_MPI_NODES octave -q --eval 'pkg load openmpi_ext; Pi ()'");
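The numerical core of Pi.m is the midpoint rule applied to arctan'(x) = 4/(1+x^2) on [0,1]; each rank sums a strided subset i = rnk:siz:N-1 of the subintervals and the master accumulates the partial sums. A serial sketch of the same computation, for reference:

  N = 1e7;                          # number of subdivisions of [0,1]
  width = 1 / N;                    # width of one subinterval
  x = ((0:N-1) + 0.5) * width;      # midpoints of all subintervals
  Sum = sum (4 ./ (1 + x.^2)) / N;  # midpoint-rule estimate of pi
  err = Sum - pi                    # discretization error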
--- a/main/openmpi_ext/inst/hello2dimmat.m	Wed Aug 29 02:56:16 2012 +0000
+++ b/main/openmpi_ext/inst/hello2dimmat.m	Wed Aug 29 16:28:49 2012 +0000
@@ -1,4 +1,5 @@
 ## Copyright (C) 2009 Riccardo Corradini <riccardocorradini@yahoo.it>
+## Copyright (C) 2012 Carlo de Falco
 ##
 ## This program is free software; you can redistribute it and/or modify it under
 ## the terms of the GNU General Public License as published by the Free Software
@@ -13,39 +14,53 @@
 ## You should have received a copy of the GNU General Public License along with
 ## this program; if not, see <http://www.gnu.org/licenses/>.
 
-# Please add the oct files openmpi_ext folder 
-# For instance 
-  addpath("../src");
-  MPI_SUCCESS =0;
-  MPI_Init();
+## -*- texinfo -*-
+## @deftypefn {Function File} {} hello2dimmat ()
+## This function demonstrates sending and receiving a 2-dimensional matrix over MPI.
+## Each process in the pool will create a random 90x90 matrix and send it to the process with rank 0.
+## To run this example, set the variables HOSTFILE and NUMBER_OF_MPI_NODES to appropriate values, 
+## then type the following command in your shell:
+## @example
+## mpirun --hostfile $HOSTFILE -np $NUMBER_OF_MPI_NODES octave --eval 'pkg load openmpi_ext; hello2dimmat ()'
+## @end example
+## @seealso{hellocell,hellosparsemat,hellostruct,helloworld,mc_example,montecarlo,Pi} 
+## @end deftypefn
 
-  # the string NEWORLD is just a label could be whatever you want    
-  CW = MPI_Comm_Load("NEWORLD");
-  my_rank = MPI_Comm_rank(CW);
-  p = MPI_Comm_size(CW);
+function hello2dimmat ()
+
+  MPI_SUCCESS = 0;
+  MPI_Init ();
+  
+  ## the string NEWORLD is just a label; it could be whatever you want
+  CW = MPI_Comm_Load ("NEWORLD");
+  my_rank = MPI_Comm_rank (CW);
+  p = MPI_Comm_size (CW);
   mytag = 48;
-
-
- 
+  
   if (my_rank != 0)
-#        Generate a random matrix
-       message=rand(90,90);
-#        load message
-#       rankvect is the vector containing the list of rank  destination process
-     rankvect = 0;
-     [info] = MPI_Send(message,rankvect,mytag,CW);
+    ## Generate a random matrix
+    message = rand (90, 90);
+    ## load message
+    ## rankvect is the vector containing the ranks of the destination processes
+    rankvect = 0;
+    [info] = MPI_Send (message, rankvect, mytag, CW);
   else
-        for source = 1:p-1
-          disp("We are at rank 0 that is master etc..");
-          [messager, info] = MPI_Recv(source,mytag,CW);
-          
-#	You could also save each result and make comparisons if you don't trust MPI
-          disp("Rank 0 is the master receiving ... :");
-            if (info == MPI_SUCCESS)
-              disp('OK!');
-          endif
-          endfor
-  end   
+    for source = 1:p-1
+      disp ("We are at rank 0 that is master etc..");
+      [messager, info] = MPI_Recv (source, mytag, CW);
+      
+      ## You could also save each result and make comparisons if you don't trust MPI
+      disp ("Rank 0 is the master receiving ... :");
+      if (info == MPI_SUCCESS)
+        disp ('OK!');
+      endif
+    endfor
+  endif   
 
+  MPI_Finalize ();
 
-   MPI_Finalize();
+endfunction
+
+%!demo
+%! system ("mpirun --hostfile $HOSTFILE -np $NUMBER_OF_MPI_NODES octave -q --eval 'pkg load openmpi_ext; hello2dimmat ()'");
+
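The comment in hello2dimmat about saving each result for comparison could be realized, for instance, by seeding the generator identically on sender and receiver so the payload is reproducible; a hedged sketch (the seed value is arbitrary):

  rand ("state", 42);         # same seed on sender and receiver
  expected = rand (90, 90);   # regenerate what the sender produced
  ## ... after [messager, info] = MPI_Recv (source, mytag, CW):
  ## assert (isequal (messager, expected))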
--- a/main/openmpi_ext/inst/hellocell.m	Wed Aug 29 02:56:16 2012 +0000
+++ b/main/openmpi_ext/inst/hellocell.m	Wed Aug 29 16:28:49 2012 +0000
@@ -1,4 +1,5 @@
 ## Copyright (C) 2009 Riccardo Corradini <riccardocorradini@yahoo.it>
+## Copyright (C) 2012 Carlo de Falco
 ##
 ## This program is free software; you can redistribute it and/or modify it under
 ## the terms of the GNU General Public License as published by the Free Software
@@ -13,41 +14,49 @@
 ## You should have received a copy of the GNU General Public License along with
 ## this program; if not, see <http://www.gnu.org/licenses/>.
 
-# Please add the oct files openmpi_ext folder 
-# For instance 
-addpath("../src");
-# if you have 4 cores or a network of 4 computers with a ssh connection with no password and same openmpi 1.3.3 installation
-# type at the terminal mpirun -np 4 octave --eval hellocell
-
+## -*- texinfo -*-
+## @deftypefn {Function File} {} hellocell ()
+## This function demonstrates sending and receiving a cell array over MPI.
+## Each process will send a cell array to the process with rank 0, which will then display it.
+## To run this example, set the variables HOSTFILE and NUMBER_OF_MPI_NODES to appropriate values, 
+## then type the following command in your shell:
+## @example
+## mpirun --hostfile $HOSTFILE -np $NUMBER_OF_MPI_NODES octave --eval 'pkg load openmpi_ext; hellocell ()'
+## @end example
+## @seealso{hello2dimmat,helloworld,hellosparsemat,hellostruct,mc_example,montecarlo,Pi} 
+## @end deftypefn
 
-   MPI_Init();
-   # the string NEWORLD is just a label could be whatever you want 
-   CW = MPI_Comm_Load("NEWORLD");
+function hellocell ()
 
-   
+  MPI_Init ();
+  ## the string NEWORLD is just a label; it could be whatever you want 
+  CW = MPI_Comm_Load ("NEWORLD");
 
-  my_rank = MPI_Comm_rank(CW);
-  p = MPI_Comm_size(CW);
-  # TAG is very important to identify the message
+  my_rank = MPI_Comm_rank (CW);
+  p = MPI_Comm_size (CW);
+
+  ## TAG is very important to identify the message
   TAG = 1;
 
-
-  message="";
   if (my_rank != 0)
-     message = {magic(3) 17 'fred'; ...
-     'AliceBettyCarolDianeEllen' 'yp' 42; ...
-     {1} 2 3};
-     rankvect = 0;
-     [info] = MPI_Send(message,rankvect,TAG,CW);
+    message = {magic(3) 17 'fred'; ...
+               'AliceBettyCarolDianeEllen' 'yp' 42; ...
+               {1} 2 3};
+    rankvect = 0;
+    [info] = MPI_Send (message, rankvect, TAG, CW);
   else
-        for source = 1:p-1
-          disp("We are at rank 0 that is master etc..");
-          [messager, info] = MPI_Recv(source,TAG,CW);
-	  info
-          messager
-        endfor
-  end   
+    for source = 1:p-1
+      disp ("We are at rank 0 that is master etc..");
+      [messagerec, info] = MPI_Recv (source, TAG, CW);
+      info
+      messagerec
+    endfor
+  endif   
 
+  MPI_Finalize ();
+  
+endfunction
 
-  MPI_Finalize();
+%!demo
+%! system ("mpirun --hostfile $HOSTFILE -np $NUMBER_OF_MPI_NODES octave -q --eval 'pkg load openmpi_ext; hellocell ()'");
 
--- a/main/openmpi_ext/inst/hellosparsemat.m	Wed Aug 29 02:56:16 2012 +0000
+++ b/main/openmpi_ext/inst/hellosparsemat.m	Wed Aug 29 16:28:49 2012 +0000
@@ -1,4 +1,5 @@
 ## Copyright (C) 2009 Riccardo Corradini <riccardocorradini@yahoo.it>
+## Copyright (C) 2012 Carlo de Falco
 ##
 ## This program is free software; you can redistribute it and/or modify it under
 ## the terms of the GNU General Public License as published by the Free Software
@@ -13,53 +14,71 @@
 ## You should have received a copy of the GNU General Public License along with
 ## this program; if not, see <http://www.gnu.org/licenses/>.
 
-# Please add the oct files openmpi_ext folder 
-# For instance 
-  addpath("../src");
+## -*- texinfo -*-
+## @deftypefn {Function File} {} hellosparsemat ()
+## This function demonstrates sending and receiving a sparse matrix over MPI.
+## Each process will send a sparse matrix to the process with rank 0, which will then display it.
+## To run this example, set the variables HOSTFILE and NUMBER_OF_MPI_NODES to appropriate values, 
+## then type the following command in your shell:
+## @example
+## mpirun --hostfile $HOSTFILE -np $NUMBER_OF_MPI_NODES octave --eval 'pkg load openmpi_ext; hellosparsemat ()'
+## @end example
+## @seealso{hello2dimmat,helloworld,hellocell,hellostruct,mc_example,montecarlo,Pi} 
+## @end deftypefn
+
+function hellosparsemat ()
+
   MPI_Init();
-  # the string NEWORLD is just a label could be whatever you want  
-  CW = MPI_Comm_Load("NEWORLD");
-  my_rank = MPI_Comm_rank(CW);
-  p = MPI_Comm_size(CW);
-# tag[0] ----> type of octave_value
-# tag[1] ----> array of three elements 1) num of rows 2) number of columns 3) number of non zero elements
-# tag[2] ---->  vector of rowindex
-# tag[3] ---->  vector of columnindex
-# tag[4] ---->  vector of  non zero elements
-# These tags will be generated after mytag by the MPI_Send and MPI_Recv (see source code)
+  ## the string NEWORLD is just a label; it could be whatever you want  
+  CW = MPI_Comm_Load ("NEWORLD");
+  my_rank = MPI_Comm_rank (CW);
+  p = MPI_Comm_size (CW);
 
+  ## tag[0] ----> type of octave_value
+  ## tag[1] ----> array of three elements: 1) number of rows 2) number of columns 3) number of nonzero elements
+  ## tag[2] ----> vector of row indices
+  ## tag[3] ----> vector of column indices
+  ## tag[4] ----> vector of nonzero elements
+  ## These tags will be generated from mytag by MPI_Send and MPI_Recv (see source code)
   mytag = 48;
-
-
-
+  
+  ## This is just to fill the sparse matrix
+  M = 5;
+  N = 5;
+  D = 0.9;
 
-# This is just to fill the sparse matrix
-  M=5;
-  N=5;
-  D=0.9;
-    message = sprand (M, N, D);
-#  load message
- 
+  for one_by_one = p-1:-1:0 ## work one cpu at a time to make the output readable
 
- 
-  if (my_rank != 0)
+    if (my_rank != 0)
+      
+      message = sprand (M, N, D);
       dest = 0;
-#       rankvect is the vector containing the list of rank  destination process
-     rankvect(1,1) = 0;
-     [info] = MPI_Send(message,rankvect,mytag,CW);
-     disp("This is flag for sending the message --")
-     info
-  else
-        for source = 1:p-1
-          messager='';
-          disp("We are at rank 0 that is master etc..");
-          [messager, info] = MPI_Recv(source,mytag,CW);
-          disp("Rank 0 is the master receiving ... :");
-          if (messager/message)
-                disp('OK!');
-          endif
-      messager
-          endfor
-  end   
+      info = MPI_Send (message, dest, mytag, CW);
+      printf ("on rank %d MPI_Send returned the following error code (0 = Success)\n", my_rank)
+      info
+
+    else
 
-   MPI_Finalize();
+      for source = 1:p-1
+        
+        [messager, info] = MPI_Recv (source, mytag, CW);
+        
+        printf ("MPI_Recv returned the following error code (0 = Success) while receiving from rank %d\n", source)
+        info
+      
+        printf ("This is the matrix received from rank %d: \n", source);
+        full (messager)
+
+      endfor
+      
+    endif   
+    
+    MPI_Barrier (CW);
+  endfor
+  
+  MPI_Finalize ();
+  
+endfunction
+
+%!demo
+%! system ("mpirun --hostfile $HOSTFILE -np $NUMBER_OF_MPI_NODES octave -q --eval 'pkg load openmpi_ext; hellosparsemat ()'");
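The five-tag scheme in the comments of hellosparsemat mirrors the pieces a sparse matrix decomposes into. A hedged illustration in plain Octave of what the messages carry (the actual packing happens inside the MPI_Send/MPI_Recv oct-files, which work on the compressed ridx/cidx/data arrays):

  S = sprand (5, 5, 0.9);
  header = [rows(S), columns(S), nnz(S)];  # tag+1: dimensions and nonzero count
  [ridx, cidx, vals] = find (S);           # tag+2: row indices
                                           # tag+3: column indices
                                           # tag+4: nonzero values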
--- a/main/openmpi_ext/inst/hellostruct.m	Wed Aug 29 02:56:16 2012 +0000
+++ b/main/openmpi_ext/inst/hellostruct.m	Wed Aug 29 16:28:49 2012 +0000
@@ -1,4 +1,5 @@
 ## Copyright (C) 2009 Riccardo Corradini <riccardocorradini@yahoo.it>
+## Copyright (C) 2012 Carlo de Falco
 ##
 ## This program is free software; you can redistribute it and/or modify it under
 ## the terms of the GNU General Public License as published by the Free Software
@@ -13,37 +14,46 @@
 ## You should have received a copy of the GNU General Public License along with
 ## this program; if not, see <http://www.gnu.org/licenses/>.
 
-# Please add the oct files openmpi_ext folder 
-# For instance 
-addpath("../src");
-# if you have 4 cores or a network of 4 computers with a ssh connection with no password and same openmpi 1.3.3 installation
-# type at the terminal mpirun -np 4 octave --eval hellostruct
-
+## -*- texinfo -*-
+## @deftypefn {Function File} {} hellostruct ()
+## This function demonstrates sending and receiving a struct over MPI.
+## Each process will send a struct to the process with rank 0, which will then display it.
+## To run this example, set the variables HOSTFILE and NUMBER_OF_MPI_NODES to appropriate values, 
+## then type the following command in your shell:
+## @example
+## mpirun --hostfile $HOSTFILE -np $NUMBER_OF_MPI_NODES octave --eval 'pkg load openmpi_ext; hellostruct ()'
+## @end example
+## @seealso{hello2dimmat,helloworld,hellocell,hellosparsemat,mc_example,montecarlo,Pi} 
+## @end deftypefn
 
-  MPI_Init();
-  # the string NEWORLD is just a label could be whatever you want 
-  CW = MPI_Comm_Load("NEWORLD");
+function hellostruct ()
 
-   
+  MPI_Init ();
+  ## the string NEWORLD is just a label; it could be whatever you want 
+  CW = MPI_Comm_Load ("NEWORLD");
 
-  my_rank = MPI_Comm_rank(CW);
-  p = MPI_Comm_size(CW);
-  # TAG is very important to identify the message
+  my_rank = MPI_Comm_rank (CW);
+  p = MPI_Comm_size (CW);
+
+  ## TAG is very important to identify the message
   TAG = 1;
 
-
-  message="";
   if (my_rank != 0)
-     message = struct('f1', {1 3; 2 4}, 'f2', 25);
-     # Could be a vector containing the list of ranks identifiers; 
-     rankvect = 0;
-     [info] = MPI_Send(message,rankvect,TAG,CW);
+    message = struct ("f1", {1 3; 2 4}, "f2", 25);
+    ## Could be a vector containing the list of ranks identifiers; 
+    rankvect = 0;
+    info = MPI_Send (message, rankvect, TAG, CW);
   else
-        for source = 1:p-1
-          disp("We are at rank 0 that is master etc..");
-          [message, info] = MPI_Recv(source,TAG,CW);
-          message
-        endfor
-  end   
-  MPI_Finalize();
+    for source = 1:p-1
+      disp ("We are at rank 0 that is master etc..");
+      [messagerec, info] = MPI_Recv (source, TAG, CW);
+      messagerec
+    endfor
+  endif
 
+  MPI_Finalize ();
+
+endfunction
+
+%!demo
+%! system ("mpirun --hostfile $HOSTFILE -np $NUMBER_OF_MPI_NODES octave -q --eval 'pkg load openmpi_ext; hellostruct ()'");
--- a/main/openmpi_ext/inst/helloworld.m	Wed Aug 29 02:56:16 2012 +0000
+++ b/main/openmpi_ext/inst/helloworld.m	Wed Aug 29 16:28:49 2012 +0000
@@ -1,4 +1,5 @@
 ## Copyright (C) 2009 Riccardo Corradini <riccardocorradini@yahoo.it>
+## Copyright (C) 2012 Carlo de Falco
 ##
 ## This program is free software; you can redistribute it and/or modify it under
 ## the terms of the GNU General Public License as published by the Free Software
@@ -13,38 +14,48 @@
 ## You should have received a copy of the GNU General Public License along with
 ## this program; if not, see <http://www.gnu.org/licenses/>.
 
-# Please add the oct files openmpi_ext folder 
-# For instance 
-addpath('../src');
-# if you have 4 cores or a network of 4 computers with a ssh connection with no password and same openmpi 1.3.3 installation
-# type at the terminal mpirun -np 4 octave --eval helloworld
+## -*- texinfo -*-
+## @deftypefn {Function File} {} helloworld ()
+## This function demonstrates sending and receiving a string message over MPI.
+## Each process will send a greeting message to the process with rank 0, which will then display it.
+## To run this example, set the variables HOSTFILE and NUMBER_OF_MPI_NODES to appropriate values, 
+## then type the following command in your shell:
+## @example
+## mpirun --hostfile $HOSTFILE -np $NUMBER_OF_MPI_NODES octave --eval 'pkg load openmpi_ext; helloworld ()'
+## @end example
+## @seealso{hello2dimmat,hellocell,hellosparsemat,hellostruct,mc_example,montecarlo,Pi} 
+## @end deftypefn
 
-
-   MPI_Init();
-   # the string NEWORLD is just a label could be whatever you want 
-   CW = MPI_Comm_Load("NEWORLD");
-
-   
+function helloworld ()
 
-  my_rank = MPI_Comm_rank(CW);
-  p = MPI_Comm_size(CW);
-  # Could be any number
-  TAG=1;
+  MPI_Init();
+  ## the string NEWORLD is just a label; it could be whatever you want 
+  CW = MPI_Comm_Load("NEWORLD");
+  
+  my_rank = MPI_Comm_rank (CW);
+  p = MPI_Comm_size (CW);
 
+  ## Could be any number
+  TAG = 1;
 
-  message="";
+  message = "";
   if (my_rank != 0)
-      message = sprintf('Greetings from process: %d!',my_rank);
-      # rankvect is the vector containing the list of rank  destination process
-      rankvect = 0;
-      [info] = MPI_Send(message,rankvect,TAG,CW);
+    message = sprintf ("Greetings from process: %d!", my_rank);
+    ## rankvect is the vector containing the ranks of the destination processes
+    rankvect = 0;
+    [info] = MPI_Send (message, rankvect, TAG, CW);    
   else
-      for source = 1:p-1
-          disp("We are at rank 0 that is master etc..");
-          [message, info] = MPI_Recv(source,TAG,CW);
-          printf('%s\n', message);
-      endfor
-  end   
-
+    for source = 1:p-1
+      disp ("We are at rank 0 that is master etc..");
+      [message, info] = MPI_Recv (source, TAG, CW);
+      printf ("%s\n", message);
+    endfor
+  endif
+  
   MPI_Finalize();
 
+endfunction
+
+%!demo
+%! system ("mpirun --hostfile $HOSTFILE -np $NUMBER_OF_MPI_NODES octave -q --eval 'pkg load openmpi_ext; helloworld ()'");
+
--- a/main/openmpi_ext/inst/mc_example.m	Wed Aug 29 02:56:16 2012 +0000
+++ b/main/openmpi_ext/inst/mc_example.m	Wed Aug 29 16:28:49 2012 +0000
@@ -13,35 +13,44 @@
 ## You should have received a copy of the GNU General Public License along with
 ## this program; if not, see <http://www.gnu.org/licenses/>.
 
-# mc_example: shows how Monte Carlo can be done using mpi, Does Monte
-# Carlo on the OLS estimator. Uses montecarlo.m
-#
-# USAGE: from the command prompt, not the octave prompt, execute
-# orterun -np 3 octave --eval mc_example
+## -*- texinfo -*-
+## @deftypefn {Function File} {} mc_example ()
+## Demonstrates how to do Monte Carlo simulation with MPI.
+## Does Monte Carlo on the OLS estimator. Uses montecarlo.m.
+## @seealso{hello2dimmat,helloworld,hellocell,hellosparsemat,Pi,montecarlo,hellostruct} 
+## @end deftypefn
+
+function mc_example ()
+
+  n = 30;
+  theta = [1;1];
 
-1;
-function betahat = olswrapper(args)
-	n = args{1};
-	theta = args{2};
-	x = [ones(n,1) randn(n,1)];
-	y = x*theta + randn(n,1);
-	betahat = ols(y,x);
-  	betahat = betahat';
+  reps = 1000;
+  f = "olswrapper";
+  args = {n, theta};
+  outfile = "mc_output";
+  n_pooled = 10;
+  verbose = true;
+
+  ## montecarlo(f, args, reps, outfile, n_pooled, false, verbose);
+
+  if not (MPI_Initialized ()) 
+    MPI_Init (); 
+  endif
+  
+  montecarlo (f, args, reps, outfile, n_pooled, verbose);
+  if not (MPI_Finalized ()) 
+    MPI_Finalize (); 
+  endif
+
 endfunction
 
-
-n = 30;
-theta = [1;1];
+function betahat = olswrapper (args)
+  n = args{1};
+  theta = args{2};
+  x = [ones(n,1) randn(n,1)];
+  y = x*theta + randn(n,1);
+  betahat = ols(y,x);
+  betahat = betahat';
+endfunction
 
-reps = 1000;
-f = "olswrapper";
-args = {n, theta};
-outfile = "mc_output";
-n_pooled = 10;
-verbose = true;
-
-# montecarlo(f, args, reps, outfile, n_pooled, false, verbose);
-
-if not(MPI_Initialized) MPI_Init; endif
-montecarlo(f, args, reps, outfile, n_pooled, verbose);
-if not(MPI_Finalized) MPI_Finalize; endif
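Each line that montecarlo appends to the output file starts with the contributing node number and the elapsed time, followed by the replication's output, here the two OLS coefficients. A hedged sketch of how the results of mc_example could be inspected after a run ("mc_output" is the file name set above):

  out = dlmread ("mc_output");  # one row per replication
  betahat = out(:, 3:end);      # drop the node-id and elapsed-time columns
  mean (betahat)                # should be close to theta' = [1 1]
  std (betahat)                 # Monte Carlo spread of the OLS estimator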
--- a/main/openmpi_ext/inst/montecarlo.m	Wed Aug 29 02:56:16 2012 +0000
+++ b/main/openmpi_ext/inst/montecarlo.m	Wed Aug 29 16:28:49 2012 +0000
@@ -13,124 +13,129 @@
 ## You should have received a copy of the GNU General Public License along with
 ## this program; if not, see <http://www.gnu.org/licenses/>.
 
-# montecarlo.m: generates a specified number of replications of a function's
-# output and writes them to a user-specified output file.
-#
-# USAGE: montecarlo(f,f_args,reps,outfile,n_pooled,n_returns,usempi, verbose)
-#
-# IMPORTANT: f should return a row vector of output from feval(f,f_args)
-#
-# For normal evaluation on one core, only the first 4 arguments are required.
-# * Arg 1: (required) the function that generates a row vector of output
-# * Arg 2: (required) the arguments of the function, in a cell
-# * Arg 3: (required) the number of replications to generate
-# * Arg 4: (required) the output file name
-# * Arg 5 (optional) number of replications to be pooled together between writes
-# * Arg 6 (optional) verbose: 1 for on, 0 for off
-#
-# If using MPI, you should run using ranks equal to number of cores plus 1,
-# and should make sure that the core running the frontend is also the one that
-# has the second rank. That way the core the frontend is on will also do work.
+## -*- texinfo -*-
+## @deftypefn {Function File} {@var{n_received} =} @
+## montecarlo (@var{f}, @var{f_args}, @var{reps}, @var{outfile}, @var{n_pooled}, @var{verbose})
+## Generate a specified number of replications of a function's
+## output and write them to a user-specified output file.
+##
+## IMPORTANT: @var{f} should return a row vector of output from feval (f, f_args)
+##
+## For normal evaluation on one core, only the first 4 arguments are required.
+##
+## @itemize @minus
+## @item Arg 1: (required) the function that generates a row vector of output
+## @item Arg 2: (required) the arguments of the function, in a cell
+## @item Arg 3: (required) the number of replications to generate
+## @item Arg 4: (required) the output file name
+## @item Arg 5: (optional) number of replications to be pooled together between writes
+## @item Arg 6: (optional) verbose: 1 for on, 0 for off
+## @end itemize
+##
+## If using MPI, you should run with a number of ranks equal to the number of cores plus 1,
+## and should make sure that the core running the frontend is also the one that
+## hosts the second rank. That way the core the frontend is on will also do work.
+## @end deftypefn
 
-function n_received = montecarlo(f,f_args,reps,outfile,n_pooled,verbose)
+function n_received = montecarlo (f, f_args, reps, outfile, n_pooled, verbose)
 
-	t0 = clock(); # initialize timing
+  t0 = clock(); # initialize timing
 
-	# defaults for optional arguments
-	if (nargin < 6) verbose = false; endif
-	if (nargin < 5)	n_pooled = 1; endif;
+  ## defaults for optional arguments
+  if (nargin < 6) verbose = false; endif
+  if (nargin < 5) n_pooled = 1; endif;
 
-	if MPI_Initialized 	# check if doing this parallel or serial
-		use_mpi = true;
-		CW = MPI_Comm_Load("NEWORLD");
-		is_node = MPI_Comm_rank(CW);
-		nodes = MPI_Comm_size(CW);
-		mytag = 48;
-	else
-		use_mpi = false;
-		is_node = 0;
-	endif
+  if MPI_Initialized      # check if doing this parallel or serial
+    use_mpi = true;
+    CW = MPI_Comm_Load("NEWORLD");
+    is_node = MPI_Comm_rank(CW);
+    nodes = MPI_Comm_size(CW);
+    mytag = 48;
+  else
+    use_mpi = false;
+    is_node = 0;
+  endif
 
-	if is_node # compute nodes
-		more_please = 1;
-		while more_please
-			for i = 1:n_pooled
-				contrib = feval(f, f_args);
-				contribs(i,:) = contrib;
-			endfor
-			MPI_Send(contribs, 0, mytag, CW);
-			pause(0.05); # give time for the fronted to send a stop message, if done
-			# check if we're done
-			if (MPI_Iprobe(0, is_node, CW)) # check for ping from rank 0
-				junk = MPI_Recv(0, is_node, CW);
-				break;
-			endif
-		endwhile
-	else # frontend
-		received = 0;
-		done = false;
-		while received < reps
-			if use_mpi
-				# retrieve results from compute nodes
-				for i = 1:nodes-1
-					# compute nodes have results yet?
-					ready = false;
-					ready = MPI_Iprobe(i, mytag, CW); # check if message pending
-					if ready
-						# get it if it's there
-						contribs = MPI_Recv(i, mytag, CW);
-						need = reps - received;
-						received = received + n_pooled;
-						# truncate?
-						if n_pooled  >= need
-								contribs = contribs(1:need,:);
-								done = true;
-						endif
-						# write to output file
-						FN = fopen (outfile, "a");
-						if (FN < 0) error ("montecarlo: couldn't open output file %s", outfile); endif
-						t = etime(clock(), t0);
-						for j = 1:rows(contribs)
-							fprintf(FN, "%f ", i, t, contribs(j,:));
-							fprintf(FN, "\n");
-						endfor
-						fclose(FN);
-						if verbose printf("\nContribution received from node%d.  Received so far: %d\n", i, received); endif
-						if done
-							# tell compute nodes to stop loop
-							for j = 1:5
-								for i = 1:(nodes-1)
-									if (j==1) MPI_Send(" ",i,i,CW); endif # send out message to stop
-									ready = MPI_Iprobe(i, mytag, CW); # get last messages
-									if ready contribs = MPI_Recv(i, mytag, CW); endif
-								endfor
-							endfor
-							break;
-						endif	
-					endif
-				endfor
-			else
-				for i = 1:n_pooled
-					contrib = feval(f, f_args);
-					contribs(i,:) = contrib;
-				endfor
-				need = reps - received;
-				received = received + n_pooled;
-				# truncate?
-				if n_pooled  >= need
-					contribs = contribs(1:need,:);
-				endif
-				# write to output file
-				FN = fopen (outfile, "a");
-				if (FN < 0) error ("montecarlo: couldn't open output file %s", outfile); endif
-				t = etime(clock(), t0);
-				for j = 1:rows(contribs)
-					fprintf(FN, "%f ", 0, t, contribs(j,:));
-					fprintf(FN, "\n");
-				endfor
-				fclose(FN);
-				if verbose printf("\nContribution received from node 0.  Received so far: %d\n", received); endif
-			endif
-		endwhile
-	endif
+  if is_node # compute nodes
+    more_please = 1;
+    while more_please
+      for i = 1:n_pooled
+        contrib = feval(f, f_args);
+        contribs(i,:) = contrib;
+      endfor
+      MPI_Send(contribs, 0, mytag, CW);
+      pause(0.05); # give time for the frontend to send a stop message, if done
+                   # check if we're done
+      if (MPI_Iprobe(0, is_node, CW)) # check for ping from rank 0
+        junk = MPI_Recv(0, is_node, CW);
+        break;
+      endif
+    endwhile
+  else # frontend
+    received = 0;
+    done = false;
+    while received < reps
+      if use_mpi
+        ## retrieve results from compute nodes
+        for i = 1:nodes-1
+          ## compute nodes have results yet?
+          ready = false;
+          ready = MPI_Iprobe(i, mytag, CW); # check if message pending
+          if ready
+            ## get it if it's there
+            contribs = MPI_Recv(i, mytag, CW);
+            need = reps - received;
+            received = received + n_pooled;
+            ## truncate?
+            if n_pooled  >= need
+              contribs = contribs(1:need,:);
+              done = true;
+            endif
+            ## write to output file
+            FN = fopen (outfile, "a");
+            if (FN < 0) error ("montecarlo: couldn't open output file %s", outfile); endif
+            t = etime(clock(), t0);
+            for j = 1:rows(contribs)
+              fprintf(FN, "%f ", i, t, contribs(j,:));
+              fprintf(FN, "\n");
+            endfor
+            fclose(FN);
+            if verbose printf("\nContribution received from node %d.  Received so far: %d\n", i, received); endif
+            if done
+              ## tell compute nodes to stop loop
+              for j = 1:5
+                for i = 1:(nodes-1)
+                  if (j==1) MPI_Send(" ",i,i,CW); endif # send out message to stop
+                  ready = MPI_Iprobe(i, mytag, CW); # get last messages
+                  if ready contribs = MPI_Recv(i, mytag, CW); endif
+                endfor
+              endfor
+              break;
+            endif   
+          endif
+        endfor
+      else
+        for i = 1:n_pooled
+          contrib = feval(f, f_args);
+          contribs(i,:) = contrib;
+        endfor
+        need = reps - received;
+        received = received + n_pooled;
+        ## truncate?
+        if n_pooled  >= need
+          contribs = contribs(1:need,:);
+        endif
+        ## write to output file
+        FN = fopen (outfile, "a");
+        if (FN < 0) error ("montecarlo: couldn't open output file %s", outfile); endif
+        t = etime(clock(), t0);
+        for j = 1:rows(contribs)
+          fprintf(FN, "%f ", 0, t, contribs(j,:));
+          fprintf(FN, "\n");
+        endfor
+        fclose(FN);
+        if verbose printf("\nContribution received from node 0.  Received so far: %d\n", received); endif
+      endif
+    endwhile
+  endif
 endfunction
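For the serial case described in the help text, only the first four arguments are required. A minimal hedged usage sketch, with openmpi_ext loaded and a hypothetical replication function saved as toyfun.m:

  function out = toyfun (args)   ## hypothetical example; ignores its cell argument
    out = randn (1, 2);          ## must return a row vector
  endfunction

  montecarlo ("toyfun", {}, 100, "toy_output", 10, true);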
--- a/main/openmpi_ext/src/MPI_Barrier.cc	Wed Aug 29 02:56:16 2012 +0000
+++ b/main/openmpi_ext/src/MPI_Barrier.cc	Wed Aug 29 16:28:49 2012 +0000
@@ -27,22 +27,22 @@
 
 DEFUN_DLD (NAME, args, ,
 "-*- texinfo -*-\n\
-@deftypefn {Loadable Function} {} @var{INFO} = MPI_Barrier (@var{COMM})\n \
-Blocks until all processes in the communicator have reached this routine.\n \
-If @var{COMM} octave comunicator object loaded with MPI_Comm_Load is omitted \n \
-returns an error. \n                                                    \
- @example\n                                                             \
- @group\n                                                               \
-    @var{INFO} (int) return code\n                                      \
-       0 MPI_SUCCESS    No error\n                                      \
-       5 MPI_ERR_COMM   Invalid communicator (NULL?)\n                  \
-      13 MPI_ERR_ARG    Invalid argument (typically a NULL pointer?)\n  \
-@end group\n                                                            \
-@end example\n                                                          \
+@deftypefn {Loadable Function} {@var{INFO} =} MPI_Barrier (@var{COMM})\n\
+Blocks until all processes in the communicator have reached this routine.\n\
+If @var{COMM}, an Octave communicator object loaded with MPI_Comm_Load, is omitted,\n\
+an error is returned. \n\
+ @example\n\
+ @group\n\
+    @var{INFO} (int) return code\n\
+       0 MPI_SUCCESS    No error\n\
+       5 MPI_ERR_COMM   Invalid communicator (NULL?)\n\
+      13 MPI_ERR_ARG    Invalid argument (typically a NULL pointer?)\n\
+@end group\n\
+@end example\n\
 @end deftypefn")
 {
 
-  octave_value results;
+  octave_value_list results;
   int nargin = args.length ();
 
   if (nargin != 1)
@@ -53,7 +53,7 @@
          || args(0).type_id () != simple::static_type_id ())
         {
           error ("MPI_Barrier: Please enter octave comunicator object");
-          results(0) = octave_value(-1);
+          results(0) = octave_value (-1);
         }
       else
         {
@@ -72,14 +72,12 @@
               int my_size;
               int info = MPI_Barrier (comm);
               
-              results = info;
+              results(0) = octave_value (info);
             }
           else
             print_usage ();
         }
     }
-  comm= NULL;
-  /* [info] = MPI_Barrier (comm) */
    
   return results;
 }
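A typical use of the MPI_Barrier wrapper, as in hellosparsemat above, is to serialize output from several ranks. A hedged sketch of the pattern (assumes the package is loaded and the script is launched under mpirun):

  MPI_Init ();
  CW = MPI_Comm_Load ("NEWORLD");
  rnk = MPI_Comm_rank (CW);
  for turn = 0:MPI_Comm_size (CW) - 1
    if (rnk == turn)
      printf ("rank %d checking in\n", rnk);   # only one rank prints per turn
    endif
    info = MPI_Barrier (CW);                   # 0 = MPI_SUCCESS
  endfor
  MPI_Finalize ();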
--- a/main/openmpi_ext/src/MPI_Comm_Test.cc	Wed Aug 29 02:56:16 2012 +0000
+++ b/main/openmpi_ext/src/MPI_Comm_Test.cc	Wed Aug 29 16:28:49 2012 +0000
@@ -33,12 +33,12 @@
 @end example\n\
 @end deftypefn")
 {
-  octave_value retval;
+  octave_value_list results;
   if(args.length() != 1 
      || args(0).type_id () != simple::static_type_id ())
     {
       print_usage ();
-      results = octave_value (-1);
+      results(0) = octave_value (-1);
     }
   else
     {
@@ -52,7 +52,7 @@
       const simple& b = ((const simple &)rep);
       //octave_stdout << "MPI_Comm_Test has " << b.name_value()  << " output arguments.\n";
       MPI_Comm res = b.comunicator_value ();
-      retval = b.name_value ();
+      results(0) = octave_value (b.name_value ());
     }
-  return retval;
+  return results;
 }
--- a/main/openmpi_ext/src/MPI_Comm_rank.cc	Wed Aug 29 02:56:16 2012 +0000
+++ b/main/openmpi_ext/src/MPI_Comm_rank.cc	Wed Aug 29 16:28:49 2012 +0000
@@ -64,6 +64,7 @@
             {
               int my_rank;
               int info = MPI_Comm_rank (comm, &my_rank);
+              //std::cout << my_rank << std::endl;
               if (nargout > 1)
                 results(1) = info;
 
--- a/main/openmpi_ext/src/MPI_Comm_size.cc	Wed Aug 29 02:56:16 2012 +0000
+++ b/main/openmpi_ext/src/MPI_Comm_size.cc	Wed Aug 29 16:28:49 2012 +0000
@@ -58,8 +58,8 @@
       if ((args.length() != 1 )
           || args(0).type_id () != simple::static_type_id ())
         {		
-          error("MPI_Comm_size: Please enter octave comunicator object");
-          results(0) = octave_value(-1);
+          error ("MPI_Comm_size: Please enter octave comunicator object");
+          results(0) = octave_value (-1);
 	}
       else
         {
@@ -78,7 +78,7 @@
           else
             print_usage ();
         }
-      comm = NULL;
+      // comm = NULL;
       /* [size info] = MPI_Comm_size (comm) */   
     }
   return results;
--- a/main/openmpi_ext/src/MPI_Probe.cc	Wed Aug 29 02:56:16 2012 +0000
+++ b/main/openmpi_ext/src/MPI_Probe.cc	Wed Aug 29 16:28:49 2012 +0000
@@ -87,7 +87,7 @@
           int src = args(0).int_value ();    
           int tag = args(1).int_value ();    
           
-          if (! error_status)
+          if (! error_state)
             {
               MPI_Status stat = {0, 0, 0, 0};
               int info = MPI_Probe (src, tag, comm, &stat);
--- a/main/openmpi_ext/src/MPI_Recv.cc	Wed Aug 29 02:56:16 2012 +0000
+++ b/main/openmpi_ext/src/MPI_Recv.cc	Wed Aug 29 16:28:49 2012 +0000
@@ -349,7 +349,7 @@
           for (octave_idx_type i = 0; i < s[2]; i++)
             {
               m.ridx(i) = sridx[i];
-              m.data(i) = real(LBNDA1[i])+imag(LBNDA2[i]);
+              m.data(i) = real (LBNDA1[i]) + imag (LBNDA2[i]);
             }
           
           ov = m;
--- a/main/openmpi_ext/src/MPI_Send.cc	Wed Aug 29 02:56:16 2012 +0000
+++ b/main/openmpi_ext/src/MPI_Send.cc	Wed Aug 29 16:28:49 2012 +0000
@@ -24,11 +24,8 @@
 /* along the datatype   */
 
 int send_class (MPI_Comm comm, octave_value ov, ColumnVector rankrec, int mytag);       
-
 int send_string (int t_id, MPI_Comm comm, std::string  oi8, ColumnVector rankrec, int mytag);
-
 int send_cell (int t_id, MPI_Comm comm, Cell cell, ColumnVector rankrec, int mytag);
-
 int send_struct (int t_id, MPI_Comm comm, Octave_map map,ColumnVector rankrec, int mytag);
 
 template <class Any>
@@ -38,9 +35,7 @@
 int send_scalar (int t_id, MPI_Datatype TSnd, MPI_Comm comm, Any d, ColumnVector rankrec, int mytag);
 
 int send_range (int t_id, MPI_Comm comm, Range range, ColumnVector rankrec, int mytag);
-
 int send_matrix (int t_id, MPI_Datatype TSnd, MPI_Comm comm, octave_value myOv, ColumnVector rankrec, int mytag);
-
 int send_sp_mat (int t_id, MPI_Datatype TSnd, MPI_Comm comm, octave_value MyOv, ColumnVector rankrec, int mytag);
 
 // template specialization for complex case
@@ -280,6 +275,7 @@
 
 int send_sp_mat (int t_id, MPI_Datatype TSnd, MPI_Comm comm, octave_value MyOv, ColumnVector rankrec, int mytag)
 {
+
   int info;
   OCTAVE_LOCAL_BUFFER(int,tanktag,6);
   tanktag[0] = mytag;
@@ -289,7 +285,7 @@
   tanktag[4] = mytag+4;
   tanktag[5] = mytag+5;
   
-#define __MAKE_TYPE_BRANCH__(TMPI,T1,T2,A1)                             \
+#define __MAKE_TYPE_BRANCH__(TMPI,T0,T1,T2,A1)                          \
   if (TSnd == TMPI and MyOv.T1)                                         \
     {                                                                   \
       OCTAVE_LOCAL_BUFFER(int,s,3);                                     \
@@ -316,7 +312,7 @@
       for (octave_idx_type ix = 0; ix < m.cols () + 1; ix++)            \
         scidx[ix]= m.cidx(ix);                                          \
                                                                         \
-      OCTAVE_LOCAL_BUFFER(bool,sdata,m.capacity ());                    \
+      OCTAVE_LOCAL_BUFFER(T0,sdata,m.capacity ());                      \
                                                                         \
       for (octave_idx_type ix = 0; ix < m.capacity (); ix++)            \
         {                                                               \
@@ -353,8 +349,8 @@
         }                                                               \
     }                                                                   
 
-  __MAKE_TYPE_BRANCH__(MPI_INT,is_bool_type(),SparseBoolMatrix,sparse_bool_matrix_value())
-  else __MAKE_TYPE_BRANCH__(MPI_DOUBLE,is_real_type(),SparseMatrix,sparse_matrix_value())
+  __MAKE_TYPE_BRANCH__(MPI_INT,bool,is_bool_type(),SparseBoolMatrix,sparse_bool_matrix_value())
+  else __MAKE_TYPE_BRANCH__(MPI_DOUBLE,double,is_real_type(),SparseMatrix,sparse_matrix_value())
   else if (TSnd == MPI_DOUBLE and MyOv.is_complex_type ())
     { 
       SparseComplexMatrix m = MyOv.sparse_complex_matrix_value ();
@@ -477,7 +473,7 @@
   // nd for number of dimensions
   // dimvec derived datatype
   // item of cell
-  int n = cell.capacity();
+  int n = cell.capacity ();
   int info;
   int tanktag[5];
   tanktag[0] = mytag;
@@ -502,9 +498,9 @@
   // Now start the big loop
   for (octave_idx_type i = 0; i < rankrec.nelem (); i++)
     {
-      info = MPI_Send (&t_id, 1, MPI_INT, rankrec(i), 
+      info = MPI_Send (&t_id, 1, MPI_INT, int (rankrec(i)),
                        tanktag[0], comm);
-      if (info !=MPI_SUCCESS) 
+      if (info != MPI_SUCCESS) 
         return info;
       info = MPI_Send (&n, 1, MPI_INT, rankrec(i), 
                        tanktag[1], comm);
@@ -519,24 +515,23 @@
                        tanktag[3], comm);
       if (info != MPI_SUCCESS) 
         return info;
+      
+      int cap;
+      // Now focus on every single octave_value
+      for (octave_idx_type j = 0; j < n; j++)
+        {
+          octave_value ov = cell.data ()[j];
+          cap = ov.capacity ();
+          info = MPI_Send (&cap, 1, MPI_INT, rankrec(i), 
+                           newtag, comm);
+          if (info != MPI_SUCCESS) 
+            return info;
+          newtag = newtag + ov.capacity ();
+          info = send_class (comm, ov, rankrec, newtag);
+          if (info != MPI_SUCCESS) 
+            return info;
+        }                                       
     }
-
-  int cap;
-  // Now focus on every single octave_value
-  for (octave_idx_type i=0; i<n; i++)
-    {
-      octave_value ov = cell.data ()[i];
-      cap = ov.capacity ();
-      info = MPI_Send (&cap, 1, MPI_INT, rankrec(i), 
-                       newtag, comm);
-      if (info != MPI_SUCCESS) 
-        return info;
-      newtag = newtag + ov.capacity ();
-      info = send_class (comm, ov, rankrec, newtag);
-      if (info != MPI_SUCCESS) 
-        return info;
-    }                                       
-
   return (info); 
 }
 
@@ -574,9 +569,8 @@
       int   ntagCell = ntagkey + 1;
 
       // iterate through keys(fnames)
-      Octave_map::const_iterator p = map.begin (); 
       int scap;
-      for (octave_idx_type i=0; p != map.end (); p++, i++)
+      for (Octave_map::const_iterator p = map.begin (); p != map.end (); p++)
         {
           // field name
           std::string key = map.key (p);