[cig-commits] r13235 - in seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta: DATA setup src

dkomati1 at geodynamics.org
Mon Nov 3 15:37:15 PST 2008


Author: dkomati1
Date: 2008-11-03 15:37:15 -0800 (Mon, 03 Nov 2008)
New Revision: 13235

Modified:
   seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/DATA/Par_file
   seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/setup/constants.h
   seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/assemble_MPI_scalar.F90
   seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/assemble_MPI_scalar_block.F90
   seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/assemble_MPI_vector.F90
   seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/assemble_MPI_vector_block.F90
   seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/compute_forces_CM_IC.F90
   seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/compute_forces_OC.F90
   seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/create_regions_mesh.F90
   seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/fix_non_blocking_slices.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/get_MPI_cutplanes_eta.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/get_MPI_cutplanes_xi.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/main_program.F90
   seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/meshfem3D.F90
   seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/read_compute_parameters.F90
   seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/specfem3D.F90
Log:
debugged support for mesh sizes that are multiples of 8 (cutting the superbrick in two)


Modified: seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/DATA/Par_file
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/DATA/Par_file	2008-11-03 23:07:03 UTC (rev 13234)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/DATA/Par_file	2008-11-03 23:37:15 UTC (rev 13235)
@@ -14,7 +14,7 @@
 GAMMA_ROTATION_AZIMUTH        =  0.d0
 
 # number of elements at the surface along the two sides of the first chunk
-# (must be multiple of 16 and also of 8 * multiple of NPROC below)
+# (must be a multiple of 16 and also of 8 * NPROC below)
 NEX_XI                          = 128
 NEX_ETA                         = 128
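
A minimal sketch of the corrected constraint as a standalone check, assuming NPROC stands for the NPROC_XI / NPROC_ETA values set further down in the same Par_file:

  program check_nex
    implicit none
    integer, parameter :: NEX_XI = 128, NPROC = 4  ! example values only
    ! the two divisibility conditions stated in the comment above
    if (mod(NEX_XI, 16) /= 0)      stop 'NEX_XI must be a multiple of 16'
    if (mod(NEX_XI, 8*NPROC) /= 0) stop 'NEX_XI must be a multiple of 8 * NPROC'
    print *, 'NEX_XI = ', NEX_XI, ' is valid for NPROC = ', NPROC
  end program check_nex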
 

Modified: seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/setup/constants.h
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/setup/constants.h	2008-11-03 23:07:03 UTC (rev 13234)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/setup/constants.h	2008-11-03 23:37:15 UTC (rev 13235)
@@ -40,7 +40,6 @@
   integer, parameter :: ELEMENTS_NONBLOCKING_CM_IC = 1500
   integer, parameter :: ELEMENTS_NONBLOCKING_OC = 3000
 
-  logical, parameter :: DEBUG_NONBLOCKING_COMMS = .false.
   logical, parameter :: DEBUG_USING_OPENDX = .false.
 
 !! DK DK temporary patch for the large Gordon Bell runs: set RECEIVERS_CAN_BE_BURIED

Modified: seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/assemble_MPI_scalar.F90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/assemble_MPI_scalar.F90	2008-11-03 23:07:03 UTC (rev 13234)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/assemble_MPI_scalar.F90	2008-11-03 23:37:15 UTC (rev 13235)
@@ -36,7 +36,7 @@
             iboolfaces,iboolcorner, &
             iprocfrom_faces,iprocto_faces, &
             iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
-            buffer_send_faces_scalar,buffer_received_faces_scalar,npoin2D_real_size, &
+            buffer_send_faces_scalar,buffer_received_faces_scalar,npoin2D_max_all_CM_IC, &
             buffer_send_chunkcorners_scalar,buffer_recv_chunkcorners_scalar, &
             NUMMSGS_FACES,NCORNERSCHUNKS, &
             NPROC_XI,NPROC_ETA,NGLOB1D_RADIAL, &
@@ -78,9 +78,9 @@
   integer, dimension(NGLOB1D_RADIAL,NUMCORNERS_SHARED) :: iboolcorner
   integer icount_corners
 
-  integer :: npoin2D_real_size
+  integer :: npoin2D_max_all_CM_IC
   integer, dimension(NGLOB2DMAX_XY,NUMFACES_SHARED) :: iboolfaces
-  real(kind=CUSTOM_REAL), dimension(npoin2D_real_size,NUMFACES_SHARED) :: buffer_send_faces_scalar,buffer_received_faces_scalar
+  real(kind=CUSTOM_REAL), dimension(npoin2D_max_all_CM_IC,NUMFACES_SHARED) :: buffer_send_faces_scalar,buffer_received_faces_scalar
 
 ! buffers for send and receive between corners of the chunks
   real(kind=CUSTOM_REAL), dimension(NGLOB1D_RADIAL) :: buffer_send_chunkcorners_scalar,buffer_recv_chunkcorners_scalar
@@ -151,16 +151,6 @@
     receiver = addressing(ichunk,iproc_xi + 1,iproc_eta)
   endif
 #ifdef USE_MPI
-! call MPI_SENDRECV(buffer_send_faces_scalar,npoin2D_xi(2),CUSTOM_MPI_TYPE,receiver, &
-!       itag2,buffer_received_faces_scalar,npoin2D_xi(1),CUSTOM_MPI_TYPE,sender, &
-!       itag,MPI_COMM_WORLD,msg_status,ier)
-
-! call MPI_RECV(buffer_received_faces_scalar,npoin2D_xi(1),CUSTOM_MPI_TYPE,sender, &
-!       itag,MPI_COMM_WORLD,msg_status,ier)
-
-! call MPI_SEND(buffer_send_faces_scalar,npoin2D_xi(2),CUSTOM_MPI_TYPE,receiver, &
-!       itag2,MPI_COMM_WORLD,ier)
-
   call MPI_IRECV(buffer_received_faces_scalar,npoin2D_xi(1),CUSTOM_MPI_TYPE,sender, &
         itag,MPI_COMM_WORLD,request_receive,ier)
 
@@ -216,16 +206,6 @@
     receiver = addressing(ichunk,iproc_xi - 1,iproc_eta)
   endif
 #ifdef USE_MPI
-! call MPI_SENDRECV(buffer_send_faces_scalar,npoin2D_xi(1),CUSTOM_MPI_TYPE,receiver, &
-!       itag2,buffer_received_faces_scalar,npoin2D_xi(2),CUSTOM_MPI_TYPE,sender, &
-!       itag,MPI_COMM_WORLD,msg_status,ier)
-
-! call MPI_RECV(buffer_received_faces_scalar,npoin2D_xi(2),CUSTOM_MPI_TYPE,sender, &
-!       itag,MPI_COMM_WORLD,msg_status,ier)
-
-! call MPI_SEND(buffer_send_faces_scalar,npoin2D_xi(1),CUSTOM_MPI_TYPE,receiver, &
-!       itag2,MPI_COMM_WORLD,ier)
-
   call MPI_IRECV(buffer_received_faces_scalar,npoin2D_xi(2),CUSTOM_MPI_TYPE,sender, &
         itag,MPI_COMM_WORLD,request_receive,ier)
 
@@ -282,16 +262,6 @@
     receiver = addressing(ichunk,iproc_xi,iproc_eta + 1)
   endif
 #ifdef USE_MPI
-! call MPI_SENDRECV(buffer_send_faces_scalar,npoin2D_eta(2),CUSTOM_MPI_TYPE,receiver, &
-!   itag2,buffer_received_faces_scalar,npoin2D_eta(1),CUSTOM_MPI_TYPE,sender, &
-!   itag,MPI_COMM_WORLD,msg_status,ier)
-
-! call MPI_RECV(buffer_received_faces_scalar,npoin2D_eta(1),CUSTOM_MPI_TYPE,sender, &
-!   itag,MPI_COMM_WORLD,msg_status,ier)
-
-! call MPI_SEND(buffer_send_faces_scalar,npoin2D_eta(2),CUSTOM_MPI_TYPE,receiver, &
-!   itag2,MPI_COMM_WORLD,ier)
-
   call MPI_IRECV(buffer_received_faces_scalar,npoin2D_eta(1),CUSTOM_MPI_TYPE,sender, &
     itag,MPI_COMM_WORLD,request_receive,ier)
 
@@ -347,16 +317,6 @@
     receiver = addressing(ichunk,iproc_xi,iproc_eta - 1)
   endif
 #ifdef USE_MPI
-! call MPI_SENDRECV(buffer_send_faces_scalar,npoin2D_eta(1),CUSTOM_MPI_TYPE,receiver, &
-!   itag2,buffer_received_faces_scalar,npoin2D_eta(2),CUSTOM_MPI_TYPE,sender, &
-!   itag,MPI_COMM_WORLD,msg_status,ier)
-
-! call MPI_RECV(buffer_received_faces_scalar,npoin2D_eta(2),CUSTOM_MPI_TYPE,sender, &
-!   itag,MPI_COMM_WORLD,msg_status,ier)
-
-! call MPI_SEND(buffer_send_faces_scalar,npoin2D_eta(1),CUSTOM_MPI_TYPE,receiver, &
-!   itag2,MPI_COMM_WORLD,ier)
-
   call MPI_IRECV(buffer_received_faces_scalar,npoin2D_eta(2),CUSTOM_MPI_TYPE,sender, &
     itag,MPI_COMM_WORLD,request_receive,ier)
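
The deleted comment blocks above were the old blocking alternatives (MPI_SENDRECV, or MPI_RECV followed by MPI_SEND); only the non-blocking MPI_IRECV / MPI_ISSEND pair is kept. A minimal sketch of that pattern with hypothetical buffer names and counts (the real routine does not wait in place; it checks for completion when it re-enters at the next phase):

  call MPI_IRECV(buffer_recv, npoin_left, CUSTOM_MPI_TYPE, sender, &
                 itag, MPI_COMM_WORLD, request_receive, ier)
  call MPI_ISSEND(buffer_send, npoin_right, CUSTOM_MPI_TYPE, receiver, &
                  itag2, MPI_COMM_WORLD, request_send, ier)
  ! ... compute on elements away from the cut planes while messages are in flight ...
  call MPI_WAIT(request_receive, MPI_STATUS_IGNORE, ier)
  call MPI_WAIT(request_send, MPI_STATUS_IGNORE, ier)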
 

Modified: seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/assemble_MPI_scalar_block.F90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/assemble_MPI_scalar_block.F90	2008-11-03 23:07:03 UTC (rev 13234)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/assemble_MPI_scalar_block.F90	2008-11-03 23:37:15 UTC (rev 13235)
@@ -36,7 +36,7 @@
             iboolfaces,iboolcorner, &
             iprocfrom_faces,iprocto_faces,imsg_type, &
             iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
-            buffer_send_faces_scalar,buffer_received_faces_scalar,npoin2D_max_all, &
+            buffer_send_faces_scalar,buffer_received_faces_scalar,npoin2D_max_all_CM_IC, &
             buffer_send_chunkcorners_scalar,buffer_recv_chunkcorners_scalar, &
             NUMMSGS_FACES,NUM_MSG_TYPES,NCORNERSCHUNKS, &
             NPROC_XI,NPROC_ETA,NGLOB1D_RADIAL, &
@@ -78,9 +78,9 @@
   integer, dimension(NGLOB1D_RADIAL,NUMCORNERS_SHARED) :: iboolcorner
   integer icount_corners
 
-  integer :: npoin2D_max_all
+  integer :: npoin2D_max_all_CM_IC
   integer, dimension(NGLOB2DMAX_XY,NUMFACES_SHARED) :: iboolfaces
-  real(kind=CUSTOM_REAL), dimension(npoin2D_max_all) :: buffer_send_faces_scalar,buffer_received_faces_scalar
+  real(kind=CUSTOM_REAL), dimension(npoin2D_max_all_CM_IC) :: buffer_send_faces_scalar,buffer_received_faces_scalar
 
 ! buffers for send and receive between corners of the chunks
   real(kind=CUSTOM_REAL), dimension(NGLOB1D_RADIAL) :: buffer_send_chunkcorners_scalar,buffer_recv_chunkcorners_scalar
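
buffer_send_faces_scalar and buffer_received_faces_scalar above are explicit-shape dummy arrays whose extent is itself a dummy argument (the renamed npoin2D_max_all_CM_IC), so the caller fixes the buffer size at each call. A minimal self-contained sketch of that Fortran idiom:

  program sized_by_dummy
    implicit none
    real :: b(10)
    call fill(b, 10)          ! the caller supplies the extent
    print *, b(1), b(10)
  contains
    subroutine fill(buf, n)
      integer :: n
      real :: buf(n)          ! explicit-shape dummy sized by the dummy n
      buf(1:n) = 1.
    end subroutine fill
  end program sized_by_dummy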

Modified: seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/assemble_MPI_vector.F90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/assemble_MPI_vector.F90	2008-11-03 23:07:03 UTC (rev 13234)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/assemble_MPI_vector.F90	2008-11-03 23:37:15 UTC (rev 13235)
@@ -41,7 +41,7 @@
             iboolfaces_inner_core,iboolcorner_inner_core, &
             iprocfrom_faces,iprocto_faces, &
             iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
-            buffer_send_faces_vector,buffer_received_faces_vector,npoin2D_real_size, &
+            buffer_send_faces_vector,buffer_received_faces_vector,npoin2D_max_all_CM_IC, &
             buffer_send_chunkcorners_vector,buffer_recv_chunkcorners_vector, &
             NUMMSGS_FACES,NCORNERSCHUNKS, &
             NPROC_XI,NPROC_ETA,NGLOB1D_RADIAL_crust_mantle, &
@@ -69,11 +69,12 @@
   real(kind=CUSTOM_REAL), dimension(NDIM,NGLOB_INNER_CORE) :: accel_inner_core
 
   integer iproc_xi,iproc_eta,ichunk
-  integer npoin2D_xi_crust_mantle,npoin2D_eta_crust_mantle
   integer npoin2D_faces_crust_mantle(NUMFACES_SHARED)
-  integer npoin2D_xi_inner_core,npoin2D_eta_inner_core
   integer npoin2D_faces_inner_core(NUMFACES_SHARED)
 
+  integer, dimension(NB_SQUARE_EDGES_ONEDIR) :: npoin2D_xi_crust_mantle,npoin2D_eta_crust_mantle, &
+        npoin2D_xi_inner_core,npoin2D_eta_inner_core
+
   integer NGLOB1D_RADIAL_crust_mantle,NGLOB1D_RADIAL_inner_core,NPROC_XI,NPROC_ETA
   integer NUMMSGS_FACES,NCORNERSCHUNKS
 
@@ -91,10 +92,11 @@
   integer, dimension(NGLOB1D_RADIAL_inner_core,NUMCORNERS_SHARED) :: iboolcorner_inner_core
   integer icount_corners
 
-  integer :: npoin2D_real_size
+  integer :: npoin2D_max_all_CM_IC
   integer, dimension(NGLOB2DMAX_XY_VAL_CM,NUMFACES_SHARED) :: iboolfaces_crust_mantle
   integer, dimension(NGLOB2DMAX_XY_VAL_IC,NUMFACES_SHARED) :: iboolfaces_inner_core
-  real(kind=CUSTOM_REAL), dimension(NDIM,npoin2D_real_size,NUMFACES_SHARED) :: buffer_send_faces_vector,buffer_received_faces_vector
+  real(kind=CUSTOM_REAL), dimension(NDIM,npoin2D_max_all_CM_IC,NUMFACES_SHARED) :: &
+      buffer_send_faces_vector,buffer_received_faces_vector
 
 ! buffers for send and receive between corners of the chunks
 ! size of buffers is the sum of two sizes because we handle two regions in the same MPI call
@@ -119,8 +121,12 @@
   integer :: imsg
   integer :: icount_faces,npoin2D_chunks_all
 
-  integer :: npoin2D_xi_all,npoin2D_eta_all,NGLOB1D_RADIAL_all
+  integer :: NGLOB1D_RADIAL_all
+  integer, dimension(NB_SQUARE_EDGES_ONEDIR) :: npoin2D_xi_all,npoin2D_eta_all
 
+! do not remove the "save" statement because this routine is non blocking
+! therefore it needs to find the right value of ioffset when it re-enters
+! the routine later to perform the next communication step
   integer, save :: ioffset
 
 #ifdef USE_MPI
@@ -141,8 +147,8 @@
 ! here we have to assemble all the contributions between slices using MPI
 
 ! size of buffers is the sum of two sizes because we handle two regions in the same MPI call
-  npoin2D_xi_all = npoin2D_xi_crust_mantle + npoin2D_xi_inner_core
-  npoin2D_eta_all = npoin2D_eta_crust_mantle + npoin2D_eta_inner_core
+  npoin2D_xi_all(:) = npoin2D_xi_crust_mantle(:) + npoin2D_xi_inner_core(:)
+  npoin2D_eta_all(:) = npoin2D_eta_crust_mantle(:) + npoin2D_eta_inner_core(:)
 
 !----
 !---- assemble the contributions between slices using MPI
@@ -154,17 +160,17 @@
 
   if(iphase == 1) then
 
-! the buffer for the inner core starts right after the buffer for the crust and mantle
-  ioffset = npoin2D_xi_crust_mantle
-
 ! slices copy the right face into the buffer
-  do ipoin = 1,npoin2D_xi_crust_mantle
+  do ipoin = 1,npoin2D_xi_crust_mantle(2)
     buffer_send_faces_vector(1,ipoin,1) = accel_crust_mantle(1,iboolright_xi_crust_mantle(ipoin))
     buffer_send_faces_vector(2,ipoin,1) = accel_crust_mantle(2,iboolright_xi_crust_mantle(ipoin))
     buffer_send_faces_vector(3,ipoin,1) = accel_crust_mantle(3,iboolright_xi_crust_mantle(ipoin))
   enddo
 
-  do ipoin = 1,npoin2D_xi_inner_core
+! the buffer for the inner core starts right after the buffer for the crust and mantle
+  ioffset = npoin2D_xi_crust_mantle(2)
+
+  do ipoin = 1,npoin2D_xi_inner_core(2)
     buffer_send_faces_vector(1,ioffset + ipoin,1) = accel_inner_core(1,iboolright_xi_inner_core(ipoin))
     buffer_send_faces_vector(2,ioffset + ipoin,1) = accel_inner_core(2,iboolright_xi_inner_core(ipoin))
     buffer_send_faces_vector(3,ioffset + ipoin,1) = accel_inner_core(3,iboolright_xi_inner_core(ipoin))
@@ -186,20 +192,10 @@
     receiver = addressing(ichunk,iproc_xi + 1,iproc_eta)
   endif
 #ifdef USE_MPI
-! call MPI_SENDRECV(buffer_send_faces_vector,NDIM*npoin2D_xi_all,CUSTOM_MPI_TYPE,receiver, &
-!       itag2,buffer_received_faces_vector,NDIM*npoin2D_xi_all,CUSTOM_MPI_TYPE,sender, &
-!       itag,MPI_COMM_WORLD,msg_status,ier)
-
-! call MPI_RECV(buffer_received_faces_vector,NDIM*npoin2D_xi_all,CUSTOM_MPI_TYPE,sender, &
-!       itag,MPI_COMM_WORLD,msg_status,ier)
-
-! call MPI_SEND(buffer_send_faces_vector,NDIM*npoin2D_xi_all,CUSTOM_MPI_TYPE,receiver, &
-!       itag2,MPI_COMM_WORLD,ier)
-
-  call MPI_IRECV(buffer_received_faces_vector,NDIM*npoin2D_xi_all,CUSTOM_MPI_TYPE,sender, &
+  call MPI_IRECV(buffer_received_faces_vector,NDIM*npoin2D_xi_all(1),CUSTOM_MPI_TYPE,sender, &
         itag,MPI_COMM_WORLD,request_receive,ier)
 
-  call MPI_ISSEND(buffer_send_faces_vector,NDIM*npoin2D_xi_all,CUSTOM_MPI_TYPE,receiver, &
+  call MPI_ISSEND(buffer_send_faces_vector,NDIM*npoin2D_xi_all(2),CUSTOM_MPI_TYPE,receiver, &
         itag2,MPI_COMM_WORLD,request_send,ier)
 
 #endif
@@ -223,7 +219,7 @@
 ! all slices add the buffer received to the contributions on the left face
   if(iproc_xi > 0) then
 
-  do ipoin = 1,npoin2D_xi_crust_mantle
+  do ipoin = 1,npoin2D_xi_crust_mantle(1)
     accel_crust_mantle(1,iboolleft_xi_crust_mantle(ipoin)) = accel_crust_mantle(1,iboolleft_xi_crust_mantle(ipoin)) + &
                               buffer_received_faces_vector(1,ipoin,1)
     accel_crust_mantle(2,iboolleft_xi_crust_mantle(ipoin)) = accel_crust_mantle(2,iboolleft_xi_crust_mantle(ipoin)) + &
@@ -232,7 +228,10 @@
                               buffer_received_faces_vector(3,ipoin,1)
   enddo
 
-  do ipoin = 1,npoin2D_xi_inner_core
+! the buffer for the inner core starts right after the buffer for the crust and mantle
+  ioffset = npoin2D_xi_crust_mantle(1)
+
+  do ipoin = 1,npoin2D_xi_inner_core(1)
     accel_inner_core(1,iboolleft_xi_inner_core(ipoin)) = accel_inner_core(1,iboolleft_xi_inner_core(ipoin)) + &
                               buffer_received_faces_vector(1,ioffset + ipoin,1)
     accel_inner_core(2,iboolleft_xi_inner_core(ipoin)) = accel_inner_core(2,iboolleft_xi_inner_core(ipoin)) + &
@@ -246,13 +245,16 @@
 ! the contributions are correctly assembled on the left side of each slice
 ! now we have to send the result back to the sender
 ! all slices copy the left face into the buffer
-  do ipoin = 1,npoin2D_xi_crust_mantle
+  do ipoin = 1,npoin2D_xi_crust_mantle(1)
     buffer_send_faces_vector(1,ipoin,1) = accel_crust_mantle(1,iboolleft_xi_crust_mantle(ipoin))
     buffer_send_faces_vector(2,ipoin,1) = accel_crust_mantle(2,iboolleft_xi_crust_mantle(ipoin))
     buffer_send_faces_vector(3,ipoin,1) = accel_crust_mantle(3,iboolleft_xi_crust_mantle(ipoin))
   enddo
 
-  do ipoin = 1,npoin2D_xi_inner_core
+! the buffer for the inner core starts right after the buffer for the crust and mantle
+  ioffset = npoin2D_xi_crust_mantle(1)
+
+  do ipoin = 1,npoin2D_xi_inner_core(1)
     buffer_send_faces_vector(1,ioffset + ipoin,1) = accel_inner_core(1,iboolleft_xi_inner_core(ipoin))
     buffer_send_faces_vector(2,ioffset + ipoin,1) = accel_inner_core(2,iboolleft_xi_inner_core(ipoin))
     buffer_send_faces_vector(3,ioffset + ipoin,1) = accel_inner_core(3,iboolleft_xi_inner_core(ipoin))
@@ -274,20 +276,10 @@
     receiver = addressing(ichunk,iproc_xi - 1,iproc_eta)
   endif
 #ifdef USE_MPI
-! call MPI_SENDRECV(buffer_send_faces_vector,NDIM*npoin2D_xi_all,CUSTOM_MPI_TYPE,receiver, &
-!       itag2,buffer_received_faces_vector,NDIM*npoin2D_xi_all,CUSTOM_MPI_TYPE,sender, &
-!       itag,MPI_COMM_WORLD,msg_status,ier)
-
-! call MPI_RECV(buffer_received_faces_vector,NDIM*npoin2D_xi_all,CUSTOM_MPI_TYPE,sender, &
-!       itag,MPI_COMM_WORLD,msg_status,ier)
-
-! call MPI_SEND(buffer_send_faces_vector,NDIM*npoin2D_xi_all,CUSTOM_MPI_TYPE,receiver, &
-!       itag2,MPI_COMM_WORLD,ier)
-
-  call MPI_IRECV(buffer_received_faces_vector,NDIM*npoin2D_xi_all,CUSTOM_MPI_TYPE,sender, &
+  call MPI_IRECV(buffer_received_faces_vector,NDIM*npoin2D_xi_all(2),CUSTOM_MPI_TYPE,sender, &
         itag,MPI_COMM_WORLD,request_receive,ier)
 
-  call MPI_ISSEND(buffer_send_faces_vector,NDIM*npoin2D_xi_all,CUSTOM_MPI_TYPE,receiver, &
+  call MPI_ISSEND(buffer_send_faces_vector,NDIM*npoin2D_xi_all(1),CUSTOM_MPI_TYPE,receiver, &
         itag2,MPI_COMM_WORLD,request_send,ier)
 #endif
 
@@ -310,13 +302,16 @@
 ! all slices copy the buffer received to the contributions on the right face
   if(iproc_xi < NPROC_XI-1) then
 
-  do ipoin = 1,npoin2D_xi_crust_mantle
+  do ipoin = 1,npoin2D_xi_crust_mantle(2)
     accel_crust_mantle(1,iboolright_xi_crust_mantle(ipoin)) = buffer_received_faces_vector(1,ipoin,1)
     accel_crust_mantle(2,iboolright_xi_crust_mantle(ipoin)) = buffer_received_faces_vector(2,ipoin,1)
     accel_crust_mantle(3,iboolright_xi_crust_mantle(ipoin)) = buffer_received_faces_vector(3,ipoin,1)
   enddo
 
-  do ipoin = 1,npoin2D_xi_inner_core
+! the buffer for the inner core starts right after the buffer for the crust and mantle
+  ioffset = npoin2D_xi_crust_mantle(2)
+
+  do ipoin = 1,npoin2D_xi_inner_core(2)
     accel_inner_core(1,iboolright_xi_inner_core(ipoin)) = buffer_received_faces_vector(1,ioffset + ipoin,1)
     accel_inner_core(2,iboolright_xi_inner_core(ipoin)) = buffer_received_faces_vector(2,ioffset + ipoin,1)
     accel_inner_core(3,iboolright_xi_inner_core(ipoin)) = buffer_received_faces_vector(3,ioffset + ipoin,1)
@@ -328,17 +323,17 @@
 !---- then assemble along eta using the 2-D topology
 !----
 
-! the buffer for the inner core starts right after the buffer for the crust and mantle
-  ioffset = npoin2D_eta_crust_mantle
-
 ! slices copy the right face into the buffer
-  do ipoin = 1,npoin2D_eta_crust_mantle
+  do ipoin = 1,npoin2D_eta_crust_mantle(2)
     buffer_send_faces_vector(1,ipoin,1) = accel_crust_mantle(1,iboolright_eta_crust_mantle(ipoin))
     buffer_send_faces_vector(2,ipoin,1) = accel_crust_mantle(2,iboolright_eta_crust_mantle(ipoin))
     buffer_send_faces_vector(3,ipoin,1) = accel_crust_mantle(3,iboolright_eta_crust_mantle(ipoin))
   enddo
 
-  do ipoin = 1,npoin2D_eta_inner_core
+! the buffer for the inner core starts right after the buffer for the crust and mantle
+  ioffset = npoin2D_eta_crust_mantle(2)
+
+  do ipoin = 1,npoin2D_eta_inner_core(2)
     buffer_send_faces_vector(1,ioffset + ipoin,1) = accel_inner_core(1,iboolright_eta_inner_core(ipoin))
     buffer_send_faces_vector(2,ioffset + ipoin,1) = accel_inner_core(2,iboolright_eta_inner_core(ipoin))
     buffer_send_faces_vector(3,ioffset + ipoin,1) = accel_inner_core(3,iboolright_eta_inner_core(ipoin))
@@ -360,20 +355,10 @@
     receiver = addressing(ichunk,iproc_xi,iproc_eta + 1)
   endif
 #ifdef USE_MPI
-! call MPI_SENDRECV(buffer_send_faces_vector,NDIM*npoin2D_eta_all,CUSTOM_MPI_TYPE,receiver, &
-!   itag2,buffer_received_faces_vector,NDIM*npoin2D_eta_all,CUSTOM_MPI_TYPE,sender, &
-!   itag,MPI_COMM_WORLD,msg_status,ier)
-
-! call MPI_RECV(buffer_received_faces_vector,NDIM*npoin2D_eta_all,CUSTOM_MPI_TYPE,sender, &
-!   itag,MPI_COMM_WORLD,msg_status,ier)
-
-! call MPI_SEND(buffer_send_faces_vector,NDIM*npoin2D_eta_all,CUSTOM_MPI_TYPE,receiver, &
-!   itag2,MPI_COMM_WORLD,ier)
-
-  call MPI_IRECV(buffer_received_faces_vector,NDIM*npoin2D_eta_all,CUSTOM_MPI_TYPE,sender, &
+  call MPI_IRECV(buffer_received_faces_vector,NDIM*npoin2D_eta_all(1),CUSTOM_MPI_TYPE,sender, &
     itag,MPI_COMM_WORLD,request_receive,ier)
 
-  call MPI_ISSEND(buffer_send_faces_vector,NDIM*npoin2D_eta_all,CUSTOM_MPI_TYPE,receiver, &
+  call MPI_ISSEND(buffer_send_faces_vector,NDIM*npoin2D_eta_all(2),CUSTOM_MPI_TYPE,receiver, &
     itag2,MPI_COMM_WORLD,request_send,ier)
 #endif
 
@@ -396,7 +381,7 @@
 ! all slices add the buffer received to the contributions on the left face
   if(iproc_eta > 0) then
 
-  do ipoin = 1,npoin2D_eta_crust_mantle
+  do ipoin = 1,npoin2D_eta_crust_mantle(1)
     accel_crust_mantle(1,iboolleft_eta_crust_mantle(ipoin)) = accel_crust_mantle(1,iboolleft_eta_crust_mantle(ipoin)) + &
                               buffer_received_faces_vector(1,ipoin,1)
     accel_crust_mantle(2,iboolleft_eta_crust_mantle(ipoin)) = accel_crust_mantle(2,iboolleft_eta_crust_mantle(ipoin)) + &
@@ -405,7 +390,10 @@
                               buffer_received_faces_vector(3,ipoin,1)
   enddo
 
-  do ipoin = 1,npoin2D_eta_inner_core
+! the buffer for the inner core starts right after the buffer for the crust and mantle
+  ioffset = npoin2D_eta_crust_mantle(1)
+
+  do ipoin = 1,npoin2D_eta_inner_core(1)
     accel_inner_core(1,iboolleft_eta_inner_core(ipoin)) = accel_inner_core(1,iboolleft_eta_inner_core(ipoin)) + &
                               buffer_received_faces_vector(1,ioffset + ipoin,1)
     accel_inner_core(2,iboolleft_eta_inner_core(ipoin)) = accel_inner_core(2,iboolleft_eta_inner_core(ipoin)) + &
@@ -419,13 +407,16 @@
 ! the contributions are correctly assembled on the left side of each slice
 ! now we have to send the result back to the sender
 ! all slices copy the left face into the buffer
-  do ipoin = 1,npoin2D_eta_crust_mantle
+  do ipoin = 1,npoin2D_eta_crust_mantle(1)
     buffer_send_faces_vector(1,ipoin,1) = accel_crust_mantle(1,iboolleft_eta_crust_mantle(ipoin))
     buffer_send_faces_vector(2,ipoin,1) = accel_crust_mantle(2,iboolleft_eta_crust_mantle(ipoin))
     buffer_send_faces_vector(3,ipoin,1) = accel_crust_mantle(3,iboolleft_eta_crust_mantle(ipoin))
   enddo
 
-  do ipoin = 1,npoin2D_eta_inner_core
+! the buffer for the inner core starts right after the buffer for the crust and mantle
+  ioffset = npoin2D_eta_crust_mantle(1)
+
+  do ipoin = 1,npoin2D_eta_inner_core(1)
     buffer_send_faces_vector(1,ioffset + ipoin,1) = accel_inner_core(1,iboolleft_eta_inner_core(ipoin))
     buffer_send_faces_vector(2,ioffset + ipoin,1) = accel_inner_core(2,iboolleft_eta_inner_core(ipoin))
     buffer_send_faces_vector(3,ioffset + ipoin,1) = accel_inner_core(3,iboolleft_eta_inner_core(ipoin))
@@ -447,20 +438,10 @@
     receiver = addressing(ichunk,iproc_xi,iproc_eta - 1)
   endif
 #ifdef USE_MPI
-! call MPI_SENDRECV(buffer_send_faces_vector,NDIM*npoin2D_eta_all,CUSTOM_MPI_TYPE,receiver, &
-!   itag2,buffer_received_faces_vector,NDIM*npoin2D_eta_all,CUSTOM_MPI_TYPE,sender, &
-!   itag,MPI_COMM_WORLD,msg_status,ier)
-
-! call MPI_RECV(buffer_received_faces_vector,NDIM*npoin2D_eta_all,CUSTOM_MPI_TYPE,sender, &
-!   itag,MPI_COMM_WORLD,msg_status,ier)
-
-! call MPI_SEND(buffer_send_faces_vector,NDIM*npoin2D_eta_all,CUSTOM_MPI_TYPE,receiver, &
-!   itag2,MPI_COMM_WORLD,ier)
-
-  call MPI_IRECV(buffer_received_faces_vector,NDIM*npoin2D_eta_all,CUSTOM_MPI_TYPE,sender, &
+  call MPI_IRECV(buffer_received_faces_vector,NDIM*npoin2D_eta_all(2),CUSTOM_MPI_TYPE,sender, &
     itag,MPI_COMM_WORLD,request_receive,ier)
 
-  call MPI_ISSEND(buffer_send_faces_vector,NDIM*npoin2D_eta_all,CUSTOM_MPI_TYPE,receiver, &
+  call MPI_ISSEND(buffer_send_faces_vector,NDIM*npoin2D_eta_all(1),CUSTOM_MPI_TYPE,receiver, &
     itag2,MPI_COMM_WORLD,request_send,ier)
 #endif
 
@@ -483,13 +464,16 @@
 ! all slices copy the buffer received to the contributions on the right face
   if(iproc_eta < NPROC_ETA-1) then
 
-  do ipoin = 1,npoin2D_eta_crust_mantle
+  do ipoin = 1,npoin2D_eta_crust_mantle(2)
     accel_crust_mantle(1,iboolright_eta_crust_mantle(ipoin)) = buffer_received_faces_vector(1,ipoin,1)
     accel_crust_mantle(2,iboolright_eta_crust_mantle(ipoin)) = buffer_received_faces_vector(2,ipoin,1)
     accel_crust_mantle(3,iboolright_eta_crust_mantle(ipoin)) = buffer_received_faces_vector(3,ipoin,1)
   enddo
 
-  do ipoin = 1,npoin2D_eta_inner_core
+! the buffer for the inner core starts right after the buffer for the crust and mantle
+  ioffset = npoin2D_eta_crust_mantle(2)
+
+  do ipoin = 1,npoin2D_eta_inner_core(2)
     accel_inner_core(1,iboolright_eta_inner_core(ipoin)) = buffer_received_faces_vector(1,ioffset + ipoin,1)
     accel_inner_core(2,iboolright_eta_inner_core(ipoin)) = buffer_received_faces_vector(2,ioffset + ipoin,1)
     accel_inner_core(3,iboolright_eta_inner_core(ipoin)) = buffer_received_faces_vector(3,ioffset + ipoin,1)
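
The comment added above the "save" statement is the central subtlety of this routine: because the assembly is non-blocking, control returns to the caller between communication steps, and ioffset must still hold the right value when the routine re-enters. A minimal sketch of the mechanism, with a hypothetical routine name and count:

  subroutine demo_phase(iphase, npoin_cm)
    implicit none
    integer :: iphase, npoin_cm
    integer, save :: ioffset     ! survives between calls (re-entries)
    if (iphase == 1) then
      ioffset = npoin_cm         ! set while packing the crust-mantle part
    else
      ! a later re-entry still knows where the inner-core part starts
      print *, 'inner-core data starts at index ', ioffset + 1
    endif
  end subroutine demo_phase

Without the "save" attribute, the Fortran standard leaves a plain local variable undefined on re-entry.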

Modified: seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/assemble_MPI_vector_block.F90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/assemble_MPI_vector_block.F90	2008-11-03 23:07:03 UTC (rev 13234)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/assemble_MPI_vector_block.F90	2008-11-03 23:37:15 UTC (rev 13235)
@@ -41,7 +41,7 @@
             iboolfaces_inner_core,iboolcorner_inner_core, &
             iprocfrom_faces,iprocto_faces,imsg_type, &
             iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
-            buffer_send_faces_vector,buffer_received_faces_vector,npoin2D_max_all, &
+            buffer_send_faces_vector,buffer_received_faces_vector,npoin2D_max_all_CM_IC, &
             buffer_send_chunkcorners_vector,buffer_recv_chunkcorners_vector, &
             NUMMSGS_FACES,NUM_MSG_TYPES,NCORNERSCHUNKS, &
             NPROC_XI,NPROC_ETA,NGLOB1D_RADIAL_crust_mantle, &
@@ -69,9 +69,11 @@
   real(kind=CUSTOM_REAL), dimension(NDIM,NGLOB_INNER_CORE) :: accel_inner_core
 
   integer iproc_xi,iproc_eta,ichunk
-  integer npoin2D_xi_crust_mantle,npoin2D_eta_crust_mantle
+
+  integer, dimension(NB_SQUARE_EDGES_ONEDIR) :: npoin2D_xi_crust_mantle,npoin2D_eta_crust_mantle, &
+        npoin2D_xi_inner_core,npoin2D_eta_inner_core
+
   integer npoin2D_faces_crust_mantle(NUMFACES_SHARED)
-  integer npoin2D_xi_inner_core,npoin2D_eta_inner_core
   integer npoin2D_faces_inner_core(NUMFACES_SHARED)
 
   integer NGLOB1D_RADIAL_crust_mantle,NGLOB1D_RADIAL_inner_core,NPROC_XI,NPROC_ETA
@@ -91,10 +93,10 @@
   integer, dimension(NGLOB1D_RADIAL_inner_core,NUMCORNERS_SHARED) :: iboolcorner_inner_core
   integer icount_corners
 
-  integer :: npoin2D_max_all
+  integer :: npoin2D_max_all_CM_IC
   integer, dimension(NGLOB2DMAX_XY_VAL_CM,NUMFACES_SHARED) :: iboolfaces_crust_mantle
   integer, dimension(NGLOB2DMAX_XY_VAL_IC,NUMFACES_SHARED) :: iboolfaces_inner_core
-  real(kind=CUSTOM_REAL), dimension(NDIM,npoin2D_max_all) :: buffer_send_faces_vector,buffer_received_faces_vector
+  real(kind=CUSTOM_REAL), dimension(NDIM,npoin2D_max_all_CM_IC) :: buffer_send_faces_vector,buffer_received_faces_vector
 
 ! buffers for send and receive between corners of the chunks
 ! size of buffers is the sum of two sizes because we handle two regions in the same MPI call
@@ -119,7 +121,8 @@
   integer :: imsg,imsg_loop
   integer :: icount_faces,npoin2D_chunks_all
 
-  integer :: npoin2D_xi_all,npoin2D_eta_all,NGLOB1D_RADIAL_all,ioffset
+  integer :: NGLOB1D_RADIAL_all,ioffset
+  integer, dimension(NB_SQUARE_EDGES_ONEDIR) :: npoin2D_xi_all,npoin2D_eta_all
 
 #ifdef USE_MPI
   integer :: ier
@@ -133,8 +136,8 @@
 ! here we have to assemble all the contributions between slices using MPI
 
 ! size of buffers is the sum of two sizes because we handle two regions in the same MPI call
-  npoin2D_xi_all = npoin2D_xi_crust_mantle + npoin2D_xi_inner_core
-  npoin2D_eta_all = npoin2D_eta_crust_mantle + npoin2D_eta_inner_core
+  npoin2D_xi_all(:) = npoin2D_xi_crust_mantle(:) + npoin2D_xi_inner_core(:)
+  npoin2D_eta_all(:) = npoin2D_eta_crust_mantle(:) + npoin2D_eta_inner_core(:)
 
 !----
 !---- assemble the contributions between slices using MPI
@@ -147,17 +150,17 @@
 ! assemble along xi only if more than one slice
   if(NPROC_XI > 1) then
 
-! the buffer for the inner core starts right after the buffer for the crust and mantle
-  ioffset = npoin2D_xi_crust_mantle
-
 ! slices copy the right face into the buffer
-  do ipoin = 1,npoin2D_xi_crust_mantle
+  do ipoin = 1,npoin2D_xi_crust_mantle(2)
     buffer_send_faces_vector(1,ipoin) = accel_crust_mantle(1,iboolright_xi_crust_mantle(ipoin))
     buffer_send_faces_vector(2,ipoin) = accel_crust_mantle(2,iboolright_xi_crust_mantle(ipoin))
     buffer_send_faces_vector(3,ipoin) = accel_crust_mantle(3,iboolright_xi_crust_mantle(ipoin))
   enddo
 
-  do ipoin = 1,npoin2D_xi_inner_core
+! the buffer for the inner core starts right after the buffer for the crust and mantle
+  ioffset = npoin2D_xi_crust_mantle(2)
+
+  do ipoin = 1,npoin2D_xi_inner_core(2)
     buffer_send_faces_vector(1,ioffset + ipoin) = accel_inner_core(1,iboolright_xi_inner_core(ipoin))
     buffer_send_faces_vector(2,ioffset + ipoin) = accel_inner_core(2,iboolright_xi_inner_core(ipoin))
     buffer_send_faces_vector(3,ioffset + ipoin) = accel_inner_core(3,iboolright_xi_inner_core(ipoin))
@@ -179,15 +182,15 @@
     receiver = addressing(ichunk,iproc_xi + 1,iproc_eta)
   endif
 #ifdef USE_MPI
-  call MPI_SENDRECV(buffer_send_faces_vector,NDIM*npoin2D_xi_all,CUSTOM_MPI_TYPE,receiver, &
-        itag2,buffer_received_faces_vector,NDIM*npoin2D_xi_all,CUSTOM_MPI_TYPE,sender, &
+  call MPI_SENDRECV(buffer_send_faces_vector,NDIM*npoin2D_xi_all(2),CUSTOM_MPI_TYPE,receiver, &
+        itag2,buffer_received_faces_vector,NDIM*npoin2D_xi_all(1),CUSTOM_MPI_TYPE,sender, &
         itag,MPI_COMM_WORLD,msg_status,ier)
 #endif
 
 ! all slices add the buffer received to the contributions on the left face
   if(iproc_xi > 0) then
 
-  do ipoin = 1,npoin2D_xi_crust_mantle
+  do ipoin = 1,npoin2D_xi_crust_mantle(1)
     accel_crust_mantle(1,iboolleft_xi_crust_mantle(ipoin)) = accel_crust_mantle(1,iboolleft_xi_crust_mantle(ipoin)) + &
                               buffer_received_faces_vector(1,ipoin)
     accel_crust_mantle(2,iboolleft_xi_crust_mantle(ipoin)) = accel_crust_mantle(2,iboolleft_xi_crust_mantle(ipoin)) + &
@@ -196,7 +199,10 @@
                               buffer_received_faces_vector(3,ipoin)
   enddo
 
-  do ipoin = 1,npoin2D_xi_inner_core
+! the buffer for the inner core starts right after the buffer for the crust and mantle
+  ioffset = npoin2D_xi_crust_mantle(1)
+
+  do ipoin = 1,npoin2D_xi_inner_core(1)
     accel_inner_core(1,iboolleft_xi_inner_core(ipoin)) = accel_inner_core(1,iboolleft_xi_inner_core(ipoin)) + &
                               buffer_received_faces_vector(1,ioffset + ipoin)
     accel_inner_core(2,iboolleft_xi_inner_core(ipoin)) = accel_inner_core(2,iboolleft_xi_inner_core(ipoin)) + &
@@ -210,13 +216,16 @@
 ! the contributions are correctly assembled on the left side of each slice
 ! now we have to send the result back to the sender
 ! all slices copy the left face into the buffer
-  do ipoin = 1,npoin2D_xi_crust_mantle
+  do ipoin = 1,npoin2D_xi_crust_mantle(1)
     buffer_send_faces_vector(1,ipoin) = accel_crust_mantle(1,iboolleft_xi_crust_mantle(ipoin))
     buffer_send_faces_vector(2,ipoin) = accel_crust_mantle(2,iboolleft_xi_crust_mantle(ipoin))
     buffer_send_faces_vector(3,ipoin) = accel_crust_mantle(3,iboolleft_xi_crust_mantle(ipoin))
   enddo
 
-  do ipoin = 1,npoin2D_xi_inner_core
+! the buffer for the inner core starts right after the buffer for the crust and mantle
+  ioffset = npoin2D_xi_crust_mantle(1)
+
+  do ipoin = 1,npoin2D_xi_inner_core(1)
     buffer_send_faces_vector(1,ioffset + ipoin) = accel_inner_core(1,iboolleft_xi_inner_core(ipoin))
     buffer_send_faces_vector(2,ioffset + ipoin) = accel_inner_core(2,iboolleft_xi_inner_core(ipoin))
     buffer_send_faces_vector(3,ioffset + ipoin) = accel_inner_core(3,iboolleft_xi_inner_core(ipoin))
@@ -238,21 +247,24 @@
     receiver = addressing(ichunk,iproc_xi - 1,iproc_eta)
   endif
 #ifdef USE_MPI
-  call MPI_SENDRECV(buffer_send_faces_vector,NDIM*npoin2D_xi_all,CUSTOM_MPI_TYPE,receiver, &
-        itag2,buffer_received_faces_vector,NDIM*npoin2D_xi_all,CUSTOM_MPI_TYPE,sender, &
+  call MPI_SENDRECV(buffer_send_faces_vector,NDIM*npoin2D_xi_all(1),CUSTOM_MPI_TYPE,receiver, &
+        itag2,buffer_received_faces_vector,NDIM*npoin2D_xi_all(2),CUSTOM_MPI_TYPE,sender, &
         itag,MPI_COMM_WORLD,msg_status,ier)
 #endif
 
 ! all slices copy the buffer received to the contributions on the right face
   if(iproc_xi < NPROC_XI-1) then
 
-  do ipoin = 1,npoin2D_xi_crust_mantle
+  do ipoin = 1,npoin2D_xi_crust_mantle(2)
     accel_crust_mantle(1,iboolright_xi_crust_mantle(ipoin)) = buffer_received_faces_vector(1,ipoin)
     accel_crust_mantle(2,iboolright_xi_crust_mantle(ipoin)) = buffer_received_faces_vector(2,ipoin)
     accel_crust_mantle(3,iboolright_xi_crust_mantle(ipoin)) = buffer_received_faces_vector(3,ipoin)
   enddo
 
-  do ipoin = 1,npoin2D_xi_inner_core
+! the buffer for the inner core starts right after the buffer for the crust and mantle
+  ioffset = npoin2D_xi_crust_mantle(2)
+
+  do ipoin = 1,npoin2D_xi_inner_core(2)
     accel_inner_core(1,iboolright_xi_inner_core(ipoin)) = buffer_received_faces_vector(1,ioffset + ipoin)
     accel_inner_core(2,iboolright_xi_inner_core(ipoin)) = buffer_received_faces_vector(2,ioffset + ipoin)
     accel_inner_core(3,iboolright_xi_inner_core(ipoin)) = buffer_received_faces_vector(3,ioffset + ipoin)
@@ -269,17 +281,17 @@
 ! assemble along eta only if more than one slice
   if(NPROC_ETA > 1) then
 
-! the buffer for the inner core starts right after the buffer for the crust and mantle
-  ioffset = npoin2D_eta_crust_mantle
-
 ! slices copy the right face into the buffer
-  do ipoin = 1,npoin2D_eta_crust_mantle
+  do ipoin = 1,npoin2D_eta_crust_mantle(2)
     buffer_send_faces_vector(1,ipoin) = accel_crust_mantle(1,iboolright_eta_crust_mantle(ipoin))
     buffer_send_faces_vector(2,ipoin) = accel_crust_mantle(2,iboolright_eta_crust_mantle(ipoin))
     buffer_send_faces_vector(3,ipoin) = accel_crust_mantle(3,iboolright_eta_crust_mantle(ipoin))
   enddo
 
-  do ipoin = 1,npoin2D_eta_inner_core
+! the buffer for the inner core starts right after the buffer for the crust and mantle
+  ioffset = npoin2D_eta_crust_mantle(2)
+
+  do ipoin = 1,npoin2D_eta_inner_core(2)
     buffer_send_faces_vector(1,ioffset + ipoin) = accel_inner_core(1,iboolright_eta_inner_core(ipoin))
     buffer_send_faces_vector(2,ioffset + ipoin) = accel_inner_core(2,iboolright_eta_inner_core(ipoin))
     buffer_send_faces_vector(3,ioffset + ipoin) = accel_inner_core(3,iboolright_eta_inner_core(ipoin))
@@ -301,15 +313,15 @@
     receiver = addressing(ichunk,iproc_xi,iproc_eta + 1)
   endif
 #ifdef USE_MPI
-  call MPI_SENDRECV(buffer_send_faces_vector,NDIM*npoin2D_eta_all,CUSTOM_MPI_TYPE,receiver, &
-    itag2,buffer_received_faces_vector,NDIM*npoin2D_eta_all,CUSTOM_MPI_TYPE,sender, &
+  call MPI_SENDRECV(buffer_send_faces_vector,NDIM*npoin2D_eta_all(2),CUSTOM_MPI_TYPE,receiver, &
+    itag2,buffer_received_faces_vector,NDIM*npoin2D_eta_all(1),CUSTOM_MPI_TYPE,sender, &
     itag,MPI_COMM_WORLD,msg_status,ier)
 #endif
 
 ! all slices add the buffer received to the contributions on the left face
   if(iproc_eta > 0) then
 
-  do ipoin = 1,npoin2D_eta_crust_mantle
+  do ipoin = 1,npoin2D_eta_crust_mantle(1)
     accel_crust_mantle(1,iboolleft_eta_crust_mantle(ipoin)) = accel_crust_mantle(1,iboolleft_eta_crust_mantle(ipoin)) + &
                               buffer_received_faces_vector(1,ipoin)
     accel_crust_mantle(2,iboolleft_eta_crust_mantle(ipoin)) = accel_crust_mantle(2,iboolleft_eta_crust_mantle(ipoin)) + &
@@ -318,7 +330,10 @@
                               buffer_received_faces_vector(3,ipoin)
   enddo
 
-  do ipoin = 1,npoin2D_eta_inner_core
+! the buffer for the inner core starts right after the buffer for the crust and mantle
+  ioffset = npoin2D_eta_crust_mantle(1)
+
+  do ipoin = 1,npoin2D_eta_inner_core(1)
     accel_inner_core(1,iboolleft_eta_inner_core(ipoin)) = accel_inner_core(1,iboolleft_eta_inner_core(ipoin)) + &
                               buffer_received_faces_vector(1,ioffset + ipoin)
     accel_inner_core(2,iboolleft_eta_inner_core(ipoin)) = accel_inner_core(2,iboolleft_eta_inner_core(ipoin)) + &
@@ -332,13 +347,16 @@
 ! the contributions are correctly assembled on the left side of each slice
 ! now we have to send the result back to the sender
 ! all slices copy the left face into the buffer
-  do ipoin = 1,npoin2D_eta_crust_mantle
+  do ipoin = 1,npoin2D_eta_crust_mantle(1)
     buffer_send_faces_vector(1,ipoin) = accel_crust_mantle(1,iboolleft_eta_crust_mantle(ipoin))
     buffer_send_faces_vector(2,ipoin) = accel_crust_mantle(2,iboolleft_eta_crust_mantle(ipoin))
     buffer_send_faces_vector(3,ipoin) = accel_crust_mantle(3,iboolleft_eta_crust_mantle(ipoin))
   enddo
 
-  do ipoin = 1,npoin2D_eta_inner_core
+! the buffer for the inner core starts right after the buffer for the crust and mantle
+  ioffset = npoin2D_eta_crust_mantle(1)
+
+  do ipoin = 1,npoin2D_eta_inner_core(1)
     buffer_send_faces_vector(1,ioffset + ipoin) = accel_inner_core(1,iboolleft_eta_inner_core(ipoin))
     buffer_send_faces_vector(2,ioffset + ipoin) = accel_inner_core(2,iboolleft_eta_inner_core(ipoin))
     buffer_send_faces_vector(3,ioffset + ipoin) = accel_inner_core(3,iboolleft_eta_inner_core(ipoin))
@@ -360,21 +378,24 @@
     receiver = addressing(ichunk,iproc_xi,iproc_eta - 1)
   endif
 #ifdef USE_MPI
-  call MPI_SENDRECV(buffer_send_faces_vector,NDIM*npoin2D_eta_all,CUSTOM_MPI_TYPE,receiver, &
-    itag2,buffer_received_faces_vector,NDIM*npoin2D_eta_all,CUSTOM_MPI_TYPE,sender, &
+  call MPI_SENDRECV(buffer_send_faces_vector,NDIM*npoin2D_eta_all(1),CUSTOM_MPI_TYPE,receiver, &
+    itag2,buffer_received_faces_vector,NDIM*npoin2D_eta_all(2),CUSTOM_MPI_TYPE,sender, &
     itag,MPI_COMM_WORLD,msg_status,ier)
 #endif
 
 ! all slices copy the buffer received to the contributions on the right face
   if(iproc_eta < NPROC_ETA-1) then
 
-  do ipoin = 1,npoin2D_eta_crust_mantle
+  do ipoin = 1,npoin2D_eta_crust_mantle(2)
     accel_crust_mantle(1,iboolright_eta_crust_mantle(ipoin)) = buffer_received_faces_vector(1,ipoin)
     accel_crust_mantle(2,iboolright_eta_crust_mantle(ipoin)) = buffer_received_faces_vector(2,ipoin)
     accel_crust_mantle(3,iboolright_eta_crust_mantle(ipoin)) = buffer_received_faces_vector(3,ipoin)
   enddo
 
-  do ipoin = 1,npoin2D_eta_inner_core
+! the buffer for the inner core starts right after the buffer for the crust and mantle
+  ioffset = npoin2D_eta_crust_mantle(2)
+
+  do ipoin = 1,npoin2D_eta_inner_core(2)
     accel_inner_core(1,iboolright_eta_inner_core(ipoin)) = buffer_received_faces_vector(1,ioffset + ipoin)
     accel_inner_core(2,iboolright_eta_inner_core(ipoin)) = buffer_received_faces_vector(2,ioffset + ipoin)
     accel_inner_core(3,iboolright_eta_inner_core(ipoin)) = buffer_received_faces_vector(3,ioffset + ipoin)
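
The substantive change in this file is that the 2-D point counts are now arrays over the slice edges, so the send and receive counts of a single MPI_SENDRECV can differ. Assuming NB_SQUARE_EDGES_ONEDIR = 2, with index 1 the left cut plane and index 2 the right cut plane (the convention the loops above follow), the exchange pattern is:

  integer, dimension(2) :: npoin2D_xi
  npoin2D_xi(1) = 100    ! points shared with the left neighbour (example)
  npoin2D_xi(2) = 104    ! points shared with the right neighbour (example)
  ! send the right face, receive into the left face; once the superbrick
  ! is cut in two these two counts are no longer guaranteed to be equal
  call MPI_SENDRECV(buffer_send_faces_vector, NDIM*npoin2D_xi(2), CUSTOM_MPI_TYPE, receiver, &
        itag2, buffer_received_faces_vector, NDIM*npoin2D_xi(1), CUSTOM_MPI_TYPE, sender, &
        itag, MPI_COMM_WORLD, msg_status, ier)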

Modified: seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/compute_forces_CM_IC.F90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/compute_forces_CM_IC.F90	2008-11-03 23:07:03 UTC (rev 13234)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/compute_forces_CM_IC.F90	2008-11-03 23:37:15 UTC (rev 13235)
@@ -56,7 +56,7 @@
             iboolfaces_inner_core,iboolcorner_inner_core, &
             iprocfrom_faces,iprocto_faces, &
             iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
-            buffer_send_faces,buffer_received_faces,npoin2D_max_all, &
+            buffer_send_faces,buffer_received_faces,npoin2D_max_all_CM_IC, &
             buffer_send_chunkcorners_vector,buffer_recv_chunkcorners_vector,iphase, &
                nb_msgs_theor_in_cube,sender_from_slices_to_cube, &
                npoin2D_cube_from_slices,buffer_all_cube_from_slices,buffer_slices,ibool_central_cube, &
@@ -247,8 +247,8 @@
   integer npoin2D_faces_crust_mantle(NUMFACES_SHARED)
   integer npoin2D_faces_inner_core(NUMFACES_SHARED)
 
-  integer, dimension(NB_SQUARE_EDGES_ONEDIR) :: npoin2D_xi_crust_mantle,npoin2D_eta_crust_mantle
-  integer, dimension(NB_SQUARE_EDGES_ONEDIR) :: npoin2D_xi_inner_core,npoin2D_eta_inner_core
+  integer, dimension(NB_SQUARE_EDGES_ONEDIR) :: npoin2D_xi_crust_mantle,npoin2D_eta_crust_mantle, &
+       npoin2D_xi_inner_core,npoin2D_eta_inner_core
 
 ! communication pattern for faces between chunks
   integer, dimension(NUMMSGS_FACES_VAL) :: iprocfrom_faces,iprocto_faces
@@ -262,10 +262,9 @@
   integer, dimension(NGLOB2DMAX_XY_VAL_CM,NUMFACES_SHARED) :: iboolfaces_crust_mantle
   integer, dimension(NGLOB2DMAX_XY_VAL_IC,NUMFACES_SHARED) :: iboolfaces_inner_core
 
-  integer :: npoin2D_max_all
+  integer :: npoin2D_max_all_CM_IC
+  real(kind=CUSTOM_REAL), dimension(NDIM,npoin2D_max_all_CM_IC) :: buffer_send_faces,buffer_received_faces
 
-  real(kind=CUSTOM_REAL), dimension(NDIM,npoin2D_max_all) :: buffer_send_faces,buffer_received_faces
-
 ! size of buffers is the sum of two sizes because we handle two regions in the same MPI call
   real(kind=CUSTOM_REAL), dimension(NDIM,NGLOB1D_RADIAL_CM + NGLOB1D_RADIAL_IC) :: &
      buffer_send_chunkcorners_vector,buffer_recv_chunkcorners_vector
@@ -308,14 +307,14 @@
       if(iphase <= 7) call assemble_MPI_vector(myrank,accel_crust_mantle,accel_inner_core, &
             iproc_xi,iproc_eta,ichunk,addressing, &
             iboolleft_xi_crust_mantle,iboolright_xi_crust_mantle,iboolleft_eta_crust_mantle,iboolright_eta_crust_mantle, &
-            npoin2D_faces_crust_mantle,npoin2D_xi_crust_mantle(1),npoin2D_eta_crust_mantle(1), &
+            npoin2D_faces_crust_mantle,npoin2D_xi_crust_mantle,npoin2D_eta_crust_mantle, &
             iboolfaces_crust_mantle,iboolcorner_crust_mantle, &
             iboolleft_xi_inner_core,iboolright_xi_inner_core,iboolleft_eta_inner_core,iboolright_eta_inner_core, &
-            npoin2D_faces_inner_core,npoin2D_xi_inner_core(1),npoin2D_eta_inner_core(1), &
+            npoin2D_faces_inner_core,npoin2D_xi_inner_core,npoin2D_eta_inner_core, &
             iboolfaces_inner_core,iboolcorner_inner_core, &
             iprocfrom_faces,iprocto_faces, &
             iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
-            buffer_send_faces,buffer_received_faces,npoin2D_max_all, &
+            buffer_send_faces,buffer_received_faces,npoin2D_max_all_CM_IC, &
             buffer_send_chunkcorners_vector,buffer_recv_chunkcorners_vector, &
             NUMMSGS_FACES_VAL,NCORNERSCHUNKS_VAL, &
             NPROC_XI_VAL,NPROC_ETA_VAL,NGLOB1D_RADIAL_CM, &
@@ -825,14 +824,14 @@
          if(iphase <= 7) call assemble_MPI_vector(myrank,accel_crust_mantle,accel_inner_core, &
             iproc_xi,iproc_eta,ichunk,addressing, &
             iboolleft_xi_crust_mantle,iboolright_xi_crust_mantle,iboolleft_eta_crust_mantle,iboolright_eta_crust_mantle, &
-            npoin2D_faces_crust_mantle,npoin2D_xi_crust_mantle(1),npoin2D_eta_crust_mantle(1), &
+            npoin2D_faces_crust_mantle,npoin2D_xi_crust_mantle,npoin2D_eta_crust_mantle, &
             iboolfaces_crust_mantle,iboolcorner_crust_mantle, &
             iboolleft_xi_inner_core,iboolright_xi_inner_core,iboolleft_eta_inner_core,iboolright_eta_inner_core, &
-            npoin2D_faces_inner_core,npoin2D_xi_inner_core(1),npoin2D_eta_inner_core(1), &
+            npoin2D_faces_inner_core,npoin2D_xi_inner_core,npoin2D_eta_inner_core, &
             iboolfaces_inner_core,iboolcorner_inner_core, &
             iprocfrom_faces,iprocto_faces, &
             iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
-            buffer_send_faces,buffer_received_faces,npoin2D_max_all, &
+            buffer_send_faces,buffer_received_faces,npoin2D_max_all_CM_IC, &
             buffer_send_chunkcorners_vector,buffer_recv_chunkcorners_vector, &
             NUMMSGS_FACES_VAL,NCORNERSCHUNKS_VAL, &
             NPROC_XI_VAL,NPROC_ETA_VAL,NGLOB1D_RADIAL_CM, &
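
The call-site fix above replaces npoin2D_xi_crust_mantle(1), a single integer holding only the left-edge count, with the whole array, to match the dummy now declared dimension(NB_SQUARE_EDGES_ONEDIR). A minimal sketch of the difference, with made-up counts:

  program pass_whole_array
    implicit none
    integer, dimension(2) :: npoin2D_xi = (/ 100, 104 /)
    call assemble(npoin2D_xi)     ! was: call assemble(npoin2D_xi(1))
  contains
    subroutine assemble(n)
      integer, dimension(2) :: n  ! the callee now sees both edge counts
      print *, 'left =', n(1), '  right =', n(2)
    end subroutine assemble
  end program pass_whole_array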

Modified: seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/compute_forces_OC.F90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/compute_forces_OC.F90	2008-11-03 23:07:03 UTC (rev 13234)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/compute_forces_OC.F90	2008-11-03 23:37:15 UTC (rev 13235)
@@ -39,7 +39,7 @@
           iboolfaces_outer_core,iboolcorner_outer_core, &
           iprocfrom_faces,iprocto_faces, &
           iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
-          buffer_send_faces,buffer_received_faces,npoin2D_max_all, &
+          buffer_send_faces,buffer_received_faces,npoin2D_max_all_CM_IC, &
           buffer_send_chunkcorners_scalar,buffer_recv_chunkcorners_scalar,iphase, &
 #endif
           hprime_xx,hprime_yy,hprime_zz, &
@@ -80,8 +80,8 @@
 ! always three times bigger and therefore scalars can use the first part
 ! of the vector buffer in memory even if it has an additional index here
 ! allocate these automatic arrays in the memory stack to avoid memory fragmentation with "allocate()"
-  integer :: npoin2D_max_all
-  real(kind=CUSTOM_REAL), dimension(NDIM,npoin2D_max_all,NUMFACES_SHARED) :: buffer_send_faces,buffer_received_faces
+  integer :: npoin2D_max_all_CM_IC
+  real(kind=CUSTOM_REAL), dimension(NDIM,npoin2D_max_all_CM_IC,NUMFACES_SHARED) :: buffer_send_faces,buffer_received_faces
 
   integer, dimension(NGLOB1D_RADIAL_OC,NUMCORNERS_SHARED) :: iboolcorner_outer_core
 
@@ -156,7 +156,7 @@
             iboolfaces_outer_core,iboolcorner_outer_core, &
             iprocfrom_faces,iprocto_faces, &
             iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
-            buffer_send_faces,buffer_received_faces,npoin2D_xi_outer_core(1), &
+            buffer_send_faces,buffer_received_faces,npoin2D_max_all_CM_IC, &
             buffer_send_chunkcorners_scalar,buffer_recv_chunkcorners_scalar, &
             NUMMSGS_FACES_VAL,NCORNERSCHUNKS_VAL, &
             NPROC_XI_VAL,NPROC_ETA_VAL,NGLOB1D_RADIAL_OC, &
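
The comment kept above (the vector buffer is always three times bigger, so the scalar assembly can use its first part) relies on Fortran sequence association: an array element passed as an actual argument is associated with the element sequence of an explicit-shape dummy. A minimal sketch under that assumption, with made-up sizes and a hypothetical scalar_view routine:

  subroutine scalar_view(buf, npoin, nfaces)
    implicit none
    integer :: npoin, nfaces
    real :: buf(npoin, nfaces)            ! scalar layout over the same storage
    buf(:,:) = 0.
  end subroutine scalar_view

  program reuse_buffer
    implicit none
    integer, parameter :: NDIM = 3, NPOIN = 4, NFACES = 2
    real :: buffer(NDIM, NPOIN, NFACES)   ! sized for vector exchanges
    buffer = 1.
    ! the scalar view needs NPOIN*NFACES reals, one third of the buffer
    call scalar_view(buffer(1,1,1), NPOIN, NFACES)
    print *, buffer(1,1,1)
  end program reuse_buffer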

Modified: seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/create_regions_mesh.F90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/create_regions_mesh.F90	2008-11-03 23:07:03 UTC (rev 13234)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/create_regions_mesh.F90	2008-11-03 23:37:15 UTC (rev 13235)
@@ -73,7 +73,7 @@
 
 !! DK DK added this for merged version
 #ifdef USE_MPI
-  integer :: npoin2D_xi,npoin2D_eta
+  integer, dimension(NB_SQUARE_EDGES_ONEDIR) :: npoin2D_xi,npoin2D_eta
 #endif
 
 ! mass matrix

Modified: seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/fix_non_blocking_slices.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/fix_non_blocking_slices.f90	2008-11-03 23:07:03 UTC (rev 13234)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/fix_non_blocking_slices.f90	2008-11-03 23:37:15 UTC (rev 13235)
@@ -37,8 +37,10 @@
 
   include "constants.h"
 
-  integer :: npoin2D_xi,npoin2D_eta,nspec,nglob,NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX
+  integer :: nspec,nglob,NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX
 
+  integer, dimension(NB_SQUARE_EDGES_ONEDIR) :: npoin2D_xi,npoin2D_eta
+
   logical, dimension(nspec) :: is_on_a_slice_edge
 
   integer, dimension(NGLOB2DMAX_XMIN_XMAX) :: iboolleft_xi,iboolright_xi
@@ -56,16 +58,22 @@
   mask_ibool(:) = .false.
 
 ! mark all the points that are in the MPI buffers to assemble inside each chunk
-  do ipoin = 1,npoin2D_xi
-    mask_ibool(iboolright_xi(ipoin)) = .true.
+  do ipoin = 1,npoin2D_xi(1)
     mask_ibool(iboolleft_xi(ipoin)) = .true.
   enddo
 
-  do ipoin = 1,npoin2D_eta
-    mask_ibool(iboolright_eta(ipoin)) = .true.
+  do ipoin = 1,npoin2D_eta(1)
     mask_ibool(iboolleft_eta(ipoin)) = .true.
   enddo
 
+  do ipoin = 1,npoin2D_xi(2)
+    mask_ibool(iboolright_xi(ipoin)) = .true.
+  enddo
+
+  do ipoin = 1,npoin2D_eta(2)
+    mask_ibool(iboolright_eta(ipoin)) = .true.
+  enddo
+
 ! now label all the elements that have at least one corner belonging
 ! to any of these buffers as elements that must contribute to the
 ! first step of the calculations (performed on the edges before starting

Modified: seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/get_MPI_cutplanes_eta.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/get_MPI_cutplanes_eta.f90	2008-11-03 23:07:03 UTC (rev 13234)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/get_MPI_cutplanes_eta.f90	2008-11-03 23:37:15 UTC (rev 13235)
@@ -56,27 +56,24 @@
   integer ispec
 
 ! MPI cut-plane element numbering
-  integer ispecc1,ispecc2,npoin2D_eta,ix,iy,iz
-  integer nspec2Dtheor
+  integer :: ispecc1,ispecc2,ix,iy,iz,nspec2Dtheor
+  integer, dimension(NB_SQUARE_EDGES_ONEDIR) :: npoin2D_eta
 
-! theoretical number of surface elements in the buffers
-! cut planes along eta=constant correspond to XI faces
-      nspec2Dtheor = NSPEC2D_XI_FACE(iregion,1)
-
-! write the MPI buffers for the left and right edges of the slice
-! and the position of the points to check that the buffers are fine
-
 !
 ! determine if the element falls on the left MPI cut plane
 !
 
+! theoretical number of surface elements in the buffers
+! cut planes along eta=constant correspond to XI faces
+  nspec2Dtheor = NSPEC2D_XI_FACE(iregion,1)
+
 ! global point number and coordinates left MPI cut-plane
 
 ! erase the logical mask used to mark points already found
   mask_ibool(:) = .false.
 
 ! nb of global points shared with the other slice
-  npoin2D_eta = 0
+  npoin2D_eta(1) = 0
 
 ! nb of elements in this cut-plane
   ispecc1=0
@@ -91,15 +88,15 @@
             ! select point, if not already selected
             if(.not. mask_ibool(ibool(ix,iy,iz,ispec))) then
                 mask_ibool(ibool(ix,iy,iz,ispec)) = .true.
-                npoin2D_eta = npoin2D_eta + 1
+                npoin2D_eta(1) = npoin2D_eta(1) + 1
 !! DK DK added this for merged
-                if(npoin2D_eta > NGLOB2DMAX_YMIN_YMAX) stop 'DK DK error points merged'
+                if(npoin2D_eta(1) > NGLOB2DMAX_YMIN_YMAX) stop 'DK DK error points merged'
 !! DK DK suppressed merged                  write(10,*) ibool(ix,iy,iz,ispec), xstore(ix,iy,iz,ispec), &
 !! DK DK suppressed merged                        ystore(ix,iy,iz,ispec),zstore(ix,iy,iz,ispec)
 !! DK DK added this for merged
 !! DK DK merged   these two arrays are the same so we could declare only one
 !! DK DK merged   but actually no, because it is reused below for ibool_right
-                iboolleft_eta(npoin2D_eta) = ibool(ix,iy,iz,ispec)
+                iboolleft_eta(npoin2D_eta(1)) = ibool(ix,iy,iz,ispec)
             endif
           enddo
       enddo
@@ -112,17 +109,18 @@
 !! DK DK suppressed merged  write(10,*) '0 0  0.  0.  0.'
 
 ! write total number of points
-!! DK DK suppressed merged  write(10,*) npoin2D_eta
+!! DK DK suppressed merged  write(10,*) npoin2D_eta(1)
 
 !! DK DK suppressed merged  close(10)
 
 ! compare number of surface elements detected to analytical value
   if(ispecc1 /= nspec2Dtheor) call exit_MPI(myrank,'error MPI cut-planes detection in eta=left')
 
+
 !
 ! determine if the element falls on the right MPI cut plane
 !
-      nspec2Dtheor = NSPEC2D_XI_FACE(iregion,2)
+  nspec2Dtheor = NSPEC2D_XI_FACE(iregion,2)
 
 ! global point number and coordinates right MPI cut-plane
 
@@ -130,7 +128,7 @@
   mask_ibool(:) = .false.
 
 ! nb of global points shared with the other slice
-  npoin2D_eta = 0
+  npoin2D_eta(2) = 0
 
 ! nb of elements in this cut-plane
   ispecc2=0
@@ -145,15 +143,15 @@
           ! select point, if not already selected
           if(.not. mask_ibool(ibool(ix,iy,iz,ispec))) then
               mask_ibool(ibool(ix,iy,iz,ispec)) = .true.
-              npoin2D_eta = npoin2D_eta + 1
+              npoin2D_eta(2) = npoin2D_eta(2) + 1
 !! DK DK added this for merged
-              if(npoin2D_eta > NGLOB2DMAX_YMIN_YMAX) stop 'DK DK error points merged'
+              if(npoin2D_eta(2) > NGLOB2DMAX_YMIN_YMAX) stop 'DK DK error points merged'
 !! DK DK suppressed merged                write(10,*) ibool(ix,iy,iz,ispec), xstore(ix,iy,iz,ispec), &
 !! DK DK suppressed merged                      ystore(ix,iy,iz,ispec),zstore(ix,iy,iz,ispec)
 !! DK DK added this for merged
 !! DK DK merged   these two arrays are the same so we could declare only one
 !! DK DK merged   but actually no, because it is reused below for ibool_right
-              iboolright_eta(npoin2D_eta) = ibool(ix,iy,iz,ispec)
+              iboolright_eta(npoin2D_eta(2)) = ibool(ix,iy,iz,ispec)
           endif
         enddo
       enddo
@@ -166,7 +164,7 @@
 !! DK DK suppressed merged  write(10,*) '0 0  0.  0.  0.'
 
 ! write total number of points
-!! DK DK suppressed merged  write(10,*) npoin2D_eta
+!! DK DK suppressed merged  write(10,*) npoin2D_eta(2)
 
 !! DK DK suppressed merged  close(10)
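
Both cut-plane routines count the shared global points with the mask_ibool idiom visible above: each global point appears in several elements of the cut plane but must be stored only once. A minimal sketch of the idiom, with hypothetical loop bounds for an eta = constant face:

  mask_ibool(:) = .false.
  npoin2D = 0
  do ispec = 1, nspec_face              ! elements touching the cut plane
    do iz = 1, NGLLZ
      do ix = 1, NGLLX
        iglob = ibool(ix, 1, iz, ispec) ! iy = 1: points on the face itself
        if (.not. mask_ibool(iglob)) then
          mask_ibool(iglob) = .true.    ! count each shared point only once
          npoin2D = npoin2D + 1
          iboolleft_eta(npoin2D) = iglob
        endif
      enddo
    enddo
  enddo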
 

Modified: seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/get_MPI_cutplanes_xi.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/get_MPI_cutplanes_xi.f90	2008-11-03 23:07:03 UTC (rev 13234)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/get_MPI_cutplanes_xi.f90	2008-11-03 23:37:15 UTC (rev 13235)
@@ -56,28 +56,26 @@
   integer ispec
 
 ! MPI cut-plane element numbering
-  integer ispecc1,ispecc2,npoin2D_xi,ix,iy,iz
-  integer nspec2Dtheor
+  integer :: ispecc1,ispecc2,ix,iy,iz,nspec2Dtheor
+  integer, dimension(NB_SQUARE_EDGES_ONEDIR) :: npoin2D_xi
 
   character(len=150) errmsg
 
-! theoretical number of surface elements in the buffers
-! cut planes along xi=constant correspond to ETA faces
-      nspec2Dtheor = NSPEC2D_ETA_FACE(iregion,1)
-! write the MPI buffers for the left and right edges of the slice
-! and the position of the points to check that the buffers are fine
-
 !
 ! determine if the element falls on the left MPI cut plane
 !
 
+! theoretical number of surface elements in the buffers
+! cut planes along xi=constant correspond to ETA faces
+  nspec2Dtheor = NSPEC2D_ETA_FACE(iregion,1)
+
 ! global point number and coordinates left MPI cut-plane
 
 ! erase the logical mask used to mark points already found
   mask_ibool(:) = .false.
 
 ! nb of global points shared with the other slice
-  npoin2D_xi = 0
+  npoin2D_xi(1) = 0
 
 ! nb of elements in this cut-plane
   ispecc1=0
@@ -92,13 +90,13 @@
             ! select point, if not already selected
             if(.not. mask_ibool(ibool(ix,iy,iz,ispec))) then
                 mask_ibool(ibool(ix,iy,iz,ispec)) = .true.
-                npoin2D_xi = npoin2D_xi + 1
+                npoin2D_xi(1) = npoin2D_xi(1) + 1
 !! DK DK added this for merged
-                if(npoin2D_xi > NGLOB2DMAX_XMIN_XMAX) stop 'DK DK error points merged'
+                if(npoin2D_xi(1) > NGLOB2DMAX_XMIN_XMAX) stop 'DK DK error points merged'
 !! DK DK suppressed merged                  write(10,*) ibool(ix,iy,iz,ispec), xstore(ix,iy,iz,ispec), &
 !! DK DK suppressed merged                        ystore(ix,iy,iz,ispec),zstore(ix,iy,iz,ispec)
 !! DK DK added this for merged
-                iboolleft_xi(npoin2D_xi) = ibool(ix,iy,iz,ispec)
+                iboolleft_xi(npoin2D_xi(1)) = ibool(ix,iy,iz,ispec)
             endif
           enddo
       enddo
@@ -111,7 +109,7 @@
 !! DK DK suppressed merged  write(10,*) '0 0  0.  0.  0.'
 
 ! write total number of points
-!! DK DK suppressed merged  write(10,*) npoin2D_xi
+!! DK DK suppressed merged  write(10,*) npoin2D_xi(1)
 
 !! DK DK suppressed merged  close(10)
 
@@ -120,10 +118,12 @@
     write(errmsg,*) 'error MPI cut-planes detection in xi=left T=',nspec2Dtheor,' C=',ispecc1
     call exit_MPI(myrank,errmsg)
   endif
+
+
 !
 ! determine if the element falls on the right MPI cut plane
 !
-      nspec2Dtheor = NSPEC2D_ETA_FACE(iregion,2)
+  nspec2Dtheor = NSPEC2D_ETA_FACE(iregion,2)
 
 ! global point number and coordinates right MPI cut-plane
 
@@ -131,7 +131,7 @@
   mask_ibool(:) = .false.
 
 ! nb of global points shared with the other slice
-  npoin2D_xi = 0
+  npoin2D_xi(2) = 0
 
 ! nb of elements in this cut-plane
   ispecc2=0
@@ -146,12 +146,12 @@
           ! select point, if not already selected
           if(.not. mask_ibool(ibool(ix,iy,iz,ispec))) then
               mask_ibool(ibool(ix,iy,iz,ispec)) = .true.
-              npoin2D_xi = npoin2D_xi + 1
+              npoin2D_xi(2) = npoin2D_xi(2) + 1
 !! DK DK added this for merged
-              if(npoin2D_xi > NGLOB2DMAX_XMIN_XMAX) stop 'DK DK error points merged'
+              if(npoin2D_xi(2) > NGLOB2DMAX_XMIN_XMAX) stop 'DK DK error points merged'
 !! DK DK suppressed merged                write(10,*) ibool(ix,iy,iz,ispec), xstore(ix,iy,iz,ispec), &
 !! DK DK suppressed merged                      ystore(ix,iy,iz,ispec),zstore(ix,iy,iz,ispec)
-              iboolright_xi(npoin2D_xi) = ibool(ix,iy,iz,ispec)
+              iboolright_xi(npoin2D_xi(2)) = ibool(ix,iy,iz,ispec)
           endif
         enddo
       enddo
@@ -164,7 +164,7 @@
 !! DK DK suppressed merged  write(10,*) '0 0  0.  0.  0.'
 
 ! write total number of points
-!! DK DK suppressed merged  write(10,*) npoin2D_xi
+!! DK DK suppressed merged  write(10,*) npoin2D_xi(2)
 
 !! DK DK suppressed merged  close(10)
 

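In both cut-plane routines, npoin2D_xi and npoin2D_eta are promoted from
scalars to arrays of size NB_SQUARE_EDGES_ONEDIR, with slot 1 holding the
point count for the left cut plane and slot 2 the count for the right one,
so the two sides can differ once the superbrick is cut in two. A minimal,
self-contained sketch of that counting pattern follows; the array bounds
and the inner loop are hypothetical stand-ins for the real mesh loops:

  program count_two_sides
    implicit none
    integer, parameter :: NB_SQUARE_EDGES_ONEDIR = 2
    integer, parameter :: NGLOB2DMAX = 100   ! hypothetical bound
    integer, dimension(NB_SQUARE_EDGES_ONEDIR) :: npoin2D_xi
    integer, dimension(NGLOB2DMAX) :: iboolleft_xi,iboolright_xi
    logical, dimension(200) :: mask_ibool    ! hypothetical number of points
    integer :: iglob,iside

    do iside = 1,NB_SQUARE_EDGES_ONEDIR
  ! erase the logical mask before scanning each cut plane
      mask_ibool(:) = .false.
      npoin2D_xi(iside) = 0
  ! stand-in for the real loop over GLL points on that cut plane
      do iglob = 1,50
        if(.not. mask_ibool(iglob)) then
          mask_ibool(iglob) = .true.
          npoin2D_xi(iside) = npoin2D_xi(iside) + 1
          if(npoin2D_xi(iside) > NGLOB2DMAX) stop 'error: too many points'
          if(iside == 1) then
            iboolleft_xi(npoin2D_xi(iside)) = iglob
          else
            iboolright_xi(npoin2D_xi(iside)) = iglob
          endif
        endif
      enddo
    enddo

    print *,'points on left and right cut planes: ',npoin2D_xi
  end program count_two_sides
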
Modified: seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/main_program.F90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/main_program.F90	2008-11-03 23:07:03 UTC (rev 13234)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/main_program.F90	2008-11-03 23:37:15 UTC (rev 13235)
@@ -328,7 +328,7 @@
   integer, dimension(NGLOB2DMAX_YMIN_YMAX_IC) :: iboolleft_eta_inner_core,iboolright_eta_inner_core
 #endif
 
-  integer :: npoin2D_max_all
+  integer :: npoin2D_max_all_CM_IC
 
 ! receiver information
   integer :: nrec
@@ -508,41 +508,28 @@
 
   if(.not. MESHER_ONLY) then
 
-!! DK DK for merged version, temporary patch for David's code to cut the superbrick
-!! DK DK which I have not fully ported to the merged version yet: I do not
-!! DK DK yet distinguish the two values of each array, therefore let me set them
-!! DK DK equal here
-  npoin2D_xi_crust_mantle(2) = npoin2D_xi_crust_mantle(1)
-  npoin2D_eta_crust_mantle(2) = npoin2D_eta_crust_mantle(1)
-
-  npoin2D_xi_outer_core(2) = npoin2D_xi_outer_core(1)
-  npoin2D_eta_outer_core(2) = npoin2D_eta_outer_core(1)
-
-  npoin2D_xi_inner_core(2) = npoin2D_xi_inner_core(1)
-  npoin2D_eta_inner_core(2) = npoin2D_eta_inner_core(1)
-
-!! DK DK added this to reduce the size of the buffers
+! added this to reduce the size of the buffers
 ! size of buffers is the sum of two sizes because we handle two regions in the same MPI call
-  npoin2D_max_all = max(maxval(npoin2D_xi_crust_mantle(:) + npoin2D_xi_inner_core(:)), &
+  npoin2D_max_all_CM_IC = max(maxval(npoin2D_xi_crust_mantle(:) + npoin2D_xi_inner_core(:)), &
                         maxval(npoin2D_eta_crust_mantle(:) + npoin2D_eta_inner_core(:)))
 
-!! DK DK fix the non-blocking arrays to assemble inside the chunks: elements
-!! DK DK in contact with the MPI faces by an edge or a corner only but not
-!! DK DK a full face are missing, therefore let us add them
+! fix the non-blocking arrays to assemble inside the chunks: elements
+! in contact with the MPI faces by an edge or a corner only but not
+! a full face are missing, therefore let us add them
 #ifdef USE_MPI
   call fix_non_blocking_slices(is_on_a_slice_edge_crust_mantle,iboolright_xi_crust_mantle, &
          iboolleft_xi_crust_mantle,iboolright_eta_crust_mantle,iboolleft_eta_crust_mantle, &
-         npoin2D_xi_crust_mantle(1),npoin2D_eta_crust_mantle(1),ibool_crust_mantle, &
+         npoin2D_xi_crust_mantle,npoin2D_eta_crust_mantle,ibool_crust_mantle, &
          mask_ibool,NSPEC_CRUST_MANTLE,NGLOB_CRUST_MANTLE,NGLOB2DMAX_XMIN_XMAX_CM,NGLOB2DMAX_YMIN_YMAX_CM)
 
   call fix_non_blocking_slices(is_on_a_slice_edge_outer_core,iboolright_xi_outer_core, &
          iboolleft_xi_outer_core,iboolright_eta_outer_core,iboolleft_eta_outer_core, &
-         npoin2D_xi_outer_core(1),npoin2D_eta_outer_core(1),ibool_outer_core, &
+         npoin2D_xi_outer_core,npoin2D_eta_outer_core,ibool_outer_core, &
          mask_ibool,NSPEC_OUTER_CORE,NGLOB_OUTER_CORE,NGLOB2DMAX_XMIN_XMAX_OC,NGLOB2DMAX_YMIN_YMAX_OC)
 
   call fix_non_blocking_slices(is_on_a_slice_edge_inner_core,iboolright_xi_inner_core, &
          iboolleft_xi_inner_core,iboolright_eta_inner_core,iboolleft_eta_inner_core, &
-         npoin2D_xi_inner_core(1),npoin2D_eta_inner_core(1),ibool_inner_core, &
+         npoin2D_xi_inner_core,npoin2D_eta_inner_core,ibool_inner_core, &
          mask_ibool,NSPEC_INNER_CORE,NGLOB_INNER_CORE,NGLOB2DMAX_XMIN_XMAX_IC,NGLOB2DMAX_YMIN_YMAX_IC)
 #endif
 
@@ -555,7 +542,7 @@
   kappahstore_crust_mantle,muhstore_crust_mantle,eta_anisostore_crust_mantle,kappavstore_inner_core,muvstore_inner_core, &
   rmass_crust_mantle,rmass_outer_core,rmass_inner_core,rmass_ocean_load, &
 #ifdef USE_MPI
-  npoin2D_max_all,nrec,addressing,ibathy_topo, &
+  npoin2D_max_all_CM_IC,nrec,addressing,ibathy_topo, &
   ibelm_xmin_inner_core,ibelm_xmax_inner_core,ibelm_ymin_inner_core,ibelm_ymax_inner_core,ibelm_bottom_inner_core, &
 iboolleft_xi_crust_mantle,iboolright_xi_crust_mantle, iboolleft_eta_crust_mantle,iboolright_eta_crust_mantle, &
 iboolleft_xi_outer_core,iboolright_xi_outer_core,iboolleft_eta_outer_core,iboolright_eta_outer_core, &

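With the two slots now filled independently, the temporary patch that copied
slot (1) into slot (2) can be deleted, and the shared buffer size is taken as
the worst case over both directions and both sides. The buffer must hold
crust/mantle plus inner core points because both regions are assembled in the
same MPI call. A compilable sketch of that sizing rule, with hypothetical
point counts:

  program buffer_size_demo
    implicit none
  ! hypothetical point counts for the two sides of each cut plane
    integer, dimension(2) :: npoin2D_xi_crust_mantle = (/ 5000, 5200 /)
    integer, dimension(2) :: npoin2D_eta_crust_mantle = (/ 4800, 5100 /)
    integer, dimension(2) :: npoin2D_xi_inner_core = (/ 600, 650 /)
    integer, dimension(2) :: npoin2D_eta_inner_core = (/ 580, 640 /)
    integer :: npoin2D_max_all_CM_IC

  ! size of buffers is the sum of two sizes because crust/mantle and
  ! inner core are handled in the same MPI call
    npoin2D_max_all_CM_IC = max(maxval(npoin2D_xi_crust_mantle(:) + npoin2D_xi_inner_core(:)), &
                                maxval(npoin2D_eta_crust_mantle(:) + npoin2D_eta_inner_core(:)))

    print *,'npoin2D_max_all_CM_IC = ',npoin2D_max_all_CM_IC   ! prints 5850 here
  end program buffer_size_demo

The same reasoning explains why the calls to fix_non_blocking_slices now
receive the full two-element arrays rather than element (1) only.
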
Modified: seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/meshfem3D.F90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/meshfem3D.F90	2008-11-03 23:07:03 UTC (rev 13234)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/meshfem3D.F90	2008-11-03 23:37:15 UTC (rev 13235)
@@ -1534,8 +1534,7 @@
   c55store,c56store,c66store,iboun,locval,ifseg,rmass_ocean_load,mask_ibool,copy_ibool_ori,iMPIcut_xi,iMPIcut_eta, &
 #ifdef USE_MPI
   maxval(NGLOB1D_RADIAL_CORNER(iregion_code,:)),NSPEC2D_XI_FACE,NSPEC2D_ETA_FACE,NSPEC1D_RADIAL_CORNER,NGLOB1D_RADIAL_CORNER, &
-!! DK DK this below with index (1) will have to change to fully support David's code to cut the superbrick
-  NGLOB2DMAX_XMIN_XMAX_CM,NGLOB2DMAX_YMIN_YMAX_CM,npoin2D_xi_crust_mantle(1),npoin2D_eta_crust_mantle(1),iboolleft_xi_crust_mantle,&
+  NGLOB2DMAX_XMIN_XMAX_CM,NGLOB2DMAX_YMIN_YMAX_CM,npoin2D_xi_crust_mantle,npoin2D_eta_crust_mantle,iboolleft_xi_crust_mantle,&
   iboolright_xi_crust_mantle,iboolleft_eta_crust_mantle,iboolright_eta_crust_mantle,ibool1D_leftxi_lefteta, &
   ibool1D_rightxi_lefteta,ibool1D_leftxi_righteta,ibool1D_rightxi_righteta,xread1D_leftxi_lefteta,xread1D_rightxi_lefteta, &
   xread1D_leftxi_righteta,xread1D_rightxi_righteta,yread1D_leftxi_lefteta,yread1D_rightxi_lefteta,yread1D_leftxi_righteta, &
@@ -1569,8 +1568,7 @@
   c55store,c56store,c66store,iboun,locval,ifseg,rmass_ocean_load,mask_ibool,copy_ibool_ori,iMPIcut_xi,iMPIcut_eta, &
 #ifdef USE_MPI
   maxval(NGLOB1D_RADIAL_CORNER(iregion_code,:)),NSPEC2D_XI_FACE,NSPEC2D_ETA_FACE,NSPEC1D_RADIAL_CORNER,NGLOB1D_RADIAL_CORNER, &
-!! DK DK this below with index (1) will have to change to fully support David's code to cut the superbrick
-  NGLOB2DMAX_XMIN_XMAX_OC,NGLOB2DMAX_YMIN_YMAX_OC,npoin2D_xi_outer_core(1),npoin2D_eta_outer_core(1),iboolleft_xi_outer_core, &
+  NGLOB2DMAX_XMIN_XMAX_OC,NGLOB2DMAX_YMIN_YMAX_OC,npoin2D_xi_outer_core,npoin2D_eta_outer_core,iboolleft_xi_outer_core, &
   iboolright_xi_outer_core,iboolleft_eta_outer_core,iboolright_eta_outer_core,ibool1D_leftxi_lefteta, &
   ibool1D_rightxi_lefteta,ibool1D_leftxi_righteta,ibool1D_rightxi_righteta,xread1D_leftxi_lefteta,xread1D_rightxi_lefteta, &
   xread1D_leftxi_righteta,xread1D_rightxi_righteta,yread1D_leftxi_lefteta,yread1D_rightxi_lefteta,yread1D_leftxi_righteta, &
@@ -1604,8 +1602,7 @@
   c55store,c56store,c66store,iboun,locval,ifseg,rmass_ocean_load,mask_ibool,copy_ibool_ori,iMPIcut_xi,iMPIcut_eta, &
 #ifdef USE_MPI
   maxval(NGLOB1D_RADIAL_CORNER(iregion_code,:)),NSPEC2D_XI_FACE,NSPEC2D_ETA_FACE,NSPEC1D_RADIAL_CORNER,NGLOB1D_RADIAL_CORNER, &
-!! DK DK this below with index (1) will have to change to fully support David's code to cut the superbrick
-  NGLOB2DMAX_XMIN_XMAX_IC,NGLOB2DMAX_YMIN_YMAX_IC,npoin2D_xi_inner_core(1),npoin2D_eta_inner_core(1),iboolleft_xi_inner_core, &
+  NGLOB2DMAX_XMIN_XMAX_IC,NGLOB2DMAX_YMIN_YMAX_IC,npoin2D_xi_inner_core,npoin2D_eta_inner_core,iboolleft_xi_inner_core, &
   iboolright_xi_inner_core,iboolleft_eta_inner_core,iboolright_eta_inner_core,ibool1D_leftxi_lefteta, &
   ibool1D_rightxi_lefteta,ibool1D_leftxi_righteta,ibool1D_rightxi_righteta,xread1D_leftxi_lefteta,xread1D_rightxi_lefteta, &
   xread1D_leftxi_righteta,xread1D_rightxi_righteta,yread1D_leftxi_lefteta,yread1D_rightxi_lefteta,yread1D_leftxi_righteta, &

Modified: seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/read_compute_parameters.F90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/read_compute_parameters.F90	2008-11-03 23:07:03 UTC (rev 13234)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/read_compute_parameters.F90	2008-11-03 23:37:15 UTC (rev 13235)
@@ -1237,12 +1237,7 @@
     if(mod(NEX_ETA/8,NPROC_ETA) /= 0) stop 'NEX_ETA must be a multiple of 8*NPROC_ETA'
     if(mod(NEX_XI/16,NPROC_XI) /= 0) CUT_SUPERBRICK_XI = .true.
     if(mod(NEX_ETA/16,NPROC_ETA) /= 0) CUT_SUPERBRICK_ETA = .true.
-!! DK DK added this because of temporary bug in David's code
-    if(mod(NEX_XI/16,NPROC_XI) /= 0) &
-      stop 'NEX_XI multiple of 8*NPROC_XI but not of 16*NPROC_XI is currently unsafe'
-    if(mod(NEX_ETA/16,NPROC_ETA) /= 0) &
-      stop 'NEX_ETA multiple of 8*NPROC_ETA but not of 16*NPROC_ETA is currently unsafe'
-!! DK DK added this because of temporary bug in David's code
+
   else
 
     if(mod(NEX_XI,32) /= 0) stop 'NEX_XI must be a multiple of 32'
@@ -1251,6 +1246,7 @@
     if(mod(NEX_ETA/16,NPROC_ETA) /= 0) stop 'NEX_ETA must be a multiple of 16*NPROC_ETA'
     if(mod(NEX_XI/32,NPROC_XI) /= 0) CUT_SUPERBRICK_XI = .true.
     if(mod(NEX_ETA/32,NPROC_ETA) /= 0) CUT_SUPERBRICK_ETA = .true.
+
   endif
 
 ! check that topology is correct if more than two chunks

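The stop statements that forbade NEX values that are a multiple of 8*NPROC
but not of 16*NPROC are removed: with the superbrick cut in two this case now
works, and CUT_SUPERBRICK_XI / CUT_SUPERBRICK_ETA simply record it. A
standalone sketch of the relaxed check, with hypothetical NEX and NPROC
values chosen to trigger the cut:

  program check_nex
    implicit none
  ! hypothetical values: 48 is a multiple of 8*NPROC_XI = 16
  ! but not of 16*NPROC_XI = 32
    integer, parameter :: NEX_XI = 48, NPROC_XI = 2
    logical :: CUT_SUPERBRICK_XI

    CUT_SUPERBRICK_XI = .false.
    if(mod(NEX_XI,16) /= 0) stop 'NEX_XI must be a multiple of 16'
    if(mod(NEX_XI/8,NPROC_XI) /= 0) stop 'NEX_XI must be a multiple of 8*NPROC_XI'
  ! this case is no longer fatal: it simply means the superbrick
  ! must be cut in two in the xi direction
    if(mod(NEX_XI/16,NPROC_XI) /= 0) CUT_SUPERBRICK_XI = .true.

    print *,'CUT_SUPERBRICK_XI = ',CUT_SUPERBRICK_XI   ! prints T
  end program check_nex
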
Modified: seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/specfem3D.F90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/specfem3D.F90	2008-11-03 23:07:03 UTC (rev 13234)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/version41_beta/src/specfem3D.F90	2008-11-03 23:37:15 UTC (rev 13235)
@@ -39,7 +39,7 @@
   kappahstore_crust_mantle,muhstore_crust_mantle,eta_anisostore_crust_mantle,kappavstore_inner_core,muvstore_inner_core, &
   rmass_crust_mantle,rmass_outer_core,rmass_inner_core,rmass_ocean_load, &
 #ifdef USE_MPI
-  npoin2D_max_all,nrec,addressing,ibathy_topo, &
+  npoin2D_max_all_CM_IC,nrec,addressing,ibathy_topo, &
   ibelm_xmin_inner_core,ibelm_xmax_inner_core,ibelm_ymin_inner_core,ibelm_ymax_inner_core,ibelm_bottom_inner_core, &
 iboolleft_xi_crust_mantle,iboolright_xi_crust_mantle, iboolleft_eta_crust_mantle,iboolright_eta_crust_mantle, &
 iboolleft_xi_outer_core,iboolright_xi_outer_core,iboolleft_eta_outer_core,iboolright_eta_outer_core, &
@@ -225,8 +225,8 @@
 ! always three times bigger and therefore scalars can use the first part
 ! of the vector buffer in memory even if it has an additional index here
 ! allocate these automatic arrays in the memory stack to avoid memory fragmentation with "allocate()"
-  integer :: npoin2D_max_all
-  real(kind=CUSTOM_REAL), dimension(NDIM,npoin2D_max_all,NUMFACES_SHARED) :: buffer_send_faces,buffer_received_faces
+  integer :: npoin2D_max_all_CM_IC
+  real(kind=CUSTOM_REAL), dimension(NDIM,npoin2D_max_all_CM_IC,NUMFACES_SHARED) :: buffer_send_faces,buffer_received_faces
 
 #endif
 
@@ -454,9 +454,8 @@
 #endif
 
 #ifdef USE_MPI
-  integer, dimension(NB_SQUARE_EDGES_ONEDIR) :: npoin2D_xi_crust_mantle,npoin2D_eta_crust_mantle
-  integer, dimension(NB_SQUARE_EDGES_ONEDIR) :: npoin2D_xi_outer_core,npoin2D_eta_outer_core
-  integer, dimension(NB_SQUARE_EDGES_ONEDIR) :: npoin2D_xi_inner_core,npoin2D_eta_inner_core
+  integer, dimension(NB_SQUARE_EDGES_ONEDIR) :: npoin2D_xi_crust_mantle,npoin2D_eta_crust_mantle, &
+      npoin2D_xi_outer_core,npoin2D_eta_outer_core,npoin2D_xi_inner_core,npoin2D_eta_inner_core
 #endif
 
   integer :: ichunk,iproc_xi,iproc_eta
@@ -1240,7 +1239,7 @@
             iboolfaces_crust_mantle,iboolcorner_crust_mantle, &
             iprocfrom_faces,iprocto_faces,imsg_type, &
             iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
-            buffer_send_faces,buffer_received_faces,npoin2D_max_all, &
+            buffer_send_faces,buffer_received_faces,npoin2D_max_all_CM_IC, &
             buffer_send_chunkcorners_scalar,buffer_recv_chunkcorners_scalar, &
             NUMMSGS_FACES,NUM_MSG_TYPES,NCORNERSCHUNKS, &
             NPROC_XI,NPROC_ETA,NGLOB1D_RADIAL(IREGION_CRUST_MANTLE), &
@@ -1255,7 +1254,7 @@
             iboolfaces_crust_mantle,iboolcorner_crust_mantle, &
             iprocfrom_faces,iprocto_faces,imsg_type, &
             iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
-            buffer_send_faces,buffer_received_faces,npoin2D_max_all, &
+            buffer_send_faces,buffer_received_faces,npoin2D_max_all_CM_IC, &
             buffer_send_chunkcorners_scalar,buffer_recv_chunkcorners_scalar, &
             NUMMSGS_FACES,NUM_MSG_TYPES,NCORNERSCHUNKS, &
             NPROC_XI,NPROC_ETA,NGLOB1D_RADIAL(IREGION_CRUST_MANTLE), &
@@ -1269,7 +1268,7 @@
             iboolfaces_outer_core,iboolcorner_outer_core, &
             iprocfrom_faces,iprocto_faces,imsg_type, &
             iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
-            buffer_send_faces,buffer_received_faces,npoin2D_max_all, &
+            buffer_send_faces,buffer_received_faces,npoin2D_max_all_CM_IC, &
             buffer_send_chunkcorners_scalar,buffer_recv_chunkcorners_scalar, &
             NUMMSGS_FACES,NUM_MSG_TYPES,NCORNERSCHUNKS, &
             NPROC_XI,NPROC_ETA,NGLOB1D_RADIAL(IREGION_OUTER_CORE), &
@@ -1283,7 +1282,7 @@
             iboolfaces_inner_core,iboolcorner_inner_core, &
             iprocfrom_faces,iprocto_faces,imsg_type, &
             iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
-            buffer_send_faces,buffer_received_faces,npoin2D_max_all, &
+            buffer_send_faces,buffer_received_faces,npoin2D_max_all_CM_IC, &
             buffer_send_chunkcorners_scalar,buffer_recv_chunkcorners_scalar, &
             NUMMSGS_FACES,NUM_MSG_TYPES,NCORNERSCHUNKS, &
             NPROC_XI,NPROC_ETA,NGLOB1D_RADIAL(IREGION_INNER_CORE), &
@@ -1706,18 +1705,6 @@
     is_on_a_slice_edge_inner_core(:) = .true.
   endif
 
-#ifdef USE_MPI
-  if(USE_NONBLOCKING_COMMS) then
-     if(npoin2D_xi_crust_mantle(1) /= npoin2D_eta_crust_mantle(1) .or. &
-        npoin2D_xi_crust_mantle(2) /= npoin2D_eta_crust_mantle(2) .or. &
-        npoin2D_xi_outer_core(1) /= npoin2D_eta_outer_core(1) .or. &
-        npoin2D_xi_outer_core(2) /= npoin2D_eta_outer_core(2) .or. &
-        npoin2D_xi_inner_core(1) /= npoin2D_eta_inner_core(1) .or. &
-        npoin2D_xi_inner_core(2) /= npoin2D_eta_inner_core(2)) &
-       stop 'non-blocking scheme temporarily requires npoin2D_xi = npoin2D_eta because of the size of some buffers reused'
-  endif
-#endif
-
   vx_crust_mantle = size(factor_common_crust_mantle,2)
   vy_crust_mantle = size(factor_common_crust_mantle,3)
   vz_crust_mantle = size(factor_common_crust_mantle,4)
@@ -2102,7 +2089,7 @@
           iboolfaces_outer_core,iboolcorner_outer_core, &
           iprocfrom_faces,iprocto_faces, &
           iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
-          buffer_send_faces,buffer_received_faces,npoin2D_max_all, &
+          buffer_send_faces,buffer_received_faces,npoin2D_max_all_CM_IC, &
           buffer_send_chunkcorners_scalar,buffer_recv_chunkcorners_scalar,iphase, &
 #endif
           hprime_xx,hprime_yy,hprime_zz, &
@@ -2237,7 +2224,7 @@
             iboolfaces_outer_core,iboolcorner_outer_core, &
             iprocfrom_faces,iprocto_faces, &
             iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
-            buffer_send_faces,buffer_received_faces,npoin2D_xi_outer_core(1), &
+            buffer_send_faces,buffer_received_faces,npoin2D_max_all_CM_IC, &
             buffer_send_chunkcorners_scalar,buffer_recv_chunkcorners_scalar, &
             NUMMSGS_FACES,NCORNERSCHUNKS, &
             NPROC_XI,NPROC_ETA,NGLOB1D_RADIAL(IREGION_OUTER_CORE), &
@@ -2260,7 +2247,7 @@
           iboolfaces_outer_core,iboolcorner_outer_core, &
           iprocfrom_faces,iprocto_faces, &
           iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
-          buffer_send_faces,buffer_received_faces,npoin2D_max_all, &
+          buffer_send_faces,buffer_received_faces,npoin2D_max_all_CM_IC, &
           buffer_send_chunkcorners_scalar,buffer_recv_chunkcorners_scalar,iphase, &
           hprime_xx,hprime_yy,hprime_zz, &
           hprimewgll_xx,hprimewgll_yy,hprimewgll_zz, &
@@ -2280,7 +2267,7 @@
             iboolfaces_outer_core,iboolcorner_outer_core, &
             iprocfrom_faces,iprocto_faces, &
             iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
-            buffer_send_faces,buffer_received_faces,npoin2D_xi_outer_core(1), &
+            buffer_send_faces,buffer_received_faces,npoin2D_max_all_CM_IC, &
             buffer_send_chunkcorners_scalar,buffer_recv_chunkcorners_scalar, &
             NUMMSGS_FACES,NCORNERSCHUNKS, &
             NPROC_XI,NPROC_ETA,NGLOB1D_RADIAL(IREGION_OUTER_CORE), &
@@ -2294,7 +2281,7 @@
             iboolfaces_outer_core,iboolcorner_outer_core, &
             iprocfrom_faces,iprocto_faces,imsg_type, &
             iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
-            buffer_send_faces,buffer_received_faces,npoin2D_max_all, &
+            buffer_send_faces,buffer_received_faces,npoin2D_max_all_CM_IC, &
             buffer_send_chunkcorners_scalar,buffer_recv_chunkcorners_scalar, &
             NUMMSGS_FACES,NUM_MSG_TYPES,NCORNERSCHUNKS, &
             NPROC_XI,NPROC_ETA,NGLOB1D_RADIAL(IREGION_OUTER_CORE), &
@@ -2348,7 +2335,7 @@
             iboolfaces_inner_core,iboolcorner_inner_core, &
             iprocfrom_faces,iprocto_faces, &
             iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
-            buffer_send_faces,buffer_received_faces,npoin2D_max_all, &
+            buffer_send_faces,buffer_received_faces,npoin2D_max_all_CM_IC, &
             buffer_send_chunkcorners_vector,buffer_recv_chunkcorners_vector,iphase, &
 !!!!!!!!!!!!!!!!!!!!!!!!!
           nb_msgs_theor_in_cube,sender_from_slices_to_cube, &
@@ -2533,14 +2520,14 @@
     call assemble_MPI_vector(myrank,accel_crust_mantle,accel_inner_core, &
             iproc_xi,iproc_eta,ichunk,addressing, &
             iboolleft_xi_crust_mantle,iboolright_xi_crust_mantle,iboolleft_eta_crust_mantle,iboolright_eta_crust_mantle, &
-            npoin2D_faces_crust_mantle,npoin2D_xi_crust_mantle(1),npoin2D_eta_crust_mantle(1), &
+            npoin2D_faces_crust_mantle,npoin2D_xi_crust_mantle,npoin2D_eta_crust_mantle, &
             iboolfaces_crust_mantle,iboolcorner_crust_mantle, &
             iboolleft_xi_inner_core,iboolright_xi_inner_core,iboolleft_eta_inner_core,iboolright_eta_inner_core, &
-            npoin2D_faces_inner_core,npoin2D_xi_inner_core(1),npoin2D_eta_inner_core(1), &
+            npoin2D_faces_inner_core,npoin2D_xi_inner_core,npoin2D_eta_inner_core, &
             iboolfaces_inner_core,iboolcorner_inner_core, &
             iprocfrom_faces,iprocto_faces, &
             iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
-            buffer_send_faces,buffer_received_faces,npoin2D_max_all, &
+            buffer_send_faces,buffer_received_faces,npoin2D_max_all_CM_IC, &
             buffer_send_chunkcorners_vector,buffer_recv_chunkcorners_vector, &
             NUMMSGS_FACES,NCORNERSCHUNKS, &
             NPROC_XI,NPROC_ETA,NGLOB1D_RADIAL(IREGION_CRUST_MANTLE), &
@@ -2548,8 +2535,6 @@
   endif
 #endif
 
-  if(DEBUG_NONBLOCKING_COMMS) accel_crust_mantle = 1.e27
-
 #ifdef USE_MPI
   if(USE_NONBLOCKING_COMMS) then
     icall = 2
@@ -2582,7 +2567,7 @@
             iboolfaces_inner_core,iboolcorner_inner_core, &
             iprocfrom_faces,iprocto_faces, &
             iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
-            buffer_send_faces,buffer_received_faces,npoin2D_max_all, &
+            buffer_send_faces,buffer_received_faces,npoin2D_max_all_CM_IC, &
             buffer_send_chunkcorners_vector,buffer_recv_chunkcorners_vector,iphase, &
 !!!!!!!!!!!!!!!!!!!!!!!!!
           nb_msgs_theor_in_cube,sender_from_slices_to_cube, &
@@ -2597,20 +2582,6 @@
                               ystore_crust_mantle,zstore_crust_mantle,ibool_crust_mantle)
 
 #ifdef USE_MPI
-  if(DEBUG_NONBLOCKING_COMMS) then
-    do ipoin = 1,npoin2D_xi_crust_mantle(1)
-      if(minval(accel_crust_mantle(:,iboolright_xi_crust_mantle(ipoin))) < 1.e27) call exit_mpi(myrank,'error in new step 1')
-      if(minval(accel_crust_mantle(:,iboolleft_xi_crust_mantle(ipoin))) < 1.e27) call exit_mpi(myrank,'error in new step 2')
-      if(minval(accel_crust_mantle(:,iboolright_eta_crust_mantle(ipoin))) < 1.e27) then
-        print *,'myrank, maxval_abs,ibool = ',myrank, &
-          minval(accel_crust_mantle(:,iboolright_eta_crust_mantle(ipoin))),iboolright_eta_crust_mantle(ipoin)
-        call exit_mpi(myrank,'error in new step 3')
-      endif
-      if(minval(accel_crust_mantle(:,iboolleft_eta_crust_mantle(ipoin))) < 1.e27) call exit_mpi(myrank,'error in new step 4')
-    enddo
-    call exit_mpi(myrank,'everything went well in DEBUG_NONBLOCKING_COMMS, no overlap detected')
-  endif
-
 ! assemble all the contributions between slices using MPI
 ! crust/mantle and inner core handled in the same call
 ! in order to reduce the number of MPI messages by 2
@@ -2619,14 +2590,14 @@
       call assemble_MPI_vector(myrank,accel_crust_mantle,accel_inner_core, &
             iproc_xi,iproc_eta,ichunk,addressing, &
             iboolleft_xi_crust_mantle,iboolright_xi_crust_mantle,iboolleft_eta_crust_mantle,iboolright_eta_crust_mantle, &
-            npoin2D_faces_crust_mantle,npoin2D_xi_crust_mantle(1),npoin2D_eta_crust_mantle(1), &
+            npoin2D_faces_crust_mantle,npoin2D_xi_crust_mantle,npoin2D_eta_crust_mantle, &
             iboolfaces_crust_mantle,iboolcorner_crust_mantle, &
             iboolleft_xi_inner_core,iboolright_xi_inner_core,iboolleft_eta_inner_core,iboolright_eta_inner_core, &
-            npoin2D_faces_inner_core,npoin2D_xi_inner_core(1),npoin2D_eta_inner_core(1), &
+            npoin2D_faces_inner_core,npoin2D_xi_inner_core,npoin2D_eta_inner_core, &
             iboolfaces_inner_core,iboolcorner_inner_core, &
             iprocfrom_faces,iprocto_faces, &
             iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
-            buffer_send_faces,buffer_received_faces,npoin2D_max_all, &
+            buffer_send_faces,buffer_received_faces,npoin2D_max_all_CM_IC, &
             buffer_send_chunkcorners_vector,buffer_recv_chunkcorners_vector, &
             NUMMSGS_FACES,NCORNERSCHUNKS, &
             NPROC_XI,NPROC_ETA,NGLOB1D_RADIAL(IREGION_CRUST_MANTLE), &
@@ -2636,14 +2607,14 @@
     call assemble_MPI_vector_block(myrank,accel_crust_mantle,accel_inner_core, &
             iproc_xi,iproc_eta,ichunk,addressing, &
             iboolleft_xi_crust_mantle,iboolright_xi_crust_mantle,iboolleft_eta_crust_mantle,iboolright_eta_crust_mantle, &
-            npoin2D_faces_crust_mantle,npoin2D_xi_crust_mantle(1),npoin2D_eta_crust_mantle(1), &
+            npoin2D_faces_crust_mantle,npoin2D_xi_crust_mantle,npoin2D_eta_crust_mantle, &
             iboolfaces_crust_mantle,iboolcorner_crust_mantle, &
             iboolleft_xi_inner_core,iboolright_xi_inner_core,iboolleft_eta_inner_core,iboolright_eta_inner_core, &
-            npoin2D_faces_inner_core,npoin2D_xi_inner_core(1),npoin2D_eta_inner_core(1), &
+            npoin2D_faces_inner_core,npoin2D_xi_inner_core,npoin2D_eta_inner_core, &
             iboolfaces_inner_core,iboolcorner_inner_core, &
             iprocfrom_faces,iprocto_faces,imsg_type, &
             iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
-            buffer_send_faces,buffer_received_faces,npoin2D_max_all, &
+            buffer_send_faces,buffer_received_faces,npoin2D_max_all_CM_IC, &
             buffer_send_chunkcorners_vector,buffer_recv_chunkcorners_vector, &
             NUMMSGS_FACES,NUM_MSG_TYPES,NCORNERSCHUNKS, &
             NPROC_XI,NPROC_ETA,NGLOB1D_RADIAL(IREGION_CRUST_MANTLE), &


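Since every assembly call, scalar or vector, is now sized with
npoin2D_max_all_CM_IC instead of a per-region, per-direction count such as
npoin2D_xi_outer_core(1), the scalar outer-core assembly can always reuse the
first part of the vector buffer (which is three times bigger because of the
NDIM index), and the former restriction that npoin2D_xi equal npoin2D_eta
under non-blocking communications can be dropped. A sketch of the
automatic-array pattern used for these face buffers, with hypothetical
stand-ins for the constants from constants.h:

  program stack_buffers
    implicit none
    call demo_face_buffers(5850)   ! hypothetical worst-case point count

  contains

    subroutine demo_face_buffers(npoin2D_max_all_CM_IC)
  ! hypothetical stand-ins for constants from constants.h
      integer, parameter :: CUSTOM_REAL = 4, NDIM = 3, NUMFACES_SHARED = 4
      integer, intent(in) :: npoin2D_max_all_CM_IC
  ! automatic arrays: sized from the dummy argument at entry and placed
  ! on the memory stack, avoiding heap fragmentation from allocate()
      real(kind=CUSTOM_REAL), dimension(NDIM,npoin2D_max_all_CM_IC,NUMFACES_SHARED) :: &
        buffer_send_faces,buffer_received_faces

      buffer_send_faces(:,:,:) = 0._CUSTOM_REAL
      buffer_received_faces(:,:,:) = 0._CUSTOM_REAL
  ! a scalar assembly can reuse the first part of this vector buffer
  ! because the vector buffer is three times bigger (NDIM = 3)
      print *,'vector buffer entries per face: ',NDIM*npoin2D_max_all_CM_IC
    end subroutine demo_face_buffers

  end program stack_buffers
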
