[cig-commits] r20542 - in seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER: EXAMPLES/global_s362ani_small/DATA src/cuda src/meshfem3D src/shared src/specfem3D

danielpeter at geodynamics.org
Tue Jul 24 15:10:29 PDT 2012


Author: danielpeter
Date: 2012-07-24 15:10:28 -0700 (Tue, 24 Jul 2012)
New Revision: 20542

Modified:
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/EXAMPLES/global_s362ani_small/DATA/Par_file
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/cuda/assemble_MPI_scalar_cuda.cu
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/cuda/assemble_MPI_vector_cuda.cu
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/Makefile.in
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/assemble_MPI_scalar_mesh.f90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/assemble_MPI_vector_mesh.f90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/create_MPI_interfaces.f90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/create_chunk_buffers.f90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/create_regions_mesh.f90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/fix_non_blocking_flags.f90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/get_absorb.f90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/meshfem3D.f90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/meshfem3D_par.f90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/save_arrays_solver.f90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/setup_MPI_interfaces.f90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/setup_color_perm.f90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/setup_inner_outer.f90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/test_MPI_interfaces.f90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/shared/exit_mpi.f90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/Makefile.in
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/assemble_MPI_scalar.f90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/assemble_MPI_vector.f90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/check_simulation_stability.f90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_acoustic.F90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_elastic.F90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/prepare_timerun.f90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/read_arrays_buffers_solver.f90
Log:
updates mesh ordering for setting up MPI interfaces: the mesher now creates the MPI interfaces region by region (crust/mantle, outer core, inner core) instead of for all regions at once, and the assembly routines drop the _ext_mesh suffix

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/EXAMPLES/global_s362ani_small/DATA/Par_file
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/EXAMPLES/global_s362ani_small/DATA/Par_file	2012-07-24 22:09:06 UTC (rev 20541)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/EXAMPLES/global_s362ani_small/DATA/Par_file	2012-07-24 22:10:28 UTC (rev 20542)
@@ -47,7 +47,7 @@
 ABSORBING_CONDITIONS            = .false.
 
 # record length in minutes
-RECORD_LENGTH_IN_MINUTES        = 30.0d0
+RECORD_LENGTH_IN_MINUTES        = 15.0d0
 
 # save AVS or OpenDX movies
 MOVIE_SURFACE                   = .false.
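
Halving RECORD_LENGTH_IN_MINUTES roughly halves the number of time steps NSTEP, which SPECFEM3D_GLOBE derives from the record length and the time step DT chosen by the mesher. A back-of-the-envelope sketch of that scaling (the DT value here is purely illustrative, not the value the code computes):

    /* rough NSTEP scaling for the record-length change above (illustrative only) */
    #include <stdio.h>

    int main(void) {
      double record_length_minutes = 15.0;  /* new Par_file value */
      double dt = 0.19;                     /* hypothetical time step in seconds */
      printf("approx. NSTEP = %.0f\n", record_length_minutes * 60.0 / dt);
      return 0;
    }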

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/cuda/assemble_MPI_scalar_cuda.cu
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/cuda/assemble_MPI_scalar_cuda.cu	2012-07-24 22:09:06 UTC (rev 20541)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/cuda/assemble_MPI_scalar_cuda.cu	2012-07-24 22:10:28 UTC (rev 20542)
@@ -42,18 +42,18 @@
 // is followed by a memcpy and MPI operations
 __global__ void prepare_boundary_potential_on_device(realw* d_potential_dot_dot_acoustic,
                                                      realw* d_send_potential_dot_dot_buffer,
-                                                     int num_interfaces_ext_mesh,
-                                                     int max_nibool_interfaces_ext_mesh,
-                                                     int* d_nibool_interfaces_ext_mesh,
-                                                     int* d_ibool_interfaces_ext_mesh) {
+                                                     int num_interfaces,
+                                                     int max_nibool_interfaces,
+                                                     int* d_nibool_interfaces,
+                                                     int* d_ibool_interfaces) {
 
   int id = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*gridDim.x*blockDim.x;
   int iinterface=0;
 
-  for( iinterface=0; iinterface < num_interfaces_ext_mesh; iinterface++) {
-    if(id<d_nibool_interfaces_ext_mesh[iinterface]) {
-      d_send_potential_dot_dot_buffer[(id + max_nibool_interfaces_ext_mesh*iinterface)] =
-      d_potential_dot_dot_acoustic[(d_ibool_interfaces_ext_mesh[id+max_nibool_interfaces_ext_mesh*iinterface]-1)];
+  for( iinterface=0; iinterface < num_interfaces; iinterface++) {
+    if(id<d_nibool_interfaces[iinterface]) {
+      d_send_potential_dot_dot_buffer[(id + max_nibool_interfaces*iinterface)] =
+      d_potential_dot_dot_acoustic[(d_ibool_interfaces[id+max_nibool_interfaces*iinterface]-1)];
     }
   }
 
@@ -120,19 +120,19 @@
 
 __global__ void assemble_boundary_potential_on_device(realw* d_potential_dot_dot_acoustic,
                                                       realw* d_send_potential_dot_dot_buffer,
-                                                      int num_interfaces_ext_mesh,
-                                                      int max_nibool_interfaces_ext_mesh,
-                                                      int* d_nibool_interfaces_ext_mesh,
-                                                      int* d_ibool_interfaces_ext_mesh) {
+                                                      int num_interfaces,
+                                                      int max_nibool_interfaces,
+                                                      int* d_nibool_interfaces,
+                                                      int* d_ibool_interfaces) {
 
   int id = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*gridDim.x*blockDim.x;
   int iinterface=0;
 
-  for( iinterface=0; iinterface < num_interfaces_ext_mesh; iinterface++) {
-    if(id<d_nibool_interfaces_ext_mesh[iinterface]) {
+  for( iinterface=0; iinterface < num_interfaces; iinterface++) {
+    if(id<d_nibool_interfaces[iinterface]) {
 
-      atomicAdd(&d_potential_dot_dot_acoustic[(d_ibool_interfaces_ext_mesh[id+max_nibool_interfaces_ext_mesh*iinterface]-1)],
-                d_send_potential_dot_dot_buffer[(id + max_nibool_interfaces_ext_mesh*iinterface)]);
+      atomicAdd(&d_potential_dot_dot_acoustic[(d_ibool_interfaces[id+max_nibool_interfaces*iinterface]-1)],
+                d_send_potential_dot_dot_buffer[(id + max_nibool_interfaces*iinterface)]);
     }
   }
 }
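
The renaming above is purely cosmetic (the _ext_mesh suffix is dropped), but the buffer layout is worth spelling out: each interface owns a fixed-stride slot of max_nibool entries in the flat send buffer, a point's slot is id + max_nibool*iinterface, and the ibool entries are 1-based Fortran indices, hence the "-1". A minimal self-contained CUDA sketch of the gather with toy sizes (all names and values here are illustrative, not the SPECFEM build):

    // toy gather using the same indexing as prepare_boundary_potential_on_device
    #include <cstdio>
    typedef float realw;

    __global__ void gather(const realw* field, realw* sendbuf,
                           int num_interfaces, int max_nibool,
                           const int* nibool, const int* ibool) {
      int id = threadIdx.x + blockIdx.x * blockDim.x;
      for (int ii = 0; ii < num_interfaces; ii++) {
        // interface ii owns buffer slots [ii*max_nibool, ii*max_nibool + nibool[ii])
        if (id < nibool[ii])
          sendbuf[id + max_nibool * ii] = field[ibool[id + max_nibool * ii] - 1];  // 1-based -> 0-based
      }
    }

    int main() {
      const int ni = 2, maxn = 4;
      int h_nibool[2] = {4, 3};                        // points per interface
      int h_ibool[8]  = {1,2,3,4, 5,6,7,0};            // 1-based global ids (last slot unused)
      realw h_field[8] = {10,11,12,13,14,15,16,17};
      int *d_nibool, *d_ibool; realw *d_field, *d_send;
      cudaMalloc(&d_nibool, sizeof(h_nibool)); cudaMalloc(&d_ibool, sizeof(h_ibool));
      cudaMalloc(&d_field, sizeof(h_field));   cudaMalloc(&d_send, ni*maxn*sizeof(realw));
      cudaMemset(d_send, 0, ni*maxn*sizeof(realw));
      cudaMemcpy(d_nibool, h_nibool, sizeof(h_nibool), cudaMemcpyHostToDevice);
      cudaMemcpy(d_ibool, h_ibool, sizeof(h_ibool), cudaMemcpyHostToDevice);
      cudaMemcpy(d_field, h_field, sizeof(h_field), cudaMemcpyHostToDevice);
      gather<<<1, maxn>>>(d_field, d_send, ni, maxn, d_nibool, d_ibool);
      realw h_send[8];
      cudaMemcpy(h_send, d_send, sizeof(h_send), cudaMemcpyDeviceToHost);
      for (int i = 0; i < ni*maxn; i++) printf("%g ", h_send[i]);  // 10 11 12 13 14 15 16 0
      printf("\n");
      return 0;
    }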

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/cuda/assemble_MPI_vector_cuda.cu
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/cuda/assemble_MPI_vector_cuda.cu	2012-07-24 22:09:06 UTC (rev 20541)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/cuda/assemble_MPI_vector_cuda.cu	2012-07-24 22:10:28 UTC (rev 20542)
@@ -41,22 +41,22 @@
 // prepares a device array with all inter-element edge-nodes -- this
 // is followed by a memcpy and MPI operations
 __global__ void prepare_boundary_accel_on_device(realw* d_accel, realw* d_send_accel_buffer,
-                                                 int num_interfaces_ext_mesh,
-                                                 int max_nibool_interfaces_ext_mesh,
-                                                 int* d_nibool_interfaces_ext_mesh,
-                                                 int* d_ibool_interfaces_ext_mesh) {
+                                                 int num_interfaces,
+                                                 int max_nibool_interfaces,
+                                                 int* d_nibool_interfaces,
+                                                 int* d_ibool_interfaces) {
 
   int id = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*gridDim.x*blockDim.x;
   int iinterface=0;
 
-  for( iinterface=0; iinterface < num_interfaces_ext_mesh; iinterface++) {
-    if(id<d_nibool_interfaces_ext_mesh[iinterface]) {
-      d_send_accel_buffer[3*(id + max_nibool_interfaces_ext_mesh*iinterface)] =
-      d_accel[3*(d_ibool_interfaces_ext_mesh[id+max_nibool_interfaces_ext_mesh*iinterface]-1)];
-      d_send_accel_buffer[3*(id + max_nibool_interfaces_ext_mesh*iinterface)+1] =
-      d_accel[3*(d_ibool_interfaces_ext_mesh[id+max_nibool_interfaces_ext_mesh*iinterface]-1)+1];
-      d_send_accel_buffer[3*(id + max_nibool_interfaces_ext_mesh*iinterface)+2] =
-      d_accel[3*(d_ibool_interfaces_ext_mesh[id+max_nibool_interfaces_ext_mesh*iinterface]-1)+2];
+  for( iinterface=0; iinterface < num_interfaces; iinterface++) {
+    if(id<d_nibool_interfaces[iinterface]) {
+      d_send_accel_buffer[3*(id + max_nibool_interfaces*iinterface)] =
+      d_accel[3*(d_ibool_interfaces[id+max_nibool_interfaces*iinterface]-1)];
+      d_send_accel_buffer[3*(id + max_nibool_interfaces*iinterface)+1] =
+      d_accel[3*(d_ibool_interfaces[id+max_nibool_interfaces*iinterface]-1)+1];
+      d_send_accel_buffer[3*(id + max_nibool_interfaces*iinterface)+2] =
+      d_accel[3*(d_ibool_interfaces[id+max_nibool_interfaces*iinterface]-1)+2];
     }
   }
 
@@ -165,22 +165,22 @@
 /* ----------------------------------------------------------------------------------------------- */
 
 __global__ void assemble_boundary_accel_on_device(realw* d_accel, realw* d_send_accel_buffer,
-                                                  int num_interfaces_ext_mesh,
-                                                  int max_nibool_interfaces_ext_mesh,
-                                                  int* d_nibool_interfaces_ext_mesh,
-                                                  int* d_ibool_interfaces_ext_mesh) {
+                                                  int num_interfaces,
+                                                  int max_nibool_interfaces,
+                                                  int* d_nibool_interfaces,
+                                                  int* d_ibool_interfaces) {
 
   int id = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*gridDim.x*blockDim.x;
   int iinterface=0;
 
-  for( iinterface=0; iinterface < num_interfaces_ext_mesh; iinterface++) {
-    if(id < d_nibool_interfaces_ext_mesh[iinterface]) {
-      atomicAdd(&d_accel[3*(d_ibool_interfaces_ext_mesh[id+max_nibool_interfaces_ext_mesh*iinterface]-1)],
-                d_send_accel_buffer[3*(id + max_nibool_interfaces_ext_mesh*iinterface)]);
-      atomicAdd(&d_accel[3*(d_ibool_interfaces_ext_mesh[id+max_nibool_interfaces_ext_mesh*iinterface]-1)+1],
-                d_send_accel_buffer[3*(id + max_nibool_interfaces_ext_mesh*iinterface)+1]);
-      atomicAdd(&d_accel[3*(d_ibool_interfaces_ext_mesh[id+max_nibool_interfaces_ext_mesh*iinterface]-1)+2],
-                d_send_accel_buffer[3*(id + max_nibool_interfaces_ext_mesh*iinterface)+2]);
+  for( iinterface=0; iinterface < num_interfaces; iinterface++) {
+    if(id < d_nibool_interfaces[iinterface]) {
+      atomicAdd(&d_accel[3*(d_ibool_interfaces[id+max_nibool_interfaces*iinterface]-1)],
+                d_send_accel_buffer[3*(id + max_nibool_interfaces*iinterface)]);
+      atomicAdd(&d_accel[3*(d_ibool_interfaces[id+max_nibool_interfaces*iinterface]-1)+1],
+                d_send_accel_buffer[3*(id + max_nibool_interfaces*iinterface)+1]);
+      atomicAdd(&d_accel[3*(d_ibool_interfaces[id+max_nibool_interfaces*iinterface]-1)+2],
+                d_send_accel_buffer[3*(id + max_nibool_interfaces*iinterface)+2]);
     }
   }
 }
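
The vector kernels use the same layout with the three components of each point interleaved (3*slot, 3*slot+1, 3*slot+2). The assembly direction needs atomicAdd because a global node can sit on more than one interface (e.g. at slice corners), so two threads may accumulate into the same accel entry concurrently. A toy sketch, with hypothetical sizes, in which global node 1 is updated by two different threads:

    #include <cstdio>

    __global__ void scatter_add(float* accel, const float* recvbuf,
                                int num_interfaces, int max_nibool,
                                const int* nibool, const int* ibool) {
      int id = threadIdx.x + blockIdx.x * blockDim.x;
      for (int ii = 0; ii < num_interfaces; ii++) {
        if (id < nibool[ii])
          atomicAdd(&accel[ibool[id + max_nibool * ii] - 1],  // same node may be hit by several threads
                    recvbuf[id + max_nibool * ii]);
      }
    }

    int main() {
      const int ni = 2, maxn = 2;
      int h_nibool[2] = {2, 2};
      int h_ibool[4]  = {1, 2,  3, 1};     // node 1 is on both interfaces, at different thread ids
      float h_recv[4] = {0.5f, 1.0f, 0.25f, 2.0f};
      float h_accel[3] = {0.f, 0.f, 0.f};
      int *d_nibool, *d_ibool; float *d_recv, *d_accel;
      cudaMalloc(&d_nibool, sizeof(h_nibool)); cudaMalloc(&d_ibool, sizeof(h_ibool));
      cudaMalloc(&d_recv, sizeof(h_recv));     cudaMalloc(&d_accel, sizeof(h_accel));
      cudaMemcpy(d_nibool, h_nibool, sizeof(h_nibool), cudaMemcpyHostToDevice);
      cudaMemcpy(d_ibool, h_ibool, sizeof(h_ibool), cudaMemcpyHostToDevice);
      cudaMemcpy(d_recv, h_recv, sizeof(h_recv), cudaMemcpyHostToDevice);
      cudaMemcpy(d_accel, h_accel, sizeof(h_accel), cudaMemcpyHostToDevice);
      scatter_add<<<1, maxn>>>(d_accel, d_recv, ni, maxn, d_nibool, d_ibool);
      cudaMemcpy(h_accel, d_accel, sizeof(h_accel), cudaMemcpyDeviceToHost);
      printf("accel = %g %g %g\n", h_accel[0], h_accel[1], h_accel[2]);  // 2.5 1 0.25
      return 0;
    }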

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/Makefile.in
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/Makefile.in	2012-07-24 22:09:06 UTC (rev 20541)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/Makefile.in	2012-07-24 22:10:28 UTC (rev 20542)
@@ -173,7 +173,7 @@
 	$O/setup_MPI_interfaces.o \
 	$O/sort_array_coordinates.o \
 	$O/stretching_function.o \
-	$O/test_MPI_interfaces.mpi.o \
+	$O/test_MPI_interfaces.o \
 	$O/write_AVS_DX_global_chunks_data.o \
 	$O/write_AVS_DX_global_data.o \
 	$O/write_AVS_DX_global_faces_data.o \

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/assemble_MPI_scalar_mesh.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/assemble_MPI_scalar_mesh.f90	2012-07-24 22:09:06 UTC (rev 20541)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/assemble_MPI_scalar_mesh.f90	2012-07-24 22:10:28 UTC (rev 20542)
@@ -441,10 +441,10 @@
 !-------------------------------------------------------------------------------------------------
 !
 
-  subroutine assemble_MPI_scalar_ext_mesh(NPROC,NGLOB_AB,array_val, &
-                        num_interfaces_ext_mesh,max_nibool_interfaces_ext_mesh, &
-                        nibool_interfaces_ext_mesh,ibool_interfaces_ext_mesh, &
-                        my_neighbours_ext_mesh)
+  subroutine assemble_MPI_scalar(NPROC,NGLOB_AB,array_val, &
+                        num_interfaces,max_nibool_interfaces, &
+                        nibool_interfaces,ibool_interfaces, &
+                        my_neighbours)
 
 ! blocking send/receive
 
@@ -458,15 +458,15 @@
   ! array to assemble
   real(kind=CUSTOM_REAL), dimension(NGLOB_AB) :: array_val
 
-  integer :: num_interfaces_ext_mesh,max_nibool_interfaces_ext_mesh
-  integer, dimension(num_interfaces_ext_mesh) :: nibool_interfaces_ext_mesh,my_neighbours_ext_mesh
-  integer, dimension(max_nibool_interfaces_ext_mesh,num_interfaces_ext_mesh) :: ibool_interfaces_ext_mesh
+  integer :: num_interfaces,max_nibool_interfaces
+  integer, dimension(num_interfaces) :: nibool_interfaces,my_neighbours
+  integer, dimension(max_nibool_interfaces,num_interfaces) :: ibool_interfaces
 
   ! local parameters
-  real(kind=CUSTOM_REAL), dimension(:,:), allocatable :: buffer_send_scalar_ext_mesh
-  real(kind=CUSTOM_REAL), dimension(:,:), allocatable :: buffer_recv_scalar_ext_mesh
-  integer, dimension(:), allocatable :: request_send_scalar_ext_mesh
-  integer, dimension(:), allocatable :: request_recv_scalar_ext_mesh
+  real(kind=CUSTOM_REAL), dimension(:,:), allocatable :: buffer_send_scalar
+  real(kind=CUSTOM_REAL), dimension(:,:), allocatable :: buffer_recv_scalar
+  integer, dimension(:), allocatable :: request_send_scalar
+  integer, dimension(:), allocatable :: request_recv_scalar
 
 
   integer ipoin,iinterface,ier
@@ -476,64 +476,64 @@
   ! assemble only if more than one partition
   if(NPROC > 1) then
 
-    allocate(buffer_send_scalar_ext_mesh(max_nibool_interfaces_ext_mesh,num_interfaces_ext_mesh),stat=ier)
-    if( ier /= 0 ) stop 'error allocating array buffer_send_scalar_ext_mesh'
-    allocate(buffer_recv_scalar_ext_mesh(max_nibool_interfaces_ext_mesh,num_interfaces_ext_mesh),stat=ier)
-    if( ier /= 0 ) stop 'error allocating array buffer_recv_scalar_ext_mesh'
-    allocate(request_send_scalar_ext_mesh(num_interfaces_ext_mesh),stat=ier)
-    if( ier /= 0 ) stop 'error allocating array request_send_scalar_ext_mesh'
-    allocate(request_recv_scalar_ext_mesh(num_interfaces_ext_mesh),stat=ier)
-    if( ier /= 0 ) stop 'error allocating array request_recv_scalar_ext_mesh'
+    allocate(buffer_send_scalar(max_nibool_interfaces,num_interfaces),stat=ier)
+    if( ier /= 0 ) stop 'error allocating array buffer_send_scalar'
+    allocate(buffer_recv_scalar(max_nibool_interfaces,num_interfaces),stat=ier)
+    if( ier /= 0 ) stop 'error allocating array buffer_recv_scalar'
+    allocate(request_send_scalar(num_interfaces),stat=ier)
+    if( ier /= 0 ) stop 'error allocating array request_send_scalar'
+    allocate(request_recv_scalar(num_interfaces),stat=ier)
+    if( ier /= 0 ) stop 'error allocating array request_recv_scalar'
 
     ! partition border copy into the buffer
-    do iinterface = 1, num_interfaces_ext_mesh
-      do ipoin = 1, nibool_interfaces_ext_mesh(iinterface)
-        buffer_send_scalar_ext_mesh(ipoin,iinterface) = array_val(ibool_interfaces_ext_mesh(ipoin,iinterface))
+    do iinterface = 1, num_interfaces
+      do ipoin = 1, nibool_interfaces(iinterface)
+        buffer_send_scalar(ipoin,iinterface) = array_val(ibool_interfaces(ipoin,iinterface))
       enddo
     enddo
 
     ! send messages
-    do iinterface = 1, num_interfaces_ext_mesh
+    do iinterface = 1, num_interfaces
       ! non-blocking synchronous send request
-      call isend_cr(buffer_send_scalar_ext_mesh(1:nibool_interfaces_ext_mesh(iinterface),iinterface), &
-           nibool_interfaces_ext_mesh(iinterface), &
-           my_neighbours_ext_mesh(iinterface), &
+      call isend_cr(buffer_send_scalar(1:nibool_interfaces(iinterface),iinterface), &
+           nibool_interfaces(iinterface), &
+           my_neighbours(iinterface), &
            itag, &
-           request_send_scalar_ext_mesh(iinterface) &
+           request_send_scalar(iinterface) &
            )
       ! receive request
-      call irecv_cr(buffer_recv_scalar_ext_mesh(1:nibool_interfaces_ext_mesh(iinterface),iinterface), &
-           nibool_interfaces_ext_mesh(iinterface), &
-           my_neighbours_ext_mesh(iinterface), &
+      call irecv_cr(buffer_recv_scalar(1:nibool_interfaces(iinterface),iinterface), &
+           nibool_interfaces(iinterface), &
+           my_neighbours(iinterface), &
            itag, &
-           request_recv_scalar_ext_mesh(iinterface) &
+           request_recv_scalar(iinterface) &
            )
     enddo
 
     ! wait for communications completion (recv)
-    do iinterface = 1, num_interfaces_ext_mesh
-      call wait_req(request_recv_scalar_ext_mesh(iinterface))
+    do iinterface = 1, num_interfaces
+      call wait_req(request_recv_scalar(iinterface))
     enddo
 
     ! adding contributions of neighbours
-    do iinterface = 1, num_interfaces_ext_mesh
-      do ipoin = 1, nibool_interfaces_ext_mesh(iinterface)
-        array_val(ibool_interfaces_ext_mesh(ipoin,iinterface)) = &
-             array_val(ibool_interfaces_ext_mesh(ipoin,iinterface)) + buffer_recv_scalar_ext_mesh(ipoin,iinterface)
+    do iinterface = 1, num_interfaces
+      do ipoin = 1, nibool_interfaces(iinterface)
+        array_val(ibool_interfaces(ipoin,iinterface)) = &
+             array_val(ibool_interfaces(ipoin,iinterface)) + buffer_recv_scalar(ipoin,iinterface)
       enddo
     enddo
 
     ! wait for communications completion (send)
-    do iinterface = 1, num_interfaces_ext_mesh
-      call wait_req(request_send_scalar_ext_mesh(iinterface))
+    do iinterface = 1, num_interfaces
+      call wait_req(request_send_scalar(iinterface))
     enddo
 
-    deallocate(buffer_send_scalar_ext_mesh)
-    deallocate(buffer_recv_scalar_ext_mesh)
-    deallocate(request_send_scalar_ext_mesh)
-    deallocate(request_recv_scalar_ext_mesh)
+    deallocate(buffer_send_scalar)
+    deallocate(buffer_recv_scalar)
+    deallocate(request_send_scalar)
+    deallocate(request_recv_scalar)
 
   endif
 
-  end subroutine assemble_MPI_scalar_ext_mesh
+  end subroutine assemble_MPI_scalar
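
The renamed routine keeps the familiar non-blocking pattern: pack each interface's border values, post one isend/irecv pair per interface, wait for the receives, add the neighbours' contributions, and only then wait for the sends. A hedged host-side rendering in plain C with illustrative names and 0-based ibool (not the SPECFEM API, which goes through the isend_cr/irecv_cr/wait_req wrappers):

    /* same pattern as assemble_MPI_scalar above, in plain C (illustrative) */
    #include <mpi.h>

    void assemble_scalar(float* val,                       /* global dof array      */
                         int num_interfaces, int max_nibool,
                         const int* nibool,                /* points per interface  */
                         const int* ibool,                 /* 0-based, flattened    */
                         const int* neighbours,            /* neighbour ranks       */
                         float* sendbuf, float* recvbuf,   /* max_nibool*num_interfaces each */
                         MPI_Request* sreq, MPI_Request* rreq) {
      const int itag = 0;
      for (int ii = 0; ii < num_interfaces; ii++) {
        for (int ip = 0; ip < nibool[ii]; ip++)            /* partition border copy */
          sendbuf[ip + max_nibool*ii] = val[ibool[ip + max_nibool*ii]];
        MPI_Isend(&sendbuf[max_nibool*ii], nibool[ii], MPI_FLOAT,
                  neighbours[ii], itag, MPI_COMM_WORLD, &sreq[ii]);
        MPI_Irecv(&recvbuf[max_nibool*ii], nibool[ii], MPI_FLOAT,
                  neighbours[ii], itag, MPI_COMM_WORLD, &rreq[ii]);
      }
      /* MPI_Waitall stands in for the per-interface wait_req loops */
      MPI_Waitall(num_interfaces, rreq, MPI_STATUSES_IGNORE);
      for (int ii = 0; ii < num_interfaces; ii++)          /* add neighbour contributions */
        for (int ip = 0; ip < nibool[ii]; ip++)
          val[ibool[ip + max_nibool*ii]] += recvbuf[ip + max_nibool*ii];
      MPI_Waitall(num_interfaces, sreq, MPI_STATUSES_IGNORE);
    }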
 

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/assemble_MPI_vector_mesh.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/assemble_MPI_vector_mesh.f90	2012-07-24 22:09:06 UTC (rev 20541)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/assemble_MPI_vector_mesh.f90	2012-07-24 22:10:28 UTC (rev 20542)
@@ -26,10 +26,10 @@
 !=====================================================================
 
 
-  subroutine assemble_MPI_vector_ext_mesh(NPROC,NGLOB_AB,array_val, &
-                        num_interfaces_ext_mesh,max_nibool_interfaces_ext_mesh, &
-                        nibool_interfaces_ext_mesh,ibool_interfaces_ext_mesh, &
-                        my_neighbours_ext_mesh)
+  subroutine assemble_MPI_vector(NPROC,NGLOB_AB,array_val, &
+                        num_interfaces,max_nibool_interfaces, &
+                        nibool_interfaces,ibool_interfaces, &
+                        my_neighbours)
 
   implicit none
 
@@ -41,19 +41,19 @@
 ! array to assemble
   real(kind=CUSTOM_REAL), dimension(NDIM,NGLOB_AB) :: array_val
 
-  integer :: num_interfaces_ext_mesh,max_nibool_interfaces_ext_mesh
-  integer, dimension(num_interfaces_ext_mesh) :: nibool_interfaces_ext_mesh,my_neighbours_ext_mesh
-  integer, dimension(max_nibool_interfaces_ext_mesh,num_interfaces_ext_mesh) :: ibool_interfaces_ext_mesh
+  integer :: num_interfaces,max_nibool_interfaces
+  integer, dimension(num_interfaces) :: nibool_interfaces,my_neighbours
+  integer, dimension(max_nibool_interfaces,num_interfaces) :: ibool_interfaces
 
   ! local parameters
 
   ! send/receive temporary buffers
-  real(kind=CUSTOM_REAL), dimension(:,:,:), allocatable :: buffer_send_vector_ext_mesh
-  real(kind=CUSTOM_REAL), dimension(:,:,:), allocatable :: buffer_recv_vector_ext_mesh
+  real(kind=CUSTOM_REAL), dimension(:,:,:), allocatable :: buffer_send_vector
+  real(kind=CUSTOM_REAL), dimension(:,:,:), allocatable :: buffer_recv_vector
 
   ! requests
-  integer, dimension(:), allocatable :: request_send_vector_ext_mesh
-  integer, dimension(:), allocatable :: request_recv_vector_ext_mesh
+  integer, dimension(:), allocatable :: request_send_vector
+  integer, dimension(:), allocatable :: request_recv_vector
 
   integer ipoin,iinterface,ier
 
@@ -63,63 +63,63 @@
   ! assemble only if more than one partition
   if(NPROC > 1) then
 
-    allocate(buffer_send_vector_ext_mesh(NDIM,max_nibool_interfaces_ext_mesh,num_interfaces_ext_mesh),stat=ier)
-    if( ier /= 0 ) stop 'error allocating array buffer_send_vector_ext_mesh'
-    allocate(buffer_recv_vector_ext_mesh(NDIM,max_nibool_interfaces_ext_mesh,num_interfaces_ext_mesh),stat=ier)
-    if( ier /= 0 ) stop 'error allocating array buffer_recv_vector_ext_mesh'
-    allocate(request_send_vector_ext_mesh(num_interfaces_ext_mesh),stat=ier)
-    if( ier /= 0 ) stop 'error allocating array request_send_vector_ext_mesh'
-    allocate(request_recv_vector_ext_mesh(num_interfaces_ext_mesh),stat=ier)
-    if( ier /= 0 ) stop 'error allocating array request_recv_vector_ext_mesh'
+    allocate(buffer_send_vector(NDIM,max_nibool_interfaces,num_interfaces),stat=ier)
+    if( ier /= 0 ) stop 'error allocating array buffer_send_vector'
+    allocate(buffer_recv_vector(NDIM,max_nibool_interfaces,num_interfaces),stat=ier)
+    if( ier /= 0 ) stop 'error allocating array buffer_recv_vector'
+    allocate(request_send_vector(num_interfaces),stat=ier)
+    if( ier /= 0 ) stop 'error allocating array request_send_vector'
+    allocate(request_recv_vector(num_interfaces),stat=ier)
+    if( ier /= 0 ) stop 'error allocating array request_recv_vector'
 
     ! partition border copy into the buffer
-    do iinterface = 1, num_interfaces_ext_mesh
-      do ipoin = 1, nibool_interfaces_ext_mesh(iinterface)
-        buffer_send_vector_ext_mesh(:,ipoin,iinterface) = &
-          array_val(:,ibool_interfaces_ext_mesh(ipoin,iinterface))
+    do iinterface = 1, num_interfaces
+      do ipoin = 1, nibool_interfaces(iinterface)
+        buffer_send_vector(:,ipoin,iinterface) = &
+          array_val(:,ibool_interfaces(ipoin,iinterface))
       enddo
     enddo
 
     ! send messages
-    do iinterface = 1, num_interfaces_ext_mesh
-      call isend_cr(buffer_send_vector_ext_mesh(1,1,iinterface), &
-           NDIM*nibool_interfaces_ext_mesh(iinterface), &
-           my_neighbours_ext_mesh(iinterface), &
+    do iinterface = 1, num_interfaces
+      call isend_cr(buffer_send_vector(1,1,iinterface), &
+           NDIM*nibool_interfaces(iinterface), &
+           my_neighbours(iinterface), &
            itag, &
-           request_send_vector_ext_mesh(iinterface) &
+           request_send_vector(iinterface) &
            )
-      call irecv_cr(buffer_recv_vector_ext_mesh(1,1,iinterface), &
-           NDIM*nibool_interfaces_ext_mesh(iinterface), &
-           my_neighbours_ext_mesh(iinterface), &
+      call irecv_cr(buffer_recv_vector(1,1,iinterface), &
+           NDIM*nibool_interfaces(iinterface), &
+           my_neighbours(iinterface), &
            itag, &
-           request_recv_vector_ext_mesh(iinterface) &
+           request_recv_vector(iinterface) &
            )
     enddo
 
     ! wait for communications completion (recv)
-    do iinterface = 1, num_interfaces_ext_mesh
-      call wait_req(request_recv_vector_ext_mesh(iinterface))
+    do iinterface = 1, num_interfaces
+      call wait_req(request_recv_vector(iinterface))
     enddo
 
     ! adding contributions of neighbours
-    do iinterface = 1, num_interfaces_ext_mesh
-      do ipoin = 1, nibool_interfaces_ext_mesh(iinterface)
-        array_val(:,ibool_interfaces_ext_mesh(ipoin,iinterface)) = &
-             array_val(:,ibool_interfaces_ext_mesh(ipoin,iinterface)) &
-             + buffer_recv_vector_ext_mesh(:,ipoin,iinterface)
+    do iinterface = 1, num_interfaces
+      do ipoin = 1, nibool_interfaces(iinterface)
+        array_val(:,ibool_interfaces(ipoin,iinterface)) = &
+             array_val(:,ibool_interfaces(ipoin,iinterface)) &
+             + buffer_recv_vector(:,ipoin,iinterface)
       enddo
     enddo
 
     ! wait for communications completion (send)
-    do iinterface = 1, num_interfaces_ext_mesh
-      call wait_req(request_send_vector_ext_mesh(iinterface))
+    do iinterface = 1, num_interfaces
+      call wait_req(request_send_vector(iinterface))
     enddo
 
-    deallocate(buffer_send_vector_ext_mesh)
-    deallocate(buffer_recv_vector_ext_mesh)
-    deallocate(request_send_vector_ext_mesh)
-    deallocate(request_recv_vector_ext_mesh)
+    deallocate(buffer_send_vector)
+    deallocate(buffer_recv_vector)
+    deallocate(request_send_vector)
+    deallocate(request_recv_vector)
 
   endif
 
-  end subroutine assemble_MPI_vector_ext_mesh
+  end subroutine assemble_MPI_vector
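
The vector version is identical in structure; the only twist is the buffer shape (NDIM, max_nibool, num_interfaces) with the three components of a point contiguous in memory, so one message of NDIM*nibool values starting at buffer(1,1,iinterface) moves a whole interface without an extra copy. Continuing the C sketch above (same illustrative names, fragment drops into the same function context):

    /* vector variant: 3 contiguous components per point, one message per interface */
    enum { NDIM = 3 };
    for (int ii = 0; ii < num_interfaces; ii++) {
      for (int ip = 0; ip < nibool[ii]; ip++)
        for (int c = 0; c < NDIM; c++)
          sendbuf[c + NDIM*(ip + max_nibool*ii)] =
            val[c + NDIM*ibool[ip + max_nibool*ii]];
      MPI_Isend(&sendbuf[NDIM*max_nibool*ii], NDIM*nibool[ii], MPI_FLOAT,
                neighbours[ii], itag, MPI_COMM_WORLD, &sreq[ii]);
    }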

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/create_MPI_interfaces.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/create_MPI_interfaces.f90	2012-07-24 22:09:06 UTC (rev 20541)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/create_MPI_interfaces.f90	2012-07-24 22:10:28 UTC (rev 20542)
@@ -25,278 +25,47 @@
 !
 !=====================================================================
 
-module create_MPI_interfaces_par
 
-  use constants,only: CUSTOM_REAL,NUMFACES_SHARED,NB_SQUARE_EDGES_ONEDIR,NDIM,IMAIN
 
-  implicit none
+  subroutine create_MPI_interfaces(iregion_code)
 
-  ! indirect addressing for each message for faces and corners of the chunks
-  ! a given slice can belong to at most one corner and at most two faces
-  integer :: NGLOB2DMAX_XY
-
-  ! number of faces between chunks
-  integer :: NUMMSGS_FACES
-
-  ! number of corners between chunks
-  integer :: NCORNERSCHUNKS
-
-  ! number of message types
-  integer :: NUM_MSG_TYPES
-
-  !-----------------------------------------------------------------
-  ! assembly
-  !-----------------------------------------------------------------
-
-  ! ---- arrays to assemble between chunks
-  ! communication pattern for faces between chunks
-  integer, dimension(:),allocatable :: iprocfrom_faces,iprocto_faces,imsg_type
-  ! communication pattern for corners between chunks
-  integer, dimension(:),allocatable :: iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners
-
-  ! this for non blocking MPI
-
-  ! buffers for send and receive between faces of the slices and the chunks
-  ! we use the same buffers to assemble scalars and vectors because vectors are
-  ! always three times bigger and therefore scalars can use the first part
-  ! of the vector buffer in memory even if it has an additional index here
-  integer :: npoin2D_max_all_CM_IC
-  real(kind=CUSTOM_REAL), dimension(:,:,:), allocatable :: buffer_send_faces,buffer_received_faces
-
-  ! buffers for send and receive between corners of the chunks
-  real(kind=CUSTOM_REAL), dimension(:),allocatable :: &
-    buffer_send_chunkcorn_scalar,buffer_recv_chunkcorn_scalar
-
-  ! size of buffers is the sum of two sizes because we handle two regions in the same MPI call
-  real(kind=CUSTOM_REAL), dimension(:,:),allocatable :: &
-     buffer_send_chunkcorn_vector,buffer_recv_chunkcorn_vector
-
-
-  ! collected MPI interfaces
-  !--------------------------------------
-  ! MPI crust/mantle mesh
-  !--------------------------------------
-  integer :: num_interfaces_crust_mantle
-  integer :: max_nibool_interfaces_crust_mantle
-  integer, dimension(:), allocatable :: my_neighbours_crust_mantle,nibool_interfaces_crust_mantle
-  integer, dimension(:,:), allocatable :: ibool_interfaces_crust_mantle
-
-  real(kind=CUSTOM_REAL), dimension(:,:,:), allocatable :: buffer_send_vector_crust_mantle,buffer_recv_vector_crust_mantle
-
-  integer, dimension(:), allocatable :: request_send_vector_crust_mantle,request_recv_vector_crust_mantle
-
-  !--------------------------------------
-  ! MPI inner core mesh
-  !--------------------------------------
-  integer :: num_interfaces_inner_core
-  integer :: max_nibool_interfaces_inner_core
-  integer, dimension(:), allocatable :: my_neighbours_inner_core,nibool_interfaces_inner_core
-  integer, dimension(:,:), allocatable :: ibool_interfaces_inner_core
-
-  real(kind=CUSTOM_REAL), dimension(:,:,:), allocatable :: buffer_send_vector_inner_core,buffer_recv_vector_inner_core
-
-  integer, dimension(:), allocatable :: request_send_vector_inner_core,request_recv_vector_inner_core
-
-  !--------------------------------------
-  ! MPI outer core mesh
-  !--------------------------------------
-  integer :: num_interfaces_outer_core
-  integer :: max_nibool_interfaces_outer_core
-  integer, dimension(:), allocatable :: my_neighbours_outer_core,nibool_interfaces_outer_core
-  integer, dimension(:,:), allocatable :: ibool_interfaces_outer_core
-
-  real(kind=CUSTOM_REAL), dimension(:,:), allocatable :: buffer_send_scalar_outer_core,buffer_recv_scalar_outer_core
-
-  integer, dimension(:), allocatable :: request_send_scalar_outer_core,request_recv_scalar_outer_core
-
-  ! temporary arrays for elements on slices or edges
-  logical, dimension(:),allocatable :: is_on_a_slice_edge_crust_mantle, &
-    is_on_a_slice_edge_inner_core,is_on_a_slice_edge_outer_core
-
-  logical, dimension(:),allocatable :: mask_ibool
-
-  !--------------------------------------
-  ! crust mantle
-  !--------------------------------------
-  integer :: NSPEC_CRUST_MANTLE
-  integer :: NGLOB_CRUST_MANTLE
-
-  integer :: NGLOB1D_RADIAL_CM
-  integer :: NGLOB2DMAX_XMIN_XMAX_CM
-  integer :: NGLOB2DMAX_YMIN_YMAX_CM
-  integer :: NSPEC2DMAX_XMIN_XMAX_CM
-  integer :: NSPEC2DMAX_YMIN_YMAX_CM
-  integer :: NSPEC2D_BOTTOM_CM
-  integer :: NSPEC2D_TOP_CM
-
-  real(kind=CUSTOM_REAL), dimension(:),allocatable :: &
-    xstore_crust_mantle,ystore_crust_mantle,zstore_crust_mantle
-  integer, dimension(:),allocatable :: idoubling_crust_mantle
-  integer, dimension(:,:,:,:),allocatable :: ibool_crust_mantle
-
-  ! assembly
-  integer :: npoin2D_faces_crust_mantle(NUMFACES_SHARED)
-  integer, dimension(NB_SQUARE_EDGES_ONEDIR) :: npoin2D_xi_crust_mantle,npoin2D_eta_crust_mantle
-
-  ! indirect addressing for each corner of the chunks
-  integer, dimension(:,:),allocatable :: iboolcorner_crust_mantle
-
-  ! 2-D addressing and buffers for summation between slices
-  integer, dimension(:),allocatable :: iboolleft_xi_crust_mantle,iboolright_xi_crust_mantle
-  integer, dimension(:),allocatable :: iboolleft_eta_crust_mantle,iboolright_eta_crust_mantle
-
-  integer, dimension(:,:),allocatable :: iboolfaces_crust_mantle
-
-  ! inner / outer elements crust/mantle region
-  integer :: num_phase_ispec_crust_mantle
-  integer :: nspec_inner_crust_mantle,nspec_outer_crust_mantle
-  integer, dimension(:,:), allocatable :: phase_ispec_inner_crust_mantle
-
-  ! mesh coloring
-  integer :: num_colors_outer_crust_mantle,num_colors_inner_crust_mantle
-  integer,dimension(:),allocatable :: num_elem_colors_crust_mantle
-
-  !--------------------------------------
-  ! outer core
-  !--------------------------------------
-  integer :: NSPEC_OUTER_CORE
-  integer :: NGLOB_OUTER_CORE
-
-  integer :: NGLOB1D_RADIAL_OC
-  integer :: NGLOB2DMAX_XMIN_XMAX_OC
-  integer :: NGLOB2DMAX_YMIN_YMAX_OC
-  integer :: NSPEC2DMAX_XMIN_XMAX_OC
-  integer :: NSPEC2DMAX_YMIN_YMAX_OC
-  integer :: NSPEC2D_BOTTOM_OC
-  integer :: NSPEC2D_TOP_OC
-
-  real(kind=CUSTOM_REAL), dimension(:),allocatable :: &
-    xstore_outer_core,ystore_outer_core,zstore_outer_core
-  integer, dimension(:),allocatable :: idoubling_outer_core
-  integer, dimension(:,:,:,:),allocatable :: ibool_outer_core
-
-  ! assembly
-  integer :: npoin2D_faces_outer_core(NUMFACES_SHARED)
-  integer, dimension(NB_SQUARE_EDGES_ONEDIR) :: npoin2D_xi_outer_core,npoin2D_eta_outer_core
-
-  ! indirect addressing for each corner of the chunks
-  integer, dimension(:,:),allocatable :: iboolcorner_outer_core
-
-  ! 2-D addressing and buffers for summation between slices
-  integer, dimension(:),allocatable :: iboolleft_xi_outer_core,iboolright_xi_outer_core
-  integer, dimension(:),allocatable :: iboolleft_eta_outer_core,iboolright_eta_outer_core
-
-  integer, dimension(:,:),allocatable :: iboolfaces_outer_core
-
-  ! inner / outer elements outer core region
-  integer :: num_phase_ispec_outer_core
-  integer :: nspec_inner_outer_core,nspec_outer_outer_core
-  integer, dimension(:,:), allocatable :: phase_ispec_inner_outer_core
-
-  ! mesh coloring
-  integer :: num_colors_outer_outer_core,num_colors_inner_outer_core
-  integer,dimension(:),allocatable :: num_elem_colors_outer_core
-
-
-  !--------------------------------------
-  ! inner core
-  !--------------------------------------
-  integer :: NSPEC_INNER_CORE
-  integer :: NGLOB_INNER_CORE
-
-  integer :: NGLOB1D_RADIAL_IC
-  integer :: NGLOB2DMAX_XMIN_XMAX_IC
-  integer :: NGLOB2DMAX_YMIN_YMAX_IC
-  integer :: NSPEC2DMAX_XMIN_XMAX_IC
-  integer :: NSPEC2DMAX_YMIN_YMAX_IC
-  integer :: NSPEC2D_BOTTOM_IC
-  integer :: NSPEC2D_TOP_IC
-
-  real(kind=CUSTOM_REAL), dimension(:),allocatable :: &
-    xstore_inner_core,ystore_inner_core,zstore_inner_core
-  integer, dimension(:),allocatable :: idoubling_inner_core
-  integer, dimension(:,:,:,:),allocatable :: ibool_inner_core
-
-
-  ! for matching with central cube in inner core
-  integer, dimension(:), allocatable :: sender_from_slices_to_cube
-  integer, dimension(:,:), allocatable :: ibool_central_cube
-  double precision, dimension(:,:), allocatable :: buffer_slices,buffer_slices2
-  double precision, dimension(:,:,:), allocatable :: buffer_all_cube_from_slices
-  integer nb_msgs_theor_in_cube,non_zero_nb_msgs_theor_in_cube,npoin2D_cube_from_slices,receiver_cube_from_slices
-
-  integer :: nspec2D_xmin_inner_core,nspec2D_xmax_inner_core, &
-            nspec2D_ymin_inner_core,nspec2D_ymax_inner_core
-
-  integer, dimension(:),allocatable :: ibelm_xmin_inner_core,ibelm_xmax_inner_core
-  integer, dimension(:),allocatable :: ibelm_ymin_inner_core,ibelm_ymax_inner_core
-  integer, dimension(:),allocatable :: ibelm_bottom_inner_core
-  integer, dimension(:),allocatable :: ibelm_top_inner_core
-
-  integer :: npoin2D_faces_inner_core(NUMFACES_SHARED)
-  integer, dimension(NB_SQUARE_EDGES_ONEDIR) :: npoin2D_xi_inner_core,npoin2D_eta_inner_core
-
-  ! indirect addressing for each corner of the chunks
-  integer, dimension(:,:),allocatable :: iboolcorner_inner_core
-
-  ! 2-D addressing and buffers for summation between slices
-  integer, dimension(:),allocatable :: iboolleft_xi_inner_core,iboolright_xi_inner_core
-  integer, dimension(:),allocatable :: iboolleft_eta_inner_core,iboolright_eta_inner_core
-
-  integer, dimension(:,:),allocatable :: iboolfaces_inner_core
-
-  ! inner / outer elements inner core region
-  integer :: num_phase_ispec_inner_core
-  integer :: nspec_inner_inner_core,nspec_outer_inner_core
-  integer, dimension(:,:), allocatable :: phase_ispec_inner_inner_core
-
-  ! mesh coloring
-  integer :: num_colors_outer_inner_core,num_colors_inner_inner_core
-  integer,dimension(:),allocatable :: num_elem_colors_inner_core
-
-end module create_MPI_interfaces_par
-
-!
-!-------------------------------------------------------------------------------------------------
-!
-
-  subroutine create_MPI_interfaces()
-
   implicit none
-
+  integer,intent(in):: iregion_code
+  
   ! sets up arrays
-  call cmi_read_addressing()
+  call cmi_allocate_addressing(iregion_code)
 
+  ! reads in arrays
+  call cmi_read_addressing(iregion_code)
+
   ! reads "iboolleft_..txt", "iboolright_..txt" (and "list_messages_..txt", "buffer_...txt") files and sets up MPI buffers
-  call cmi_read_buffers()
+  call cmi_read_buffers(iregion_code)
 
   ! sets up MPI interfaces
-  call setup_MPI_interfaces()
+  call setup_MPI_interfaces(iregion_code)
 
-  ! sets up inner/outer element arrays
-  call setup_Inner_Outer()
-
-  ! sets up mesh coloring
-  call setup_color_perm()
-
-  ! saves interface infos
-  call cmi_save_interfaces()
-
-  ! frees memory
-  call cmi_free_arrays()
-
   end subroutine create_MPI_interfaces
 
 !
 !-------------------------------------------------------------------------------------------------
 !
 
-  subroutine cmi_read_addressing()
+  subroutine cmi_allocate_addressing(iregion_code)
 
-  use meshfem3D_par
+  use meshfem3D_par,only: &
+    NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX, &
+    NSPEC2DMAX_XMIN_XMAX,NSPEC2DMAX_YMIN_YMAX, &
+    NSPEC2D_BOTTOM,NSPEC2D_TOP,NSPEC,NGLOB, &
+    NCHUNKS,myrank,NGLOB1D_RADIAL,NUMCORNERS_SHARED,NPROC_XI,NGLLX,NGLLY,NGLLZ,LOCAL_PATH
+    
   use create_MPI_interfaces_par
+  use MPI_crust_mantle_par
+  use MPI_outer_core_par
+  use MPI_inner_core_par
   implicit none
 
+  integer,intent(in):: iregion_code
+  
   ! local parameters
   integer :: NUM_FACES,NPROC_ONE_DIRECTION
   integer :: ier
@@ -332,172 +101,229 @@
   ! total number of messages corresponding to these common faces
   NUMMSGS_FACES = NPROC_ONE_DIRECTION*NUM_FACES*NUM_MSG_TYPES
 
-  allocate(iprocfrom_faces(NUMMSGS_FACES), &
-          iprocto_faces(NUMMSGS_FACES), &
-          imsg_type(NUMMSGS_FACES),stat=ier)
-  if( ier /= 0 ) call exit_mpi(myrank,'error allocating iproc faces arrays')
-
-  ! communication pattern for corners between chunks
-  allocate(iproc_master_corners(NCORNERSCHUNKS), &
-          iproc_worker1_corners(NCORNERSCHUNKS), &
-          iproc_worker2_corners(NCORNERSCHUNKS),stat=ier)
-  if( ier /= 0 ) call exit_mpi(myrank,'error allocating iproc corner arrays')
-
   ! parameters from header file
   NGLOB1D_RADIAL_CM = NGLOB1D_RADIAL(IREGION_CRUST_MANTLE)
   NGLOB1D_RADIAL_OC = NGLOB1D_RADIAL(IREGION_OUTER_CORE)
   NGLOB1D_RADIAL_IC = NGLOB1D_RADIAL(IREGION_INNER_CORE)
 
-  NGLOB2DMAX_XMIN_XMAX_CM = NGLOB2DMAX_XMIN_XMAX(IREGION_CRUST_MANTLE)
-  NGLOB2DMAX_XMIN_XMAX_OC = NGLOB2DMAX_XMIN_XMAX(IREGION_OUTER_CORE)
-  NGLOB2DMAX_XMIN_XMAX_IC = NGLOB2DMAX_XMIN_XMAX(IREGION_INNER_CORE)
+  NSPEC_CRUST_MANTLE = 0
+  NGLOB_CRUST_MANTLE = 0
+  
+  NSPEC_OUTER_CORE = 0
+  NGLOB_OUTER_CORE = 0
 
-  NGLOB2DMAX_YMIN_YMAX_CM = NGLOB2DMAX_YMIN_YMAX(IREGION_CRUST_MANTLE)
-  NGLOB2DMAX_YMIN_YMAX_OC = NGLOB2DMAX_YMIN_YMAX(IREGION_OUTER_CORE)
-  NGLOB2DMAX_YMIN_YMAX_IC = NGLOB2DMAX_YMIN_YMAX(IREGION_INNER_CORE)
+  NSPEC_INNER_CORE = 0
+  NGLOB_INNER_CORE = 0
+  
+  select case( iregion_code )
+  case( IREGION_CRUST_MANTLE )
+    NGLOB2DMAX_XMIN_XMAX_CM = NGLOB2DMAX_XMIN_XMAX(IREGION_CRUST_MANTLE)
+    NGLOB2DMAX_YMIN_YMAX_CM = NGLOB2DMAX_YMIN_YMAX(IREGION_CRUST_MANTLE)
 
-  NSPEC2DMAX_XMIN_XMAX_CM = NSPEC2DMAX_XMIN_XMAX(IREGION_CRUST_MANTLE)
-  NSPEC2DMAX_YMIN_YMAX_CM = NSPEC2DMAX_YMIN_YMAX(IREGION_CRUST_MANTLE)
-  NSPEC2D_BOTTOM_CM = NSPEC2D_BOTTOM(IREGION_CRUST_MANTLE)
-  NSPEC2D_TOP_CM = NSPEC2D_TOP(IREGION_CRUST_MANTLE)
+    NSPEC2DMAX_XMIN_XMAX_CM = NSPEC2DMAX_XMIN_XMAX(IREGION_CRUST_MANTLE)
+    NSPEC2DMAX_YMIN_YMAX_CM = NSPEC2DMAX_YMIN_YMAX(IREGION_CRUST_MANTLE)
+    NSPEC2D_BOTTOM_CM = NSPEC2D_BOTTOM(IREGION_CRUST_MANTLE)
+    NSPEC2D_TOP_CM = NSPEC2D_TOP(IREGION_CRUST_MANTLE)
 
-  NSPEC2DMAX_XMIN_XMAX_IC = NSPEC2DMAX_XMIN_XMAX(IREGION_INNER_CORE)
-  NSPEC2DMAX_YMIN_YMAX_IC = NSPEC2DMAX_YMIN_YMAX(IREGION_INNER_CORE)
-  NSPEC2D_BOTTOM_IC = NSPEC2D_BOTTOM(IREGION_INNER_CORE)
-  NSPEC2D_TOP_IC = NSPEC2D_TOP(IREGION_INNER_CORE)
+    NSPEC_CRUST_MANTLE = NSPEC(IREGION_CRUST_MANTLE)
+    NGLOB_CRUST_MANTLE = NGLOB(IREGION_CRUST_MANTLE)
 
-  NSPEC2DMAX_XMIN_XMAX_OC = NSPEC2DMAX_XMIN_XMAX(IREGION_OUTER_CORE)
-  NSPEC2DMAX_YMIN_YMAX_OC = NSPEC2DMAX_YMIN_YMAX(IREGION_OUTER_CORE)
-  NSPEC2D_BOTTOM_OC = NSPEC2D_BOTTOM(IREGION_OUTER_CORE)
-  NSPEC2D_TOP_OC = NSPEC2D_TOP(IREGION_OUTER_CORE)
+  case( IREGION_OUTER_CORE )
+    NGLOB2DMAX_XMIN_XMAX_OC = NGLOB2DMAX_XMIN_XMAX(IREGION_OUTER_CORE)
+    NGLOB2DMAX_YMIN_YMAX_OC = NGLOB2DMAX_YMIN_YMAX(IREGION_OUTER_CORE)
 
-  NSPEC_CRUST_MANTLE = NSPEC(IREGION_CRUST_MANTLE)
-  NSPEC_INNER_CORE = NSPEC(IREGION_INNER_CORE)
-  NSPEC_OUTER_CORE = NSPEC(IREGION_OUTER_CORE)
+    NSPEC2DMAX_XMIN_XMAX_OC = NSPEC2DMAX_XMIN_XMAX(IREGION_OUTER_CORE)
+    NSPEC2DMAX_YMIN_YMAX_OC = NSPEC2DMAX_YMIN_YMAX(IREGION_OUTER_CORE)
+    NSPEC2D_BOTTOM_OC = NSPEC2D_BOTTOM(IREGION_OUTER_CORE)
+    NSPEC2D_TOP_OC = NSPEC2D_TOP(IREGION_OUTER_CORE)
 
-  NGLOB_CRUST_MANTLE = NGLOB(IREGION_CRUST_MANTLE)
-  NGLOB_INNER_CORE = NGLOB(IREGION_INNER_CORE)
-  NGLOB_OUTER_CORE = NGLOB(IREGION_OUTER_CORE)
+    NSPEC_OUTER_CORE = NSPEC(IREGION_OUTER_CORE)
+    NGLOB_OUTER_CORE = NGLOB(IREGION_OUTER_CORE)
 
+  case( IREGION_INNER_CORE )
+    NGLOB2DMAX_XMIN_XMAX_IC = NGLOB2DMAX_XMIN_XMAX(IREGION_INNER_CORE)
+    NGLOB2DMAX_YMIN_YMAX_IC = NGLOB2DMAX_YMIN_YMAX(IREGION_INNER_CORE)
+
+    NSPEC2DMAX_XMIN_XMAX_IC = NSPEC2DMAX_XMIN_XMAX(IREGION_INNER_CORE)
+    NSPEC2DMAX_YMIN_YMAX_IC = NSPEC2DMAX_YMIN_YMAX(IREGION_INNER_CORE)
+    NSPEC2D_BOTTOM_IC = NSPEC2D_BOTTOM(IREGION_INNER_CORE)
+    NSPEC2D_TOP_IC = NSPEC2D_TOP(IREGION_INNER_CORE)
+
+    NSPEC_INNER_CORE = NSPEC(IREGION_INNER_CORE)
+    NGLOB_INNER_CORE = NGLOB(IREGION_INNER_CORE)
+
+  case default 
+    stop 'error iregion_code value not recognized'
+  end select
+  
   ! allocates arrays
+  allocate(iprocfrom_faces(NUMMSGS_FACES), &
+          iprocto_faces(NUMMSGS_FACES), &
+          imsg_type(NUMMSGS_FACES),stat=ier)
+  if( ier /= 0 ) call exit_mpi(myrank,'error allocating iproc faces arrays')
 
+  ! communication pattern for corners between chunks
+  allocate(iproc_master_corners(NCORNERSCHUNKS), &
+          iproc_worker1_corners(NCORNERSCHUNKS), &
+          iproc_worker2_corners(NCORNERSCHUNKS),stat=ier)
+  if( ier /= 0 ) call exit_mpi(myrank,'error allocating iproc corner arrays')
+  
   allocate(buffer_send_chunkcorn_scalar(NGLOB1D_RADIAL_CM), &
           buffer_recv_chunkcorn_scalar(NGLOB1D_RADIAL_CM))
 
   allocate(buffer_send_chunkcorn_vector(NDIM,NGLOB1D_RADIAL_CM + NGLOB1D_RADIAL_IC), &
           buffer_recv_chunkcorn_vector(NDIM,NGLOB1D_RADIAL_CM + NGLOB1D_RADIAL_IC))
 
-  ! crust mantle
-  allocate(iboolcorner_crust_mantle(NGLOB1D_RADIAL_CM,NUMCORNERS_SHARED))
-  allocate(iboolleft_xi_crust_mantle(NGLOB2DMAX_XMIN_XMAX_CM), &
-          iboolright_xi_crust_mantle(NGLOB2DMAX_XMIN_XMAX_CM))
-  allocate(iboolleft_eta_crust_mantle(NGLOB2DMAX_YMIN_YMAX_CM), &
-          iboolright_eta_crust_mantle(NGLOB2DMAX_YMIN_YMAX_CM))
-  allocate(iboolfaces_crust_mantle(NGLOB2DMAX_XY,NUMFACES_SHARED))
+  select case( iregion_code )
+  case( IREGION_CRUST_MANTLE )  
+    ! crust mantle
+    allocate(iboolcorner_crust_mantle(NGLOB1D_RADIAL_CM,NUMCORNERS_SHARED))
+    allocate(iboolleft_xi_crust_mantle(NGLOB2DMAX_XMIN_XMAX_CM), &
+            iboolright_xi_crust_mantle(NGLOB2DMAX_XMIN_XMAX_CM))
+    allocate(iboolleft_eta_crust_mantle(NGLOB2DMAX_YMIN_YMAX_CM), &
+            iboolright_eta_crust_mantle(NGLOB2DMAX_YMIN_YMAX_CM))
+    allocate(iboolfaces_crust_mantle(NGLOB2DMAX_XY,NUMFACES_SHARED))
 
-  ! outer core
-  allocate(iboolcorner_outer_core(NGLOB1D_RADIAL_OC,NUMCORNERS_SHARED))
-  allocate(iboolleft_xi_outer_core(NGLOB2DMAX_XMIN_XMAX_OC), &
-          iboolright_xi_outer_core(NGLOB2DMAX_XMIN_XMAX_OC))
-  allocate(iboolleft_eta_outer_core(NGLOB2DMAX_YMIN_YMAX_OC), &
-          iboolright_eta_outer_core(NGLOB2DMAX_YMIN_YMAX_OC))
-  allocate(iboolfaces_outer_core(NGLOB2DMAX_XY,NUMFACES_SHARED))
+    ! crust mantle mesh
+    allocate(xstore_crust_mantle(NGLOB_CRUST_MANTLE), &
+            ystore_crust_mantle(NGLOB_CRUST_MANTLE), &
+            zstore_crust_mantle(NGLOB_CRUST_MANTLE))
+    allocate(idoubling_crust_mantle(NSPEC_CRUST_MANTLE))
+    allocate(ibool_crust_mantle(NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE), &
+             stat=ier)
+    if( ier /= 0 ) call exit_mpi(myrank,'error allocating temporary crust mantle arrays')
 
-  ! inner core
-  allocate(ibelm_xmin_inner_core(NSPEC2DMAX_XMIN_XMAX_IC), &
-          ibelm_xmax_inner_core(NSPEC2DMAX_XMIN_XMAX_IC))
-  allocate(ibelm_ymin_inner_core(NSPEC2DMAX_YMIN_YMAX_IC), &
-          ibelm_ymax_inner_core(NSPEC2DMAX_YMIN_YMAX_IC))
-  allocate(ibelm_bottom_inner_core(NSPEC2D_BOTTOM_IC))
-  allocate(ibelm_top_inner_core(NSPEC2D_TOP_IC))
+    ! allocates temporary arrays
+    allocate( is_on_a_slice_edge_crust_mantle(NSPEC_CRUST_MANTLE), &
+            stat=ier)
+    if( ier /= 0 ) call exit_mpi(myrank,'error allocating temporary is_on_a_slice_edge arrays')
 
+  case( IREGION_OUTER_CORE )
+    ! outer core
+    allocate(iboolcorner_outer_core(NGLOB1D_RADIAL_OC,NUMCORNERS_SHARED))
+    allocate(iboolleft_xi_outer_core(NGLOB2DMAX_XMIN_XMAX_OC), &
+            iboolright_xi_outer_core(NGLOB2DMAX_XMIN_XMAX_OC))
+    allocate(iboolleft_eta_outer_core(NGLOB2DMAX_YMIN_YMAX_OC), &
+            iboolright_eta_outer_core(NGLOB2DMAX_YMIN_YMAX_OC))
+    allocate(iboolfaces_outer_core(NGLOB2DMAX_XY,NUMFACES_SHARED))
 
-  allocate(iboolcorner_inner_core(NGLOB1D_RADIAL_IC,NUMCORNERS_SHARED))
-  allocate(iboolleft_xi_inner_core(NGLOB2DMAX_XMIN_XMAX_IC), &
-          iboolright_xi_inner_core(NGLOB2DMAX_XMIN_XMAX_IC))
-  allocate(iboolleft_eta_inner_core(NGLOB2DMAX_YMIN_YMAX_IC), &
-          iboolright_eta_inner_core(NGLOB2DMAX_YMIN_YMAX_IC))
-  allocate(iboolfaces_inner_core(NGLOB2DMAX_XY,NUMFACES_SHARED))
+    ! outer core mesh
+    allocate(xstore_outer_core(NGLOB_OUTER_CORE), &
+            ystore_outer_core(NGLOB_OUTER_CORE), &
+            zstore_outer_core(NGLOB_OUTER_CORE))
+    allocate(idoubling_outer_core(NSPEC_OUTER_CORE))
+    allocate(ibool_outer_core(NGLLX,NGLLY,NGLLZ,NSPEC_OUTER_CORE), &
+             stat=ier)
+    if( ier /= 0 ) call exit_mpi(myrank,'error allocating temporary outer core arrays')
 
+    ! allocates temporary arrays
+    allocate( is_on_a_slice_edge_outer_core(NSPEC_OUTER_CORE), &
+             stat=ier)
+    if( ier /= 0 ) call exit_mpi(myrank,'error allocating temporary is_on_a_slice_edge arrays')
 
-  ! crust mantle
-  allocate(xstore_crust_mantle(NGLOB_CRUST_MANTLE), &
-          ystore_crust_mantle(NGLOB_CRUST_MANTLE), &
-          zstore_crust_mantle(NGLOB_CRUST_MANTLE))
-  allocate(idoubling_crust_mantle(NSPEC_CRUST_MANTLE))
-  allocate(ibool_crust_mantle(NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE), &
-           stat=ier)
-  if( ier /= 0 ) call exit_mpi(myrank,'error allocating temporary crust mantle arrays')
+  case( IREGION_INNER_CORE )
+    ! inner core
+    allocate(ibelm_xmin_inner_core(NSPEC2DMAX_XMIN_XMAX_IC), &
+            ibelm_xmax_inner_core(NSPEC2DMAX_XMIN_XMAX_IC))
+    allocate(ibelm_ymin_inner_core(NSPEC2DMAX_YMIN_YMAX_IC), &
+            ibelm_ymax_inner_core(NSPEC2DMAX_YMIN_YMAX_IC))
+    allocate(ibelm_bottom_inner_core(NSPEC2D_BOTTOM_IC))
+    allocate(ibelm_top_inner_core(NSPEC2D_TOP_IC))
 
-  ! outer core
-  allocate(xstore_outer_core(NGLOB_OUTER_CORE), &
-          ystore_outer_core(NGLOB_OUTER_CORE), &
-          zstore_outer_core(NGLOB_OUTER_CORE))
-  allocate(idoubling_outer_core(NSPEC_OUTER_CORE))
-  allocate(ibool_outer_core(NGLLX,NGLLY,NGLLZ,NSPEC_OUTER_CORE), &
-           stat=ier)
-  if( ier /= 0 ) call exit_mpi(myrank,'error allocating temporary outer core arrays')
 
-  ! inner core
-  allocate(xstore_inner_core(NGLOB_INNER_CORE), &
-          ystore_inner_core(NGLOB_INNER_CORE), &
-          zstore_inner_core(NGLOB_INNER_CORE))
-  allocate(idoubling_inner_core(NSPEC_INNER_CORE))
-  allocate(ibool_inner_core(NGLLX,NGLLY,NGLLZ,NSPEC_INNER_CORE), &
-           stat=ier)
-  if( ier /= 0 ) call exit_mpi(myrank,'error allocating temporary inner core arrays')
+    allocate(iboolcorner_inner_core(NGLOB1D_RADIAL_IC,NUMCORNERS_SHARED))
+    allocate(iboolleft_xi_inner_core(NGLOB2DMAX_XMIN_XMAX_IC), &
+            iboolright_xi_inner_core(NGLOB2DMAX_XMIN_XMAX_IC))
+    allocate(iboolleft_eta_inner_core(NGLOB2DMAX_YMIN_YMAX_IC), &
+            iboolright_eta_inner_core(NGLOB2DMAX_YMIN_YMAX_IC))
+    allocate(iboolfaces_inner_core(NGLOB2DMAX_XY,NUMFACES_SHARED))
 
-  ! allocates temporary arrays
-  allocate(mask_ibool(NGLOB_CRUST_MANTLE))
-  allocate( is_on_a_slice_edge_crust_mantle(NSPEC_CRUST_MANTLE), &
-           is_on_a_slice_edge_inner_core(NSPEC_INNER_CORE), &
-           is_on_a_slice_edge_outer_core(NSPEC_OUTER_CORE), &
-           stat=ier)
-  if( ier /= 0 ) call exit_mpi(myrank,'error allocating temporary is_on_a_slice_edge arrays')
+    ! inner core mesh
+    allocate(xstore_inner_core(NGLOB_INNER_CORE), &
+            ystore_inner_core(NGLOB_INNER_CORE), &
+            zstore_inner_core(NGLOB_INNER_CORE))
+    allocate(idoubling_inner_core(NSPEC_INNER_CORE))
+    allocate(ibool_inner_core(NGLLX,NGLLY,NGLLZ,NSPEC_INNER_CORE), &
+             stat=ier)
+    if( ier /= 0 ) call exit_mpi(myrank,'error allocating temporary inner core arrays')
 
+    ! allocates temporary arrays
+    allocate(is_on_a_slice_edge_inner_core(NSPEC_INNER_CORE), &
+            stat=ier)
+    if( ier /= 0 ) call exit_mpi(myrank,'error allocating temporary is_on_a_slice_edge arrays')
 
+  end select
+
+  ! synchronize processes
+  call sync_all()
+
+  end subroutine cmi_allocate_addressing
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  subroutine cmi_read_addressing(iregion_code)
+
+  use meshfem3D_par,only: &
+    myrank,LOCAL_PATH
+    
+  use create_MPI_interfaces_par
+  use MPI_crust_mantle_par
+  use MPI_outer_core_par
+  use MPI_inner_core_par
+  implicit none
+
+  integer,intent(in):: iregion_code
+  
   ! read coordinates of the mesh
-  ! crust mantle
-  ibool_crust_mantle(:,:,:,:) = -1
-  call cmi_read_solver_data(myrank,IREGION_CRUST_MANTLE, &
-                           NSPEC_CRUST_MANTLE,NGLOB_CRUST_MANTLE, &
-                           xstore_crust_mantle,ystore_crust_mantle,zstore_crust_mantle,&
-                           ibool_crust_mantle,idoubling_crust_mantle, &
-                           is_on_a_slice_edge_crust_mantle, &
-                           LOCAL_PATH)
+  select case( iregion_code )
+  case( IREGION_CRUST_MANTLE )  
+    ! crust mantle
+    ibool_crust_mantle(:,:,:,:) = -1
+    call cmi_read_solver_data(myrank,IREGION_CRUST_MANTLE, &
+                             NSPEC_CRUST_MANTLE,NGLOB_CRUST_MANTLE, &
+                             xstore_crust_mantle,ystore_crust_mantle,zstore_crust_mantle,&
+                             ibool_crust_mantle,idoubling_crust_mantle, &
+                             is_on_a_slice_edge_crust_mantle, &
+                             LOCAL_PATH)
 
-  ! check that the number of points in this slice is correct
-  if(minval(ibool_crust_mantle(:,:,:,:)) /= 1 .or. &
-    maxval(ibool_crust_mantle(:,:,:,:)) /= NGLOB_CRUST_MANTLE) &
-      call exit_MPI(myrank,'incorrect global numbering: iboolmax does not equal nglob in crust and mantle')
+    ! check that the number of points in this slice is correct
+    if(minval(ibool_crust_mantle(:,:,:,:)) /= 1 .or. &
+      maxval(ibool_crust_mantle(:,:,:,:)) /= NGLOB_CRUST_MANTLE) &
+        call exit_MPI(myrank,'incorrect global numbering: iboolmax does not equal nglob in crust and mantle')
 
-  ! outer core
-  ibool_outer_core(:,:,:,:) = -1
-  call cmi_read_solver_data(myrank,IREGION_OUTER_CORE, &
-                           NSPEC_OUTER_CORE,NGLOB_OUTER_CORE, &
-                           xstore_outer_core,ystore_outer_core,zstore_outer_core,&
-                           ibool_outer_core,idoubling_outer_core, &
-                           is_on_a_slice_edge_outer_core, &
-                           LOCAL_PATH)
+  case( IREGION_OUTER_CORE )
+    ! outer core
+    ibool_outer_core(:,:,:,:) = -1
+    call cmi_read_solver_data(myrank,IREGION_OUTER_CORE, &
+                             NSPEC_OUTER_CORE,NGLOB_OUTER_CORE, &
+                             xstore_outer_core,ystore_outer_core,zstore_outer_core,&
+                             ibool_outer_core,idoubling_outer_core, &
+                             is_on_a_slice_edge_outer_core, &
+                             LOCAL_PATH)
 
-  ! check that the number of points in this slice is correct
-  if(minval(ibool_outer_core(:,:,:,:)) /= 1 .or. &
-     maxval(ibool_outer_core(:,:,:,:)) /= NGLOB_OUTER_CORE) &
-    call exit_MPI(myrank,'incorrect global numbering: iboolmax does not equal nglob in outer core')
+    ! check that the number of points in this slice is correct
+    if(minval(ibool_outer_core(:,:,:,:)) /= 1 .or. &
+       maxval(ibool_outer_core(:,:,:,:)) /= NGLOB_OUTER_CORE) &
+      call exit_MPI(myrank,'incorrect global numbering: iboolmax does not equal nglob in outer core')
 
-  ! inner core
-  ibool_inner_core(:,:,:,:) = -1
-  call cmi_read_solver_data(myrank,IREGION_INNER_CORE, &
-                           NSPEC_INNER_CORE,NGLOB_INNER_CORE, &
-                           xstore_inner_core,ystore_inner_core,zstore_inner_core,&
-                           ibool_inner_core,idoubling_inner_core, &
-                           is_on_a_slice_edge_inner_core, &
-                           LOCAL_PATH)
+  case( IREGION_INNER_CORE )
+    ! inner core
+    ibool_inner_core(:,:,:,:) = -1
+    call cmi_read_solver_data(myrank,IREGION_INNER_CORE, &
+                             NSPEC_INNER_CORE,NGLOB_INNER_CORE, &
+                             xstore_inner_core,ystore_inner_core,zstore_inner_core,&
+                             ibool_inner_core,idoubling_inner_core, &
+                             is_on_a_slice_edge_inner_core, &
+                             LOCAL_PATH)
 
-  ! check that the number of points in this slice is correct
-  if(minval(ibool_inner_core(:,:,:,:)) /= 1 .or. maxval(ibool_inner_core(:,:,:,:)) /= NGLOB_INNER_CORE) &
-    call exit_MPI(myrank,'incorrect global numbering: iboolmax does not equal nglob in inner core')
+    ! check that the number of points in this slice is correct
+    if(minval(ibool_inner_core(:,:,:,:)) /= 1 .or. maxval(ibool_inner_core(:,:,:,:)) /= NGLOB_INNER_CORE) &
+      call exit_MPI(myrank,'incorrect global numbering: iboolmax does not equal nglob in inner core')
 
+  end select
+
   ! synchronize processes
   call sync_all()
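
The net effect of this refactoring is that interface setup is no longer a single all-regions pass: create_MPI_interfaces(iregion_code) allocates, reads, and sets up only the region currently being meshed, with the select case blocks above handling crust/mantle, outer core, and inner core separately (the inner/outer split, coloring, saving, and freeing no longer live in this driver). A schematic C rendering of the new per-region flow, with stubs standing in for the Fortran routines and region codes as in the SPECFEM3D_GLOBE constants:

    #include <stdio.h>
    enum { IREGION_CRUST_MANTLE = 1, IREGION_OUTER_CORE = 2, IREGION_INNER_CORE = 3 };

    /* stubs in place of the Fortran subroutines above */
    static void cmi_allocate_addressing(int r) { printf("allocate region %d\n", r); }
    static void cmi_read_addressing(int r)     { printf("read region %d\n", r); }
    static void cmi_read_buffers(int r)        { printf("buffers region %d\n", r); }
    static void setup_MPI_interfaces(int r)    { printf("setup region %d\n", r); }

    static void create_MPI_interfaces(int iregion_code) {
      cmi_allocate_addressing(iregion_code);  /* allocate this region's arrays only */
      cmi_read_addressing(iregion_code);      /* read coordinates/ibool             */
      cmi_read_buffers(iregion_code);         /* iboolleft/iboolright buffers       */
      setup_MPI_interfaces(iregion_code);     /* build the interface lists          */
    }

    int main(void) {
      /* the mesher can now process one region at a time */
      for (int r = IREGION_CRUST_MANTLE; r <= IREGION_INNER_CORE; r++)
        create_MPI_interfaces(r);
      return 0;
    }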
 
@@ -507,12 +333,22 @@
 !-------------------------------------------------------------------------------------------------
 !
 
-  subroutine cmi_read_buffers()
+  subroutine cmi_read_buffers(iregion_code)
 
-  use meshfem3D_par
+  use meshfem3D_par,only: myrank,&
+    NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX,NGLOB1D_RADIAL,NSPEC2D_BOTTOM, &
+    NSPEC2DMAX_XMIN_XMAX,NSPEC2DMAX_YMIN_YMAX, &
+    NPROCTOT,NPROC_XI,NPROC_ETA,LOCAL_PATH,NCHUNKS,OUTPUT_FILES,IIN,INCLUDE_CENTRAL_CUBE, &
+    iproc_xi,iproc_eta,ichunk,addressing
+    
   use create_MPI_interfaces_par
+  use MPI_crust_mantle_par
+  use MPI_outer_core_par
+  use MPI_inner_core_par  
   implicit none
 
+  integer,intent(in):: iregion_code
+  
   ! local parameters
   integer :: ier
   integer njunk1,njunk2
@@ -523,203 +359,245 @@
 
   ! read 2-D addressing for summation between slices with MPI
 
-  ! mantle and crust
-  if(myrank == 0) then
-    write(IMAIN,*)
-    write(IMAIN,*) 'crust/mantle region:'
-  endif
+  select case( iregion_code )
+  case( IREGION_CRUST_MANTLE )
+    ! mantle and crust
+    if(myrank == 0) then
+      write(IMAIN,*)
+      write(IMAIN,*) 'crust/mantle region:'
+    endif
+    ! initializes
+    npoin2D_xi_crust_mantle(:) = 0
+    npoin2D_eta_crust_mantle(:) = 0
+    
+    call read_arrays_buffers_mesher(IREGION_CRUST_MANTLE,myrank,iboolleft_xi_crust_mantle, &
+               iboolright_xi_crust_mantle,iboolleft_eta_crust_mantle,iboolright_eta_crust_mantle, &
+               npoin2D_xi_crust_mantle,npoin2D_eta_crust_mantle, &
+               iprocfrom_faces,iprocto_faces,imsg_type, &
+               iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
+               iboolfaces_crust_mantle,npoin2D_faces_crust_mantle, &
+               iboolcorner_crust_mantle, &
+               NGLOB2DMAX_XMIN_XMAX(IREGION_CRUST_MANTLE), &
+               NGLOB2DMAX_YMIN_YMAX(IREGION_CRUST_MANTLE),NGLOB2DMAX_XY,NGLOB1D_RADIAL(IREGION_CRUST_MANTLE), &
+               NUMMSGS_FACES,NCORNERSCHUNKS,NPROCTOT,NPROC_XI,NPROC_ETA,LOCAL_PATH,NCHUNKS)
 
-  call read_arrays_buffers_mesher(IREGION_CRUST_MANTLE,myrank,iboolleft_xi_crust_mantle, &
-             iboolright_xi_crust_mantle,iboolleft_eta_crust_mantle,iboolright_eta_crust_mantle, &
-             npoin2D_xi_crust_mantle,npoin2D_eta_crust_mantle, &
-             iprocfrom_faces,iprocto_faces,imsg_type, &
-             iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
-             iboolfaces_crust_mantle,npoin2D_faces_crust_mantle, &
-             iboolcorner_crust_mantle, &
-             NGLOB2DMAX_XMIN_XMAX(IREGION_CRUST_MANTLE), &
-             NGLOB2DMAX_YMIN_YMAX(IREGION_CRUST_MANTLE),NGLOB2DMAX_XY,NGLOB1D_RADIAL(IREGION_CRUST_MANTLE), &
-             NUMMSGS_FACES,NCORNERSCHUNKS,NPROCTOT,NPROC_XI,NPROC_ETA,LOCAL_PATH,NCHUNKS)
+    ! note: the fix_... routine below updates the is_on_a_slice_edge_.. array:
+    !          it flags each element that lies on a rim of the slice,
+    !          thus including elements on top and bottom that are not shared with other MPI partitions
+    !
+    !          these flags are reset when setting up inner/outer elements; for now they
+    !          serve as an initial guess in the search for elements that share a global point
+    !          between different MPI processes
+    call fix_non_blocking_slices(is_on_a_slice_edge_crust_mantle,iboolright_xi_crust_mantle, &
+           iboolleft_xi_crust_mantle,iboolright_eta_crust_mantle,iboolleft_eta_crust_mantle, &
+           npoin2D_xi_crust_mantle,npoin2D_eta_crust_mantle,ibool_crust_mantle, &
+           NSPEC_CRUST_MANTLE,NGLOB_CRUST_MANTLE,NGLOB2DMAX_XMIN_XMAX_CM,NGLOB2DMAX_YMIN_YMAX_CM)
 
-  ! outer core
-  if(myrank == 0) write(IMAIN,*) 'outer core region:'
+    ! debug: saves element flags
+    if( DEBUG_FLAGS ) then
+      write(filename,'(a,i6.6)') trim(OUTPUT_FILES)//'/MPI_is_on_a_slice_edge_crust_mantle_proc',myrank
+      call write_VTK_data_elem_l(NSPEC_CRUST_MANTLE,NGLOB_CRUST_MANTLE, &
+                                xstore_crust_mantle,ystore_crust_mantle,zstore_crust_mantle, &
+                                ibool_crust_mantle, &
+                                is_on_a_slice_edge_crust_mantle,filename)
+    endif
 
-  call read_arrays_buffers_mesher(IREGION_OUTER_CORE,myrank, &
-             iboolleft_xi_outer_core,iboolright_xi_outer_core,iboolleft_eta_outer_core,iboolright_eta_outer_core, &
-             npoin2D_xi_outer_core,npoin2D_eta_outer_core, &
-             iprocfrom_faces,iprocto_faces,imsg_type, &
-             iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
-             iboolfaces_outer_core,npoin2D_faces_outer_core, &
-             iboolcorner_outer_core, &
-             NGLOB2DMAX_XMIN_XMAX(IREGION_OUTER_CORE), &
-             NGLOB2DMAX_YMIN_YMAX(IREGION_OUTER_CORE),NGLOB2DMAX_XY,NGLOB1D_RADIAL(IREGION_OUTER_CORE), &
-             NUMMSGS_FACES,NCORNERSCHUNKS,NPROCTOT,NPROC_XI,NPROC_ETA,LOCAL_PATH,NCHUNKS)
+    ! reduces the size of the MPI buffers: regions are now handled in separate MPI calls,
+    ! so the per-region maximum suffices; the old combined size (the sum over crust/mantle
+    ! and inner core, from when both regions shared a single MPI call) is kept for reference:
+    !npoin2D_max_all_CM_IC = max(maxval(npoin2D_xi_crust_mantle(:) + npoin2D_xi_inner_core(:)), &
+    !                            maxval(npoin2D_eta_crust_mantle(:) + npoin2D_eta_inner_core(:)))
+    npoin2D_max_all_CM_IC = max(maxval(npoin2D_xi_crust_mantle(:)), &
+                                maxval(npoin2D_eta_crust_mantle(:)))
 
-  ! inner core
-  if(myrank == 0) write(IMAIN,*) 'inner core region:'
+  case( IREGION_OUTER_CORE )
+    ! outer core
+    if(myrank == 0) then
+      write(IMAIN,*)
+      write(IMAIN,*) 'outer core region:'
+    endif    
+    npoin2D_xi_outer_core(:) = 0
+    npoin2D_eta_outer_core(:) = 0
+    
+    call read_arrays_buffers_mesher(IREGION_OUTER_CORE,myrank, &
+               iboolleft_xi_outer_core,iboolright_xi_outer_core,iboolleft_eta_outer_core,iboolright_eta_outer_core, &
+               npoin2D_xi_outer_core,npoin2D_eta_outer_core, &
+               iprocfrom_faces,iprocto_faces,imsg_type, &
+               iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
+               iboolfaces_outer_core,npoin2D_faces_outer_core, &
+               iboolcorner_outer_core, &
+               NGLOB2DMAX_XMIN_XMAX(IREGION_OUTER_CORE), &
+               NGLOB2DMAX_YMIN_YMAX(IREGION_OUTER_CORE),NGLOB2DMAX_XY,NGLOB1D_RADIAL(IREGION_OUTER_CORE), &
+               NUMMSGS_FACES,NCORNERSCHUNKS,NPROCTOT,NPROC_XI,NPROC_ETA,LOCAL_PATH,NCHUNKS)
 
-  call read_arrays_buffers_mesher(IREGION_INNER_CORE,myrank, &
-             iboolleft_xi_inner_core,iboolright_xi_inner_core,iboolleft_eta_inner_core,iboolright_eta_inner_core, &
-             npoin2D_xi_inner_core,npoin2D_eta_inner_core, &
-             iprocfrom_faces,iprocto_faces,imsg_type, &
-             iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
-             iboolfaces_inner_core,npoin2D_faces_inner_core, &
-             iboolcorner_inner_core, &
-             NGLOB2DMAX_XMIN_XMAX(IREGION_INNER_CORE), &
-             NGLOB2DMAX_YMIN_YMAX(IREGION_INNER_CORE),NGLOB2DMAX_XY,NGLOB1D_RADIAL(IREGION_INNER_CORE), &
-             NUMMSGS_FACES,NCORNERSCHUNKS,NPROCTOT,NPROC_XI,NPROC_ETA,LOCAL_PATH,NCHUNKS)
+    ! note: the fix_... routine below updates the is_on_a_slice_edge_.. array:
+    !          it flags each element that lies on a rim of the slice,
+    !          thus including elements on top and bottom that are not shared with other MPI partitions
+    !
+    !          these flags are reset when setting up inner/outer elements; for now they
+    !          serve as an initial guess in the search for elements that share a global point
+    !          between different MPI processes
+    call fix_non_blocking_slices(is_on_a_slice_edge_outer_core,iboolright_xi_outer_core, &
+           iboolleft_xi_outer_core,iboolright_eta_outer_core,iboolleft_eta_outer_core, &
+           npoin2D_xi_outer_core,npoin2D_eta_outer_core,ibool_outer_core, &
+           NSPEC_OUTER_CORE,NGLOB_OUTER_CORE,NGLOB2DMAX_XMIN_XMAX_OC,NGLOB2DMAX_YMIN_YMAX_OC)
 
-  ! synchronizes processes
-  call sync_all()
+    ! debug: saves element flags
+    if( DEBUG_FLAGS ) then
+      write(filename,'(a,i6.6)') trim(OUTPUT_FILES)//'/MPI_is_on_a_slice_edge_outer_core_proc',myrank
+      call write_VTK_data_elem_l(NSPEC_OUTER_CORE,NGLOB_OUTER_CORE, &
+                                xstore_outer_core,ystore_outer_core,zstore_outer_core, &
+                                ibool_outer_core, &
+                                is_on_a_slice_edge_outer_core,filename)
+    endif
 
-  ! read coupling arrays for inner core
-  ! create name of database
-  call create_name_database(prname,myrank,IREGION_INNER_CORE,LOCAL_PATH)
+    ! reduces the size of the MPI buffers: with regions handled in separate MPI calls,
+    ! the per-region maximum suffices
+    npoin2D_max_all_CM_IC = max(maxval(npoin2D_xi_outer_core(:)), &
+                                maxval(npoin2D_eta_outer_core(:)))
 
-  ! read info for vertical edges for central cube matching in inner core
-  open(unit=IIN,file=prname(1:len_trim(prname))//'boundary.bin', &
-        status='old',form='unformatted',action='read',iostat=ier)
-  if( ier /= 0 ) call exit_mpi(myrank,'error opening boundary.bin file')
+  case( IREGION_INNER_CORE )
+    ! inner core
+    if(myrank == 0) then
+      write(IMAIN,*)
+      write(IMAIN,*) 'inner core region:'
+    endif
+    npoin2D_xi_inner_core(:) = 0
+    npoin2D_eta_inner_core(:) = 0    
+    call read_arrays_buffers_mesher(IREGION_INNER_CORE,myrank, &
+               iboolleft_xi_inner_core,iboolright_xi_inner_core,iboolleft_eta_inner_core,iboolright_eta_inner_core, &
+               npoin2D_xi_inner_core,npoin2D_eta_inner_core, &
+               iprocfrom_faces,iprocto_faces,imsg_type, &
+               iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
+               iboolfaces_inner_core,npoin2D_faces_inner_core, &
+               iboolcorner_inner_core, &
+               NGLOB2DMAX_XMIN_XMAX(IREGION_INNER_CORE), &
+               NGLOB2DMAX_YMIN_YMAX(IREGION_INNER_CORE),NGLOB2DMAX_XY,NGLOB1D_RADIAL(IREGION_INNER_CORE), &
+               NUMMSGS_FACES,NCORNERSCHUNKS,NPROCTOT,NPROC_XI,NPROC_ETA,LOCAL_PATH,NCHUNKS)
 
-  read(IIN) nspec2D_xmin_inner_core
-  read(IIN) nspec2D_xmax_inner_core
-  read(IIN) nspec2D_ymin_inner_core
-  read(IIN) nspec2D_ymax_inner_core
-  read(IIN) njunk1
-  read(IIN) njunk2
+    ! read coupling arrays for inner core
+    ! create name of database
+    call create_name_database(prname,myrank,IREGION_INNER_CORE,LOCAL_PATH)
 
-  ! boundary parameters
-  read(IIN) ibelm_xmin_inner_core
-  read(IIN) ibelm_xmax_inner_core
-  read(IIN) ibelm_ymin_inner_core
-  read(IIN) ibelm_ymax_inner_core
-  read(IIN) ibelm_bottom_inner_core
-  read(IIN) ibelm_top_inner_core
-  close(IIN)
+    ! read info for vertical edges for central cube matching in inner core
+    open(unit=IIN,file=prname(1:len_trim(prname))//'boundary.bin', &
+          status='old',form='unformatted',action='read',iostat=ier)
+    if( ier /= 0 ) call exit_mpi(myrank,'error opening boundary.bin file')
 
+    read(IIN) nspec2D_xmin_inner_core
+    read(IIN) nspec2D_xmax_inner_core
+    read(IIN) nspec2D_ymin_inner_core
+    read(IIN) nspec2D_ymax_inner_core
+    read(IIN) njunk1
+    read(IIN) njunk2
 
-  ! added this to reduce the size of the buffers
-  ! size of buffers is the sum of two sizes because we handle two regions in the same MPI call
-  npoin2D_max_all_CM_IC = max(maxval(npoin2D_xi_crust_mantle(:) + npoin2D_xi_inner_core(:)), &
-                        maxval(npoin2D_eta_crust_mantle(:) + npoin2D_eta_inner_core(:)))
+    ! boundary parameters
+    read(IIN) ibelm_xmin_inner_core
+    read(IIN) ibelm_xmax_inner_core
+    read(IIN) ibelm_ymin_inner_core
+    read(IIN) ibelm_ymax_inner_core
+    read(IIN) ibelm_bottom_inner_core
+    read(IIN) ibelm_top_inner_core
+    close(IIN)
 
-  allocate(buffer_send_faces(NDIM,npoin2D_max_all_CM_IC,NUMFACES_SHARED), &
-          buffer_received_faces(NDIM,npoin2D_max_all_CM_IC,NUMFACES_SHARED),stat=ier)
-  if( ier /= 0 ) call exit_MPI(myrank,'error allocating mpi buffer')
+    ! central cube buffers
+    if(INCLUDE_CENTRAL_CUBE) then
 
-  ! central cube buffers
-  if(INCLUDE_CENTRAL_CUBE) then
+      if(myrank == 0) then
+        write(IMAIN,*)
+        write(IMAIN,*) 'including central cube'
+      endif
+      call sync_all()
 
-    if(myrank == 0) then
-      write(IMAIN,*)
-      write(IMAIN,*) 'including central cube'
-    endif
-    call sync_all()
+      ! compute number of messages to expect in cube as well as their size
+      call comp_central_cube_buffer_size(iproc_xi,iproc_eta,ichunk, &
+                  NPROC_XI,NPROC_ETA,NSPEC2D_BOTTOM(IREGION_INNER_CORE), &
+                  nb_msgs_theor_in_cube,npoin2D_cube_from_slices)
 
-    ! compute number of messages to expect in cube as well as their size
-    call comp_central_cube_buffer_size(iproc_xi,iproc_eta,ichunk, &
-                NPROC_XI,NPROC_ETA,NSPEC2D_BOTTOM(IREGION_INNER_CORE), &
-                nb_msgs_theor_in_cube,npoin2D_cube_from_slices)
+      ! this value is used for dynamic memory allocation, therefore make sure it is never zero
+      if(nb_msgs_theor_in_cube > 0) then
+        non_zero_nb_msgs_theor_in_cube = nb_msgs_theor_in_cube
+      else
+        non_zero_nb_msgs_theor_in_cube = 1
+      endif
 
-    ! this value is used for dynamic memory allocation, therefore make sure it is never zero
-    if(nb_msgs_theor_in_cube > 0) then
-      non_zero_nb_msgs_theor_in_cube = nb_msgs_theor_in_cube
-    else
-      non_zero_nb_msgs_theor_in_cube = 1
-    endif
+      ! allocate buffers for cube and slices
+      allocate(sender_from_slices_to_cube(non_zero_nb_msgs_theor_in_cube), &
+              buffer_all_cube_from_slices(non_zero_nb_msgs_theor_in_cube,npoin2D_cube_from_slices,NDIM), &
+              buffer_slices(npoin2D_cube_from_slices,NDIM), &
+              buffer_slices2(npoin2D_cube_from_slices,NDIM), &
+              ibool_central_cube(non_zero_nb_msgs_theor_in_cube,npoin2D_cube_from_slices),stat=ier)
+      if( ier /= 0 ) call exit_MPI(myrank,'error allocating cube buffers')
 
-    ! allocate buffers for cube and slices
-    allocate(sender_from_slices_to_cube(non_zero_nb_msgs_theor_in_cube), &
-            buffer_all_cube_from_slices(non_zero_nb_msgs_theor_in_cube,npoin2D_cube_from_slices,NDIM), &
-            buffer_slices(npoin2D_cube_from_slices,NDIM), &
-            buffer_slices2(npoin2D_cube_from_slices,NDIM), &
-            ibool_central_cube(non_zero_nb_msgs_theor_in_cube,npoin2D_cube_from_slices),stat=ier)
-    if( ier /= 0 ) call exit_MPI(myrank,'error allocating cube buffers')
+      ! handles the communications with the central cube if it was included in the mesh
+      ! create buffers to assemble with the central cube
+      call create_central_cube_buffers(myrank,iproc_xi,iproc_eta,ichunk, &
+                 NPROC_XI,NPROC_ETA,NCHUNKS, &
+                 NSPEC_INNER_CORE,NGLOB_INNER_CORE, &
+                 NSPEC2DMAX_XMIN_XMAX(IREGION_INNER_CORE),NSPEC2DMAX_YMIN_YMAX(IREGION_INNER_CORE), &
+                 NSPEC2D_BOTTOM(IREGION_INNER_CORE), &
+                 addressing,ibool_inner_core,idoubling_inner_core, &
+                 xstore_inner_core,ystore_inner_core,zstore_inner_core, &
+                 nspec2D_xmin_inner_core,nspec2D_xmax_inner_core, &
+                 nspec2D_ymin_inner_core,nspec2D_ymax_inner_core, &
+                 ibelm_xmin_inner_core,ibelm_xmax_inner_core, &
+                 ibelm_ymin_inner_core,ibelm_ymax_inner_core,ibelm_bottom_inner_core, &
+                 nb_msgs_theor_in_cube,non_zero_nb_msgs_theor_in_cube,npoin2D_cube_from_slices, &
+                 receiver_cube_from_slices,sender_from_slices_to_cube,ibool_central_cube, &
+                 buffer_slices,buffer_slices2,buffer_all_cube_from_slices)
 
-    ! handles the communications with the central cube if it was included in the mesh
-    ! create buffers to assemble with the central cube
-    call create_central_cube_buffers(myrank,iproc_xi,iproc_eta,ichunk, &
-               NPROC_XI,NPROC_ETA,NCHUNKS, &
-               NSPEC_INNER_CORE,NGLOB_INNER_CORE, &
-               NSPEC2DMAX_XMIN_XMAX(IREGION_INNER_CORE),NSPEC2DMAX_YMIN_YMAX(IREGION_INNER_CORE), &
-               NSPEC2D_BOTTOM(IREGION_INNER_CORE), &
-               addressing,ibool_inner_core,idoubling_inner_core, &
-               xstore_inner_core,ystore_inner_core,zstore_inner_core, &
-               nspec2D_xmin_inner_core,nspec2D_xmax_inner_core, &
-               nspec2D_ymin_inner_core,nspec2D_ymax_inner_core, &
-               ibelm_xmin_inner_core,ibelm_xmax_inner_core, &
-               ibelm_ymin_inner_core,ibelm_ymax_inner_core,ibelm_bottom_inner_core, &
-               nb_msgs_theor_in_cube,non_zero_nb_msgs_theor_in_cube,npoin2D_cube_from_slices, &
-               receiver_cube_from_slices,sender_from_slices_to_cube,ibool_central_cube, &
-               buffer_slices,buffer_slices2,buffer_all_cube_from_slices)
+      if(myrank == 0) write(IMAIN,*) ''
 
-    if(myrank == 0) write(IMAIN,*) ''
+    else
 
-  else
+      ! allocate fictitious buffers for cube and slices with a dummy size
+      ! just to be able to use them as arguments in subroutine calls
+      allocate(sender_from_slices_to_cube(1), &
+              buffer_all_cube_from_slices(1,1,1), &
+              buffer_slices(1,1), &
+              buffer_slices2(1,1), &
+              ibool_central_cube(1,1),stat=ier)
+      if( ier /= 0 ) call exit_MPI(myrank,'error allocating dummy buffers')
 
-    ! allocate fictitious buffers for cube and slices with a dummy size
-    ! just to be able to use them as arguments in subroutine calls
-    allocate(sender_from_slices_to_cube(1), &
-            buffer_all_cube_from_slices(1,1,1), &
-            buffer_slices(1,1), &
-            buffer_slices2(1,1), &
-            ibool_central_cube(1,1),stat=ier)
-    if( ier /= 0 ) call exit_MPI(myrank,'error allocating dummy buffers')
+    endif
 
-  endif
+    ! note: the fix_... routine below updates the is_on_a_slice_edge_.. array:
+    !          it flags each element that lies on a rim of the slice,
+    !          thus including elements on top and bottom that are not shared with other MPI partitions
+    !
+    !          these flags are reset when setting up inner/outer elements; for now they
+    !          serve as an initial guess in the search for elements that share a global point
+    !          between different MPI processes
+    call fix_non_blocking_slices(is_on_a_slice_edge_inner_core,iboolright_xi_inner_core, &
+           iboolleft_xi_inner_core,iboolright_eta_inner_core,iboolleft_eta_inner_core, &
+           npoin2D_xi_inner_core,npoin2D_eta_inner_core,ibool_inner_core, &
+           NSPEC_INNER_CORE,NGLOB_INNER_CORE,NGLOB2DMAX_XMIN_XMAX_IC,NGLOB2DMAX_YMIN_YMAX_IC)
 
-  ! note: fix_... routines below update is_on_a_slice_edge_.. arrays:
-  !          assign flags for each element which is on a rim of the slice
-  !          thus, they include elements on top and bottom not shared with other MPI partitions
-  !
-  !          we will re-set these flags when setting up inner/outer elements, but will
-  !          use these arrays for now as initial guess for the search for elements which share a global point
-  !          between different MPI processes
-  call fix_non_blocking_slices(is_on_a_slice_edge_crust_mantle,iboolright_xi_crust_mantle, &
-         iboolleft_xi_crust_mantle,iboolright_eta_crust_mantle,iboolleft_eta_crust_mantle, &
-         npoin2D_xi_crust_mantle,npoin2D_eta_crust_mantle,ibool_crust_mantle, &
-         mask_ibool,NSPEC_CRUST_MANTLE,NGLOB_CRUST_MANTLE,NGLOB2DMAX_XMIN_XMAX_CM,NGLOB2DMAX_YMIN_YMAX_CM)
+    if(INCLUDE_CENTRAL_CUBE) then
+      ! updates flags for elements on slice boundaries
+      call fix_non_blocking_central_cube(is_on_a_slice_edge_inner_core, &
+           ibool_inner_core,NSPEC_INNER_CORE,NGLOB_INNER_CORE,nb_msgs_theor_in_cube,ibelm_bottom_inner_core, &
+           idoubling_inner_core,npoin2D_cube_from_slices, &
+           ibool_central_cube,NSPEC2D_BOTTOM(IREGION_INNER_CORE), &
+           ichunk,NPROC_XI)
+    endif
 
-  call fix_non_blocking_slices(is_on_a_slice_edge_outer_core,iboolright_xi_outer_core, &
-         iboolleft_xi_outer_core,iboolright_eta_outer_core,iboolleft_eta_outer_core, &
-         npoin2D_xi_outer_core,npoin2D_eta_outer_core,ibool_outer_core, &
-         mask_ibool,NSPEC_OUTER_CORE,NGLOB_OUTER_CORE,NGLOB2DMAX_XMIN_XMAX_OC,NGLOB2DMAX_YMIN_YMAX_OC)
+    ! debug: saves element flags
+    if( DEBUG_FLAGS ) then
+      write(filename,'(a,i6.6)') trim(OUTPUT_FILES)//'/MPI_is_on_a_slice_edge_inner_core_proc',myrank
+      call write_VTK_data_elem_l(NSPEC_INNER_CORE,NGLOB_INNER_CORE, &
+                                xstore_inner_core,ystore_inner_core,zstore_inner_core, &
+                                ibool_inner_core, &
+                                is_on_a_slice_edge_inner_core,filename)
+    endif
 
-  call fix_non_blocking_slices(is_on_a_slice_edge_inner_core,iboolright_xi_inner_core, &
-         iboolleft_xi_inner_core,iboolright_eta_inner_core,iboolleft_eta_inner_core, &
-         npoin2D_xi_inner_core,npoin2D_eta_inner_core,ibool_inner_core, &
-         mask_ibool,NSPEC_INNER_CORE,NGLOB_INNER_CORE,NGLOB2DMAX_XMIN_XMAX_IC,NGLOB2DMAX_YMIN_YMAX_IC)
+    ! reduces the size of the MPI buffers: with regions handled in separate MPI calls,
+    ! the per-region maximum suffices
+    npoin2D_max_all_CM_IC = max(maxval(npoin2D_xi_inner_core(:)), &
+                                maxval(npoin2D_eta_inner_core(:)))
 
-  if(INCLUDE_CENTRAL_CUBE) then
-    ! updates flags for elements on slice boundaries
-    call fix_non_blocking_central_cube(is_on_a_slice_edge_inner_core, &
-         ibool_inner_core,NSPEC_INNER_CORE,NGLOB_INNER_CORE,nb_msgs_theor_in_cube,ibelm_bottom_inner_core, &
-         idoubling_inner_core,npoin2D_cube_from_slices, &
-         ibool_central_cube,NSPEC2D_BOTTOM(IREGION_INNER_CORE), &
-         ichunk,NPROC_XI)
-  endif
+  end select
+  
 
-  ! debug: saves element flags
-  if( DEBUG_FLAGS ) then
-    ! crust mantle
-    write(filename,'(a,i6.6)') trim(OUTPUT_FILES)//'/MPI_is_on_a_slice_edge_crust_mantle_proc',myrank
-    call write_VTK_data_elem_l(NSPEC_CRUST_MANTLE,NGLOB_CRUST_MANTLE, &
-                              xstore_crust_mantle,ystore_crust_mantle,zstore_crust_mantle, &
-                              ibool_crust_mantle, &
-                              is_on_a_slice_edge_crust_mantle,filename)
-    ! outer core
-    write(filename,'(a,i6.6)') trim(OUTPUT_FILES)//'/MPI_is_on_a_slice_edge_outer_core_proc',myrank
-    call write_VTK_data_elem_l(NSPEC_OUTER_CORE,NGLOB_OUTER_CORE, &
-                              xstore_outer_core,ystore_outer_core,zstore_outer_core, &
-                              ibool_outer_core, &
-                              is_on_a_slice_edge_outer_core,filename)
-    ! inner core
-    write(filename,'(a,i6.6)') trim(OUTPUT_FILES)//'/MPI_is_on_a_slice_edge_inner_core_proc',myrank
-    call write_VTK_data_elem_l(NSPEC_INNER_CORE,NGLOB_INNER_CORE, &
-                              xstore_inner_core,ystore_inner_core,zstore_inner_core, &
-                              ibool_inner_core, &
-                              is_on_a_slice_edge_inner_core,filename)
-  endif
-
   end subroutine cmi_read_buffers
 
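One detail of cmi_read_buffers worth highlighting: before the central cube buffers are
allocated, nb_msgs_theor_in_cube is clamped to at least 1 so that every buffer keeps a
nonzero extent and remains usable as an actual argument in later subroutine calls. A
minimal sketch of that guard (the zero message count is an assumed example value):

  program alloc_guard
    implicit none
    integer :: nb_msgs,non_zero_nb_msgs,ier
    integer, dimension(:), allocatable :: sender_from_slices_to_cube
    nb_msgs = 0                          ! e.g. a slice that expects no cube messages
    non_zero_nb_msgs = max(nb_msgs,1)    ! never allocate with a zero extent
    allocate(sender_from_slices_to_cube(non_zero_nb_msgs),stat=ier)
    if( ier /= 0 ) stop 'error allocating cube buffers'
    print *, 'allocated extent:',size(sender_from_slices_to_cube)
  end program alloc_guard
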
 !
@@ -727,125 +605,144 @@
 !
 
 
-  subroutine cmi_save_interfaces()
+  subroutine cmi_save_MPI_interfaces(iregion_code)
 
-  use meshfem3D_par
+  use meshfem3D_par,only: &
+    myrank,LOCAL_PATH
+    
   use create_MPI_interfaces_par
+  use MPI_crust_mantle_par
+  use MPI_outer_core_par
+  use MPI_inner_core_par
+  
   implicit none
 
-  ! crust mantle
-  call cmi_save_solver_data(myrank,IREGION_CRUST_MANTLE,LOCAL_PATH, &
-                           num_interfaces_crust_mantle,max_nibool_interfaces_crust_mantle, &
-                           my_neighbours_crust_mantle,nibool_interfaces_crust_mantle, &
-                           ibool_interfaces_crust_mantle, &
-                           nspec_inner_crust_mantle,nspec_outer_crust_mantle, &
-                           num_phase_ispec_crust_mantle,phase_ispec_inner_crust_mantle, &
-                           num_colors_outer_crust_mantle,num_colors_inner_crust_mantle, &
-                           num_elem_colors_crust_mantle)
+  integer,intent(in):: iregion_code
+  
+  select case( iregion_code )
+  case( IREGION_CRUST_MANTLE )
+    ! crust mantle
+    call cmi_save_solver_data(myrank,IREGION_CRUST_MANTLE,LOCAL_PATH, &
+                             num_interfaces_crust_mantle,max_nibool_interfaces_crust_mantle, &
+                             my_neighbours_crust_mantle,nibool_interfaces_crust_mantle, &
+                             ibool_interfaces_crust_mantle, &
+                             nspec_inner_crust_mantle,nspec_outer_crust_mantle, &
+                             num_phase_ispec_crust_mantle,phase_ispec_inner_crust_mantle, &
+                             num_colors_outer_crust_mantle,num_colors_inner_crust_mantle, &
+                             num_elem_colors_crust_mantle)
 
 
-  ! outer core
-  call cmi_save_solver_data(myrank,IREGION_OUTER_CORE,LOCAL_PATH, &
-                           num_interfaces_outer_core,max_nibool_interfaces_outer_core, &
-                           my_neighbours_outer_core,nibool_interfaces_outer_core, &
-                           ibool_interfaces_outer_core, &
-                           nspec_inner_outer_core,nspec_outer_outer_core, &
-                           num_phase_ispec_outer_core,phase_ispec_inner_outer_core, &
-                           num_colors_outer_outer_core,num_colors_inner_outer_core, &
-                           num_elem_colors_outer_core)
+  case( IREGION_OUTER_CORE )
+    ! outer core
+    call cmi_save_solver_data(myrank,IREGION_OUTER_CORE,LOCAL_PATH, &
+                             num_interfaces_outer_core,max_nibool_interfaces_outer_core, &
+                             my_neighbours_outer_core,nibool_interfaces_outer_core, &
+                             ibool_interfaces_outer_core, &
+                             nspec_inner_outer_core,nspec_outer_outer_core, &
+                             num_phase_ispec_outer_core,phase_ispec_inner_outer_core, &
+                             num_colors_outer_outer_core,num_colors_inner_outer_core, &
+                             num_elem_colors_outer_core)
 
+  case( IREGION_INNER_CORE )
+    ! inner core
+    call cmi_save_solver_data(myrank,IREGION_INNER_CORE,LOCAL_PATH, &
+                             num_interfaces_inner_core,max_nibool_interfaces_inner_core, &
+                             my_neighbours_inner_core,nibool_interfaces_inner_core, &
+                             ibool_interfaces_inner_core, &
+                             nspec_inner_inner_core,nspec_outer_inner_core, &
+                             num_phase_ispec_inner_core,phase_ispec_inner_inner_core, &
+                             num_colors_outer_inner_core,num_colors_inner_inner_core, &
+                             num_elem_colors_inner_core)
 
-  ! inner core
-  call cmi_save_solver_data(myrank,IREGION_INNER_CORE,LOCAL_PATH, &
-                           num_interfaces_inner_core,max_nibool_interfaces_inner_core, &
-                           my_neighbours_inner_core,nibool_interfaces_inner_core, &
-                           ibool_interfaces_inner_core, &
-                           nspec_inner_inner_core,nspec_outer_inner_core, &
-                           num_phase_ispec_inner_core,phase_ispec_inner_inner_core, &
-                           num_colors_outer_inner_core,num_colors_inner_inner_core, &
-                           num_elem_colors_inner_core)
+  end select
 
+  end subroutine cmi_save_MPI_interfaces
 
-  end subroutine cmi_save_interfaces
 
 
-
 !
 !-------------------------------------------------------------------------------------------------
 !
 
-  subroutine cmi_free_arrays()
+  subroutine cmi_free_MPI_arrays(iregion_code)
 
-  use meshfem3D_par
   use create_MPI_interfaces_par
+  use MPI_crust_mantle_par
+  use MPI_outer_core_par
+  use MPI_inner_core_par  
   implicit none
 
-  ! synchronize processes
-  call sync_all()
-
+  integer,intent(in):: iregion_code
+  
+  ! free memory
   deallocate(iprocfrom_faces,iprocto_faces,imsg_type)
   deallocate(iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners)
   deallocate(buffer_send_chunkcorn_scalar,buffer_recv_chunkcorn_scalar)
   deallocate(buffer_send_chunkcorn_vector,buffer_recv_chunkcorn_vector)
 
-  ! crust mantle
-  deallocate(iboolcorner_crust_mantle)
-  deallocate(iboolleft_xi_crust_mantle, &
-          iboolright_xi_crust_mantle)
-  deallocate(iboolleft_eta_crust_mantle, &
-          iboolright_eta_crust_mantle)
-  deallocate(iboolfaces_crust_mantle)
+  select case( iregion_code )
+  case( IREGION_CRUST_MANTLE )
+    ! crust mantle
+    deallocate(iboolcorner_crust_mantle)
+    deallocate(iboolleft_xi_crust_mantle, &
+            iboolright_xi_crust_mantle)
+    deallocate(iboolleft_eta_crust_mantle, &
+            iboolright_eta_crust_mantle)
+    deallocate(iboolfaces_crust_mantle)
 
-  deallocate(phase_ispec_inner_crust_mantle)
-  deallocate(num_elem_colors_crust_mantle)
+    deallocate(phase_ispec_inner_crust_mantle)
+    deallocate(num_elem_colors_crust_mantle)
 
-  ! outer core
-  deallocate(iboolcorner_outer_core)
-  deallocate(iboolleft_xi_outer_core, &
-          iboolright_xi_outer_core)
-  deallocate(iboolleft_eta_outer_core, &
-          iboolright_eta_outer_core)
-  deallocate(iboolfaces_outer_core)
+    deallocate(xstore_crust_mantle,ystore_crust_mantle,zstore_crust_mantle)
+    deallocate(idoubling_crust_mantle,ibool_crust_mantle)
 
-  deallocate(phase_ispec_inner_outer_core)
-  deallocate(num_elem_colors_outer_core)
+    deallocate(is_on_a_slice_edge_crust_mantle)
+    
+  case( IREGION_OUTER_CORE )
+    ! outer core
+    deallocate(iboolcorner_outer_core)
+    deallocate(iboolleft_xi_outer_core, &
+            iboolright_xi_outer_core)
+    deallocate(iboolleft_eta_outer_core, &
+            iboolright_eta_outer_core)
+    deallocate(iboolfaces_outer_core)
 
-  ! inner core
-  deallocate(ibelm_xmin_inner_core, &
-          ibelm_xmax_inner_core)
-  deallocate(ibelm_ymin_inner_core, &
-          ibelm_ymax_inner_core)
-  deallocate(ibelm_bottom_inner_core)
-  deallocate(ibelm_top_inner_core)
+    deallocate(phase_ispec_inner_outer_core)
+    deallocate(num_elem_colors_outer_core)
 
-  deallocate(iboolcorner_inner_core)
-  deallocate(iboolleft_xi_inner_core, &
-          iboolright_xi_inner_core)
-  deallocate(iboolleft_eta_inner_core, &
-          iboolright_eta_inner_core)
-  deallocate(iboolfaces_inner_core)
+    deallocate(xstore_outer_core,ystore_outer_core,zstore_outer_core)
+    deallocate(idoubling_outer_core,ibool_outer_core)
 
-  deallocate(xstore_crust_mantle,ystore_crust_mantle,zstore_crust_mantle)
-  deallocate(idoubling_crust_mantle,ibool_crust_mantle)
+    deallocate(is_on_a_slice_edge_outer_core)
+    
+  case( IREGION_INNER_CORE )
+    ! inner core
+    deallocate(ibelm_xmin_inner_core, &
+            ibelm_xmax_inner_core)
+    deallocate(ibelm_ymin_inner_core, &
+            ibelm_ymax_inner_core)
+    deallocate(ibelm_bottom_inner_core)
+    deallocate(ibelm_top_inner_core)
 
-  deallocate(xstore_outer_core,ystore_outer_core,zstore_outer_core)
-  deallocate(idoubling_outer_core,ibool_outer_core)
+    deallocate(iboolcorner_inner_core)
+    deallocate(iboolleft_xi_inner_core, &
+            iboolright_xi_inner_core)
+    deallocate(iboolleft_eta_inner_core, &
+            iboolright_eta_inner_core)
+    deallocate(iboolfaces_inner_core)
 
-  deallocate(xstore_inner_core,ystore_inner_core,zstore_inner_core)
-  deallocate(idoubling_inner_core,ibool_inner_core)
+    deallocate(xstore_inner_core,ystore_inner_core,zstore_inner_core)
+    deallocate(idoubling_inner_core,ibool_inner_core)
 
-  deallocate(phase_ispec_inner_inner_core)
-  deallocate(num_elem_colors_inner_core)
+    deallocate(phase_ispec_inner_inner_core)
+    deallocate(num_elem_colors_inner_core)
 
-  deallocate(mask_ibool)
+    deallocate(is_on_a_slice_edge_inner_core)
+    
+  end select
+  
+  end subroutine cmi_free_MPI_arrays
 
-  ! frees temporary allocated arrays
-  deallocate(is_on_a_slice_edge_crust_mantle, &
-            is_on_a_slice_edge_outer_core, &
-            is_on_a_slice_edge_inner_core)
-
-  end subroutine cmi_free_arrays
-
 !
 !-------------------------------------------------------------------------------------------------
 !

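The common thread in the cmi_* routines above: each now takes iregion_code and
dispatches through a select case, so the mesher can build, save, and free the MPI
interface arrays one region at a time instead of all at once. A bare-bones sketch of
that dispatch pattern (region constants shown with assumed values 1-3 for
illustration):

  program region_dispatch
    implicit none
    integer, parameter :: IREGION_CRUST_MANTLE = 1
    integer, parameter :: IREGION_OUTER_CORE   = 2
    integer, parameter :: IREGION_INNER_CORE   = 3
    integer :: iregion_code
    do iregion_code = IREGION_CRUST_MANTLE,IREGION_INNER_CORE
      select case( iregion_code )
      case( IREGION_CRUST_MANTLE )
        print *, 'crust/mantle region'
      case( IREGION_OUTER_CORE )
        print *, 'outer core region'
      case( IREGION_INNER_CORE )
        print *, 'inner core region'
      case default
        stop 'unknown region code'
      end select
    enddo
  end program region_dispatch
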
Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/create_chunk_buffers.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/create_chunk_buffers.f90	2012-07-24 22:09:06 UTC (rev 20541)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/create_chunk_buffers.f90	2012-07-24 22:10:28 UTC (rev 20542)
@@ -35,7 +35,9 @@
                                   NGLOB1D_RADIAL_CORNER,NGLOB1D_RADIAL_MAX, &
                                   NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX, &
                                   myrank,LOCAL_PATH,addressing, &
-                                  ichunk_slice,iproc_xi_slice,iproc_eta_slice,NCHUNKS)
+                                  ichunk_slice,iproc_xi_slice,iproc_eta_slice,NCHUNKS, &
+                                  nspec2D_xmin,nspec2D_xmax,nspec2D_ymin,nspec2D_ymax, &
+                                  ibelm_xmin,ibelm_xmax,ibelm_ymin,ibelm_ymax)
 
   implicit none
 
@@ -57,7 +59,11 @@
   integer nglob_ori
 
   integer NSPEC2DMAX_XMIN_XMAX,NSPEC2DMAX_YMIN_YMAX
-  integer NPROC,NPROC_XI,NPROC_ETA,NPROCTOT,NGLOB1D_RADIAL_MAX
+  integer NPROC,NPROC_XI,NPROC_ETA,NPROCTOT
+
+  integer NGLOB1D_RADIAL_MAX
+  integer, dimension(MAX_NUM_REGIONS,NB_SQUARE_CORNERS) :: NGLOB1D_RADIAL_CORNER
+    
   integer NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX
 
   integer myrank
@@ -71,30 +77,25 @@
 
   integer NCHUNKS
 
+  ! boundary parameters per slice
+  integer nspec2D_xmin,nspec2D_xmax,nspec2D_ymin,nspec2D_ymax
+  integer ibelm_xmin(NSPEC2DMAX_XMIN_XMAX),ibelm_xmax(NSPEC2DMAX_XMIN_XMAX)
+  integer ibelm_ymin(NSPEC2DMAX_YMIN_YMAX),ibelm_ymax(NSPEC2DMAX_YMIN_YMAX)
+
   ! local parameters
   integer NGLOB1D_RADIAL
-
-  integer, dimension(MAX_NUM_REGIONS,NB_SQUARE_CORNERS) :: NGLOB1D_RADIAL_CORNER
-
   integer nglob
-
-
   character(len=150) OUTPUT_FILES,ERR_MSG
-
   ! mask for ibool to mark points already found
   logical, dimension(:), allocatable ::  mask_ibool
-
   ! array to store points selected for the chunk face buffer
   integer NGLOB2DMAX_XY
   integer, dimension(:), allocatable :: ibool_selected
-
   double precision, dimension(:), allocatable :: xstore_selected,ystore_selected,zstore_selected
-
   ! arrays for sorting routine
   integer, dimension(:), allocatable :: ind,ninseg,iglob,locval,iwork
   logical, dimension(:), allocatable :: ifseg
   double precision, dimension(:), allocatable :: work
-
   ! pairs generated theoretically
   ! four sides for each of the three types of messages
   integer, dimension(:), allocatable :: iproc_sender,iproc_receiver,npoin2D_send,npoin2D_receive
@@ -124,11 +125,6 @@
   integer iproc_edge_send,iproc_edge_receive
   integer imsg_type,iside,imode_comm,iedge
 
-  ! boundary parameters per slice
-  integer nspec2D_xmin,nspec2D_xmax,nspec2D_ymin,nspec2D_ymax, njunk
-  integer ibelm_xmin(NSPEC2DMAX_XMIN_XMAX),ibelm_xmax(NSPEC2DMAX_XMIN_XMAX)
-  integer ibelm_ymin(NSPEC2DMAX_YMIN_YMAX),ibelm_ymax(NSPEC2DMAX_YMIN_YMAX)
-
   integer npoin2D,npoin2D_send_local,npoin2D_receive_local
 
   integer i,j,k,ispec,ispec2D,ipoin2D
@@ -490,24 +486,6 @@
             if(minval(ibool(:,:,:,1:nspec)) /= 1 .or. maxval(ibool(:,:,:,1:nspec)) /= nglob) &
               call exit_MPI(myrank,ERR_MSG)
 
-! $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
-
-! read boundary parameters
-
-            open(unit=IIN,file=prname(1:len_trim(prname))//'boundary.bin',status='old',action='read',form='unformatted')
-            read(IIN) nspec2D_xmin
-            read(IIN) nspec2D_xmax
-            read(IIN) nspec2D_ymin
-            read(IIN) nspec2D_ymax
-            read(IIN) njunk
-            read(IIN) njunk
-
-            read(IIN) ibelm_xmin
-            read(IIN) ibelm_xmax
-            read(IIN) ibelm_ymin
-            read(IIN) ibelm_ymax
-            close(IIN)
-
 ! read 1D buffers to remove corner points
             open(unit=IIN,file=prname(1:len_trim(prname))//'ibool1D_leftxi_lefteta.txt',status='old',action='read')
             do ipoin1D = 1,NGLOB1D_RADIAL_CORNER(iregion_code,1)

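The boundary.bin read deleted from create_chunk_buffers now lives in cmi_read_buffers
(see above), where the open gained an iostat guard. For reference, a minimal sketch of
that guarded unformatted sequential read (the file name and the two records shown are
illustrative; the program simply stops with a message when no such file exists):

  program read_boundary
    implicit none
    integer, parameter :: IIN = 27
    integer :: nspec2D_xmin,nspec2D_xmax,ier
    open(unit=IIN,file='boundary.bin', &
          status='old',form='unformatted',action='read',iostat=ier)
    if( ier /= 0 ) stop 'error opening boundary.bin file'
    read(IIN) nspec2D_xmin    ! one value per record, read back in write order
    read(IIN) nspec2D_xmax
    close(IIN)
    print *, nspec2D_xmin,nspec2D_xmax
  end program read_boundary
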
Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/create_regions_mesh.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/create_regions_mesh.f90	2012-07-24 22:09:06 UTC (rev 20541)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/create_regions_mesh.f90	2012-07-24 22:10:28 UTC (rev 20542)
@@ -25,138 +25,8 @@
 !
 !=====================================================================
 
-  module create_regions_mesh_par
 
-  use constants,only: NGLLX,NGLLY,NGLLZ,NGNOD,NGNOD2D,NDIM,NDIM2D
 
-  implicit none
-
-  ! topology of the elements
-  integer, dimension(NGNOD) :: iaddx,iaddy,iaddz
-
-  ! Gauss-Lobatto-Legendre points and weights of integration
-  double precision, dimension(NGLLX) :: xigll,wxgll
-  double precision, dimension(NGLLY) :: yigll,wygll
-  double precision, dimension(NGLLZ) :: zigll,wzgll
-
-  ! 3D shape functions and their derivatives
-  double precision, dimension(NGNOD,NGLLX,NGLLY,NGLLZ) :: shape3D
-  double precision, dimension(NDIM,NGNOD,NGLLX,NGLLY,NGLLZ) :: dershape3D
-
-  ! 2D shape functions and their derivatives
-  double precision, dimension(NGNOD2D,NGLLY,NGLLZ) :: shape2D_x
-  double precision, dimension(NGNOD2D,NGLLX,NGLLZ) :: shape2D_y
-  double precision, dimension(NGNOD2D,NGLLX,NGLLY) :: shape2D_bottom,shape2D_top
-  double precision, dimension(NDIM2D,NGNOD2D,NGLLY,NGLLZ) :: dershape2D_x
-  double precision, dimension(NDIM2D,NGNOD2D,NGLLX,NGLLZ) :: dershape2D_y
-  double precision, dimension(NDIM2D,NGNOD2D,NGLLX,NGLLY) :: dershape2D_bottom,dershape2D_top
-
-  end module create_regions_mesh_par
-
-!
-!-------------------------------------------------------------------------------------------------
-!
-
-  module create_regions_mesh_par2
-
-  use constants,only: CUSTOM_REAL,N_SLS
-
-  implicit none
-
-  integer :: nspec_stacey,nspec_actually,nspec_att
-
-  integer :: ifirst_region,ilast_region
-  integer, dimension(:), allocatable :: perm_layer
-
-  ! for model density and anisotropy
-  integer :: nspec_ani
-  real(kind=CUSTOM_REAL), dimension(:,:,:,:), allocatable :: rhostore,dvpstore, &
-    kappavstore,kappahstore,muvstore,muhstore,eta_anisostore
-
-  ! the 21 coefficients for an anisotropic medium in reduced notation
-  real(kind=CUSTOM_REAL), dimension(:,:,:,:), allocatable :: &
-    c11store,c12store,c13store,c14store,c15store,c16store,c22store, &
-    c23store,c24store,c25store,c26store,c33store,c34store,c35store, &
-    c36store,c44store,c45store,c46store,c55store,c56store,c66store
-
-  ! boundary locator
-  logical, dimension(:,:), allocatable :: iboun
-
-  ! arrays with mesh parameters
-  real(kind=CUSTOM_REAL), dimension(:,:,:,:), allocatable :: xixstore,xiystore,xizstore, &
-    etaxstore,etaystore,etazstore,gammaxstore,gammaystore,gammazstore
-
-  ! mass matrices
-  real(kind=CUSTOM_REAL), dimension(:), allocatable :: rmassx,rmassy,rmassz
-  integer :: nglob_xy
-
-  ! mass matrix and bathymetry for ocean load
-  integer nglob_oceans
-  real(kind=CUSTOM_REAL), dimension(:), allocatable :: rmass_ocean_load
-
-  ! boundary parameters locator
-  integer, dimension(:), allocatable :: ibelm_xmin,ibelm_xmax, &
-    ibelm_ymin,ibelm_ymax,ibelm_bottom,ibelm_top
-
-  ! 2-D jacobians and normals
-  real(kind=CUSTOM_REAL), dimension(:,:,:), allocatable :: &
-    jacobian2D_xmin,jacobian2D_xmax, &
-    jacobian2D_ymin,jacobian2D_ymax,jacobian2D_bottom,jacobian2D_top
-
-  real(kind=CUSTOM_REAL), dimension(:,:,:,:), allocatable :: &
-    normal_xmin,normal_xmax,normal_ymin,normal_ymax,normal_bottom,normal_top
-
-  ! MPI cut-planes parameters along xi and along eta
-  logical, dimension(:,:), allocatable :: iMPIcut_xi,iMPIcut_eta
-
-  ! Stacey, indices for Clayton-Engquist absorbing conditions
-  integer, dimension(:,:), allocatable :: nimin,nimax,njmin,njmax,nkmin_xi,nkmin_eta
-  real(kind=CUSTOM_REAL), dimension(:,:,:,:), allocatable :: rho_vp,rho_vs
-
-
-  ! number of elements on the boundaries
-  integer nspec2D_xmin,nspec2D_xmax,nspec2D_ymin,nspec2D_ymax
-
-  ! attenuation
-  double precision, dimension(:,:,:,:),   allocatable :: Qmu_store
-  double precision, dimension(:,:,:,:,:), allocatable :: tau_e_store
-  double precision, dimension(N_SLS)                  :: tau_s
-  double precision  T_c_source
-
-  logical :: USE_ONE_LAYER_SB
-
-  integer NUMBER_OF_MESH_LAYERS,layer_shift,cpt, &
-    first_layer_aniso,last_layer_aniso,FIRST_ELT_NON_ANISO
-
-  double precision, dimension(:,:), allocatable :: stretch_tab
-
-  integer :: nb_layer_above_aniso,FIRST_ELT_ABOVE_ANISO
-
-  logical :: ACTUALLY_STORE_ARRAYS
-
-  ! Boundary Mesh
-  integer NSPEC2D_MOHO,NSPEC2D_400,NSPEC2D_670,nex_eta_moho
-  integer, dimension(:), allocatable :: ibelm_moho_top,ibelm_moho_bot,ibelm_400_top,ibelm_400_bot, &
-    ibelm_670_top,ibelm_670_bot
-  real(kind=CUSTOM_REAL), dimension(:,:,:,:), allocatable :: normal_moho,normal_400,normal_670
-  real(kind=CUSTOM_REAL), dimension(:,:,:), allocatable :: jacobian2D_moho,jacobian2D_400,jacobian2D_670
-  integer ispec2D_moho_top,ispec2D_moho_bot,ispec2D_400_top,ispec2D_400_bot, &
-    ispec2D_670_top,ispec2D_670_bot
-  double precision r_moho,r_400,r_670
-
-  ! flags for transverse isotropic elements
-  logical, dimension(:), allocatable :: ispec_is_tiso
-
-  ! name of the database file
-  character(len=150) :: prname
-
-  end module create_regions_mesh_par2
-
-
-!
-!-------------------------------------------------------------------------------------------------
-!
-
   subroutine create_regions_mesh(iregion_code,ibool,idoubling,is_on_a_slice_edge, &
                           xstore,ystore,zstore, &
                           nspec, &
@@ -307,10 +177,12 @@
 
     ! Stacey
     if(NCHUNKS /= 6) then
-         call get_absorb(myrank,prname,iboun,nspec,nimin,nimax,njmin,njmax,nkmin_xi,nkmin_eta, &
+      call get_absorb(myrank,prname,iboun,nspec,nimin,nimax,njmin,njmax,nkmin_xi,nkmin_eta, &
                          NSPEC2DMAX_XMIN_XMAX,NSPEC2DMAX_YMIN_YMAX,NSPEC2D_BOTTOM)
-    endif
 
+      deallocate(nimin,nimax,njmin,njmax,nkmin_xi,nkmin_eta)
+    endif
+    
   ! only create mass matrix and save all the final arrays in the second pass
   case( 2 )
     ! precomputes jacobian for 2d absorbing boundary surfaces
@@ -332,6 +204,32 @@
               NSPEC2DMAX_XMIN_XMAX,NSPEC2DMAX_YMIN_YMAX,&
               xigll,yigll,zigll)
 
+    ! create chunk buffers if more than one chunk
+    call sync_all()
+    if( myrank == 0) then
+      write(IMAIN,*) '  ...creating chunk buffers'
+    endif
+    if(NCHUNKS > 1) then
+      call create_chunk_buffers(iregion_code,nspec,ibool,idoubling, &
+                              xstore,ystore,zstore, &
+                              nglob_theor, &
+                              NSPEC2DMAX_XMIN_XMAX,NSPEC2DMAX_YMIN_YMAX, &
+                              NPROC_XI,NPROC_ETA, &
+                              NPROC,NPROCTOT, &
+                              NGLOB1D_RADIAL_CORNER,maxval(NGLOB1D_RADIAL_CORNER(iregion_code,:)), &
+                              NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX, &
+                              myrank,LOCAL_PATH,addressing, &
+                              ichunk_slice,iproc_xi_slice,iproc_eta_slice,NCHUNKS, &
+                              nspec2D_xmin,nspec2D_xmax,nspec2D_ymin,nspec2D_ymax, &
+                              ibelm_xmin,ibelm_xmax,ibelm_ymin,ibelm_ymax)
+    else
+      if(myrank == 0) then
+        write(IMAIN,*)
+        write(IMAIN,*) 'only one chunk, no need to create chunk buffers'
+        write(IMAIN,*)
+      endif
+    endif
+
     !uncomment: adds model smoothing for point profile models
     !    if( THREE_D_MODEL == THREE_D_MODEL_PPM ) then
     !     call smooth_model(myrank, nproc_xi,nproc_eta,&
@@ -438,30 +336,34 @@
     deallocate(rmassx,rmassy,rmassz)
     deallocate(rmass_ocean_load)
 
-    ! create chunk buffers if more than one chunk
+    ! setup mpi communication interfaces
     call sync_all()
     if( myrank == 0) then
-      write(IMAIN,*) '  ...creating chunk buffers'
+      write(IMAIN,*) '  ...preparing MPI interfaces'
+    endif    
+    call create_MPI_interfaces(iregion_code)
+
+    ! sets up inner/outer element arrays
+    call sync_all()
+    if( myrank == 0) then
+      write(IMAIN,*) '  ...element inner/outer separation '
     endif
-    if(NCHUNKS > 1) then
-      call create_chunk_buffers(iregion_code,nspec,ibool,idoubling, &
-                              xstore,ystore,zstore, &
-                              nglob_theor, &
-                              NSPEC2DMAX_XMIN_XMAX,NSPEC2DMAX_YMIN_YMAX, &
-                              NPROC_XI,NPROC_ETA, &
-                              NPROC,NPROCTOT, &
-                              NGLOB1D_RADIAL_CORNER,maxval(NGLOB1D_RADIAL_CORNER(iregion_code,:)), &
-                              NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX, &
-                              myrank,LOCAL_PATH,addressing, &
-                              ichunk_slice,iproc_xi_slice,iproc_eta_slice,NCHUNKS)
-    else
-      if(myrank == 0) then
-        write(IMAIN,*)
-        write(IMAIN,*) 'only one chunk, no need to create chunk buffers'
-        write(IMAIN,*)
-      endif
+    call setup_inner_outer(iregion_code)
+    
+    ! sets up mesh coloring
+    call sync_all()
+    if( myrank == 0) then
+      write(IMAIN,*) '  ...element mesh coloring '
     endif
+    call setup_color_perm(iregion_code)
 
+    ! saves MPI interface infos
+    call cmi_save_MPI_interfaces(iregion_code)
+
+    ! frees memory
+    call cmi_free_MPI_arrays(iregion_code)
+
+
     ! boundary mesh
     if (SAVE_BOUNDARY_MESH .and. iregion_code == IREGION_CRUST_MANTLE) then
       ! user output
@@ -500,8 +402,6 @@
 
   end select  ! end of test if first or second pass
 
-  deallocate(stretch_tab)
-  deallocate(perm_layer)
 
   ! deallocate these arrays after each pass
   ! because they have a different size in each pass to save memory
@@ -525,16 +425,17 @@
   deallocate(normal_xmin,normal_xmax,normal_ymin,normal_ymax)
   deallocate(normal_bottom,normal_top)
   deallocate(iMPIcut_xi,iMPIcut_eta)
-  deallocate(nimin,nimax,njmin,njmax,nkmin_xi,nkmin_eta)
+
   deallocate(rho_vp,rho_vs)
   deallocate(Qmu_store)
   deallocate(tau_e_store)
+  
   deallocate(ibelm_moho_top,ibelm_moho_bot)
   deallocate(ibelm_400_top,ibelm_400_bot)
   deallocate(ibelm_670_top,ibelm_670_bot)
   deallocate(normal_moho,normal_400,normal_670)
-  deallocate(jacobian2D_moho,jacobian2D_400,jacobian2D_670)
 
+
   ! user output
   if(myrank == 0 ) write(IMAIN,*)
 
@@ -679,14 +580,16 @@
   if(ier /= 0) stop 'error in allocate 13'
 
   ! Stacey
-  allocate(nimin(2,NSPEC2DMAX_YMIN_YMAX), &
-          nimax(2,NSPEC2DMAX_YMIN_YMAX), &
-          njmin(2,NSPEC2DMAX_XMIN_XMAX), &
-          njmax(2,NSPEC2DMAX_XMIN_XMAX), &
-          nkmin_xi(2,NSPEC2DMAX_XMIN_XMAX), &
-          nkmin_eta(2,NSPEC2DMAX_YMIN_YMAX),stat=ier)
-  if(ier /= 0) stop 'error in allocate 14'
-
+  if( ipass == 1 .and. NCHUNKS /= 6 ) then
+    allocate(nimin(2,NSPEC2DMAX_YMIN_YMAX), &
+            nimax(2,NSPEC2DMAX_YMIN_YMAX), &
+            njmin(2,NSPEC2DMAX_XMIN_XMAX), &
+            njmax(2,NSPEC2DMAX_XMIN_XMAX), &
+            nkmin_xi(2,NSPEC2DMAX_XMIN_XMAX), &
+            nkmin_eta(2,NSPEC2DMAX_YMIN_YMAX),stat=ier)
+    if(ier /= 0) stop 'error in allocate 14'
+  endif
+  
   ! MPI cut-planes parameters along xi and along eta
   allocate(iMPIcut_xi(2,nspec), &
           iMPIcut_eta(2,nspec),stat=ier)
@@ -1027,6 +930,10 @@
 
   enddo !ilayer_loop
 
+  deallocate(stretch_tab)
+  deallocate(perm_layer)
+  deallocate(jacobian2D_moho,jacobian2D_400,jacobian2D_670)
+      
   if(myrank == 0 ) write(IMAIN,*)
 
   ! define central cube in inner core
@@ -1182,7 +1089,7 @@
                                   xstore,ystore,zstore,iregion_code, &
                                   npoin2D_xi,npoin2D_eta)
 
-! creates global indexing array ibool
+! sets up MPI cutplane arrays
 
   use meshfem3d_par,only: &
     myrank,NGLLX,NGLLY,NGLLZ, &

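Two allocation changes above tighten memory use in create_regions_mesh: the Stacey
index arrays are now allocated only on the first pass, and only when NCHUNKS /= 6
(i.e. when absorbing boundaries exist), then deallocated as soon as get_absorb has
saved them. A sketch of that allocate/use/free pairing, with assumed toy sizes:

  program stacey_alloc
    implicit none
    integer, parameter :: NSPEC2DMAX_XMIN_XMAX = 4, NSPEC2DMAX_YMIN_YMAX = 4
    integer, parameter :: NCHUNKS = 1
    integer :: ipass,ier
    integer, dimension(:,:), allocatable :: nimin,njmin
    do ipass = 1,2
      if( ipass == 1 .and. NCHUNKS /= 6 ) then
        allocate(nimin(2,NSPEC2DMAX_YMIN_YMAX), &
                njmin(2,NSPEC2DMAX_XMIN_XMAX),stat=ier)
        if( ier /= 0 ) stop 'error in allocate'
        nimin(:,:) = 0; njmin(:,:) = 0   ! ...fill and save to disk here...
        deallocate(nimin,njmin)          ! freed right after they are saved
      endif
    enddo
    print *, 'done'
  end program stacey_alloc
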
Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/fix_non_blocking_flags.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/fix_non_blocking_flags.f90	2012-07-24 22:09:06 UTC (rev 20541)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/fix_non_blocking_flags.f90	2012-07-24 22:10:28 UTC (rev 20542)
@@ -31,7 +31,7 @@
   subroutine fix_non_blocking_slices(is_on_a_slice_edge,iboolright_xi, &
          iboolleft_xi,iboolright_eta,iboolleft_eta, &
          npoin2D_xi,npoin2D_eta,ibool, &
-         mask_ibool,nspec,nglob,NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX)
+         nspec,nglob,NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX)
 
   implicit none
 
@@ -48,8 +48,8 @@
 
   integer, dimension(NGLLX,NGLLY,NGLLZ,nspec) :: ibool
 
+  ! local parameters
   logical, dimension(nglob) :: mask_ibool
-
   integer :: ipoin,ispec,i,j,k
 
 ! clean the mask
@@ -121,13 +121,10 @@
 
   integer, dimension(NSPEC2D_BOTTOM_INNER_CORE) :: ibelm_bottom_inner_core
 
-! local to global mapping
   integer, dimension(nspec) :: idoubling_inner_core
 
-! this mask is declared as integer in the calling program because it is used elsewhere
-! to store integers, and it is reused here as a logical to save memory
+  ! local parameters
   logical, dimension(nglob) :: mask_ibool
-
   integer :: ipoin,ispec,i,j,k,imsg,ispec2D
 
   if(ichunk /= CHUNK_AB .and. ichunk /= CHUNK_AB_ANTIPODE) then

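The fix_non_blocking_flags.f90 change demotes mask_ibool from a dummy argument to a
local automatic array, so callers no longer need to carry a shared scratch array of
size nglob. A self-contained sketch of the pattern, with toy sizes and an assumed
cut-plane point list:

  program mask_demo
    implicit none
    integer, parameter :: nglob = 6, npoin = 3
    integer :: iboolright_xi(npoin)
    iboolright_xi = (/ 2, 4, 6 /)
    call flag_points(iboolright_xi,npoin,nglob)
  contains
    subroutine flag_points(iboolright_xi,npoin,nglob)
      integer, intent(in) :: npoin,nglob
      integer, intent(in) :: iboolright_xi(npoin)
      ! local parameters
      logical, dimension(nglob) :: mask_ibool   ! automatic array, no longer passed in
      integer :: ipoin
      ! clean the mask, then flag every global point on the MPI cut plane
      mask_ibool(:) = .false.
      do ipoin = 1,npoin
        mask_ibool(iboolright_xi(ipoin)) = .true.
      enddo
      print *, count(mask_ibool),'points flagged on the slice edge'
    end subroutine flag_points
  end program mask_demo
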
Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/get_absorb.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/get_absorb.f90	2012-07-24 22:09:06 UTC (rev 20541)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/get_absorb.f90	2012-07-24 22:10:28 UTC (rev 20542)
@@ -131,14 +131,15 @@
     call exit_MPI(myrank,'ispecb5 should equal NSPEC2D_BOTTOM in absorbing boundary detection')
 
 ! save these temporary arrays for the solver for Stacey conditions
-      open(unit=27,file=prname(1:len_trim(prname))//'stacey.bin',status='unknown',form='unformatted',action='write')
-      write(27) nimin
-      write(27) nimax
-      write(27) njmin
-      write(27) njmax
-      write(27) nkmin_xi
-      write(27) nkmin_eta
-      close(27)
+  open(unit=27,file=prname(1:len_trim(prname))//'stacey.bin', &
+        status='unknown',form='unformatted',action='write')
+  write(27) nimin
+  write(27) nimax
+  write(27) njmin
+  write(27) njmax
+  write(27) nkmin_xi
+  write(27) nkmin_eta
+  close(27)
 
   end subroutine get_absorb
 

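The re-indented block above writes each Stacey index array as one unformatted record;
the solver later reads them back in exactly the same order. A self-contained
round-trip sketch of that record discipline (toy array size, scratch file name):

  program stacey_roundtrip
    implicit none
    integer :: nimin(2,3),nimin_back(2,3)
    nimin(:,:) = 1
    ! write: one array per unformatted record, in a fixed order
    open(unit=27,file='stacey_demo.bin',status='unknown',form='unformatted',action='write')
    write(27) nimin
    close(27)
    ! read back in exactly the same record order
    open(unit=27,file='stacey_demo.bin',status='old',form='unformatted',action='read')
    read(27) nimin_back
    close(27)
    if( any(nimin_back /= nimin) ) stop 'record mismatch'
    print *, 'round trip ok'
  end program stacey_roundtrip
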
Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/meshfem3D.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/meshfem3D.f90	2012-07-24 22:09:06 UTC (rev 20541)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/meshfem3D.f90	2012-07-24 22:10:28 UTC (rev 20542)
@@ -348,9 +348,6 @@
   ! creates meshes for regions crust/mantle, outer core and inner core
   call create_meshes()
 
-  ! setup mpi communication interfaces
-  call create_MPI_interfaces()
-
   ! outputs mesh infos and saves new header file
   call finalize_mesher()
 

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/meshfem3D_par.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/meshfem3D_par.f90	2012-07-24 22:09:06 UTC (rev 20541)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/meshfem3D_par.f90	2012-07-24 22:10:28 UTC (rev 20542)
@@ -27,17 +27,17 @@
 !
 ! United States and French Government Sponsorship Acknowledged.
 
-module constants
+  module constants
 
   include "constants.h"
 
-end module constants
+  end module constants
 
 !
 !-------------------------------------------------------------------------------------------------
 !
 
-module meshfem3D_models_par
+  module meshfem3D_models_par
 
 !---
 !
@@ -444,7 +444,7 @@
 ! to create a reference model based on 1D_REF but with 3D crust and 410/660 topography
   logical,parameter :: USE_1D_REFERENCE = .false.
 
-end module meshfem3D_models_par
+  end module meshfem3D_models_par
 
 
 !
@@ -452,7 +452,7 @@
 !
 
 
-module meshfem3D_par
+  module meshfem3D_par
 
 ! main parameter module for specfem simulations
 
@@ -564,4 +564,408 @@
   logical :: CUT_SUPERBRICK_XI,CUT_SUPERBRICK_ETA
   integer, dimension(MAX_NUM_REGIONS) :: NGLOB1D_RADIAL_TEMP
 
-end module meshfem3D_par
+  end module meshfem3D_par
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  module create_regions_mesh_par
+
+  use constants,only: NGLLX,NGLLY,NGLLZ,NGNOD,NGNOD2D,NDIM,NDIM2D
+
+  implicit none
+
+  ! topology of the elements
+  integer, dimension(NGNOD) :: iaddx,iaddy,iaddz
+
+  ! Gauss-Lobatto-Legendre points and weights of integration
+  double precision, dimension(NGLLX) :: xigll,wxgll
+  double precision, dimension(NGLLY) :: yigll,wygll
+  double precision, dimension(NGLLZ) :: zigll,wzgll
+
+  ! 3D shape functions and their derivatives
+  double precision, dimension(NGNOD,NGLLX,NGLLY,NGLLZ) :: shape3D
+  double precision, dimension(NDIM,NGNOD,NGLLX,NGLLY,NGLLZ) :: dershape3D
+
+  ! 2D shape functions and their derivatives
+  double precision, dimension(NGNOD2D,NGLLY,NGLLZ) :: shape2D_x
+  double precision, dimension(NGNOD2D,NGLLX,NGLLZ) :: shape2D_y
+  double precision, dimension(NGNOD2D,NGLLX,NGLLY) :: shape2D_bottom,shape2D_top
+  double precision, dimension(NDIM2D,NGNOD2D,NGLLY,NGLLZ) :: dershape2D_x
+  double precision, dimension(NDIM2D,NGNOD2D,NGLLX,NGLLZ) :: dershape2D_y
+  double precision, dimension(NDIM2D,NGNOD2D,NGLLX,NGLLY) :: dershape2D_bottom,dershape2D_top
+
+  end module create_regions_mesh_par
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  module create_regions_mesh_par2
+
+  use constants,only: CUSTOM_REAL,N_SLS
+
+  implicit none
+
+  integer :: nspec_stacey,nspec_actually,nspec_att
+
+  integer :: ifirst_region,ilast_region
+  integer, dimension(:), allocatable :: perm_layer
+
+  ! for model density and anisotropy
+  integer :: nspec_ani
+  real(kind=CUSTOM_REAL), dimension(:,:,:,:), allocatable :: rhostore,dvpstore, &
+    kappavstore,kappahstore,muvstore,muhstore,eta_anisostore
+
+  ! the 21 coefficients for an anisotropic medium in reduced notation
+  real(kind=CUSTOM_REAL), dimension(:,:,:,:), allocatable :: &
+    c11store,c12store,c13store,c14store,c15store,c16store,c22store, &
+    c23store,c24store,c25store,c26store,c33store,c34store,c35store, &
+    c36store,c44store,c45store,c46store,c55store,c56store,c66store
+
+  ! boundary locator
+  logical, dimension(:,:), allocatable :: iboun
+
+  ! arrays with mesh parameters
+  real(kind=CUSTOM_REAL), dimension(:,:,:,:), allocatable :: xixstore,xiystore,xizstore, &
+    etaxstore,etaystore,etazstore,gammaxstore,gammaystore,gammazstore
+
+  ! mass matrices
+  real(kind=CUSTOM_REAL), dimension(:), allocatable :: rmassx,rmassy,rmassz
+  integer :: nglob_xy
+
+  ! mass matrix and bathymetry for ocean load
+  integer nglob_oceans
+  real(kind=CUSTOM_REAL), dimension(:), allocatable :: rmass_ocean_load
+
+  ! boundary parameters locator
+  integer, dimension(:), allocatable :: ibelm_xmin,ibelm_xmax, &
+    ibelm_ymin,ibelm_ymax,ibelm_bottom,ibelm_top
+
+  ! 2-D jacobians and normals
+  real(kind=CUSTOM_REAL), dimension(:,:,:), allocatable :: &
+    jacobian2D_xmin,jacobian2D_xmax, &
+    jacobian2D_ymin,jacobian2D_ymax,jacobian2D_bottom,jacobian2D_top
+
+  real(kind=CUSTOM_REAL), dimension(:,:,:,:), allocatable :: &
+    normal_xmin,normal_xmax,normal_ymin,normal_ymax,normal_bottom,normal_top
+
+  ! MPI cut-planes parameters along xi and along eta
+  logical, dimension(:,:), allocatable :: iMPIcut_xi,iMPIcut_eta
+
+  ! Stacey, indices for Clayton-Engquist absorbing conditions
+  integer, dimension(:,:), allocatable :: nimin,nimax,njmin,njmax,nkmin_xi,nkmin_eta
+  real(kind=CUSTOM_REAL), dimension(:,:,:,:), allocatable :: rho_vp,rho_vs
+
+
+  ! number of elements on the boundaries
+  integer nspec2D_xmin,nspec2D_xmax,nspec2D_ymin,nspec2D_ymax
+
+  ! attenuation
+  double precision, dimension(:,:,:,:),   allocatable :: Qmu_store
+  double precision, dimension(:,:,:,:,:), allocatable :: tau_e_store
+  double precision, dimension(N_SLS)                  :: tau_s
+  double precision  T_c_source
+
+  logical :: USE_ONE_LAYER_SB
+
+  integer NUMBER_OF_MESH_LAYERS,layer_shift,cpt, &
+    first_layer_aniso,last_layer_aniso,FIRST_ELT_NON_ANISO
+
+  double precision, dimension(:,:), allocatable :: stretch_tab
+
+  integer :: nb_layer_above_aniso,FIRST_ELT_ABOVE_ANISO
+
+  logical :: ACTUALLY_STORE_ARRAYS
+
+  ! Boundary Mesh
+  integer NSPEC2D_MOHO,NSPEC2D_400,NSPEC2D_670,nex_eta_moho
+  integer, dimension(:), allocatable :: ibelm_moho_top,ibelm_moho_bot,ibelm_400_top,ibelm_400_bot, &
+    ibelm_670_top,ibelm_670_bot
+  real(kind=CUSTOM_REAL), dimension(:,:,:,:), allocatable :: normal_moho,normal_400,normal_670
+  real(kind=CUSTOM_REAL), dimension(:,:,:), allocatable :: jacobian2D_moho,jacobian2D_400,jacobian2D_670
+  integer ispec2D_moho_top,ispec2D_moho_bot,ispec2D_400_top,ispec2D_400_bot, &
+    ispec2D_670_top,ispec2D_670_bot
+  double precision r_moho,r_400,r_670
+
+  ! flags for transverse isotropic elements
+  logical, dimension(:), allocatable :: ispec_is_tiso
+
+  ! name of the database file
+  character(len=150) :: prname
+
+  end module create_regions_mesh_par2
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  module create_MPI_interfaces_par
+
+  use constants,only: &
+    CUSTOM_REAL,NDIM,IMAIN, &
+    IREGION_CRUST_MANTLE,IREGION_OUTER_CORE,IREGION_INNER_CORE
+
+  implicit none
+  
+  ! indirect addressing for each message, for faces and corners of the chunks
+  ! a given slice can belong to at most one corner and at most two faces
+  integer :: NGLOB2DMAX_XY
+
+  ! number of faces between chunks
+  integer :: NUMMSGS_FACES
+
+  ! number of corners between chunks
+  integer :: NCORNERSCHUNKS
+
+  ! number of message types
+  integer :: NUM_MSG_TYPES
+
+  !-----------------------------------------------------------------
+  ! assembly
+  !-----------------------------------------------------------------
+
+  ! ---- arrays to assemble between chunks
+  ! communication pattern for faces between chunks
+  integer, dimension(:),allocatable :: iprocfrom_faces,iprocto_faces,imsg_type
+  ! communication pattern for corners between chunks
+  integer, dimension(:),allocatable :: iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners
+
+  ! this is for non-blocking MPI
+
+  ! buffers for send and receive between faces of the slices and the chunks
+  ! we use the same buffers to assemble scalars and vectors because vectors are
+  ! always three times bigger and therefore scalars can use the first part
+  ! of the vector buffer in memory even if it has an additional index here
+  integer :: npoin2D_max_all_CM_IC
+
+  ! buffers for send and receive between corners of the chunks
+  real(kind=CUSTOM_REAL), dimension(:),allocatable :: &
+    buffer_send_chunkcorn_scalar,buffer_recv_chunkcorn_scalar
+
+  ! the buffer size is the sum of the two region sizes because we handle two regions in the same MPI call
+  real(kind=CUSTOM_REAL), dimension(:,:),allocatable :: &
+     buffer_send_chunkcorn_vector,buffer_recv_chunkcorn_vector
+  
+  end module create_MPI_interfaces_par
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  module MPI_crust_mantle_par
+
+  use constants,only: CUSTOM_REAL,NUMFACES_SHARED,NB_SQUARE_EDGES_ONEDIR
+  
+  implicit none
+  
+  ! collected MPI interfaces
+  !--------------------------------------
+  ! MPI crust/mantle mesh
+  !--------------------------------------
+  integer :: num_interfaces_crust_mantle
+  integer :: max_nibool_interfaces_crust_mantle
+  integer, dimension(:), allocatable :: my_neighbours_crust_mantle,nibool_interfaces_crust_mantle
+  integer, dimension(:,:), allocatable :: ibool_interfaces_crust_mantle
+
+  real(kind=CUSTOM_REAL), dimension(:,:,:), allocatable :: buffer_send_vector_crust_mantle,buffer_recv_vector_crust_mantle
+
+  integer, dimension(:), allocatable :: request_send_vector_crust_mantle,request_recv_vector_crust_mantle
+
+  ! temporary arrays for elements on slice edges
+  logical, dimension(:),allocatable :: is_on_a_slice_edge_crust_mantle
+
+  !--------------------------------------
+  ! crust mantle
+  !--------------------------------------
+  integer :: NSPEC_CRUST_MANTLE
+  integer :: NGLOB_CRUST_MANTLE
+
+  integer :: NGLOB1D_RADIAL_CM
+  integer :: NGLOB2DMAX_XMIN_XMAX_CM
+  integer :: NGLOB2DMAX_YMIN_YMAX_CM
+  integer :: NSPEC2DMAX_XMIN_XMAX_CM
+  integer :: NSPEC2DMAX_YMIN_YMAX_CM
+  integer :: NSPEC2D_BOTTOM_CM
+  integer :: NSPEC2D_TOP_CM
+
+  real(kind=CUSTOM_REAL), dimension(:),allocatable :: &
+    xstore_crust_mantle,ystore_crust_mantle,zstore_crust_mantle
+  integer, dimension(:),allocatable :: idoubling_crust_mantle
+  integer, dimension(:,:,:,:),allocatable :: ibool_crust_mantle
+
+  ! assembly
+  integer, dimension(NUMFACES_SHARED) :: npoin2D_faces_crust_mantle
+  integer, dimension(NB_SQUARE_EDGES_ONEDIR) :: npoin2D_xi_crust_mantle,npoin2D_eta_crust_mantle
+
+  ! indirect addressing for each corner of the chunks
+  integer, dimension(:,:),allocatable :: iboolcorner_crust_mantle
+
+  ! 2-D addressing and buffers for summation between slices
+  integer, dimension(:),allocatable :: iboolleft_xi_crust_mantle,iboolright_xi_crust_mantle
+  integer, dimension(:),allocatable :: iboolleft_eta_crust_mantle,iboolright_eta_crust_mantle
+
+  integer, dimension(:,:),allocatable :: iboolfaces_crust_mantle
+
+  ! inner / outer elements crust/mantle region
+  integer :: num_phase_ispec_crust_mantle
+  integer :: nspec_inner_crust_mantle,nspec_outer_crust_mantle
+  integer, dimension(:,:), allocatable :: phase_ispec_inner_crust_mantle
+
+  ! mesh coloring
+  integer :: num_colors_outer_crust_mantle,num_colors_inner_crust_mantle
+  integer,dimension(:),allocatable :: num_elem_colors_crust_mantle
+
+  end module MPI_crust_mantle_par
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  module MPI_inner_core_par
+
+  use constants,only: CUSTOM_REAL,NUMFACES_SHARED,NB_SQUARE_EDGES_ONEDIR
+  
+  implicit none
+  
+  !--------------------------------------
+  ! MPI inner core mesh
+  !--------------------------------------
+  integer :: num_interfaces_inner_core
+  integer :: max_nibool_interfaces_inner_core
+  integer, dimension(:), allocatable :: my_neighbours_inner_core,nibool_interfaces_inner_core
+  integer, dimension(:,:), allocatable :: ibool_interfaces_inner_core
+
+  real(kind=CUSTOM_REAL), dimension(:,:,:), allocatable :: buffer_send_vector_inner_core,buffer_recv_vector_inner_core
+
+  integer, dimension(:), allocatable :: request_send_vector_inner_core,request_recv_vector_inner_core
+
+  ! temporary arrays for elements on slice edges
+  logical, dimension(:),allocatable :: is_on_a_slice_edge_inner_core
+
+  !--------------------------------------
+  ! inner core
+  !--------------------------------------
+  integer :: NSPEC_INNER_CORE
+  integer :: NGLOB_INNER_CORE
+
+  integer :: NGLOB1D_RADIAL_IC
+  integer :: NGLOB2DMAX_XMIN_XMAX_IC
+  integer :: NGLOB2DMAX_YMIN_YMAX_IC
+  integer :: NSPEC2DMAX_XMIN_XMAX_IC
+  integer :: NSPEC2DMAX_YMIN_YMAX_IC
+  integer :: NSPEC2D_BOTTOM_IC
+  integer :: NSPEC2D_TOP_IC
+
+  real(kind=CUSTOM_REAL), dimension(:),allocatable :: &
+    xstore_inner_core,ystore_inner_core,zstore_inner_core
+  integer, dimension(:),allocatable :: idoubling_inner_core
+  integer, dimension(:,:,:,:),allocatable :: ibool_inner_core
+
+
+  ! for matching with central cube in inner core
+  integer, dimension(:), allocatable :: sender_from_slices_to_cube
+  integer, dimension(:,:), allocatable :: ibool_central_cube
+  double precision, dimension(:,:), allocatable :: buffer_slices,buffer_slices2
+  double precision, dimension(:,:,:), allocatable :: buffer_all_cube_from_slices
+  integer nb_msgs_theor_in_cube,non_zero_nb_msgs_theor_in_cube,npoin2D_cube_from_slices,receiver_cube_from_slices
+
+  integer :: nspec2D_xmin_inner_core,nspec2D_xmax_inner_core, &
+            nspec2D_ymin_inner_core,nspec2D_ymax_inner_core
+
+  integer, dimension(:),allocatable :: ibelm_xmin_inner_core,ibelm_xmax_inner_core
+  integer, dimension(:),allocatable :: ibelm_ymin_inner_core,ibelm_ymax_inner_core
+  integer, dimension(:),allocatable :: ibelm_bottom_inner_core
+  integer, dimension(:),allocatable :: ibelm_top_inner_core
+
+  integer, dimension(NUMFACES_SHARED) :: npoin2D_faces_inner_core
+  integer, dimension(NB_SQUARE_EDGES_ONEDIR) :: npoin2D_xi_inner_core,npoin2D_eta_inner_core
+
+  ! indirect addressing for each corner of the chunks
+  integer, dimension(:,:),allocatable :: iboolcorner_inner_core
+
+  ! 2-D addressing and buffers for summation between slices
+  integer, dimension(:),allocatable :: iboolleft_xi_inner_core,iboolright_xi_inner_core
+  integer, dimension(:),allocatable :: iboolleft_eta_inner_core,iboolright_eta_inner_core
+
+  integer, dimension(:,:),allocatable :: iboolfaces_inner_core
+
+  ! inner / outer elements inner core region
+  integer :: num_phase_ispec_inner_core
+  integer :: nspec_inner_inner_core,nspec_outer_inner_core
+  integer, dimension(:,:), allocatable :: phase_ispec_inner_inner_core
+
+  ! mesh coloring
+  integer :: num_colors_outer_inner_core,num_colors_inner_inner_core
+  integer,dimension(:),allocatable :: num_elem_colors_inner_core
+
+  end module MPI_inner_core_par
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  module MPI_outer_core_par
+
+  use constants,only: CUSTOM_REAL,NUMFACES_SHARED,NB_SQUARE_EDGES_ONEDIR
+  
+  implicit none
+  
+  !--------------------------------------
+  ! MPI outer core mesh
+  !--------------------------------------
+  integer :: num_interfaces_outer_core
+  integer :: max_nibool_interfaces_outer_core
+  integer, dimension(:), allocatable :: my_neighbours_outer_core,nibool_interfaces_outer_core
+  integer, dimension(:,:), allocatable :: ibool_interfaces_outer_core
+
+  real(kind=CUSTOM_REAL), dimension(:,:), allocatable :: buffer_send_scalar_outer_core,buffer_recv_scalar_outer_core
+
+  integer, dimension(:), allocatable :: request_send_scalar_outer_core,request_recv_scalar_outer_core
+
+  ! temporary arrays for elements on slice edges
+  logical, dimension(:),allocatable :: is_on_a_slice_edge_outer_core
+
+  !--------------------------------------
+  ! outer core
+  !--------------------------------------
+  integer :: NSPEC_OUTER_CORE
+  integer :: NGLOB_OUTER_CORE
+
+  integer :: NGLOB1D_RADIAL_OC
+  integer :: NGLOB2DMAX_XMIN_XMAX_OC
+  integer :: NGLOB2DMAX_YMIN_YMAX_OC
+  integer :: NSPEC2DMAX_XMIN_XMAX_OC
+  integer :: NSPEC2DMAX_YMIN_YMAX_OC
+  integer :: NSPEC2D_BOTTOM_OC
+  integer :: NSPEC2D_TOP_OC
+
+  real(kind=CUSTOM_REAL), dimension(:),allocatable :: &
+    xstore_outer_core,ystore_outer_core,zstore_outer_core
+  integer, dimension(:),allocatable :: idoubling_outer_core
+  integer, dimension(:,:,:,:),allocatable :: ibool_outer_core
+
+  ! assembly
+  integer, dimension(NUMFACES_SHARED) :: npoin2D_faces_outer_core
+  integer, dimension(NB_SQUARE_EDGES_ONEDIR) :: npoin2D_xi_outer_core,npoin2D_eta_outer_core
+
+  ! indirect addressing for each corner of the chunks
+  integer, dimension(:,:),allocatable :: iboolcorner_outer_core
+
+  ! 2-D addressing and buffers for summation between slices
+  integer, dimension(:),allocatable :: iboolleft_xi_outer_core,iboolright_xi_outer_core
+  integer, dimension(:),allocatable :: iboolleft_eta_outer_core,iboolright_eta_outer_core
+
+  integer, dimension(:,:),allocatable :: iboolfaces_outer_core
+
+  ! inner / outer elements outer core region
+  integer :: num_phase_ispec_outer_core
+  integer :: nspec_inner_outer_core,nspec_outer_outer_core
+  integer, dimension(:,:), allocatable :: phase_ispec_inner_outer_core
+
+  ! mesh coloring
+  integer :: num_colors_outer_outer_core,num_colors_inner_outer_core
+  integer,dimension(:),allocatable :: num_elem_colors_outer_core
+
+  end module MPI_outer_core_par
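
The buffer-sharing comment in create_MPI_interfaces_par above relies on Fortran
sequence association: a vector buffer of shape (NDIM,npoin) owns 3*npoin
contiguous reals, so the same storage can be handed to a routine that declares a
rank-1 scalar buffer. A minimal standalone sketch (hypothetical names, not part
of this commit):

  program buffer_reuse_sketch
  implicit none
  integer, parameter :: NDIM = 3, NPOIN2D = 4
  ! the vector buffer owns NDIM*NPOIN2D contiguous reals
  real, dimension(NDIM,NPOIN2D) :: buffer_send_faces
  buffer_send_faces(:,:) = 0.
  ! a scalar assembly needs only NPOIN2D reals; passing the first element
  ! hands the same storage to a rank-1 dummy (sequence association)
  call fill_scalar(buffer_send_faces(1,1),NPOIN2D)
  print *,buffer_send_faces(:,1)   ! first column now holds 1., 2., 3.
  end program buffer_reuse_sketch

  subroutine fill_scalar(buffer_scalar,npoin)
  implicit none
  integer :: npoin
  real, dimension(npoin) :: buffer_scalar
  integer :: ipoin
  do ipoin = 1,npoin
    buffer_scalar(ipoin) = real(ipoin)
  enddo
  end subroutine fill_scalar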

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/save_arrays_solver.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/save_arrays_solver.f90	2012-07-24 22:09:06 UTC (rev 20541)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/save_arrays_solver.f90	2012-07-24 22:10:28 UTC (rev 20542)
@@ -458,7 +458,3 @@
   endif ! SAVE_MESH_FILES
 
   end subroutine save_arrays_solver
-
-
-
-

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/setup_MPI_interfaces.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/setup_MPI_interfaces.f90	2012-07-24 22:09:06 UTC (rev 20541)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/setup_MPI_interfaces.f90	2012-07-24 22:10:28 UTC (rev 20542)
@@ -26,12 +26,18 @@
 !=====================================================================
 
 
-  subroutine setup_MPI_interfaces()
+  subroutine setup_MPI_interfaces(iregion_code)
 
-  use meshfem3D_par,only: INCLUDE_CENTRAL_CUBE,myrank
+  use meshfem3D_par,only: &
+    INCLUDE_CENTRAL_CUBE,myrank,NUMFACES_SHARED
+    
   use create_MPI_interfaces_par
+  use MPI_inner_core_par,only: &
+    non_zero_nb_msgs_theor_in_cube,npoin2D_cube_from_slices
   implicit none
 
+  integer,intent(in):: iregion_code
+  
   ! local parameters
   ! assigns initial maximum arrays
   ! for global slices, maximum number of neighbor is around 17 ( 8 horizontal, max of 8 on bottom )
@@ -57,18 +63,23 @@
   if( ier /= 0 ) call exit_mpi(myrank,'error allocating ibool_neighbours')
 
   ! sets up MPI interfaces between different processes
-  ! crust/mantle
-  call setup_MPI_interfaces_cm(MAX_NEIGHBOURS,my_neighbours,nibool_neighbours, &
-                              max_nibool,ibool_neighbours)
+  select case( iregion_code )
+  case( IREGION_CRUST_MANTLE )  
+    ! crust/mantle
+    call setup_MPI_interfaces_cm(MAX_NEIGHBOURS,my_neighbours,nibool_neighbours, &
+                                max_nibool,ibool_neighbours)
 
-  ! outer core
-  call setup_MPI_interfaces_oc(MAX_NEIGHBOURS,my_neighbours,nibool_neighbours, &
-                              max_nibool,ibool_neighbours)
+  case( IREGION_OUTER_CORE ) 
+    ! outer core
+    call setup_MPI_interfaces_oc(MAX_NEIGHBOURS,my_neighbours,nibool_neighbours, &
+                                max_nibool,ibool_neighbours)
 
-  ! inner core
-  call setup_MPI_interfaces_ic(MAX_NEIGHBOURS,my_neighbours,nibool_neighbours, &
-                              max_nibool,ibool_neighbours)
-
+  case( IREGION_INNER_CORE ) 
+    ! inner core
+    call setup_MPI_interfaces_ic(MAX_NEIGHBOURS,my_neighbours,nibool_neighbours, &
+                                max_nibool,ibool_neighbours)
+  end select
+  
   ! frees temporary array
   deallocate(ibool_neighbours)
   deallocate(my_neighbours,nibool_neighbours)
@@ -85,8 +96,14 @@
   subroutine setup_MPI_interfaces_cm(MAX_NEIGHBOURS,my_neighbours,nibool_neighbours, &
                                     max_nibool,ibool_neighbours)
 
-  use meshfem3D_par
+  use meshfem3D_par,only: &
+    myrank,iproc_xi,iproc_eta,ichunk,addressing,INCLUDE_CENTRAL_CUBE, &
+    NPROC_XI,NPROC_ETA,NPROCTOT, &
+    NGLOB1D_RADIAL,NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX,NCHUNKS, &
+    OUTPUT_FILES
+    
   use create_MPI_interfaces_par
+  use MPI_crust_mantle_par  
   implicit none
 
   integer :: MAX_NEIGHBOURS,max_nibool
@@ -215,8 +232,14 @@
   subroutine setup_MPI_interfaces_oc(MAX_NEIGHBOURS,my_neighbours,nibool_neighbours, &
                                     max_nibool,ibool_neighbours)
 
-  use meshfem3D_par
+  use meshfem3D_par,only: &
+    myrank,iproc_xi,iproc_eta,ichunk,addressing,INCLUDE_CENTRAL_CUBE, &
+    NPROC_XI,NPROC_ETA,NPROCTOT, &
+    NGLOB1D_RADIAL,NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX,NCHUNKS, &
+    OUTPUT_FILES
+    
   use create_MPI_interfaces_par
+  use MPI_outer_core_par
   implicit none
 
   integer :: MAX_NEIGHBOURS,max_nibool
@@ -347,8 +370,14 @@
   subroutine setup_MPI_interfaces_ic(MAX_NEIGHBOURS,my_neighbours,nibool_neighbours, &
                                     max_nibool,ibool_neighbours)
 
-  use meshfem3D_par
+  use meshfem3D_par,only: &
+    myrank,iproc_xi,iproc_eta,ichunk,addressing,INCLUDE_CENTRAL_CUBE, &
+    NPROC_XI,NPROC_ETA,NPROCTOT, &
+    NGLOB1D_RADIAL,NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX,NCHUNKS, &
+    OUTPUT_FILES,IFLAG_IN_FICTITIOUS_CUBE,NGLLX,NGLLY,NGLLZ,NSPEC2D_BOTTOM
+    
   use create_MPI_interfaces_par
+  use MPI_inner_core_par  
   implicit none
 
   integer :: MAX_NEIGHBOURS,max_nibool
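
With the new iregion_code argument, interface setup now runs once per region
rather than once for all three regions in a row. A hypothetical driver sketch
(the actual call site is in create_regions_mesh.f90, not shown in this hunk),
assuming the usual region ordering IREGION_CRUST_MANTLE=1 .. IREGION_INNER_CORE=3:

  program region_loop_sketch
  implicit none
  ! usual SPECFEM3D_GLOBE region codes, assumed here for illustration
  integer, parameter :: IREGION_CRUST_MANTLE = 1
  integer, parameter :: IREGION_OUTER_CORE   = 2
  integer, parameter :: IREGION_INNER_CORE   = 3
  integer :: iregion_code
  do iregion_code = IREGION_CRUST_MANTLE,IREGION_INNER_CORE
    ! in the mesher, the region is created first, then its MPI setup runs:
    !   call setup_MPI_interfaces(iregion_code)
    !   call setup_inner_outer(iregion_code)
    !   call setup_color_perm(iregion_code)
    print *,'setting up MPI interfaces for region',iregion_code
  enddo
  end program region_loop_sketch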

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/setup_color_perm.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/setup_color_perm.f90	2012-07-24 22:09:06 UTC (rev 20541)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/setup_color_perm.f90	2012-07-24 22:10:28 UTC (rev 20542)
@@ -27,16 +27,20 @@
 
 
 
-  subroutine setup_color_perm()
+  subroutine setup_color_perm(iregion_code)
 
   use meshfem3D_par,only: &
     myrank,IMAIN,USE_MESH_COLORING_GPU,SAVE_MESH_FILES, &
     IREGION_CRUST_MANTLE,IREGION_OUTER_CORE,IREGION_INNER_CORE
 
-  use create_MPI_interfaces_par
+  use MPI_crust_mantle_par
+  use MPI_outer_core_par
+  use MPI_inner_core_par
 
   implicit none
 
+  integer,intent(in) :: iregion_code
+  
   ! local parameters
   integer, dimension(:), allocatable :: perm
   integer :: ier
@@ -48,165 +52,171 @@
     write(IMAIN,*) 'mesh coloring: ',USE_MESH_COLORING_GPU
   endif
 
-  ! crust mantle
-  ! initializes
-  num_colors_outer_crust_mantle = 0
-  num_colors_inner_crust_mantle = 0
+  select case( iregion_code )
+  case( IREGION_CRUST_MANTLE )
+    ! crust mantle
+    ! initializes
+    num_colors_outer_crust_mantle = 0
+    num_colors_inner_crust_mantle = 0
 
-  ! mesh coloring
-  if( USE_MESH_COLORING_GPU ) then
+    ! mesh coloring
+    if( USE_MESH_COLORING_GPU ) then
 
-    !daniel: safety stop...
-    call exit_mpi(myrank,'MESH COLORING not fully implemented yet, please recompile...')
+      !daniel: safety stop...
+      call exit_mpi(myrank,'MESH COLORING not fully implemented yet, please recompile...')
 
-    ! user output
-    if(myrank == 0) write(IMAIN,*) '  coloring crust mantle... '
+      ! user output
+      if(myrank == 0) write(IMAIN,*) '  coloring crust mantle... '
 
-    ! crust/mantle region
-    nspec = NSPEC_CRUST_MANTLE
-    nglob = NGLOB_CRUST_MANTLE
-    idomain = IREGION_CRUST_MANTLE
+      ! crust/mantle region
+      nspec = NSPEC_CRUST_MANTLE
+      nglob = NGLOB_CRUST_MANTLE
+      idomain = IREGION_CRUST_MANTLE
 
-    ! creates coloring of elements
-    allocate(perm(nspec),stat=ier)
-    if( ier /= 0 ) call exit_mpi(myrank,'error allocating temporary perm crust mantle array')
-    perm(:) = 0
+      ! creates coloring of elements
+      allocate(perm(nspec),stat=ier)
+      if( ier /= 0 ) call exit_mpi(myrank,'error allocating temporary perm crust mantle array')
+      perm(:) = 0
 
-    call setup_color(myrank,nspec,nglob,ibool_crust_mantle,perm, &
-                    idomain,is_on_a_slice_edge_crust_mantle, &
-                    num_phase_ispec_crust_mantle,phase_ispec_inner_crust_mantle, &
-                    SAVE_MESH_FILES)
+      call setup_color(myrank,nspec,nglob,ibool_crust_mantle,perm, &
+                      idomain,is_on_a_slice_edge_crust_mantle, &
+                      num_phase_ispec_crust_mantle,phase_ispec_inner_crust_mantle, &
+                      SAVE_MESH_FILES)
 
-    ! checks
-    if(minval(perm) /= 1) &
-      call exit_MPI(myrank, 'minval(perm) should be 1')
-    if(maxval(perm) /= num_phase_ispec_crust_mantle) &
-      call exit_MPI(myrank, 'maxval(perm) should be num_phase_ispec_crust_mantle')
+      ! checks
+      if(minval(perm) /= 1) &
+        call exit_MPI(myrank, 'minval(perm) should be 1')
+      if(maxval(perm) /= num_phase_ispec_crust_mantle) &
+        call exit_MPI(myrank, 'maxval(perm) should be num_phase_ispec_crust_mantle')
 
-    ! sorts array according to permutation
-    call sync_all()
-    if(myrank == 0) then
-      write(IMAIN,*) '     mesh permutation:'
+      ! sorts array according to permutation
+      call sync_all()
+      if(myrank == 0) then
+        write(IMAIN,*) '     mesh permutation:'
+      endif
+      call setup_permutation(myrank,nspec,nglob,ibool_crust_mantle, &
+                            idomain,perm, &
+                            num_colors_outer_crust_mantle,num_colors_inner_crust_mantle, &
+                            num_elem_colors_crust_mantle, &
+                            num_phase_ispec_crust_mantle,phase_ispec_inner_crust_mantle, &
+                            SAVE_MESH_FILES)
+
+      deallocate(perm)
+    else
+      ! dummy array
+      allocate(num_elem_colors_crust_mantle(num_colors_outer_crust_mantle+num_colors_inner_crust_mantle),stat=ier)
+      if( ier /= 0 ) call exit_mpi(myrank,'error allocating num_elem_colors_crust_mantle array')
     endif
-    call setup_permutation(myrank,nspec,nglob,ibool_crust_mantle, &
-                          idomain,perm, &
-                          num_colors_outer_crust_mantle,num_colors_inner_crust_mantle, &
-                          num_elem_colors_crust_mantle, &
-                          num_phase_ispec_crust_mantle,phase_ispec_inner_crust_mantle, &
-                          SAVE_MESH_FILES)
 
-    deallocate(perm)
-  else
-    ! dummy array
-    allocate(num_elem_colors_crust_mantle(num_colors_outer_crust_mantle+num_colors_inner_crust_mantle),stat=ier)
-    if( ier /= 0 ) call exit_mpi(myrank,'error allocating num_elem_colors_crust_mantle array')
-  endif
+  case( IREGION_OUTER_CORE )
+    ! outer core
+    ! initializes
+    num_colors_outer_outer_core = 0
+    num_colors_inner_outer_core = 0
 
-  ! outer core
-  ! initializes
-  num_colors_outer_outer_core = 0
-  num_colors_inner_outer_core = 0
+    ! mesh coloring
+    if( USE_MESH_COLORING_GPU ) then
 
-  ! mesh coloring
-  if( USE_MESH_COLORING_GPU ) then
+      ! user output
+      if(myrank == 0) write(IMAIN,*) '  coloring outer core... '
 
-    ! user output
-    if(myrank == 0) write(IMAIN,*) '  coloring outer core... '
+      ! outer core region
+      nspec = NSPEC_OUTER_CORE
+      nglob = NGLOB_OUTER_CORE
+      idomain = IREGION_OUTER_CORE
 
-    ! outer core region
-    nspec = NSPEC_OUTER_CORE
-    nglob = NGLOB_OUTER_CORE
-    idomain = IREGION_OUTER_CORE
+      ! creates coloring of elements
+      allocate(perm(nspec),stat=ier)
+      if( ier /= 0 ) call exit_mpi(myrank,'error allocating temporary perm outer_core array')
+      perm(:) = 0
 
-    ! creates coloring of elements
-    allocate(perm(nspec),stat=ier)
-    if( ier /= 0 ) call exit_mpi(myrank,'error allocating temporary perm outer_core array')
-    perm(:) = 0
+      call setup_color(myrank,nspec,nglob,ibool_outer_core,perm, &
+                      idomain,is_on_a_slice_edge_outer_core, &
+                      num_phase_ispec_outer_core,phase_ispec_inner_outer_core, &
+                      SAVE_MESH_FILES)
 
-    call setup_color(myrank,nspec,nglob,ibool_outer_core,perm, &
-                    idomain,is_on_a_slice_edge_outer_core, &
-                    num_phase_ispec_outer_core,phase_ispec_inner_outer_core, &
-                    SAVE_MESH_FILES)
+      ! checks
+      if(minval(perm) /= 1) &
+        call exit_MPI(myrank, 'minval(perm) should be 1')
+      if(maxval(perm) /= num_phase_ispec_outer_core) &
+        call exit_MPI(myrank, 'maxval(perm) should be num_phase_ispec_outer_core')
 
-    ! checks
-    if(minval(perm) /= 1) &
-      call exit_MPI(myrank, 'minval(perm) should be 1')
-    if(maxval(perm) /= num_phase_ispec_outer_core) &
-      call exit_MPI(myrank, 'maxval(perm) should be num_phase_ispec_outer_core')
+      ! sorts array according to permutation
+      call sync_all()
+      if(myrank == 0) then
+        write(IMAIN,*) '     mesh permutation:'
+      endif
+      call setup_permutation(myrank,nspec,nglob,ibool_outer_core, &
+                            idomain,perm, &
+                            num_colors_outer_outer_core,num_colors_inner_outer_core, &
+                            num_elem_colors_outer_core, &
+                            num_phase_ispec_outer_core,phase_ispec_inner_outer_core, &
+                            SAVE_MESH_FILES)
 
-    ! sorts array according to permutation
-    call sync_all()
-    if(myrank == 0) then
-      write(IMAIN,*) '     mesh permutation:'
+      deallocate(perm)
+    else
+      ! dummy array
+      allocate(num_elem_colors_outer_core(num_colors_outer_outer_core+num_colors_inner_outer_core),stat=ier)
+      if( ier /= 0 ) call exit_mpi(myrank,'error allocating num_elem_colors_outer_core array')
     endif
-    call setup_permutation(myrank,nspec,nglob,ibool_outer_core, &
-                          idomain,perm, &
-                          num_colors_outer_outer_core,num_colors_inner_outer_core, &
-                          num_elem_colors_outer_core, &
-                          num_phase_ispec_outer_core,phase_ispec_inner_outer_core, &
-                          SAVE_MESH_FILES)
 
-    deallocate(perm)
-  else
-    ! dummy array
-    allocate(num_elem_colors_outer_core(num_colors_outer_outer_core+num_colors_inner_outer_core),stat=ier)
-    if( ier /= 0 ) call exit_mpi(myrank,'error allocating num_elem_colors_outer_core array')
-  endif
+  case( IREGION_INNER_CORE )
+    ! inner core
+    ! initializes
+    num_colors_outer_inner_core = 0
+    num_colors_inner_inner_core = 0
 
-  ! inner core
-  ! initializes
-  num_colors_outer_inner_core = 0
-  num_colors_inner_inner_core = 0
+    ! mesh coloring
+    if( USE_MESH_COLORING_GPU ) then
 
-  ! mesh coloring
-  if( USE_MESH_COLORING_GPU ) then
+      ! user output
+      if(myrank == 0) write(IMAIN,*) '  coloring inner core... '
 
-    ! user output
-    if(myrank == 0) write(IMAIN,*) '  coloring inner core... '
+      ! inner core region
+      nspec = NSPEC_INNER_CORE
+      nglob = NGLOB_INNER_CORE
+      idomain = IREGION_INNER_CORE
 
-    ! inner core region
-    nspec = NSPEC_INNER_CORE
-    nglob = NGLOB_INNER_CORE
-    idomain = IREGION_INNER_CORE
+      ! creates coloring of elements
+      allocate(perm(nspec),stat=ier)
+      if( ier /= 0 ) call exit_mpi(myrank,'error allocating temporary perm inner_core array')
+      perm(:) = 0
 
-    ! creates coloring of elements
-    allocate(perm(nspec),stat=ier)
-    if( ier /= 0 ) call exit_mpi(myrank,'error allocating temporary perm inner_core array')
-    perm(:) = 0
+      call setup_color(myrank,nspec,nglob,ibool_inner_core,perm, &
+                      idomain,is_on_a_slice_edge_inner_core, &
+                      num_phase_ispec_inner_core,phase_ispec_inner_inner_core, &
+                      SAVE_MESH_FILES)
 
-    call setup_color(myrank,nspec,nglob,ibool_inner_core,perm, &
-                    idomain,is_on_a_slice_edge_inner_core, &
-                    num_phase_ispec_inner_core,phase_ispec_inner_inner_core, &
-                    SAVE_MESH_FILES)
+      ! checks
+      ! the inner core contains fictitious elements that are not accounted for
+      if(minval(perm) < 0) &
+        call exit_MPI(myrank, 'minval(perm) should be at least 0')
+      if(maxval(perm) > num_phase_ispec_inner_core) then
+        print*,'error perm inner core:',minval(perm),maxval(perm),num_phase_ispec_inner_core
+        call exit_MPI(myrank, 'maxval(perm) should be num_phase_ispec_inner_core')
+      endif
 
-    ! checks
-    ! inner core contains ficticious elements not counted for
-    if(minval(perm) < 0) &
-      call exit_MPI(myrank, 'minval(perm) should be at least 0')
-    if(maxval(perm) > num_phase_ispec_inner_core) then
-      print*,'error perm inner core:',minval(perm),maxval(perm),num_phase_ispec_inner_core
-      call exit_MPI(myrank, 'maxval(perm) should be num_phase_ispec_inner_core')
-    endif
+      ! sorts array according to permutation
+      call sync_all()
+      if(myrank == 0) then
+        write(IMAIN,*) '     mesh permutation:'
+      endif
+      call setup_permutation(myrank,nspec,nglob,ibool_inner_core, &
+                            idomain,perm, &
+                            num_colors_outer_inner_core,num_colors_inner_inner_core, &
+                            num_elem_colors_inner_core, &
+                            num_phase_ispec_inner_core,phase_ispec_inner_inner_core, &
+                            SAVE_MESH_FILES)
 
-    ! sorts array according to permutation
-    call sync_all()
-    if(myrank == 0) then
-      write(IMAIN,*) '     mesh permutation:'
+      deallocate(perm)
+    else
+      ! dummy array
+      allocate(num_elem_colors_inner_core(num_colors_outer_inner_core+num_colors_inner_inner_core),stat=ier)
+      if( ier /= 0 ) call exit_mpi(myrank,'error allocating num_elem_colors_inner_core array')
     endif
-    call setup_permutation(myrank,nspec,nglob,ibool_inner_core, &
-                          idomain,perm, &
-                          num_colors_outer_inner_core,num_colors_inner_inner_core, &
-                          num_elem_colors_inner_core, &
-                          num_phase_ispec_inner_core,phase_ispec_inner_inner_core, &
-                          SAVE_MESH_FILES)
 
-    deallocate(perm)
-  else
-    ! dummy array
-    allocate(num_elem_colors_inner_core(num_colors_outer_inner_core+num_colors_inner_inner_core),stat=ier)
-    if( ier /= 0 ) call exit_mpi(myrank,'error allocating num_elem_colors_inner_core array')
-  endif
-
+  end select
+  
   end subroutine setup_color_perm
 
 !
@@ -223,13 +233,17 @@
     LOCAL_PATH,MAX_NUMBER_OF_COLORS,IMAIN,NGLLX,NGLLY,NGLLZ,IFLAG_IN_FICTITIOUS_CUBE, &
     IREGION_CRUST_MANTLE,IREGION_OUTER_CORE,IREGION_INNER_CORE
 
-  use create_MPI_interfaces_par,only: &
+  use MPI_crust_mantle_par,only: &
     num_colors_outer_crust_mantle,num_colors_inner_crust_mantle,num_elem_colors_crust_mantle, &
-    num_colors_outer_outer_core,num_colors_inner_outer_core,num_elem_colors_outer_core, &
+    xstore => xstore_crust_mantle,ystore => ystore_crust_mantle,zstore => zstore_crust_mantle
+    
+  use MPI_outer_core_par,only: &
+    num_colors_outer_outer_core,num_colors_inner_outer_core,num_elem_colors_outer_core
+  
+  use MPI_inner_core_par,only: &
     num_colors_outer_inner_core,num_colors_inner_inner_core,num_elem_colors_inner_core, &
-    idoubling_inner_core, &
-    xstore => xstore_crust_mantle,ystore => ystore_crust_mantle,zstore => zstore_crust_mantle
-
+    idoubling_inner_core  
+  
   implicit none
 
   integer :: myrank,nspec,nglob
@@ -558,11 +572,15 @@
     CUSTOM_REAL,LOCAL_PATH,NGLLX,NGLLY,NGLLZ,IMAIN, &
     IREGION_CRUST_MANTLE,IREGION_OUTER_CORE,IREGION_INNER_CORE
 
-  use create_MPI_interfaces_par,only: &
+  use MPI_crust_mantle_par,only: &
     NSPEC_CRUST_MANTLE,ibool_crust_mantle,is_on_a_slice_edge_crust_mantle, &
-    NSPEC_OUTER_CORE,ibool_outer_core,is_on_a_slice_edge_outer_core, &
-    NSPEC_INNER_CORE,ibool_inner_core,is_on_a_slice_edge_inner_core, &
     xstore => xstore_crust_mantle,ystore => ystore_crust_mantle,zstore => zstore_crust_mantle
+    
+  use MPI_outer_core_par,only: &
+    NSPEC_OUTER_CORE,ibool_outer_core,is_on_a_slice_edge_outer_core
+  
+  use MPI_inner_core_par,only: &
+    NSPEC_INNER_CORE,ibool_inner_core,is_on_a_slice_edge_inner_core
 
   implicit none
 

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/setup_inner_outer.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/setup_inner_outer.f90	2012-07-24 22:09:06 UTC (rev 20541)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/setup_inner_outer.f90	2012-07-24 22:10:28 UTC (rev 20542)
@@ -26,11 +26,19 @@
 !=====================================================================
 
 
-  subroutine setup_Inner_Outer()
+  subroutine setup_inner_outer(iregion_code)
 
-  use meshfem3D_par
-  use create_MPI_interfaces_par
+  use meshfem3D_par,only: &
+    myrank,OUTPUT_FILES,IMAIN, &
+    IREGION_CRUST_MANTLE,IREGION_OUTER_CORE,IREGION_INNER_CORE
+    
+  use MPI_crust_mantle_par
+  use MPI_outer_core_par
+  use MPI_inner_core_par
+  
   implicit none
+  
+  integer,intent(in) :: iregion_code
 
   ! local parameters
   real :: percentage_edge
@@ -43,123 +51,136 @@
   !
   ! note: arrays is_on_a_slice_edge_.. have flags set for elements which need to
   !         communicate with other MPI processes
+  select case( iregion_code )
+  case( IREGION_CRUST_MANTLE )
+    ! crust_mantle
+    nspec_outer_crust_mantle = count( is_on_a_slice_edge_crust_mantle )
+    nspec_inner_crust_mantle = NSPEC_CRUST_MANTLE - nspec_outer_crust_mantle
 
-  ! crust_mantle
-  nspec_outer_crust_mantle = count( is_on_a_slice_edge_crust_mantle )
-  nspec_inner_crust_mantle = NSPEC_CRUST_MANTLE - nspec_outer_crust_mantle
+    num_phase_ispec_crust_mantle = max(nspec_inner_crust_mantle,nspec_outer_crust_mantle)
 
-  num_phase_ispec_crust_mantle = max(nspec_inner_crust_mantle,nspec_outer_crust_mantle)
+    allocate(phase_ispec_inner_crust_mantle(num_phase_ispec_crust_mantle,2),stat=ier)
+    if( ier /= 0 ) call exit_mpi(myrank,'error allocating array phase_ispec_inner_crust_mantle')
 
-  allocate(phase_ispec_inner_crust_mantle(num_phase_ispec_crust_mantle,2),stat=ier)
-  if( ier /= 0 ) call exit_mpi(myrank,'error allocating array phase_ispec_inner_crust_mantle')
+    phase_ispec_inner_crust_mantle(:,:) = 0
+    iinner = 0
+    iouter = 0
+    do ispec=1,NSPEC_CRUST_MANTLE
+      if( is_on_a_slice_edge_crust_mantle(ispec) ) then
+        ! outer element
+        iouter = iouter + 1
+        phase_ispec_inner_crust_mantle(iouter,1) = ispec
+      else
+        ! inner element
+        iinner = iinner + 1
+        phase_ispec_inner_crust_mantle(iinner,2) = ispec
+      endif
+    enddo
+    
+    ! user output
+    if(myrank == 0) then
+      write(IMAIN,*)
+      write(IMAIN,*) 'for overlapping of communications with calculations:'
+      write(IMAIN,*)
+      percentage_edge = 100. * nspec_outer_crust_mantle / real(NSPEC_CRUST_MANTLE)
+      write(IMAIN,*) 'percentage of edge elements in crust/mantle ',percentage_edge,'%'
+      write(IMAIN,*) 'percentage of volume elements in crust/mantle ',100. - percentage_edge,'%'
+      write(IMAIN,*)
+    endif
 
-  phase_ispec_inner_crust_mantle(:,:) = 0
-  iinner = 0
-  iouter = 0
-  do ispec=1,NSPEC_CRUST_MANTLE
-    if( is_on_a_slice_edge_crust_mantle(ispec) ) then
-      ! outer element
-      iouter = iouter + 1
-      phase_ispec_inner_crust_mantle(iouter,1) = ispec
-    else
-      ! inner element
-      iinner = iinner + 1
-      phase_ispec_inner_crust_mantle(iinner,2) = ispec
+    ! debug: saves element flags
+    if( DEBUG_INTERFACES ) then
+      write(filename,'(a,i6.6)') trim(OUTPUT_FILES)//'/MPI_innerouter_crust_mantle_proc',myrank
+      call write_VTK_data_elem_l(NSPEC_CRUST_MANTLE,NGLOB_CRUST_MANTLE, &
+                                xstore_crust_mantle,ystore_crust_mantle,zstore_crust_mantle, &
+                                ibool_crust_mantle, &
+                                is_on_a_slice_edge_crust_mantle,filename)
     endif
-  enddo
 
-  ! outer_core
-  nspec_outer_outer_core = count( is_on_a_slice_edge_outer_core )
-  nspec_inner_outer_core = NSPEC_OUTER_CORE - nspec_outer_outer_core
+  case( IREGION_OUTER_CORE )
+    ! outer_core
+    nspec_outer_outer_core = count( is_on_a_slice_edge_outer_core )
+    nspec_inner_outer_core = NSPEC_OUTER_CORE - nspec_outer_outer_core
 
-  num_phase_ispec_outer_core = max(nspec_inner_outer_core,nspec_outer_outer_core)
+    num_phase_ispec_outer_core = max(nspec_inner_outer_core,nspec_outer_outer_core)
 
-  allocate(phase_ispec_inner_outer_core(num_phase_ispec_outer_core,2),stat=ier)
-  if( ier /= 0 ) call exit_mpi(myrank,'error allocating array phase_ispec_inner_outer_core')
+    allocate(phase_ispec_inner_outer_core(num_phase_ispec_outer_core,2),stat=ier)
+    if( ier /= 0 ) call exit_mpi(myrank,'error allocating array phase_ispec_inner_outer_core')
 
-  phase_ispec_inner_outer_core(:,:) = 0
-  iinner = 0
-  iouter = 0
-  do ispec=1,NSPEC_OUTER_CORE
-    if( is_on_a_slice_edge_outer_core(ispec) ) then
-      ! outer element
-      iouter = iouter + 1
-      phase_ispec_inner_outer_core(iouter,1) = ispec
-    else
-      ! inner element
-      iinner = iinner + 1
-      phase_ispec_inner_outer_core(iinner,2) = ispec
+    phase_ispec_inner_outer_core(:,:) = 0
+    iinner = 0
+    iouter = 0
+    do ispec=1,NSPEC_OUTER_CORE
+      if( is_on_a_slice_edge_outer_core(ispec) ) then
+        ! outer element
+        iouter = iouter + 1
+        phase_ispec_inner_outer_core(iouter,1) = ispec
+      else
+        ! inner element
+        iinner = iinner + 1
+        phase_ispec_inner_outer_core(iinner,2) = ispec
+      endif
+    enddo
+
+    ! user output
+    if(myrank == 0) then
+      percentage_edge = 100.* nspec_outer_outer_core / real(NSPEC_OUTER_CORE)
+      write(IMAIN,*) 'percentage of edge elements in outer core ',percentage_edge,'%'
+      write(IMAIN,*) 'percentage of volume elements in outer core ',100. - percentage_edge,'%'
+      write(IMAIN,*)
     endif
-  enddo
 
-  ! inner_core
-  nspec_outer_inner_core = count( is_on_a_slice_edge_inner_core )
-  nspec_inner_inner_core = NSPEC_INNER_CORE - nspec_outer_inner_core
-
-  num_phase_ispec_inner_core = max(nspec_inner_inner_core,nspec_outer_inner_core)
-
-  allocate(phase_ispec_inner_inner_core(num_phase_ispec_inner_core,2),stat=ier)
-  if( ier /= 0 ) call exit_mpi(myrank,'error allocating array phase_ispec_inner_inner_core')
-
-  phase_ispec_inner_inner_core(:,:) = 0
-  iinner = 0
-  iouter = 0
-  do ispec=1,NSPEC_INNER_CORE
-    if( is_on_a_slice_edge_inner_core(ispec) ) then
-      ! outer element
-      iouter = iouter + 1
-      phase_ispec_inner_inner_core(iouter,1) = ispec
-    else
-      ! inner element
-      iinner = iinner + 1
-      phase_ispec_inner_inner_core(iinner,2) = ispec
+    ! debug: saves element flags
+    if( DEBUG_INTERFACES ) then
+      write(filename,'(a,i6.6)') trim(OUTPUT_FILES)//'/MPI_innerouter_outer_core_proc',myrank
+      call write_VTK_data_elem_l(NSPEC_OUTER_CORE,NGLOB_OUTER_CORE, &
+                                xstore_outer_core,ystore_outer_core,zstore_outer_core, &
+                                ibool_outer_core, &
+                                is_on_a_slice_edge_outer_core,filename)
     endif
-  enddo
 
-  ! user output
-  if(myrank == 0) then
+  case( IREGION_INNER_CORE )
+    ! inner_core
+    nspec_outer_inner_core = count( is_on_a_slice_edge_inner_core )
+    nspec_inner_inner_core = NSPEC_INNER_CORE - nspec_outer_inner_core
 
-    write(IMAIN,*)
-    write(IMAIN,*) 'for overlapping of communications with calculations:'
-    write(IMAIN,*)
+    num_phase_ispec_inner_core = max(nspec_inner_inner_core,nspec_outer_inner_core)
 
-    percentage_edge = 100. * nspec_outer_crust_mantle / real(NSPEC_CRUST_MANTLE)
-    write(IMAIN,*) 'percentage of edge elements in crust/mantle ',percentage_edge,'%'
-    write(IMAIN,*) 'percentage of volume elements in crust/mantle ',100. - percentage_edge,'%'
-    write(IMAIN,*)
+    allocate(phase_ispec_inner_inner_core(num_phase_ispec_inner_core,2),stat=ier)
+    if( ier /= 0 ) call exit_mpi(myrank,'error allocating array phase_ispec_inner_inner_core')
 
-    percentage_edge = 100.* nspec_outer_outer_core / real(NSPEC_OUTER_CORE)
-    write(IMAIN,*) 'percentage of edge elements in outer core ',percentage_edge,'%'
-    write(IMAIN,*) 'percentage of volume elements in outer core ',100. - percentage_edge,'%'
-    write(IMAIN,*)
+    phase_ispec_inner_inner_core(:,:) = 0
+    iinner = 0
+    iouter = 0
+    do ispec=1,NSPEC_INNER_CORE
+      if( is_on_a_slice_edge_inner_core(ispec) ) then
+        ! outer element
+        iouter = iouter + 1
+        phase_ispec_inner_inner_core(iouter,1) = ispec
+      else
+        ! inner element
+        iinner = iinner + 1
+        phase_ispec_inner_inner_core(iinner,2) = ispec
+      endif
+    enddo
+    
+    ! user output
+    if(myrank == 0) then
+      percentage_edge = 100. * nspec_outer_inner_core / real(NSPEC_INNER_CORE)
+      write(IMAIN,*) 'percentage of edge elements in inner core ',percentage_edge,'%'
+      write(IMAIN,*) 'percentage of volume elements in inner core ',100. - percentage_edge,'%'
+      write(IMAIN,*)
+    endif
 
-    percentage_edge = 100. * nspec_outer_inner_core / real(NSPEC_INNER_CORE)
-    write(IMAIN,*) 'percentage of edge elements in inner core ',percentage_edge,'%'
-    write(IMAIN,*) 'percentage of volume elements in inner core ',100. - percentage_edge,'%'
-    write(IMAIN,*)
+    ! debug: saves element flags
+    if( DEBUG_INTERFACES ) then
+      write(filename,'(a,i6.6)') trim(OUTPUT_FILES)//'/MPI_innerouter_inner_core_proc',myrank
+      call write_VTK_data_elem_l(NSPEC_INNER_CORE,NGLOB_INNER_CORE, &
+                                xstore_inner_core,ystore_inner_core,zstore_inner_core, &
+                                ibool_inner_core, &
+                                is_on_a_slice_edge_inner_core,filename)
+    endif
 
-  endif
-
-  ! debug: saves element flags
-  if( DEBUG_INTERFACES ) then
-    ! crust mantle
-    write(filename,'(a,i6.6)') trim(OUTPUT_FILES)//'/MPI_innerouter_crust_mantle_proc',myrank
-    call write_VTK_data_elem_l(NSPEC_CRUST_MANTLE,NGLOB_CRUST_MANTLE, &
-                              xstore_crust_mantle,ystore_crust_mantle,zstore_crust_mantle, &
-                              ibool_crust_mantle, &
-                              is_on_a_slice_edge_crust_mantle,filename)
-    ! outer core
-    write(filename,'(a,i6.6)') trim(OUTPUT_FILES)//'/MPI_innerouter_outer_core_proc',myrank
-    call write_VTK_data_elem_l(NSPEC_OUTER_CORE,NGLOB_OUTER_CORE, &
-                              xstore_outer_core,ystore_outer_core,zstore_outer_core, &
-                              ibool_outer_core, &
-                              is_on_a_slice_edge_outer_core,filename)
-    ! inner core
-    write(filename,'(a,i6.6)') trim(OUTPUT_FILES)//'/MPI_innerouter_inner_core_proc',myrank
-    call write_VTK_data_elem_l(NSPEC_INNER_CORE,NGLOB_INNER_CORE, &
-                              xstore_inner_core,ystore_inner_core,zstore_inner_core, &
-                              ibool_inner_core, &
-                              is_on_a_slice_edge_inner_core,filename)
-  endif
-
+  end select
+  
   end subroutine setup_Inner_Outer
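
The phase_ispec_inner_* arrays built above are sized max(inner,outer) x 2 and
zero-padded, with outer (slice-edge) elements in column 1 and inner elements in
column 2. A standalone sketch of building and consuming such a list in the usual
two-phase pattern that overlaps communication with computation (toy data, not
part of this commit):

  program phase_loop_sketch
  implicit none
  integer, parameter :: nspec = 6
  logical, dimension(nspec) :: is_on_a_slice_edge = &
    (/ .true., .false., .true., .false., .false., .false. /)
  integer :: nspec_outer,nspec_inner,num_phase_ispec
  integer, allocatable :: phase_ispec_inner(:,:)
  integer :: iphase,ispec,ispec_p,iinner,iouter,num_elements
  ! build the zero-padded two-column list exactly as setup_inner_outer does
  nspec_outer = count( is_on_a_slice_edge )
  nspec_inner = nspec - nspec_outer
  num_phase_ispec = max(nspec_inner,nspec_outer)
  allocate(phase_ispec_inner(num_phase_ispec,2))
  phase_ispec_inner(:,:) = 0
  iinner = 0
  iouter = 0
  do ispec = 1,nspec
    if( is_on_a_slice_edge(ispec) ) then
      iouter = iouter + 1
      phase_ispec_inner(iouter,1) = ispec
    else
      iinner = iinner + 1
      phase_ispec_inner(iinner,2) = ispec
    endif
  enddo
  ! two-phase consumption: slice-edge elements first, then interior elements
  ! computed while non-blocking MPI messages would be in flight
  do iphase = 1,2
    if( iphase == 1 ) then
      num_elements = nspec_outer
    else
      num_elements = nspec_inner
    endif
    do ispec_p = 1,num_elements
      print *,'phase',iphase,'element',phase_ispec_inner(ispec_p,iphase)
    enddo
  enddo
  end program phase_loop_sketch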

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/test_MPI_interfaces.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/test_MPI_interfaces.f90	2012-07-24 22:09:06 UTC (rev 20541)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/test_MPI_interfaces.f90	2012-07-24 22:10:28 UTC (rev 20542)
@@ -33,11 +33,12 @@
 
   use constants
   use meshfem3D_par,only: NPROCTOT,myrank
-  use create_MPI_interfaces_par,only: NGLOB_CRUST_MANTLE,NGLOB_OUTER_CORE,NGLOB_INNER_CORE
+  use MPI_crust_mantle_par,only: NGLOB_CRUST_MANTLE
+  use MPI_outer_core_par,only: NGLOB_OUTER_CORE
+  use MPI_inner_core_par,only: NGLOB_INNER_CORE
+  
   implicit none
 
-  include 'mpif.h'
-
   integer,intent(in) :: iregion_code
   integer,intent(in) :: num_interfaces,max_nibool_interfaces
   integer,dimension(num_interfaces),intent(in) :: my_neighbours,nibool_interfaces
@@ -47,7 +48,6 @@
   integer,dimension(:),allocatable :: dummy_i
   integer,dimension(:,:),allocatable :: test_interfaces
   integer,dimension(:,:),allocatable :: test_interfaces_nibool
-  integer :: msg_status(MPI_STATUS_SIZE)
   integer :: ineighbour,iproc,inum,i,j,ier,ipoints,max_num,iglob
   logical :: is_okay
   logical,dimension(:),allocatable :: mask
@@ -124,7 +124,7 @@
 
   ! checks neighbors
   ! gets maximum interfaces from all processes
-  call MPI_REDUCE(num_interfaces,max_num,1,MPI_INTEGER,MPI_MAX,0,MPI_COMM_WORLD,ier)
+  call max_all_i(num_interfaces,max_num)
 
   ! master gathers infos
   if( myrank == 0 ) then
@@ -150,23 +150,32 @@
     ! collects from other processes
     do iproc=1,NPROCTOT-1
       ! gets number of interfaces
-      call MPI_RECV(inum,1,MPI_INTEGER,iproc,itag,MPI_COMM_WORLD,msg_status,ier)
+      !call MPI_RECV(inum,1,MPI_INTEGER,iproc,itag,MPI_COMM_WORLD,msg_status,ier)
+      call recv_singlei(inum,iproc,itag)
       dummy_i(iproc) = inum
       if( inum > 0 ) then
-        call MPI_RECV(test_interfaces(1:inum,iproc),inum, &
-                      MPI_INTEGER,iproc,itag,MPI_COMM_WORLD,msg_status,ier)
-        call MPI_RECV(test_interfaces_nibool(1:inum,iproc),inum, &
-                      MPI_INTEGER,iproc,itag,MPI_COMM_WORLD,msg_status,ier)
+        !call MPI_RECV(test_interfaces(1:inum,iproc),inum, &
+        !              MPI_INTEGER,iproc,itag,MPI_COMM_WORLD,msg_status,ier)
+        call recv_i(test_interfaces(1:inum,iproc),inum,iproc,itag)
+        
+        !call MPI_RECV(test_interfaces_nibool(1:inum,iproc),inum, &
+        !              MPI_INTEGER,iproc,itag,MPI_COMM_WORLD,msg_status,ier)
+        call recv_i(test_interfaces_nibool(1:inum,iproc),inum,iproc,itag)        
       endif
     enddo
   else
     ! sends infos to master process
-    call MPI_SEND(num_interfaces,1,MPI_INTEGER,0,itag,MPI_COMM_WORLD,ier)
+    !call MPI_SEND(num_interfaces,1,MPI_INTEGER,0,itag,MPI_COMM_WORLD,ier)
+    call send_singlei(num_interfaces,0,itag)
     if( num_interfaces > 0 ) then
-      call MPI_SEND(my_neighbours(1:num_interfaces),num_interfaces, &
-                    MPI_INTEGER,0,itag,MPI_COMM_WORLD,ier)
-      call MPI_SEND(nibool_interfaces(1:num_interfaces),num_interfaces, &
-                    MPI_INTEGER,0,itag,MPI_COMM_WORLD,ier)
+      !call MPI_SEND(my_neighbours(1:num_interfaces),num_interfaces, &
+      !              MPI_INTEGER,0,itag,MPI_COMM_WORLD,ier)
+      call send_i(my_neighbours(1:num_interfaces),num_interfaces,0,itag)
+
+      !call MPI_SEND(nibool_interfaces(1:num_interfaces),num_interfaces, &
+      !              MPI_INTEGER,0,itag,MPI_COMM_WORLD,ier)
+      call send_i(nibool_interfaces(1:num_interfaces),num_interfaces,0,itag)
+                    
     endif
   endif
   call sync_all()
@@ -239,10 +248,10 @@
 
   use meshfem3D_par,only: NPROCTOT,myrank
   use create_MPI_interfaces_par
+  use MPI_crust_mantle_par
+
   implicit none
 
-  include 'mpif.h'
-
   ! local parameters
   real(kind=CUSTOM_REAL),dimension(:,:),allocatable :: test_flag_vector
   integer :: i,j,iglob,ier
@@ -270,17 +279,17 @@
   enddo
   ! total number of  interface points
   i = sum(nibool_interfaces_crust_mantle)
-  call MPI_REDUCE(i,inum,1,MPI_INTEGER,MPI_SUM,0,MPI_COMM_WORLD,ier)
+  call sum_all_i(i,inum)  
 
   ! total number of unique points (some could be shared between different processes)
   i = nint( sum(test_flag_vector) )
   num_unique= i
-  call MPI_REDUCE(i,icount,1,MPI_INTEGER,MPI_SUM,0,MPI_COMM_WORLD,ier)
+  call sum_all_i(i,icount)
 
   ! maximum valence
   i = maxval( valence(:) )
   num_max_valence = i
-  call MPI_REDUCE(i,ival,1,MPI_INTEGER,MPI_MAX,0,MPI_COMM_WORLD,ier)
+  call max_all_i(i,ival)  
 
   ! user output
   if( myrank == 0 ) then
@@ -293,7 +302,7 @@
   test_flag_vector(:,:) = 1.0_CUSTOM_REAL
 
   ! adds contributions from different partitions to flag arrays
-  call assemble_MPI_vector_ext_mesh(NPROCTOT,NGLOB_CRUST_MANTLE, &
+  call assemble_MPI_vector(NPROCTOT,NGLOB_CRUST_MANTLE, &
                       test_flag_vector, &
                       num_interfaces_crust_mantle,max_nibool_interfaces_crust_mantle, &
                       nibool_interfaces_crust_mantle,ibool_interfaces_crust_mantle,&
@@ -324,7 +333,7 @@
   endif
 
   ! total number of assembly points
-  call MPI_REDUCE(i,inum,1,MPI_INTEGER,MPI_SUM,0,MPI_COMM_WORLD,ier)
+  call sum_all_i(i,inum)    
 
   ! points defined by interfaces
   if( myrank == 0 ) then
@@ -354,10 +363,10 @@
 
   use meshfem3D_par,only: NPROCTOT,myrank
   use create_MPI_interfaces_par
+  use MPI_outer_core_par
+  
   implicit none
 
-  include 'mpif.h'
-
   ! local parameters
   real(kind=CUSTOM_REAL),dimension(:),allocatable :: test_flag
   integer :: i,j,iglob,ier
@@ -383,16 +392,16 @@
     enddo
   enddo
   i = sum(nibool_interfaces_outer_core)
-  call MPI_REDUCE(i,inum,1,MPI_INTEGER,MPI_SUM,0,MPI_COMM_WORLD,ier)
+  call sum_all_i(i,inum)  
 
   i = nint( sum(test_flag) )
   num_unique = i
-  call MPI_REDUCE(i,icount,1,MPI_INTEGER,MPI_SUM,0,MPI_COMM_WORLD,ier)
+  call sum_all_i(i,icount)  
 
   ! maximum valence
   i = maxval( valence(:) )
   num_max_valence = i
-  call MPI_REDUCE(i,ival,1,MPI_INTEGER,MPI_MAX,0,MPI_COMM_WORLD,ier)
+  call max_all_i(i,ival)  
 
   if( myrank == 0 ) then
     write(IMAIN,*) '  total MPI interface points : ',inum
@@ -404,7 +413,7 @@
   test_flag(:) = 1.0_CUSTOM_REAL
 
   ! adds contributions from different partitions to flag arrays
-  call assemble_MPI_scalar_ext_mesh(NPROCTOT,NGLOB_OUTER_CORE, &
+  call assemble_MPI_scalar(NPROCTOT,NGLOB_OUTER_CORE, &
                                 test_flag, &
                                 num_interfaces_outer_core,max_nibool_interfaces_outer_core, &
                                 nibool_interfaces_outer_core,ibool_interfaces_outer_core,&
@@ -432,9 +441,8 @@
     print*,'error test outer core : rank',myrank,'unique mpi points:',i,num_unique
     call exit_mpi(myrank,'error MPI assembly outer core')
   endif
+  call sum_all_i(i,inum)  
 
-  call MPI_REDUCE(i,inum,1,MPI_INTEGER,MPI_SUM,0,MPI_COMM_WORLD,ier)
-
   ! output
   if( myrank == 0 ) then
     ! checks
@@ -464,10 +472,10 @@
 
   use meshfem3D_par,only: NPROCTOT,myrank
   use create_MPI_interfaces_par
+  use MPI_inner_core_par
+  
   implicit none
 
-  include 'mpif.h'
-
   ! local parameters
   real(kind=CUSTOM_REAL),dimension(:,:),allocatable :: test_flag_vector
   integer :: i,j,iglob,ier
@@ -494,16 +502,16 @@
     enddo
   enddo
   i = sum(nibool_interfaces_inner_core)
-  call MPI_REDUCE(i,inum,1,MPI_INTEGER,MPI_SUM,0,MPI_COMM_WORLD,ier)
+  call sum_all_i(i,inum)  
 
   i = nint( sum(test_flag_vector) )
   num_unique= i
-  call MPI_REDUCE(i,icount,1,MPI_INTEGER,MPI_SUM,0,MPI_COMM_WORLD,ier)
+  call sum_all_i(i,icount)  
 
   ! maximum valence
   i = maxval( valence(:) )
   num_max_valence = i
-  call MPI_REDUCE(i,ival,1,MPI_INTEGER,MPI_MAX,0,MPI_COMM_WORLD,ier)
+  call max_all_i(i,ival)  
 
   if( myrank == 0 ) then
     write(IMAIN,*) '  total MPI interface points : ',inum
@@ -515,7 +523,7 @@
   test_flag_vector = 1.0_CUSTOM_REAL
 
   ! adds contributions from different partitions to flag arrays
-  call assemble_MPI_vector_ext_mesh(NPROCTOT,NGLOB_INNER_CORE, &
+  call assemble_MPI_vector(NPROCTOT,NGLOB_INNER_CORE, &
                       test_flag_vector, &
                       num_interfaces_inner_core,max_nibool_interfaces_inner_core, &
                       nibool_interfaces_inner_core,ibool_interfaces_inner_core,&
@@ -545,9 +553,8 @@
     print*,'error test inner core : rank',myrank,'unique mpi points:',i,num_unique
     call exit_mpi(myrank,'error MPI assembly inner core')
   endif
+  call sum_all_i(i,inum)
 
-  call MPI_REDUCE(i,inum,1,MPI_INTEGER,MPI_SUM,0,MPI_COMM_WORLD,ier)
-
   if( myrank == 0 ) then
     ! checks
     if( inum /= icount ) then
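
The assembly tests above all check one invariant: a field set to 1 everywhere and
then assembled must hold, at each interface point, the number of slices sharing
that point (its valence). A serial toy showing the arithmetic behind the
inum/icount comparisons (not part of the commit):

  program valence_toy
  implicit none
  integer, parameter :: valence = 4   ! e.g. four slices meeting at a corner point
  real :: point_value
  integer :: islice
  point_value = 0.
  ! during assembly, each slice contributes its local value of 1
  do islice = 1,valence
    point_value = point_value + 1.
  enddo
  print *,point_value   ! prints 4.0, the valence of the shared point
  end program valence_toy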

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/shared/exit_mpi.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/shared/exit_mpi.f90	2012-07-24 22:09:06 UTC (rev 20541)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/shared/exit_mpi.f90	2012-07-24 22:10:28 UTC (rev 20542)
@@ -285,6 +285,25 @@
 !-------------------------------------------------------------------------------------------------
 !
 
+  subroutine sum_all_i(sendbuf, recvbuf)
+
+  implicit none
+
+! standard include of the MPI library
+  include 'mpif.h'
+
+  integer :: sendbuf, recvbuf
+  integer :: ier
+
+  call MPI_REDUCE(sendbuf,recvbuf,1,MPI_INTEGER, &
+                  MPI_SUM,0,MPI_COMM_WORLD,ier)
+
+  end subroutine sum_all_i
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
   subroutine sum_all_dp(sendbuf, recvbuf)
 
   implicit none
@@ -343,6 +362,94 @@
 !-------------------------------------------------------------------------------------------------
 !
 
+
+  subroutine recv_singlei(recvbuf, dest, recvtag)
+
+  implicit none
+
+! standard include of the MPI library
+  include 'mpif.h'
+
+  integer :: dest,recvtag
+  integer :: recvbuf
+
+  ! MPI status of messages to be received
+  integer :: msg_status(MPI_STATUS_SIZE)
+  integer :: ier
+
+  call MPI_RECV(recvbuf,1,MPI_INTEGER,dest,recvtag,MPI_COMM_WORLD,msg_status,ier)
+
+  end subroutine recv_singlei
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+
+  subroutine recv_i(recvbuf, recvcount, dest, recvtag)
+
+  implicit none
+
+! standard include of the MPI library
+  include 'mpif.h'
+
+  integer :: dest,recvtag
+  integer :: recvcount
+  integer,dimension(recvcount) :: recvbuf
+
+  ! MPI status of messages to be received
+  integer :: msg_status(MPI_STATUS_SIZE)
+  integer :: ier
+
+  call MPI_RECV(recvbuf,recvcount,MPI_INTEGER,dest,recvtag,MPI_COMM_WORLD,msg_status,ier)
+
+  end subroutine recv_i
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  subroutine send_i(sendbuf, sendcount, dest, sendtag)
+
+  implicit none
+
+! standard include of the MPI library
+  include 'mpif.h'
+
+  !integer sendbuf,sendcount,dest,sendtag
+  integer dest,sendtag
+  integer sendcount
+  integer,dimension(sendcount):: sendbuf
+  integer ier
+
+  call MPI_SEND(sendbuf,sendcount,MPI_INTEGER,dest,sendtag,MPI_COMM_WORLD,ier)
+
+  end subroutine send_i
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  subroutine send_singlei(sendbuf, dest, sendtag)
+
+  implicit none
+
+! standard include of the MPI library
+  include 'mpif.h'
+
+  !integer sendbuf,sendcount,dest,sendtag
+  integer :: dest,sendtag
+  integer :: sendbuf
+  integer :: ier
+
+  call MPI_SEND(sendbuf,1,MPI_INTEGER,dest,sendtag,MPI_COMM_WORLD,ier)
+
+  end subroutine send_singlei
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
   subroutine world_size(size)
 
   implicit none
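
The test routines above also call max_all_i, which does not appear in this hunk;
if it is not already defined elsewhere in exit_mpi.f90, a definition consistent
with the sum_all_i wrapper added above would be:

  subroutine max_all_i(sendbuf, recvbuf)

  implicit none

! standard include of the MPI library
  include 'mpif.h'

  integer :: sendbuf, recvbuf
  integer :: ier

  call MPI_REDUCE(sendbuf,recvbuf,1,MPI_INTEGER, &
                  MPI_MAX,0,MPI_COMM_WORLD,ier)

  end subroutine max_all_i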

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/Makefile.in
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/Makefile.in	2012-07-24 22:09:06 UTC (rev 20541)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/Makefile.in	2012-07-24 22:10:28 UTC (rev 20542)
@@ -131,7 +131,6 @@
 	$O/param_reader.cc.o \
 	$O/spline_routines.shared.o \
 	$O/netlib_specfun_erf.check.o \
-	$O/read_arrays_buffers_solver.mpicheck.o \
 	$O/read_compute_parameters.shared.o \
 	$O/read_parameter_file.shared.o \
 	$O/read_value_parameters.shared.o \

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/assemble_MPI_scalar.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/assemble_MPI_scalar.f90	2012-07-24 22:09:06 UTC (rev 20541)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/assemble_MPI_scalar.f90	2012-07-24 22:10:28 UTC (rev 20542)
@@ -30,10 +30,10 @@
 !----
 
 
-  subroutine assemble_MPI_scalar_ext_mesh(NPROC,NGLOB_AB,array_val, &
-                        num_interfaces_ext_mesh,max_nibool_interfaces_ext_mesh, &
-                        nibool_interfaces_ext_mesh,ibool_interfaces_ext_mesh, &
-                        my_neighbours_ext_mesh)
+  subroutine assemble_MPI_scalar(NPROC,NGLOB_AB,array_val, &
+                        num_interfaces,max_nibool_interfaces, &
+                        nibool_interfaces,ibool_interfaces, &
+                        my_neighbours)
 
 ! blocking send/receive
 
@@ -47,15 +47,15 @@
   ! array to assemble
   real(kind=CUSTOM_REAL), dimension(NGLOB_AB) :: array_val
 
-  integer :: num_interfaces_ext_mesh,max_nibool_interfaces_ext_mesh
-  integer, dimension(num_interfaces_ext_mesh) :: nibool_interfaces_ext_mesh,my_neighbours_ext_mesh
-  integer, dimension(max_nibool_interfaces_ext_mesh,num_interfaces_ext_mesh) :: ibool_interfaces_ext_mesh
+  integer :: num_interfaces,max_nibool_interfaces
+  integer, dimension(num_interfaces) :: nibool_interfaces,my_neighbours
+  integer, dimension(max_nibool_interfaces,num_interfaces) :: ibool_interfaces
 
   ! local parameters
-  real(kind=CUSTOM_REAL), dimension(:,:), allocatable :: buffer_send_scalar_ext_mesh
-  real(kind=CUSTOM_REAL), dimension(:,:), allocatable :: buffer_recv_scalar_ext_mesh
-  integer, dimension(:), allocatable :: request_send_scalar_ext_mesh
-  integer, dimension(:), allocatable :: request_recv_scalar_ext_mesh
+  real(kind=CUSTOM_REAL), dimension(:,:), allocatable :: buffer_send_scalar
+  real(kind=CUSTOM_REAL), dimension(:,:), allocatable :: buffer_recv_scalar
+  integer, dimension(:), allocatable :: request_send_scalar
+  integer, dimension(:), allocatable :: request_recv_scalar
 
 
   integer ipoin,iinterface,ier
@@ -65,66 +65,66 @@
   ! assemble only if more than one partition
   if(NPROC > 1) then
 
-    allocate(buffer_send_scalar_ext_mesh(max_nibool_interfaces_ext_mesh,num_interfaces_ext_mesh),stat=ier)
-    if( ier /= 0 ) stop 'error allocating array buffer_send_scalar_ext_mesh'
-    allocate(buffer_recv_scalar_ext_mesh(max_nibool_interfaces_ext_mesh,num_interfaces_ext_mesh),stat=ier)
-    if( ier /= 0 ) stop 'error allocating array buffer_recv_scalar_ext_mesh'
-    allocate(request_send_scalar_ext_mesh(num_interfaces_ext_mesh),stat=ier)
-    if( ier /= 0 ) stop 'error allocating array request_send_scalar_ext_mesh'
-    allocate(request_recv_scalar_ext_mesh(num_interfaces_ext_mesh),stat=ier)
-    if( ier /= 0 ) stop 'error allocating array request_recv_scalar_ext_mesh'
+    allocate(buffer_send_scalar(max_nibool_interfaces,num_interfaces),stat=ier)
+    if( ier /= 0 ) stop 'error allocating array buffer_send_scalar'
+    allocate(buffer_recv_scalar(max_nibool_interfaces,num_interfaces),stat=ier)
+    if( ier /= 0 ) stop 'error allocating array buffer_recv_scalar'
+    allocate(request_send_scalar(num_interfaces),stat=ier)
+    if( ier /= 0 ) stop 'error allocating array request_send_scalar'
+    allocate(request_recv_scalar(num_interfaces),stat=ier)
+    if( ier /= 0 ) stop 'error allocating array request_recv_scalar'
 
     ! partition border copy into the buffer
-    do iinterface = 1, num_interfaces_ext_mesh
-      do ipoin = 1, nibool_interfaces_ext_mesh(iinterface)
-        buffer_send_scalar_ext_mesh(ipoin,iinterface) = array_val(ibool_interfaces_ext_mesh(ipoin,iinterface))
+    do iinterface = 1, num_interfaces
+      do ipoin = 1, nibool_interfaces(iinterface)
+        buffer_send_scalar(ipoin,iinterface) = array_val(ibool_interfaces(ipoin,iinterface))
       enddo
     enddo
 
     ! send messages
-    do iinterface = 1, num_interfaces_ext_mesh
+    do iinterface = 1, num_interfaces
       ! non-blocking synchronous send request
-      call isend_cr(buffer_send_scalar_ext_mesh(1:nibool_interfaces_ext_mesh(iinterface),iinterface), &
-           nibool_interfaces_ext_mesh(iinterface), &
-           my_neighbours_ext_mesh(iinterface), &
+      call isend_cr(buffer_send_scalar(1:nibool_interfaces(iinterface),iinterface), &
+           nibool_interfaces(iinterface), &
+           my_neighbours(iinterface), &
            itag, &
-           request_send_scalar_ext_mesh(iinterface) &
+           request_send_scalar(iinterface) &
            )
       ! receive request
-      call irecv_cr(buffer_recv_scalar_ext_mesh(1:nibool_interfaces_ext_mesh(iinterface),iinterface), &
-           nibool_interfaces_ext_mesh(iinterface), &
-           my_neighbours_ext_mesh(iinterface), &
+      call irecv_cr(buffer_recv_scalar(1:nibool_interfaces(iinterface),iinterface), &
+           nibool_interfaces(iinterface), &
+           my_neighbours(iinterface), &
            itag, &
-           request_recv_scalar_ext_mesh(iinterface) &
+           request_recv_scalar(iinterface) &
            )
     enddo
 
     ! wait for communications completion (recv)
-    do iinterface = 1, num_interfaces_ext_mesh
-      call wait_req(request_recv_scalar_ext_mesh(iinterface))
+    do iinterface = 1, num_interfaces
+      call wait_req(request_recv_scalar(iinterface))
     enddo
 
     ! adding contributions of neighbours
-    do iinterface = 1, num_interfaces_ext_mesh
-      do ipoin = 1, nibool_interfaces_ext_mesh(iinterface)
-        array_val(ibool_interfaces_ext_mesh(ipoin,iinterface)) = &
-             array_val(ibool_interfaces_ext_mesh(ipoin,iinterface)) + buffer_recv_scalar_ext_mesh(ipoin,iinterface)
+    do iinterface = 1, num_interfaces
+      do ipoin = 1, nibool_interfaces(iinterface)
+        array_val(ibool_interfaces(ipoin,iinterface)) = &
+             array_val(ibool_interfaces(ipoin,iinterface)) + buffer_recv_scalar(ipoin,iinterface)
       enddo
     enddo
 
     ! wait for communications completion (send)
-    do iinterface = 1, num_interfaces_ext_mesh
-      call wait_req(request_send_scalar_ext_mesh(iinterface))
+    do iinterface = 1, num_interfaces
+      call wait_req(request_send_scalar(iinterface))
     enddo
 
-    deallocate(buffer_send_scalar_ext_mesh)
-    deallocate(buffer_recv_scalar_ext_mesh)
-    deallocate(request_send_scalar_ext_mesh)
-    deallocate(request_recv_scalar_ext_mesh)
+    deallocate(buffer_send_scalar)
+    deallocate(buffer_recv_scalar)
+    deallocate(request_send_scalar)
+    deallocate(request_recv_scalar)
 
   endif
 
-  end subroutine assemble_MPI_scalar_ext_mesh
+  end subroutine assemble_MPI_scalar
 
 !
 !-------------------------------------------------------------------------------------------------
@@ -132,12 +132,12 @@
 
 
 
-  subroutine assemble_MPI_scalar_ext_mesh_s(NPROC,NGLOB_AB,array_val, &
-                        buffer_send_scalar_ext_mesh,buffer_recv_scalar_ext_mesh, &
-                        num_interfaces_ext_mesh,max_nibool_interfaces_ext_mesh, &
-                        nibool_interfaces_ext_mesh,ibool_interfaces_ext_mesh, &
-                        my_neighbours_ext_mesh, &
-                        request_send_scalar_ext_mesh,request_recv_scalar_ext_mesh)
+  subroutine assemble_MPI_scalar_s(NPROC,NGLOB_AB,array_val, &
+                        buffer_send_scalar,buffer_recv_scalar, &
+                        num_interfaces,max_nibool_interfaces, &
+                        nibool_interfaces,ibool_interfaces, &
+                        my_neighbours, &
+                        request_send_scalar,request_recv_scalar)
 
 ! non-blocking MPI send
 
@@ -147,18 +147,18 @@
 
   integer :: NPROC
   integer :: NGLOB_AB
-  integer :: num_interfaces_ext_mesh,max_nibool_interfaces_ext_mesh
+  integer :: num_interfaces,max_nibool_interfaces
 
 ! array to send
   real(kind=CUSTOM_REAL), dimension(NGLOB_AB) :: array_val
 
 
-  real(kind=CUSTOM_REAL), dimension(max_nibool_interfaces_ext_mesh,num_interfaces_ext_mesh) :: &
-       buffer_send_scalar_ext_mesh,buffer_recv_scalar_ext_mesh
+  real(kind=CUSTOM_REAL), dimension(max_nibool_interfaces,num_interfaces) :: &
+       buffer_send_scalar,buffer_recv_scalar
 
-  integer, dimension(num_interfaces_ext_mesh) :: nibool_interfaces_ext_mesh,my_neighbours_ext_mesh
-  integer, dimension(max_nibool_interfaces_ext_mesh,num_interfaces_ext_mesh) :: ibool_interfaces_ext_mesh
-  integer, dimension(num_interfaces_ext_mesh) :: request_send_scalar_ext_mesh,request_recv_scalar_ext_mesh
+  integer, dimension(num_interfaces) :: nibool_interfaces,my_neighbours
+  integer, dimension(max_nibool_interfaces,num_interfaces) :: ibool_interfaces
+  integer, dimension(num_interfaces) :: request_send_scalar,request_recv_scalar
 
   integer ipoin,iinterface
 
@@ -166,45 +166,45 @@
   if(NPROC > 1) then
 
     ! partition border copy into the buffer
-    do iinterface = 1, num_interfaces_ext_mesh
-      do ipoin = 1, nibool_interfaces_ext_mesh(iinterface)
-        buffer_send_scalar_ext_mesh(ipoin,iinterface) = &
-          array_val(ibool_interfaces_ext_mesh(ipoin,iinterface))
+    do iinterface = 1, num_interfaces
+      do ipoin = 1, nibool_interfaces(iinterface)
+        buffer_send_scalar(ipoin,iinterface) = &
+          array_val(ibool_interfaces(ipoin,iinterface))
       enddo
     enddo
 
     ! send messages
-    do iinterface = 1, num_interfaces_ext_mesh
+    do iinterface = 1, num_interfaces
       ! non-blocking synchronous send request
-      call isend_cr(buffer_send_scalar_ext_mesh(1:nibool_interfaces_ext_mesh(iinterface),iinterface), &
-           nibool_interfaces_ext_mesh(iinterface), &
-           my_neighbours_ext_mesh(iinterface), &
+      call isend_cr(buffer_send_scalar(1:nibool_interfaces(iinterface),iinterface), &
+           nibool_interfaces(iinterface), &
+           my_neighbours(iinterface), &
            itag, &
-           request_send_scalar_ext_mesh(iinterface) &
+           request_send_scalar(iinterface) &
            )
       ! receive request
-      call irecv_cr(buffer_recv_scalar_ext_mesh(1:nibool_interfaces_ext_mesh(iinterface),iinterface), &
-           nibool_interfaces_ext_mesh(iinterface), &
-           my_neighbours_ext_mesh(iinterface), &
+      call irecv_cr(buffer_recv_scalar(1:nibool_interfaces(iinterface),iinterface), &
+           nibool_interfaces(iinterface), &
+           my_neighbours(iinterface), &
            itag, &
-           request_recv_scalar_ext_mesh(iinterface) &
+           request_recv_scalar(iinterface) &
            )
 
     enddo
 
   endif
 
-  end subroutine assemble_MPI_scalar_ext_mesh_s
+  end subroutine assemble_MPI_scalar_s
 
 !
 !-------------------------------------------------------------------------------------------------
 !
 
-  subroutine assemble_MPI_scalar_ext_mesh_w(NPROC,NGLOB_AB,array_val, &
-                        buffer_recv_scalar_ext_mesh,num_interfaces_ext_mesh, &
-                        max_nibool_interfaces_ext_mesh, &
-                        nibool_interfaces_ext_mesh,ibool_interfaces_ext_mesh, &
-                        request_send_scalar_ext_mesh,request_recv_scalar_ext_mesh)
+  subroutine assemble_MPI_scalar_w(NPROC,NGLOB_AB,array_val, &
+                        buffer_recv_scalar,num_interfaces, &
+                        max_nibool_interfaces, &
+                        nibool_interfaces,ibool_interfaces, &
+                        request_send_scalar,request_recv_scalar)
 
 ! waits for send/receive requests to be completed and assembles contributions
 
@@ -214,17 +214,17 @@
 
   integer :: NPROC
   integer :: NGLOB_AB
-  integer :: num_interfaces_ext_mesh,max_nibool_interfaces_ext_mesh
+  integer :: num_interfaces,max_nibool_interfaces
 ! array to assemble
   real(kind=CUSTOM_REAL), dimension(NGLOB_AB) :: array_val
 
 
-  real(kind=CUSTOM_REAL), dimension(max_nibool_interfaces_ext_mesh,num_interfaces_ext_mesh) :: &
-       buffer_recv_scalar_ext_mesh
+  real(kind=CUSTOM_REAL), dimension(max_nibool_interfaces,num_interfaces) :: &
+       buffer_recv_scalar
 
-  integer, dimension(num_interfaces_ext_mesh) :: nibool_interfaces_ext_mesh
-  integer, dimension(max_nibool_interfaces_ext_mesh,num_interfaces_ext_mesh) :: ibool_interfaces_ext_mesh
-  integer, dimension(num_interfaces_ext_mesh) :: request_send_scalar_ext_mesh,request_recv_scalar_ext_mesh
+  integer, dimension(num_interfaces) :: nibool_interfaces
+  integer, dimension(max_nibool_interfaces,num_interfaces) :: ibool_interfaces
+  integer, dimension(num_interfaces) :: request_send_scalar,request_recv_scalar
 
   integer ipoin,iinterface
 
@@ -232,44 +232,44 @@
   if(NPROC > 1) then
 
     ! wait for communications completion (recv)
-    do iinterface = 1, num_interfaces_ext_mesh
-      call wait_req(request_recv_scalar_ext_mesh(iinterface))
+    do iinterface = 1, num_interfaces
+      call wait_req(request_recv_scalar(iinterface))
     enddo
 
     ! adding contributions of neighbours
-    do iinterface = 1, num_interfaces_ext_mesh
-      do ipoin = 1, nibool_interfaces_ext_mesh(iinterface)
-        array_val(ibool_interfaces_ext_mesh(ipoin,iinterface)) = &
-             array_val(ibool_interfaces_ext_mesh(ipoin,iinterface)) &
-             + buffer_recv_scalar_ext_mesh(ipoin,iinterface)
+    do iinterface = 1, num_interfaces
+      do ipoin = 1, nibool_interfaces(iinterface)
+        array_val(ibool_interfaces(ipoin,iinterface)) = &
+             array_val(ibool_interfaces(ipoin,iinterface)) &
+             + buffer_recv_scalar(ipoin,iinterface)
       enddo
     enddo
 
     ! wait for communications completion (send)
-    do iinterface = 1, num_interfaces_ext_mesh
-      call wait_req(request_send_scalar_ext_mesh(iinterface))
+    do iinterface = 1, num_interfaces
+      call wait_req(request_send_scalar(iinterface))
     enddo
 
   endif
 
-  end subroutine assemble_MPI_scalar_ext_mesh_w
+  end subroutine assemble_MPI_scalar_w
 
 !
 !-------------------------------------------------------------------------------------------------
 !
 
   subroutine assemble_MPI_scalar_send_cuda(NPROC, &
-                                          buffer_send_scalar_ext_mesh,buffer_recv_scalar_ext_mesh, &
-                                          num_interfaces_ext_mesh,max_nibool_interfaces_ext_mesh, &
-                                          nibool_interfaces_ext_mesh, &
-                                          my_neighbours_ext_mesh, &
-                                          request_send_scalar_ext_mesh,request_recv_scalar_ext_mesh, &
+                                          buffer_send_scalar,buffer_recv_scalar, &
+                                          num_interfaces,max_nibool_interfaces, &
+                                          nibool_interfaces, &
+                                          my_neighbours, &
+                                          request_send_scalar,request_recv_scalar, &
                                           FORWARD_OR_ADJOINT)
 
 ! non-blocking MPI send
 
   ! sends data
-  ! note: assembling data already filled into buffer_send_scalar_ext_mesh array
+  ! note: the data to assemble has already been packed into the buffer_send_scalar array
 
   use constants_solver
   use specfem_par,only: Mesh_pointer
@@ -277,13 +277,13 @@
   implicit none
 
   integer :: NPROC
-  integer :: num_interfaces_ext_mesh,max_nibool_interfaces_ext_mesh
+  integer :: num_interfaces,max_nibool_interfaces
 
-  real(kind=CUSTOM_REAL), dimension(max_nibool_interfaces_ext_mesh,num_interfaces_ext_mesh) :: &
-       buffer_send_scalar_ext_mesh,buffer_recv_scalar_ext_mesh
+  real(kind=CUSTOM_REAL), dimension(max_nibool_interfaces,num_interfaces) :: &
+       buffer_send_scalar,buffer_recv_scalar
 
-  integer, dimension(num_interfaces_ext_mesh) :: nibool_interfaces_ext_mesh,my_neighbours_ext_mesh
-  integer, dimension(num_interfaces_ext_mesh) :: request_send_scalar_ext_mesh,request_recv_scalar_ext_mesh
+  integer, dimension(num_interfaces) :: nibool_interfaces,my_neighbours
+  integer, dimension(num_interfaces) :: request_send_scalar,request_recv_scalar
 
   integer :: FORWARD_OR_ADJOINT
 
@@ -296,24 +296,24 @@
     ! preparation of the contribution between partitions using MPI
     ! transfers mpi buffers to CPU
     call transfer_boun_pot_from_device(Mesh_pointer, &
-                                      buffer_send_scalar_ext_mesh, &
+                                      buffer_send_scalar, &
                                       FORWARD_OR_ADJOINT)
 
     ! send messages
-    do iinterface = 1, num_interfaces_ext_mesh
+    do iinterface = 1, num_interfaces
       ! non-blocking synchronous send request
-      call isend_cr(buffer_send_scalar_ext_mesh(1:nibool_interfaces_ext_mesh(iinterface),iinterface), &
-           nibool_interfaces_ext_mesh(iinterface), &
-           my_neighbours_ext_mesh(iinterface), &
+      call isend_cr(buffer_send_scalar(1:nibool_interfaces(iinterface),iinterface), &
+           nibool_interfaces(iinterface), &
+           my_neighbours(iinterface), &
            itag, &
-           request_send_scalar_ext_mesh(iinterface) &
+           request_send_scalar(iinterface) &
            )
       ! receive request
-      call irecv_cr(buffer_recv_scalar_ext_mesh(1:nibool_interfaces_ext_mesh(iinterface),iinterface), &
-           nibool_interfaces_ext_mesh(iinterface), &
-           my_neighbours_ext_mesh(iinterface), &
+      call irecv_cr(buffer_recv_scalar(1:nibool_interfaces(iinterface),iinterface), &
+           nibool_interfaces(iinterface), &
+           my_neighbours(iinterface), &
            itag, &
-           request_recv_scalar_ext_mesh(iinterface) &
+           request_recv_scalar(iinterface) &
            )
 
     enddo
@@ -327,9 +327,9 @@
 !
 
   subroutine assemble_MPI_scalar_write_cuda(NPROC, &
-                                            buffer_recv_scalar_ext_mesh, &
-                                            num_interfaces_ext_mesh,max_nibool_interfaces_ext_mesh, &
-                                            request_send_scalar_ext_mesh,request_recv_scalar_ext_mesh, &
+                                            buffer_recv_scalar, &
+                                            num_interfaces,max_nibool_interfaces, &
+                                            request_send_scalar,request_recv_scalar, &
                                             FORWARD_OR_ADJOINT)
 
 ! waits for send/receive requests to be completed and assembles contributions
@@ -341,11 +341,11 @@
 
   integer :: NPROC
 
-  integer :: num_interfaces_ext_mesh,max_nibool_interfaces_ext_mesh
+  integer :: num_interfaces,max_nibool_interfaces
 
-  real(kind=CUSTOM_REAL), dimension(max_nibool_interfaces_ext_mesh,num_interfaces_ext_mesh) :: &
-       buffer_recv_scalar_ext_mesh
-  integer, dimension(num_interfaces_ext_mesh) :: request_send_scalar_ext_mesh,request_recv_scalar_ext_mesh
+  real(kind=CUSTOM_REAL), dimension(max_nibool_interfaces,num_interfaces) :: &
+       buffer_recv_scalar
+  integer, dimension(num_interfaces) :: request_send_scalar,request_recv_scalar
 
   integer :: FORWARD_OR_ADJOINT
 
@@ -356,27 +356,27 @@
   if(NPROC > 1) then
 
     ! wait for communications completion (recv)
-    do iinterface = 1, num_interfaces_ext_mesh
-      call wait_req(request_recv_scalar_ext_mesh(iinterface))
+    do iinterface = 1, num_interfaces
+      call wait_req(request_recv_scalar(iinterface))
     enddo
 
     ! adding contributions of neighbours
     call transfer_asmbl_pot_to_device(Mesh_pointer, &
-                                    buffer_recv_scalar_ext_mesh, &
+                                    buffer_recv_scalar, &
                                     FORWARD_OR_ADJOINT)
 
     ! note: for CUDA, the neighbour contributions have already been added on the device just above
-    !do iinterface = 1, num_interfaces_ext_mesh
-    !  do ipoin = 1, nibool_interfaces_ext_mesh(iinterface)
-    !    array_val(ibool_interfaces_ext_mesh(ipoin,iinterface)) = &
-    !         array_val(ibool_interfaces_ext_mesh(ipoin,iinterface)) &
-    !         + buffer_recv_scalar_ext_mesh(ipoin,iinterface)
+    !do iinterface = 1, num_interfaces
+    !  do ipoin = 1, nibool_interfaces(iinterface)
+    !    array_val(ibool_interfaces(ipoin,iinterface)) = &
+    !         array_val(ibool_interfaces(ipoin,iinterface)) &
+    !         + buffer_recv_scalar(ipoin,iinterface)
     !  enddo
     !enddo
 
     ! wait for communications completion (send)
-    do iinterface = 1, num_interfaces_ext_mesh
-      call wait_req(request_send_scalar_ext_mesh(iinterface))
+    do iinterface = 1, num_interfaces
+      call wait_req(request_send_scalar(iinterface))
     enddo
 
   endif

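The core of the assembly stages above is the indirect add over one MPI interface; stripped of the buffer allocation and request handling, that step reduces to the following sketch (the routine and argument names here are placeholders, not code from this commit):

  subroutine add_interface_contributions(nglob,npts,ibool_if,array_val,buffer_recv)

  implicit none

  integer :: nglob,npts
  integer, dimension(npts) :: ibool_if    ! interface point -> global point number
  real, dimension(nglob) :: array_val     ! field being summed across partitions
  real, dimension(npts) :: buffer_recv    ! values received from one neighbour

  integer :: ipoin

  ! every global point shared with the neighbour accumulates its partial sum
  do ipoin = 1,npts
    array_val(ibool_if(ipoin)) = array_val(ibool_if(ipoin)) + buffer_recv(ipoin)
  enddo

  end subroutine add_interface_contributions
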
Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/assemble_MPI_vector.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/assemble_MPI_vector.f90	2012-07-24 22:09:06 UTC (rev 20541)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/assemble_MPI_vector.f90	2012-07-24 22:10:28 UTC (rev 20542)
@@ -31,13 +31,13 @@
 
 ! non-blocking routines
 
-  subroutine assemble_MPI_vector_ext_mesh_s(NPROC,NGLOB_AB, &
+  subroutine assemble_MPI_vector_s(NPROC,NGLOB_AB, &
                                            array_val, &
-                                           buffer_send_vector_ext_mesh,buffer_recv_vector_ext_mesh, &
-                                           num_interfaces_ext_mesh,max_nibool_interfaces_ext_mesh, &
-                                           nibool_interfaces_ext_mesh,ibool_interfaces_ext_mesh, &
-                                           my_neighbours_ext_mesh, &
-                                           request_send_vector_ext_mesh,request_recv_vector_ext_mesh)
+                                           buffer_send_vector,buffer_recv_vector, &
+                                           num_interfaces,max_nibool_interfaces, &
+                                           nibool_interfaces,ibool_interfaces, &
+                                           my_neighbours, &
+                                           request_send_vector,request_recv_vector)
 
 ! sends data
 
@@ -51,14 +51,14 @@
   ! array to assemble
   real(kind=CUSTOM_REAL), dimension(NDIM,NGLOB_AB) :: array_val
 
-  integer :: num_interfaces_ext_mesh,max_nibool_interfaces_ext_mesh
+  integer :: num_interfaces,max_nibool_interfaces
 
-  real(kind=CUSTOM_REAL), dimension(NDIM,max_nibool_interfaces_ext_mesh,num_interfaces_ext_mesh) :: &
-       buffer_send_vector_ext_mesh,buffer_recv_vector_ext_mesh
+  real(kind=CUSTOM_REAL), dimension(NDIM,max_nibool_interfaces,num_interfaces) :: &
+       buffer_send_vector,buffer_recv_vector
 
-  integer, dimension(num_interfaces_ext_mesh) :: nibool_interfaces_ext_mesh,my_neighbours_ext_mesh
-  integer, dimension(max_nibool_interfaces_ext_mesh,num_interfaces_ext_mesh) :: ibool_interfaces_ext_mesh
-  integer, dimension(num_interfaces_ext_mesh) :: request_send_vector_ext_mesh,request_recv_vector_ext_mesh
+  integer, dimension(num_interfaces) :: nibool_interfaces,my_neighbours
+  integer, dimension(max_nibool_interfaces,num_interfaces) :: ibool_interfaces
+  integer, dimension(num_interfaces) :: request_send_vector,request_recv_vector
 
   integer ipoin,iinterface
 
@@ -68,32 +68,32 @@
   if(NPROC > 1) then
 
      ! partition border copy into the buffer
-     do iinterface = 1, num_interfaces_ext_mesh
-        do ipoin = 1, nibool_interfaces_ext_mesh(iinterface)
-           buffer_send_vector_ext_mesh(:,ipoin,iinterface) = &
-                array_val(:,ibool_interfaces_ext_mesh(ipoin,iinterface))
+     do iinterface = 1, num_interfaces
+        do ipoin = 1, nibool_interfaces(iinterface)
+           buffer_send_vector(:,ipoin,iinterface) = &
+                array_val(:,ibool_interfaces(ipoin,iinterface))
         enddo
      enddo
 
      ! send messages
-     do iinterface = 1, num_interfaces_ext_mesh
-        call isend_cr(buffer_send_vector_ext_mesh(1,1,iinterface), &
-             NDIM*nibool_interfaces_ext_mesh(iinterface), &
-             my_neighbours_ext_mesh(iinterface), &
+     do iinterface = 1, num_interfaces
+        call isend_cr(buffer_send_vector(1,1,iinterface), &
+             NDIM*nibool_interfaces(iinterface), &
+             my_neighbours(iinterface), &
              itag, &
-             request_send_vector_ext_mesh(iinterface) &
+             request_send_vector(iinterface) &
              )
-        call irecv_cr(buffer_recv_vector_ext_mesh(1,1,iinterface), &
-             NDIM*nibool_interfaces_ext_mesh(iinterface), &
-             my_neighbours_ext_mesh(iinterface), &
+        call irecv_cr(buffer_recv_vector(1,1,iinterface), &
+             NDIM*nibool_interfaces(iinterface), &
+             my_neighbours(iinterface), &
              itag, &
-             request_recv_vector_ext_mesh(iinterface) &
+             request_recv_vector(iinterface) &
              )
      enddo
 
   endif
 
-  end subroutine assemble_MPI_vector_ext_mesh_s
+  end subroutine assemble_MPI_vector_s
 
 !
 !-------------------------------------------------------------------------------------------------
@@ -107,15 +107,15 @@
 
 
   subroutine assemble_MPI_vector_send_cuda(NPROC, &
-                                          buffer_send_vector_ext_mesh,buffer_recv_vector_ext_mesh, &
-                                          num_interfaces_ext_mesh,max_nibool_interfaces_ext_mesh, &
-                                          nibool_interfaces_ext_mesh, &
-                                          my_neighbours_ext_mesh, &
-                                          request_send_vector_ext_mesh,request_recv_vector_ext_mesh,&
+                                          buffer_send_vector,buffer_recv_vector, &
+                                          num_interfaces,max_nibool_interfaces, &
+                                          nibool_interfaces, &
+                                          my_neighbours, &
+                                          request_send_vector,request_recv_vector,&
                                           IREGION,FORWARD_OR_ADJOINT)
 
   ! sends data
-  ! note: array to assemble already filled into buffer_send_vector_ext_mesh array
+  ! note: the array to assemble has already been packed into the buffer_send_vector array
   use constants_solver
   use specfem_par,only: Mesh_pointer
 
@@ -123,13 +123,13 @@
 
   integer :: NPROC
 
-  integer :: num_interfaces_ext_mesh,max_nibool_interfaces_ext_mesh
+  integer :: num_interfaces,max_nibool_interfaces
 
-  real(kind=CUSTOM_REAL), dimension(NDIM,max_nibool_interfaces_ext_mesh,num_interfaces_ext_mesh) :: &
-       buffer_send_vector_ext_mesh,buffer_recv_vector_ext_mesh
+  real(kind=CUSTOM_REAL), dimension(NDIM,max_nibool_interfaces,num_interfaces) :: &
+       buffer_send_vector,buffer_recv_vector
 
-  integer, dimension(num_interfaces_ext_mesh) :: nibool_interfaces_ext_mesh,my_neighbours_ext_mesh
-  integer, dimension(num_interfaces_ext_mesh) :: request_send_vector_ext_mesh,request_recv_vector_ext_mesh
+  integer, dimension(num_interfaces) :: nibool_interfaces,my_neighbours
+  integer, dimension(num_interfaces) :: request_send_vector,request_recv_vector
 
   integer :: IREGION
   integer :: FORWARD_OR_ADJOINT
@@ -143,22 +143,22 @@
     ! preparation of the contribution between partitions using MPI
     ! transfers mpi buffers to CPU
     call transfer_boun_accel_from_device(Mesh_pointer, &
-                                        buffer_send_vector_ext_mesh,&
+                                        buffer_send_vector,&
                                         IREGION,FORWARD_OR_ADJOINT)
 
      ! send messages
-     do iinterface = 1, num_interfaces_ext_mesh
-        call isend_cr(buffer_send_vector_ext_mesh(1,1,iinterface), &
-             NDIM*nibool_interfaces_ext_mesh(iinterface), &
-             my_neighbours_ext_mesh(iinterface), &
+     do iinterface = 1, num_interfaces
+        call isend_cr(buffer_send_vector(1,1,iinterface), &
+             NDIM*nibool_interfaces(iinterface), &
+             my_neighbours(iinterface), &
              itag, &
-             request_send_vector_ext_mesh(iinterface) &
+             request_send_vector(iinterface) &
              )
-        call irecv_cr(buffer_recv_vector_ext_mesh(1,1,iinterface), &
-             NDIM*nibool_interfaces_ext_mesh(iinterface), &
-             my_neighbours_ext_mesh(iinterface), &
+        call irecv_cr(buffer_recv_vector(1,1,iinterface), &
+             NDIM*nibool_interfaces(iinterface), &
+             my_neighbours(iinterface), &
              itag, &
-             request_recv_vector_ext_mesh(iinterface) &
+             request_recv_vector(iinterface) &
              )
      enddo
 
@@ -170,12 +170,12 @@
 !-------------------------------------------------------------------------------------------------
 !
 
-  subroutine assemble_MPI_vector_ext_mesh_w(NPROC,NGLOB_AB, &
+  subroutine assemble_MPI_vector_w(NPROC,NGLOB_AB, &
                                            array_val, &
-                                           buffer_recv_vector_ext_mesh, &
-                                           num_interfaces_ext_mesh,max_nibool_interfaces_ext_mesh, &
-                                           nibool_interfaces_ext_mesh,ibool_interfaces_ext_mesh, &
-                                           request_send_vector_ext_mesh,request_recv_vector_ext_mesh)
+                                           buffer_recv_vector, &
+                                           num_interfaces,max_nibool_interfaces, &
+                                           nibool_interfaces,ibool_interfaces, &
+                                           request_send_vector,request_recv_vector)
 
 ! waits for the data to be received and assembles the contributions
 
@@ -189,14 +189,14 @@
   ! array to assemble
   real(kind=CUSTOM_REAL), dimension(NDIM,NGLOB_AB) :: array_val
 
-  integer :: num_interfaces_ext_mesh,max_nibool_interfaces_ext_mesh
+  integer :: num_interfaces,max_nibool_interfaces
 
-  real(kind=CUSTOM_REAL), dimension(NDIM,max_nibool_interfaces_ext_mesh,num_interfaces_ext_mesh) :: &
-       buffer_recv_vector_ext_mesh
+  real(kind=CUSTOM_REAL), dimension(NDIM,max_nibool_interfaces,num_interfaces) :: &
+       buffer_recv_vector
 
-  integer, dimension(num_interfaces_ext_mesh) :: nibool_interfaces_ext_mesh
-  integer, dimension(max_nibool_interfaces_ext_mesh,num_interfaces_ext_mesh) :: ibool_interfaces_ext_mesh
-  integer, dimension(num_interfaces_ext_mesh) :: request_send_vector_ext_mesh,request_recv_vector_ext_mesh
+  integer, dimension(num_interfaces) :: nibool_interfaces
+  integer, dimension(max_nibool_interfaces,num_interfaces) :: ibool_interfaces
+  integer, dimension(num_interfaces) :: request_send_vector,request_recv_vector
 
   integer ipoin,iinterface
 
@@ -206,27 +206,27 @@
   if(NPROC > 1) then
 
     ! wait for communications completion (recv)
-    do iinterface = 1, num_interfaces_ext_mesh
-      call wait_req(request_recv_vector_ext_mesh(iinterface))
+    do iinterface = 1, num_interfaces
+      call wait_req(request_recv_vector(iinterface))
     enddo
 
     ! adding contributions of neighbours
-    do iinterface = 1, num_interfaces_ext_mesh
-      do ipoin = 1, nibool_interfaces_ext_mesh(iinterface)
-        array_val(:,ibool_interfaces_ext_mesh(ipoin,iinterface)) = &
-             array_val(:,ibool_interfaces_ext_mesh(ipoin,iinterface)) &
-             + buffer_recv_vector_ext_mesh(:,ipoin,iinterface)
+    do iinterface = 1, num_interfaces
+      do ipoin = 1, nibool_interfaces(iinterface)
+        array_val(:,ibool_interfaces(ipoin,iinterface)) = &
+             array_val(:,ibool_interfaces(ipoin,iinterface)) &
+             + buffer_recv_vector(:,ipoin,iinterface)
       enddo
     enddo
 
     ! wait for communications completion (send)
-    do iinterface = 1, num_interfaces_ext_mesh
-      call wait_req(request_send_vector_ext_mesh(iinterface))
+    do iinterface = 1, num_interfaces
+      call wait_req(request_send_vector(iinterface))
     enddo
 
   endif
 
-  end subroutine assemble_MPI_vector_ext_mesh_w
+  end subroutine assemble_MPI_vector_w
 
 
 !
@@ -234,9 +234,9 @@
 !
 
   subroutine assemble_MPI_vector_write_cuda(NPROC, &
-                                            buffer_recv_vector_ext_mesh, &
-                                            num_interfaces_ext_mesh,max_nibool_interfaces_ext_mesh, &
-                                            request_send_vector_ext_mesh,request_recv_vector_ext_mesh, &
+                                            buffer_recv_vector, &
+                                            num_interfaces,max_nibool_interfaces, &
+                                            request_send_vector,request_recv_vector, &
                                             IREGION,FORWARD_OR_ADJOINT )
 
 ! waits for the data to be received and assembles the contributions
@@ -248,11 +248,11 @@
 
   integer :: NPROC
 
-  integer :: num_interfaces_ext_mesh,max_nibool_interfaces_ext_mesh
+  integer :: num_interfaces,max_nibool_interfaces
 
-  real(kind=CUSTOM_REAL), dimension(NDIM,max_nibool_interfaces_ext_mesh,num_interfaces_ext_mesh) :: &
-       buffer_recv_vector_ext_mesh
-  integer, dimension(num_interfaces_ext_mesh) :: request_send_vector_ext_mesh,request_recv_vector_ext_mesh
+  real(kind=CUSTOM_REAL), dimension(NDIM,max_nibool_interfaces,num_interfaces) :: &
+       buffer_recv_vector
+  integer, dimension(num_interfaces) :: request_send_vector,request_recv_vector
 
   integer :: IREGION
   integer :: FORWARD_OR_ADJOINT
@@ -267,26 +267,26 @@
   if(NPROC > 1) then
 
     ! wait for communications completion (recv)
-    do iinterface = 1, num_interfaces_ext_mesh
-      call wait_req(request_recv_vector_ext_mesh(iinterface))
+    do iinterface = 1, num_interfaces
+      call wait_req(request_recv_vector(iinterface))
     enddo
 
     ! adding contributions of neighbours
     call transfer_asmbl_accel_to_device(Mesh_pointer, &
-                                      buffer_recv_vector_ext_mesh, &
+                                      buffer_recv_vector, &
                                       IREGION,FORWARD_OR_ADJOINT)
 
     ! This step is already done above by the call to transfer_asmbl_accel_to_device
-    ! do iinterface = 1, num_interfaces_ext_mesh
-    !   do ipoin = 1, nibool_interfaces_ext_mesh(iinterface)
-    !     array_val(:,ibool_interfaces_ext_mesh(ipoin,iinterface)) = &
-    !          array_val(:,ibool_interfaces_ext_mesh(ipoin,iinterface)) + buffer_recv_vector_ext_mesh(:,ipoin,iinterface)
+    ! do iinterface = 1, num_interfaces
+    !   do ipoin = 1, nibool_interfaces(iinterface)
+    !     array_val(:,ibool_interfaces(ipoin,iinterface)) = &
+    !          array_val(:,ibool_interfaces(ipoin,iinterface)) + buffer_recv_vector(:,ipoin,iinterface)
     !   enddo
     ! enddo
 
     ! wait for communications completion (send)
-    do iinterface = 1, num_interfaces_ext_mesh
-      call wait_req(request_send_vector_ext_mesh(iinterface))
+    do iinterface = 1, num_interfaces
+      call wait_req(request_send_vector(iinterface))
     enddo
 
   endif

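Unlike the scalar routines, the vector routines above transfer all NDIM components of each interface point in a single message, which is why the isend_cr/irecv_cr counts read NDIM*nibool_interfaces(iinterface). A minimal sketch of the corresponding packing step (names are placeholders; NDIM = 3 as in the solver):

  subroutine pack_vector_buffer(nglob,npts,ibool_if,field,buf)

  implicit none

  integer, parameter :: NDIM = 3

  integer :: nglob,npts
  integer, dimension(npts) :: ibool_if
  real, dimension(NDIM,nglob) :: field
  real, dimension(NDIM,npts) :: buf

  integer :: ipoin

  ! gather the NDIM components of each boundary point into a contiguous buffer
  do ipoin = 1,npts
    buf(:,ipoin) = field(:,ibool_if(ipoin))
  enddo

  ! the caller then sends NDIM*npts values starting at buf(1,1)

  end subroutine pack_vector_buffer
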
Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/check_simulation_stability.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/check_simulation_stability.f90	2012-07-24 22:09:06 UTC (rev 20541)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/check_simulation_stability.f90	2012-07-24 22:10:28 UTC (rev 20542)
@@ -85,7 +85,6 @@
   data weekday_name /'Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'/
   integer :: year,mon,day,hr,minutes,timestamp,julian_day_number,day_of_week, &
              timestamp_remote,year_remote,mon_remote,day_remote,hr_remote,minutes_remote,day_of_week_remote
-  integer :: ier
   integer, external :: idaywk
   ! timing
   double precision, external :: wtime

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_acoustic.F90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_acoustic.F90	2012-07-24 22:09:06 UTC (rev 20541)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_acoustic.F90	2012-07-24 22:10:28 UTC (rev 20542)
@@ -180,7 +180,7 @@
 
       if(.NOT. GPU_MODE) then
         ! on CPU
-        call assemble_MPI_scalar_ext_mesh_s(NPROCTOT_VAL,NGLOB_OUTER_CORE, &
+        call assemble_MPI_scalar_s(NPROCTOT_VAL,NGLOB_OUTER_CORE, &
                                 accel_outer_core, &
                                 buffer_send_scalar_outer_core,buffer_recv_scalar_outer_core, &
                                 num_interfaces_outer_core,max_nibool_interfaces_outer_core, &
@@ -203,7 +203,7 @@
       if( SIMULATION_TYPE == 3 ) then
         if(.NOT. GPU_MODE) then
           ! on CPU
-          call assemble_MPI_scalar_ext_mesh_s(NPROCTOT_VAL,NGLOB_OUTER_CORE, &
+          call assemble_MPI_scalar_s(NPROCTOT_VAL,NGLOB_OUTER_CORE, &
                                 b_accel_outer_core, &
                                 b_buffer_send_scalar_outer_core,b_buffer_recv_scalar_outer_core, &
                                 num_interfaces_outer_core,max_nibool_interfaces_outer_core, &
@@ -228,7 +228,7 @@
       ! waits for send/receive requests to be completed and assembles values
       if(.NOT. GPU_MODE) then
         ! on CPU
-        call assemble_MPI_scalar_ext_mesh_w(NPROCTOT_VAL,NGLOB_OUTER_CORE, &
+        call assemble_MPI_scalar_w(NPROCTOT_VAL,NGLOB_OUTER_CORE, &
                                 accel_outer_core, &
                                 buffer_recv_scalar_outer_core,num_interfaces_outer_core,&
                                 max_nibool_interfaces_outer_core, &
@@ -247,7 +247,7 @@
       if( SIMULATION_TYPE == 3 ) then
         if(.NOT. GPU_MODE) then
           ! on CPU
-          call assemble_MPI_scalar_ext_mesh_w(NPROCTOT_VAL,NGLOB_OUTER_CORE, &
+          call assemble_MPI_scalar_w(NPROCTOT_VAL,NGLOB_OUTER_CORE, &
                                 b_accel_outer_core, &
                                 b_buffer_recv_scalar_outer_core,num_interfaces_outer_core,&
                                 max_nibool_interfaces_outer_core, &

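The assemble_MPI_scalar_s / assemble_MPI_scalar_w split used here lets the solver keep computing while the outer-core halo exchange is in flight. The pattern reduces to the following self-contained two-rank sketch, with raw MPI calls standing in for the solver routines (buffer size, tag and all names are illustrative only):

  program demo_overlap

  implicit none

! standard include of the MPI library
  include 'mpif.h'

  integer :: myrank,nproc,other,ier
  integer :: req_send,req_recv
  integer :: istatus(MPI_STATUS_SIZE)
  real :: sendbuf(4),recvbuf(4)

  call MPI_INIT(ier)
  call MPI_COMM_RANK(MPI_COMM_WORLD,myrank,ier)
  call MPI_COMM_SIZE(MPI_COMM_WORLD,nproc,ier)

  if( nproc == 2 ) then
    other = 1 - myrank
    sendbuf(:) = real(myrank + 1)

    ! "_s" phase: post the non-blocking exchange and return immediately
    call MPI_ISEND(sendbuf,4,MPI_REAL,other,7,MPI_COMM_WORLD,req_send,ier)
    call MPI_IRECV(recvbuf,4,MPI_REAL,other,7,MPI_COMM_WORLD,req_recv,ier)

    ! ... inner-element work that needs no halo data would run here ...

    ! "_w" phase: wait for the receive, then add the neighbour contribution
    call MPI_WAIT(req_recv,istatus,ier)
    sendbuf(:) = sendbuf(:) + recvbuf(:)
    call MPI_WAIT(req_send,istatus,ier)
  endif

  call MPI_FINALIZE(ier)

  end program demo_overlap
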
Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_elastic.F90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_elastic.F90	2012-07-24 22:09:06 UTC (rev 20541)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_elastic.F90	2012-07-24 22:10:28 UTC (rev 20542)
@@ -331,7 +331,7 @@
         ! on CPU
         ! sends accel values to corresponding MPI interface neighbors
         ! crust mantle
-        call assemble_MPI_vector_ext_mesh_s(NPROCTOT_VAL,NGLOB_CRUST_MANTLE, &
+        call assemble_MPI_vector_s(NPROCTOT_VAL,NGLOB_CRUST_MANTLE, &
                       accel_crust_mantle, &
                       buffer_send_vector_crust_mantle,buffer_recv_vector_crust_mantle, &
                       num_interfaces_crust_mantle,max_nibool_interfaces_crust_mantle, &
@@ -339,7 +339,7 @@
                       my_neighbours_crust_mantle, &
                       request_send_vector_crust_mantle,request_recv_vector_crust_mantle)
         ! inner core
-        call assemble_MPI_vector_ext_mesh_s(NPROCTOT_VAL,NGLOB_INNER_CORE, &
+        call assemble_MPI_vector_s(NPROCTOT_VAL,NGLOB_INNER_CORE, &
                       accel_inner_core, &
                       buffer_send_vector_inner_core,buffer_recv_vector_inner_core, &
                       num_interfaces_inner_core,max_nibool_interfaces_inner_core, &
@@ -374,7 +374,7 @@
           ! on CPU
           ! sends accel values to corresponding MPI interface neighbors
           ! crust mantle
-          call assemble_MPI_vector_ext_mesh_s(NPROCTOT_VAL,NGLOB_CRUST_MANTLE, &
+          call assemble_MPI_vector_s(NPROCTOT_VAL,NGLOB_CRUST_MANTLE, &
                         b_accel_crust_mantle, &
                         b_buffer_send_vector_crust_mantle,b_buffer_recv_vector_crust_mantle, &
                         num_interfaces_crust_mantle,max_nibool_interfaces_crust_mantle, &
@@ -382,7 +382,7 @@
                         my_neighbours_crust_mantle, &
                         b_request_send_vector_crust_mantle,b_request_recv_vector_crust_mantle)
           ! inner core
-          call assemble_MPI_vector_ext_mesh_s(NPROCTOT_VAL,NGLOB_INNER_CORE, &
+          call assemble_MPI_vector_s(NPROCTOT_VAL,NGLOB_INNER_CORE, &
                         b_accel_inner_core, &
                         b_buffer_send_vector_inner_core,b_buffer_recv_vector_inner_core, &
                         num_interfaces_inner_core,max_nibool_interfaces_inner_core, &
@@ -417,14 +417,14 @@
       if(.NOT. GPU_MODE) then
         ! on CPU
         ! crust mantle
-        call assemble_MPI_vector_ext_mesh_w(NPROCTOT_VAL,NGLOB_CRUST_MANTLE, &
+        call assemble_MPI_vector_w(NPROCTOT_VAL,NGLOB_CRUST_MANTLE, &
                               accel_crust_mantle, &
                               buffer_recv_vector_crust_mantle,num_interfaces_crust_mantle,&
                               max_nibool_interfaces_crust_mantle, &
                               nibool_interfaces_crust_mantle,ibool_interfaces_crust_mantle, &
                               request_send_vector_crust_mantle,request_recv_vector_crust_mantle)
         ! inner core
-        call assemble_MPI_vector_ext_mesh_w(NPROCTOT_VAL,NGLOB_INNER_CORE, &
+        call assemble_MPI_vector_w(NPROCTOT_VAL,NGLOB_INNER_CORE, &
                               accel_inner_core, &
                               buffer_recv_vector_inner_core,num_interfaces_inner_core,&
                               max_nibool_interfaces_inner_core, &
@@ -455,14 +455,14 @@
         if(.NOT. GPU_MODE) then
           ! on CPU
           ! crust mantle
-          call assemble_MPI_vector_ext_mesh_w(NPROCTOT_VAL,NGLOB_CRUST_MANTLE, &
+          call assemble_MPI_vector_w(NPROCTOT_VAL,NGLOB_CRUST_MANTLE, &
                               b_accel_crust_mantle, &
                               b_buffer_recv_vector_crust_mantle,num_interfaces_crust_mantle,&
                               max_nibool_interfaces_crust_mantle, &
                               nibool_interfaces_crust_mantle,ibool_interfaces_crust_mantle, &
                               b_request_send_vector_crust_mantle,b_request_recv_vector_crust_mantle)
           ! inner core
-          call assemble_MPI_vector_ext_mesh_w(NPROCTOT_VAL,NGLOB_INNER_CORE, &
+          call assemble_MPI_vector_w(NPROCTOT_VAL,NGLOB_INNER_CORE, &
                               b_accel_inner_core, &
                               b_buffer_recv_vector_inner_core,num_interfaces_inner_core,&
                               max_nibool_interfaces_inner_core, &

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/prepare_timerun.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/prepare_timerun.f90	2012-07-24 22:09:06 UTC (rev 20541)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/prepare_timerun.f90	2012-07-24 22:10:28 UTC (rev 20542)
@@ -244,7 +244,7 @@
 
   ! ocean load
   if (OCEANS_VAL) then
-    call assemble_MPI_scalar_ext_mesh(NPROCTOT_VAL,NGLOB_CRUST_MANTLE, &
+    call assemble_MPI_scalar(NPROCTOT_VAL,NGLOB_CRUST_MANTLE, &
                         rmass_ocean_load, &
                         num_interfaces_crust_mantle,max_nibool_interfaces_crust_mantle, &
                         nibool_interfaces_crust_mantle,ibool_interfaces_crust_mantle,&
@@ -253,34 +253,34 @@
 
   ! crust and mantle
   if(NCHUNKS_VAL /= 6 .and. ABSORBING_CONDITIONS) then
-     call assemble_MPI_scalar_ext_mesh(NPROCTOT_VAL,NGLOB_CRUST_MANTLE, &
+     call assemble_MPI_scalar(NPROCTOT_VAL,NGLOB_CRUST_MANTLE, &
                            rmassx_crust_mantle, &
                            num_interfaces_crust_mantle,max_nibool_interfaces_crust_mantle, &
                            nibool_interfaces_crust_mantle,ibool_interfaces_crust_mantle,&
                            my_neighbours_crust_mantle)
 
-     call assemble_MPI_scalar_ext_mesh(NPROCTOT_VAL,NGLOB_CRUST_MANTLE, &
+     call assemble_MPI_scalar(NPROCTOT_VAL,NGLOB_CRUST_MANTLE, &
                            rmassy_crust_mantle, &
                            num_interfaces_crust_mantle,max_nibool_interfaces_crust_mantle, &
                            nibool_interfaces_crust_mantle,ibool_interfaces_crust_mantle,&
                            my_neighbours_crust_mantle)
   endif
 
-  call assemble_MPI_scalar_ext_mesh(NPROCTOT_VAL,NGLOB_CRUST_MANTLE, &
+  call assemble_MPI_scalar(NPROCTOT_VAL,NGLOB_CRUST_MANTLE, &
                         rmassz_crust_mantle, &
                         num_interfaces_crust_mantle,max_nibool_interfaces_crust_mantle, &
                         nibool_interfaces_crust_mantle,ibool_interfaces_crust_mantle,&
                         my_neighbours_crust_mantle)
 
   ! outer core
-  call assemble_MPI_scalar_ext_mesh(NPROCTOT_VAL,NGLOB_OUTER_CORE, &
+  call assemble_MPI_scalar(NPROCTOT_VAL,NGLOB_OUTER_CORE, &
                         rmass_outer_core, &
                         num_interfaces_outer_core,max_nibool_interfaces_outer_core, &
                         nibool_interfaces_outer_core,ibool_interfaces_outer_core,&
                         my_neighbours_outer_core)
 
   ! inner core
-  call assemble_MPI_scalar_ext_mesh(NPROCTOT_VAL,NGLOB_INNER_CORE, &
+  call assemble_MPI_scalar(NPROCTOT_VAL,NGLOB_INNER_CORE, &
                         rmass_inner_core, &
                         num_interfaces_inner_core,max_nibool_interfaces_inner_core, &
                         nibool_interfaces_inner_core,ibool_interfaces_inner_core,&

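The calls above assemble the diagonal mass matrices once during setup, so that afterwards every shared global point carries the complete summed mass on each slice that owns it. One common follow-up, sketched here only as an illustration (the routine name is a placeholder and this step is not part of this diff), is to invert the assembled diagonal once so the time loop can multiply instead of divide:

  subroutine invert_assembled_mass(nglob,rmass)

  implicit none

  integer :: nglob
  real, dimension(nglob) :: rmass

  integer :: iglob

  ! safe only after assembly: each entry now holds the full diagonal mass
  do iglob = 1,nglob
    rmass(iglob) = 1. / rmass(iglob)
  enddo

  end subroutine invert_assembled_mass
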
Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/read_arrays_buffers_solver.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/read_arrays_buffers_solver.f90	2012-07-24 22:09:06 UTC (rev 20541)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/read_arrays_buffers_solver.f90	2012-07-24 22:10:28 UTC (rev 20542)
@@ -25,313 +25,315 @@
 !
 !=====================================================================
 
-  subroutine read_arrays_buffers_solver(iregion_code,myrank, &
-                               iboolleft_xi,iboolright_xi,iboolleft_eta,iboolright_eta, &
-                               npoin2D_xi,npoin2D_eta, &
-                               iprocfrom_faces,iprocto_faces,imsg_type, &
-                               iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
-                               iboolfaces,npoin2D_faces,iboolcorner, &
-                               NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX,NGLOB2DMAX_XY,NGLOB1D_RADIAL, &
-                               NUMMSGS_FACES,NCORNERSCHUNKS,NPROCTOT,NPROC_XI,NPROC_ETA,LOCAL_PATH,NCHUNKS)
-
-  implicit none
-
-! standard include of the MPI library
-  include 'mpif.h'
-
-  include "constants.h"
-
-  integer iregion_code,myrank,NCHUNKS,ier
-
-  integer, dimension(NB_SQUARE_EDGES_ONEDIR) :: npoin2D_xi,npoin2D_eta
-  integer NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX,NGLOB2DMAX_XY,NGLOB1D_RADIAL
-  integer NUMMSGS_FACES,NCORNERSCHUNKS,NPROCTOT,NPROC_XI,NPROC_ETA
-
-  integer npoin2D_faces(NUMFACES_SHARED)
-
-  character(len=150) LOCAL_PATH
-
-  integer, dimension(NGLOB2DMAX_XY,NUMFACES_SHARED) :: iboolfaces
-  integer, dimension(NGLOB1D_RADIAL,NUMCORNERS_SHARED) :: iboolcorner
-  integer, dimension(NGLOB2DMAX_XMIN_XMAX) :: iboolleft_xi,iboolright_xi
-  integer, dimension(NGLOB2DMAX_YMIN_YMAX) :: iboolleft_eta,iboolright_eta
-
-  integer, dimension(NUMMSGS_FACES) :: iprocfrom_faces,iprocto_faces,imsg_type
-
-! allocate array for messages for corners
-  integer, dimension(NCORNERSCHUNKS) :: iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners
-
-  integer npoin2D_xi_mesher,npoin2D_eta_mesher
-  integer npoin1D_corner
-
-  integer imsg,icount_faces,icount_corners
-  integer ipoin1D,ipoin2D
-
-  double precision xdummy,ydummy,zdummy
-
-! processor identification
-  character(len=150) OUTPUT_FILES,prname,filename
-
-! $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
-
-! get the base pathname for output files
-  call get_value_string(OUTPUT_FILES, 'OUTPUT_FILES', 'OUTPUT_FILES')
-
-! create the name for the database of the current slide and region
-  call create_name_database(prname,myrank,iregion_code,LOCAL_PATH)
-
-! read 2-D addressing for summation between slices along xi with MPI
-
-! read iboolleft_xi of this slice
-  open(unit=IIN,file=prname(1:len_trim(prname))//'iboolleft_xi.txt', &
-        status='old',action='read',iostat=ier)
-  if( ier /= 0 ) call exit_MPI(myrank,'error opening iboolleft_xi file')
-
-  npoin2D_xi(1) = 1
- 350  continue
-  read(IIN,*) iboolleft_xi(npoin2D_xi(1)),xdummy,ydummy,zdummy
-  if(iboolleft_xi(npoin2D_xi(1)) > 0) then
-      npoin2D_xi(1) = npoin2D_xi(1) + 1
-      goto 350
-  endif
-! subtract the line that contains the flag after the last point
-  npoin2D_xi(1) = npoin2D_xi(1) - 1
-! read nb of points given by the mesher
-  read(IIN,*) npoin2D_xi_mesher
-  if(npoin2D_xi(1) > NGLOB2DMAX_XMIN_XMAX .or. npoin2D_xi(1) /= npoin2D_xi_mesher) &
-      call exit_MPI(myrank,'incorrect iboolleft_xi read')
-  close(IIN)
-
-! read iboolright_xi of this slice
-  open(unit=IIN,file=prname(1:len_trim(prname))//'iboolright_xi.txt', &
-        status='old',action='read',iostat=ier)
-  if( ier /= 0 ) call exit_MPI(myrank,'error opening iboolright_xi file')
-
-  npoin2D_xi(2) = 1
- 360  continue
-  read(IIN,*) iboolright_xi(npoin2D_xi(2)),xdummy,ydummy,zdummy
-  if(iboolright_xi(npoin2D_xi(2)) > 0) then
-      npoin2D_xi(2) = npoin2D_xi(2) + 1
-      goto 360
-  endif
-! subtract the line that contains the flag after the last point
-  npoin2D_xi(2) = npoin2D_xi(2) - 1
-! read nb of points given by the mesher
-  read(IIN,*) npoin2D_xi_mesher
-  if(npoin2D_xi(2) > NGLOB2DMAX_XMIN_XMAX .or. npoin2D_xi(2) /= npoin2D_xi_mesher) &
-      call exit_MPI(myrank,'incorrect iboolright_xi read')
-  close(IIN)
-
-  if(myrank == 0) then
-    write(IMAIN,*) '  #max of points in MPI buffers along xi npoin2D_xi = ', &
-                                maxval(npoin2D_xi(:))
-    write(IMAIN,*) '  #max of array elements transferred npoin2D_xi*NDIM = ', &
-                                maxval(npoin2D_xi(:))*NDIM
-    write(IMAIN,*)
-  endif
-
-! $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
-
-! read 2-D addressing for summation between slices along eta with MPI
-
-! read iboolleft_eta of this slice
-  open(unit=IIN,file=prname(1:len_trim(prname))//'iboolleft_eta.txt', &
-        status='old',action='read',iostat=ier)
-  if( ier /= 0 ) call exit_MPI(myrank,'error opening iboolleft_eta file')
-
-  npoin2D_eta(1) = 1
- 370  continue
-  read(IIN,*) iboolleft_eta(npoin2D_eta(1)),xdummy,ydummy,zdummy
-  if(iboolleft_eta(npoin2D_eta(1)) > 0) then
-      npoin2D_eta(1) = npoin2D_eta(1) + 1
-      goto 370
-  endif
-! subtract the line that contains the flag after the last point
-  npoin2D_eta(1) = npoin2D_eta(1) - 1
-! read nb of points given by the mesher
-  read(IIN,*) npoin2D_eta_mesher
-  if(npoin2D_eta(1) > NGLOB2DMAX_YMIN_YMAX .or. npoin2D_eta(1) /= npoin2D_eta_mesher) &
-      call exit_MPI(myrank,'incorrect iboolleft_eta read')
-  close(IIN)
-
-! read iboolright_eta of this slice
-  open(unit=IIN,file=prname(1:len_trim(prname))//'iboolright_eta.txt', &
-        status='old',action='read',iostat=ier)
-  if( ier /= 0 ) call exit_MPI(myrank,'error opening iboolright_eta file')
-
-  npoin2D_eta(2) = 1
- 380  continue
-  read(IIN,*) iboolright_eta(npoin2D_eta(2)),xdummy,ydummy,zdummy
-  if(iboolright_eta(npoin2D_eta(2)) > 0) then
-      npoin2D_eta(2) = npoin2D_eta(2) + 1
-      goto 380
-  endif
-! subtract the line that contains the flag after the last point
-  npoin2D_eta(2) = npoin2D_eta(2) - 1
-! read nb of points given by the mesher
-  read(IIN,*) npoin2D_eta_mesher
-  if(npoin2D_eta(2) > NGLOB2DMAX_YMIN_YMAX .or. npoin2D_eta(2) /= npoin2D_eta_mesher) &
-      call exit_MPI(myrank,'incorrect iboolright_eta read')
-  close(IIN)
-
-  if(myrank == 0) then
-    write(IMAIN,*) '  #max of points in MPI buffers along eta npoin2D_eta = ', &
-                                maxval(npoin2D_eta(:))
-    write(IMAIN,*) '  #max of array elements transferred npoin2D_eta*NDIM = ', &
-                                maxval(npoin2D_eta(:))*NDIM
-    write(IMAIN,*)
-  endif
-
-
+!daniel: obsolete...
+!
+!  subroutine read_arrays_buffers_solver(iregion_code,myrank, &
+!                               iboolleft_xi,iboolright_xi,iboolleft_eta,iboolright_eta, &
+!                               npoin2D_xi,npoin2D_eta, &
+!                               iprocfrom_faces,iprocto_faces,imsg_type, &
+!                               iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
+!                               iboolfaces,npoin2D_faces,iboolcorner, &
+!                               NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX,NGLOB2DMAX_XY,NGLOB1D_RADIAL, &
+!                               NUMMSGS_FACES,NCORNERSCHUNKS,NPROCTOT,NPROC_XI,NPROC_ETA,LOCAL_PATH,NCHUNKS)
+!
+!  implicit none
+!
+!! standard include of the MPI library
+!  include 'mpif.h'
+!
+!  include "constants.h"
+!
+!  integer iregion_code,myrank,NCHUNKS,ier
+!
+!  integer, dimension(NB_SQUARE_EDGES_ONEDIR) :: npoin2D_xi,npoin2D_eta
+!  integer NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX,NGLOB2DMAX_XY,NGLOB1D_RADIAL
+!  integer NUMMSGS_FACES,NCORNERSCHUNKS,NPROCTOT,NPROC_XI,NPROC_ETA
+!
+!  integer npoin2D_faces(NUMFACES_SHARED)
+!
+!  character(len=150) LOCAL_PATH
+!
+!  integer, dimension(NGLOB2DMAX_XY,NUMFACES_SHARED) :: iboolfaces
+!  integer, dimension(NGLOB1D_RADIAL,NUMCORNERS_SHARED) :: iboolcorner
+!  integer, dimension(NGLOB2DMAX_XMIN_XMAX) :: iboolleft_xi,iboolright_xi
+!  integer, dimension(NGLOB2DMAX_YMIN_YMAX) :: iboolleft_eta,iboolright_eta
+!
+!  integer, dimension(NUMMSGS_FACES) :: iprocfrom_faces,iprocto_faces,imsg_type
+!
+!! allocate array for messages for corners
+!  integer, dimension(NCORNERSCHUNKS) :: iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners
+!
+!  integer npoin2D_xi_mesher,npoin2D_eta_mesher
+!  integer npoin1D_corner
+!
+!  integer imsg,icount_faces,icount_corners
+!  integer ipoin1D,ipoin2D
+!
+!  double precision xdummy,ydummy,zdummy
+!
+!! processor identification
+!  character(len=150) OUTPUT_FILES,prname,filename
+!
 !! $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
-
-! read chunk messages only if more than one chunk
-  if(NCHUNKS /= 1) then
-
-! read messages to assemble between chunks with MPI
-
-  if(myrank == 0) then
-
-    ! file with the list of processors for each message for faces
-    open(unit=IIN,file=trim(OUTPUT_FILES)//'/list_messages_faces.txt', &
-          status='old',action='read',iostat=ier)
-    if( ier /= 0 ) call exit_MPI(myrank,'error opening list_messages_faces file')
-
-    do imsg = 1,NUMMSGS_FACES
-      read(IIN,*) imsg_type(imsg),iprocfrom_faces(imsg),iprocto_faces(imsg)
-      if      (iprocfrom_faces(imsg) < 0 &
-          .or. iprocto_faces(imsg) < 0 &
-          .or. iprocfrom_faces(imsg) > NPROCTOT-1 &
-          .or. iprocto_faces(imsg) > NPROCTOT-1) &
-        call exit_MPI(myrank,'incorrect chunk faces numbering')
-      if (imsg_type(imsg) < 1 .or. imsg_type(imsg) > 3) &
-        call exit_MPI(myrank,'incorrect message type labeling')
-    enddo
-    close(IIN)
-
-    ! file with the list of processors for each message for corners
-    open(unit=IIN,file=trim(OUTPUT_FILES)//'/list_messages_corners.txt', &
-          status='old',action='read',iostat=ier)
-    if( ier /= 0 ) call exit_MPI(myrank,'error opening list_messages_corners file')
-
-    do imsg = 1,NCORNERSCHUNKS
-      read(IIN,*) iproc_master_corners(imsg),iproc_worker1_corners(imsg), &
-                            iproc_worker2_corners(imsg)
-      if    (iproc_master_corners(imsg) < 0 &
-        .or. iproc_worker1_corners(imsg) < 0 &
-        .or. iproc_worker2_corners(imsg) < 0 &
-        .or. iproc_master_corners(imsg) > NPROCTOT-1 &
-        .or. iproc_worker1_corners(imsg) > NPROCTOT-1 &
-        .or. iproc_worker2_corners(imsg) > NPROCTOT-1) &
-        call exit_MPI(myrank,'incorrect chunk corner numbering')
-    enddo
-    close(IIN)
-
-  endif
-
-! broadcast the information read on the master to the nodes
-  call MPI_BCAST(imsg_type,NUMMSGS_FACES,MPI_INTEGER,0,MPI_COMM_WORLD,ier)
-  call MPI_BCAST(iprocfrom_faces,NUMMSGS_FACES,MPI_INTEGER,0,MPI_COMM_WORLD,ier)
-  call MPI_BCAST(iprocto_faces,NUMMSGS_FACES,MPI_INTEGER,0,MPI_COMM_WORLD,ier)
-
-  call MPI_BCAST(iproc_master_corners,NCORNERSCHUNKS,MPI_INTEGER,0,MPI_COMM_WORLD,ier)
-  call MPI_BCAST(iproc_worker1_corners,NCORNERSCHUNKS,MPI_INTEGER,0,MPI_COMM_WORLD,ier)
-  call MPI_BCAST(iproc_worker2_corners,NCORNERSCHUNKS,MPI_INTEGER,0,MPI_COMM_WORLD,ier)
-  if( ier /= 0 ) call exit_MPI(myrank,'error mpi broadcast')
-
-
-!---- read indirect addressing for each message for faces of the chunks
-!---- a given slice can belong to at most two faces
-  icount_faces = 0
-  do imsg = 1,NUMMSGS_FACES
-    if(myrank == iprocfrom_faces(imsg) .or. myrank == iprocto_faces(imsg)) then
-      icount_faces = icount_faces + 1
-
-      if(icount_faces > NUMFACES_SHARED) then
-        print*,'error ',myrank,' icount_faces: ',icount_faces,'NUMFACES_SHARED:',NUMFACES_SHARED
-        print*,'iregion_code:',iregion_code
-        call exit_MPI(myrank,'more than NUMFACES_SHARED faces for this slice')
-      endif
-      if(icount_faces > 2 .and. (NPROC_XI > 1 .or. NPROC_ETA > 1)) then
-        print*,'error ',myrank,' icount_faces: ',icount_faces,'NPROC_XI:',NPROC_XI,'NPROC_ETA:',NPROC_ETA
-        print*,'iregion_code:',iregion_code
-        call exit_MPI(myrank,'more than two faces for this slice')
-      endif
-
-      ! read file with 2D buffer for faces
-      if(myrank == iprocfrom_faces(imsg)) then
-        write(filename,"('buffer_faces_chunks_sender_msg',i6.6,'.txt')") imsg
-      else if(myrank == iprocto_faces(imsg)) then
-        write(filename,"('buffer_faces_chunks_receiver_msg',i6.6,'.txt')") imsg
-      endif
-
-      open(unit=IIN,file=prname(1:len_trim(prname))//filename,status='old',action='read',iostat=ier)
-      if( ier /= 0 ) call exit_MPI(myrank,'error opening buffer_faces file')
-
-      read(IIN,*) npoin2D_faces(icount_faces)
-      if(npoin2D_faces(icount_faces) > NGLOB2DMAX_XY) then
-        print*,'error ',myrank,' npoin2D_faces: ',npoin2D_faces(icount_faces),icount_faces
-        print*,'iregion_code:',iregion_code
-        call exit_MPI(myrank,'incorrect nb of points in face buffer')
-      endif
-
-      do ipoin2D = 1,npoin2D_faces(icount_faces)
-        read(IIN,*) iboolfaces(ipoin2D,icount_faces),xdummy,ydummy,zdummy
-      enddo
-      close(IIN)
-
-    endif
-  enddo
-
-
-!---- read indirect addressing for each message for corners of the chunks
-!---- a given slice can belong to at most one corner
-  icount_corners = 0
-  do imsg = 1,NCORNERSCHUNKS
-    ! if only two chunks then there is no second worker
-    if(myrank == iproc_master_corners(imsg) .or. &
-         myrank == iproc_worker1_corners(imsg) .or. &
-         (NCHUNKS /= 2 .and. myrank == iproc_worker2_corners(imsg))) then
-
-      icount_corners = icount_corners + 1
-      if(icount_corners>1 .and. (NPROC_XI > 1 .or. NPROC_ETA > 1)) then
-        print*,'error ',myrank,'icount_corners:',icount_corners
-        print*,'iregion_code:',iregion_code
-        call exit_MPI(myrank,'more than one corner for this slice')
-      endif
-      if(icount_corners>4) call exit_MPI(myrank,'more than four corners for this slice')
-
-      ! read file with 1D buffer for corner
-      if(myrank == iproc_master_corners(imsg)) then
-        write(filename,"('buffer_corners_chunks_master_msg',i6.6,'.txt')") imsg
-      else if(myrank == iproc_worker1_corners(imsg)) then
-        write(filename,"('buffer_corners_chunks_worker1_msg',i6.6,'.txt')") imsg
-      else if( NCHUNKS /= 2 .and. myrank == iproc_worker2_corners(imsg)) then
-        write(filename,"('buffer_corners_chunks_worker2_msg',i6.6,'.txt')") imsg
-      endif
-
-      ! matching codes
-      open(unit=IIN,file=prname(1:len_trim(prname))//filename, &
-            status='old',action='read',iostat=ier)
-      if( ier /= 0 ) call exit_MPI(myrank,'error opening buffer_corners_chunks file')
-
-      read(IIN,*) npoin1D_corner
-      if(npoin1D_corner /= NGLOB1D_RADIAL) then
-        print*,'error ',myrank,' npoin1D_corner: ',npoin1D_corner,'NGLOB1D_RADIAL:',NGLOB1D_RADIAL
-        print*,'iregion_code:',iregion_code
-        call exit_MPI(myrank,'incorrect nb of points in corner buffer')
-      endif
-      do ipoin1D = 1,npoin1D_corner
-        read(IIN,*) iboolcorner(ipoin1D,icount_corners),xdummy,ydummy,zdummy
-      enddo
-      close(IIN)
-
-    endif
-
-
-  enddo
-
-  endif
-
-  end subroutine read_arrays_buffers_solver
-
+!
+!! get the base pathname for output files
+!  call get_value_string(OUTPUT_FILES, 'OUTPUT_FILES', 'OUTPUT_FILES')
+!
+!! create the name for the database of the current slice and region
+!  call create_name_database(prname,myrank,iregion_code,LOCAL_PATH)
+!
+!! read 2-D addressing for summation between slices along xi with MPI
+!
+!! read iboolleft_xi of this slice
+!  open(unit=IIN,file=prname(1:len_trim(prname))//'iboolleft_xi.txt', &
+!        status='old',action='read',iostat=ier)
+!  if( ier /= 0 ) call exit_MPI(myrank,'error opening iboolleft_xi file')
+!
+!  npoin2D_xi(1) = 1
+! 350  continue
+!  read(IIN,*) iboolleft_xi(npoin2D_xi(1)),xdummy,ydummy,zdummy
+!  if(iboolleft_xi(npoin2D_xi(1)) > 0) then
+!      npoin2D_xi(1) = npoin2D_xi(1) + 1
+!      goto 350
+!  endif
+!! subtract the line that contains the flag after the last point
+!  npoin2D_xi(1) = npoin2D_xi(1) - 1
+!! read nb of points given by the mesher
+!  read(IIN,*) npoin2D_xi_mesher
+!  if(npoin2D_xi(1) > NGLOB2DMAX_XMIN_XMAX .or. npoin2D_xi(1) /= npoin2D_xi_mesher) &
+!      call exit_MPI(myrank,'incorrect iboolleft_xi read')
+!  close(IIN)
+!
+!! read iboolright_xi of this slice
+!  open(unit=IIN,file=prname(1:len_trim(prname))//'iboolright_xi.txt', &
+!        status='old',action='read',iostat=ier)
+!  if( ier /= 0 ) call exit_MPI(myrank,'error opening iboolright_xi file')
+!
+!  npoin2D_xi(2) = 1
+! 360  continue
+!  read(IIN,*) iboolright_xi(npoin2D_xi(2)),xdummy,ydummy,zdummy
+!  if(iboolright_xi(npoin2D_xi(2)) > 0) then
+!      npoin2D_xi(2) = npoin2D_xi(2) + 1
+!      goto 360
+!  endif
+!! subtract the line that contains the flag after the last point
+!  npoin2D_xi(2) = npoin2D_xi(2) - 1
+!! read nb of points given by the mesher
+!  read(IIN,*) npoin2D_xi_mesher
+!  if(npoin2D_xi(2) > NGLOB2DMAX_XMIN_XMAX .or. npoin2D_xi(2) /= npoin2D_xi_mesher) &
+!      call exit_MPI(myrank,'incorrect iboolright_xi read')
+!  close(IIN)
+!
+!  if(myrank == 0) then
+!    write(IMAIN,*) '  #max of points in MPI buffers along xi npoin2D_xi = ', &
+!                                maxval(npoin2D_xi(:))
+!    write(IMAIN,*) '  #max of array elements transferred npoin2D_xi*NDIM = ', &
+!                                maxval(npoin2D_xi(:))*NDIM
+!    write(IMAIN,*)
+!  endif
+!
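
The read loops above (statement labels 350/360/370/380 with goto) implement a sentinel-terminated scan: points are read until a non-positive flag value appears, the flag line is subtracted from the count, and the count is cross-checked against the number the mesher wrote. A minimal sketch of the same pattern in modern Fortran without goto follows; the helper name read_ibool_buffer is hypothetical, while exit_MPI is the error handler used throughout this code:

      ! hedged sketch, not part of this commit: sentinel-terminated buffer read
      subroutine read_ibool_buffer(filename,npoin_max,myrank,ibool_buf,npoin)
        implicit none
        character(len=*), intent(in) :: filename
        integer, intent(in) :: npoin_max,myrank
        integer, intent(out) :: ibool_buf(npoin_max)
        integer, intent(out) :: npoin
        integer :: iunit,ier,ival,npoin_mesher
        double precision :: xdummy,ydummy,zdummy

        open(newunit=iunit,file=filename,status='old',action='read',iostat=ier)
        if (ier /= 0) call exit_MPI(myrank,'error opening buffer file')

        npoin = 0
        do
          read(iunit,*) ival,xdummy,ydummy,zdummy
          if (ival <= 0) exit          ! flag line written after the last point
          npoin = npoin + 1
          if (npoin > npoin_max) call exit_MPI(myrank,'too many points in buffer')
          ibool_buf(npoin) = ival
        enddo

        ! cross-check against the count written by the mesher
        read(iunit,*) npoin_mesher
        if (npoin /= npoin_mesher) call exit_MPI(myrank,'incorrect buffer read')
        close(iunit)
      end subroutine read_ibool_buffer

Checking the bound before storing each value also avoids the latent overflow that a bound test placed only after the loop would permit.
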
+!! $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
+!
+!! read 2-D addressing for summation between slices along eta with MPI
+!
+!! read iboolleft_eta of this slice
+!  open(unit=IIN,file=prname(1:len_trim(prname))//'iboolleft_eta.txt', &
+!        status='old',action='read',iostat=ier)
+!  if( ier /= 0 ) call exit_MPI(myrank,'error opening iboolleft_eta file')
+!
+!  npoin2D_eta(1) = 1
+! 370  continue
+!  read(IIN,*) iboolleft_eta(npoin2D_eta(1)),xdummy,ydummy,zdummy
+!  if(iboolleft_eta(npoin2D_eta(1)) > 0) then
+!      npoin2D_eta(1) = npoin2D_eta(1) + 1
+!      goto 370
+!  endif
+!! subtract the line that contains the flag after the last point
+!  npoin2D_eta(1) = npoin2D_eta(1) - 1
+!! read nb of points given by the mesher
+!  read(IIN,*) npoin2D_eta_mesher
+!  if(npoin2D_eta(1) > NGLOB2DMAX_YMIN_YMAX .or. npoin2D_eta(1) /= npoin2D_eta_mesher) &
+!      call exit_MPI(myrank,'incorrect iboolleft_eta read')
+!  close(IIN)
+!
+!! read iboolright_eta of this slice
+!  open(unit=IIN,file=prname(1:len_trim(prname))//'iboolright_eta.txt', &
+!        status='old',action='read',iostat=ier)
+!  if( ier /= 0 ) call exit_MPI(myrank,'error opening iboolright_eta file')
+!
+!  npoin2D_eta(2) = 1
+! 380  continue
+!  read(IIN,*) iboolright_eta(npoin2D_eta(2)),xdummy,ydummy,zdummy
+!  if(iboolright_eta(npoin2D_eta(2)) > 0) then
+!      npoin2D_eta(2) = npoin2D_eta(2) + 1
+!      goto 380
+!  endif
+!! subtract the line that contains the flag after the last point
+!  npoin2D_eta(2) = npoin2D_eta(2) - 1
+!! read nb of points given by the mesher
+!  read(IIN,*) npoin2D_eta_mesher
+!  if(npoin2D_eta(2) > NGLOB2DMAX_YMIN_YMAX .or. npoin2D_eta(2) /= npoin2D_eta_mesher) &
+!      call exit_MPI(myrank,'incorrect iboolright_eta read')
+!  close(IIN)
+!
+!  if(myrank == 0) then
+!    write(IMAIN,*) '  #max of points in MPI buffers along eta npoin2D_eta = ', &
+!                                maxval(npoin2D_eta(:))
+!    write(IMAIN,*) '  #max of array elements transferred npoin2D_eta*NDIM = ', &
+!                                maxval(npoin2D_eta(:))*NDIM
+!    write(IMAIN,*)
+!  endif
+!
+!
+!!! $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
+!
+!! read chunk messages only if more than one chunk
+!  if(NCHUNKS /= 1) then
+!
+!! read messages to assemble between chunks with MPI
+!
+!  if(myrank == 0) then
+!
+!    ! file with the list of processors for each message for faces
+!    open(unit=IIN,file=trim(OUTPUT_FILES)//'/list_messages_faces.txt', &
+!          status='old',action='read',iostat=ier)
+!    if( ier /= 0 ) call exit_MPI(myrank,'error opening list_messages_faces file')
+!
+!    do imsg = 1,NUMMSGS_FACES
+!      read(IIN,*) imsg_type(imsg),iprocfrom_faces(imsg),iprocto_faces(imsg)
+!      if      (iprocfrom_faces(imsg) < 0 &
+!          .or. iprocto_faces(imsg) < 0 &
+!          .or. iprocfrom_faces(imsg) > NPROCTOT-1 &
+!          .or. iprocto_faces(imsg) > NPROCTOT-1) &
+!        call exit_MPI(myrank,'incorrect chunk faces numbering')
+!      if (imsg_type(imsg) < 1 .or. imsg_type(imsg) > 3) &
+!        call exit_MPI(myrank,'incorrect message type labeling')
+!    enddo
+!    close(IIN)
+!
+!    ! file with the list of processors for each message for corners
+!    open(unit=IIN,file=trim(OUTPUT_FILES)//'/list_messages_corners.txt', &
+!          status='old',action='read',iostat=ier)
+!    if( ier /= 0 ) call exit_MPI(myrank,'error opening list_messages_corners file')
+!
+!    do imsg = 1,NCORNERSCHUNKS
+!      read(IIN,*) iproc_master_corners(imsg),iproc_worker1_corners(imsg), &
+!                            iproc_worker2_corners(imsg)
+!      if    (iproc_master_corners(imsg) < 0 &
+!        .or. iproc_worker1_corners(imsg) < 0 &
+!        .or. iproc_worker2_corners(imsg) < 0 &
+!        .or. iproc_master_corners(imsg) > NPROCTOT-1 &
+!        .or. iproc_worker1_corners(imsg) > NPROCTOT-1 &
+!        .or. iproc_worker2_corners(imsg) > NPROCTOT-1) &
+!        call exit_MPI(myrank,'incorrect chunk corner numbering')
+!    enddo
+!    close(IIN)
+!
+!  endif
+!
+!! broadcast the information read on the master to the nodes
+!  call MPI_BCAST(imsg_type,NUMMSGS_FACES,MPI_INTEGER,0,MPI_COMM_WORLD,ier)
+!  call MPI_BCAST(iprocfrom_faces,NUMMSGS_FACES,MPI_INTEGER,0,MPI_COMM_WORLD,ier)
+!  call MPI_BCAST(iprocto_faces,NUMMSGS_FACES,MPI_INTEGER,0,MPI_COMM_WORLD,ier)
+!
+!  call MPI_BCAST(iproc_master_corners,NCORNERSCHUNKS,MPI_INTEGER,0,MPI_COMM_WORLD,ier)
+!  call MPI_BCAST(iproc_worker1_corners,NCORNERSCHUNKS,MPI_INTEGER,0,MPI_COMM_WORLD,ier)
+!  call MPI_BCAST(iproc_worker2_corners,NCORNERSCHUNKS,MPI_INTEGER,0,MPI_COMM_WORLD,ier)
+!  if( ier /= 0 ) call exit_MPI(myrank,'error mpi broadcast')
+!
+!
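
The broadcasts above follow the usual master-reads-then-broadcasts idiom: only rank 0 opens the small routing-table files, and MPI_BCAST distributes the results, sparing the filesystem NPROCTOT redundant opens. Note that each successive MPI_BCAST overwrites ier, so a single test after the last call only reflects that call. A minimal self-contained sketch of the idiom, with a per-call check and a stand-in for the file read:

      ! hedged sketch, not part of this commit: master reads, everyone receives
      program bcast_routing_tables
        use mpi
        implicit none
        integer, parameter :: NUMMSGS = 4
        integer :: imsg_type(NUMMSGS)
        integer :: myrank,ier

        call MPI_INIT(ier)
        call MPI_COMM_RANK(MPI_COMM_WORLD,myrank,ier)

        ! only the master touches the file (stand-in values here)
        if (myrank == 0) imsg_type(:) = 1

        ! every rank then holds the same copy
        call MPI_BCAST(imsg_type,NUMMSGS,MPI_INTEGER,0,MPI_COMM_WORLD,ier)
        if (ier /= MPI_SUCCESS) stop 'error in MPI_BCAST'

        call MPI_FINALIZE(ier)
      end program bcast_routing_tables
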
+!!---- read indirect addressing for each message for faces of the chunks
+!!---- a given slice can belong to at most two faces
+!  icount_faces = 0
+!  do imsg = 1,NUMMSGS_FACES
+!    if(myrank == iprocfrom_faces(imsg) .or. myrank == iprocto_faces(imsg)) then
+!      icount_faces = icount_faces + 1
+!
+!      if(icount_faces > NUMFACES_SHARED) then
+!        print*,'error ',myrank,' icount_faces: ',icount_faces,'NUMFACES_SHARED:',NUMFACES_SHARED
+!        print*,'iregion_code:',iregion_code
+!        call exit_MPI(myrank,'more than NUMFACES_SHARED faces for this slice')
+!      endif
+!      if(icount_faces > 2 .and. (NPROC_XI > 1 .or. NPROC_ETA > 1)) then
+!        print*,'error ',myrank,' icount_faces: ',icount_faces,'NPROC_XI:',NPROC_XI,'NPROC_ETA:',NPROC_ETA
+!        print*,'iregion_code:',iregion_code
+!        call exit_MPI(myrank,'more than two faces for this slice')
+!      endif
+!
+!      ! read file with 2D buffer for faces
+!      if(myrank == iprocfrom_faces(imsg)) then
+!        write(filename,"('buffer_faces_chunks_sender_msg',i6.6,'.txt')") imsg
+!      else if(myrank == iprocto_faces(imsg)) then
+!        write(filename,"('buffer_faces_chunks_receiver_msg',i6.6,'.txt')") imsg
+!      endif
+!
+!      open(unit=IIN,file=prname(1:len_trim(prname))//filename,status='old',action='read',iostat=ier)
+!      if( ier /= 0 ) call exit_MPI(myrank,'error opening buffer_faces file')
+!
+!      read(IIN,*) npoin2D_faces(icount_faces)
+!      if(npoin2D_faces(icount_faces) > NGLOB2DMAX_XY) then
+!        print*,'error ',myrank,' npoin2D_faces: ',npoin2D_faces(icount_faces),icount_faces
+!        print*,'iregion_code:',iregion_code
+!        call exit_MPI(myrank,'incorrect nb of points in face buffer')
+!      endif
+!
+!      do ipoin2D = 1,npoin2D_faces(icount_faces)
+!        read(IIN,*) iboolfaces(ipoin2D,icount_faces),xdummy,ydummy,zdummy
+!      enddo
+!      close(IIN)
+!
+!    endif
+!  enddo
+!
+!
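
The iboolfaces array filled above is indirect addressing: entry k of a face buffer stores the global point number of the k-th shared point, so assembling a field across a chunk face amounts to a gather into a contiguous send buffer and a scatter-add of the received one. A minimal sketch under those assumptions (single-precision field, MPI exchange elided; all names hypothetical, not this commit's code):

      ! hedged sketch: gather/scatter-add assembly over one chunk face
      subroutine assemble_one_face(array_val,nglob,iboolface,npoin2D, &
                                   buffer_send,buffer_recv)
        implicit none
        integer, intent(in) :: nglob,npoin2D
        integer, intent(in) :: iboolface(npoin2D)
        real, intent(inout) :: array_val(nglob)
        real, intent(inout) :: buffer_send(npoin2D),buffer_recv(npoin2D)
        integer :: ipoin2D

        ! gather: face point k lives at global index iboolface(k)
        do ipoin2D = 1,npoin2D
          buffer_send(ipoin2D) = array_val(iboolface(ipoin2D))
        enddo

        ! (exchange buffer_send/buffer_recv with the matching slice here,
        !  e.g. via MPI_SENDRECV; omitted to keep the sketch short)

        ! scatter-add: sum the neighbour's contribution onto shared points
        do ipoin2D = 1,npoin2D
          array_val(iboolface(ipoin2D)) = &
              array_val(iboolface(ipoin2D)) + buffer_recv(ipoin2D)
        enddo
      end subroutine assemble_one_face
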
+!!---- read indirect addressing for each message for corners of the chunks
+!!---- a given slice can belong to at most one corner
+!  icount_corners = 0
+!  do imsg = 1,NCORNERSCHUNKS
+!    ! if only two chunks then there is no second worker
+!    if(myrank == iproc_master_corners(imsg) .or. &
+!         myrank == iproc_worker1_corners(imsg) .or. &
+!         (NCHUNKS /= 2 .and. myrank == iproc_worker2_corners(imsg))) then
+!
+!      icount_corners = icount_corners + 1
+!      if(icount_corners>1 .and. (NPROC_XI > 1 .or. NPROC_ETA > 1)) then
+!        print*,'error ',myrank,'icount_corners:',icount_corners
+!        print*,'iregion_code:',iregion_code
+!        call exit_MPI(myrank,'more than one corner for this slice')
+!      endif
+!      if(icount_corners>4) call exit_MPI(myrank,'more than four corners for this slice')
+!
+!      ! read file with 1D buffer for corner
+!      if(myrank == iproc_master_corners(imsg)) then
+!        write(filename,"('buffer_corners_chunks_master_msg',i6.6,'.txt')") imsg
+!      else if(myrank == iproc_worker1_corners(imsg)) then
+!        write(filename,"('buffer_corners_chunks_worker1_msg',i6.6,'.txt')") imsg
+!      else if( NCHUNKS /= 2 .and. myrank == iproc_worker2_corners(imsg)) then
+!        write(filename,"('buffer_corners_chunks_worker2_msg',i6.6,'.txt')") imsg
+!      endif
+!
+!      ! matching codes
+!      open(unit=IIN,file=prname(1:len_trim(prname))//filename, &
+!            status='old',action='read',iostat=ier)
+!      if( ier /= 0 ) call exit_MPI(myrank,'error opening buffer_corners_chunks file')
+!
+!      read(IIN,*) npoin1D_corner
+!      if(npoin1D_corner /= NGLOB1D_RADIAL) then
+!        print*,'error ',myrank,' npoin1D_corner: ',npoin1D_corner,'NGLOB1D_RADIAL:',NGLOB1D_RADIAL
+!        print*,'iregion_code:',iregion_code
+!        call exit_MPI(myrank,'incorrect nb of points in corner buffer')
+!      endif
+!      do ipoin1D = 1,npoin1D_corner
+!        read(IIN,*) iboolcorner(ipoin1D,icount_corners),xdummy,ydummy,zdummy
+!      enddo
+!      close(IIN)
+!
+!    endif
+!
+!
+!  enddo
+!
+!  endif
+!
+!  end subroutine read_arrays_buffers_solver
+!
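
The corner tables read in the subroutine above imply a three-way exchange at each chunk corner: the workers (only one when NCHUNKS == 2) send their NGLOB1D_RADIAL corner points to the master, which accumulates them and returns the assembled values. A rough, hedged sketch of how such an exchange could look; all names here are hypothetical, and this is not the solver's actual assembly routine:

      ! hedged sketch: master/worker accumulation at a chunk corner
      subroutine assemble_corner(buffer_corner,npoin1D,myrank,iproc_master, &
                                 iproc_worker1,iproc_worker2,NCHUNKS)
        use mpi
        implicit none
        integer, intent(in) :: npoin1D,myrank,iproc_master
        integer, intent(in) :: iproc_worker1,iproc_worker2,NCHUNKS
        real, intent(inout) :: buffer_corner(npoin1D)
        real :: buffer_recv(npoin1D)
        integer :: msg_status(MPI_STATUS_SIZE)
        integer :: ier
        integer, parameter :: itag = 100

        if (myrank == iproc_master) then
          ! master receives each worker's corner points and accumulates them
          call MPI_RECV(buffer_recv,npoin1D,MPI_REAL,iproc_worker1,itag, &
                        MPI_COMM_WORLD,msg_status,ier)
          buffer_corner(:) = buffer_corner(:) + buffer_recv(:)
          if (NCHUNKS /= 2) then
            call MPI_RECV(buffer_recv,npoin1D,MPI_REAL,iproc_worker2,itag, &
                          MPI_COMM_WORLD,msg_status,ier)
            buffer_corner(:) = buffer_corner(:) + buffer_recv(:)
          endif
          ! master sends the assembled values back
          call MPI_SEND(buffer_corner,npoin1D,MPI_REAL,iproc_worker1,itag, &
                        MPI_COMM_WORLD,ier)
          if (NCHUNKS /= 2) then
            call MPI_SEND(buffer_corner,npoin1D,MPI_REAL,iproc_worker2,itag, &
                          MPI_COMM_WORLD,ier)
          endif
        else
          ! each worker sends its contribution, then receives the result
          call MPI_SEND(buffer_corner,npoin1D,MPI_REAL,iproc_master,itag, &
                        MPI_COMM_WORLD,ier)
          call MPI_RECV(buffer_corner,npoin1D,MPI_REAL,iproc_master,itag, &
                        MPI_COMM_WORLD,msg_status,ier)
        endif
      end subroutine assemble_corner
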


