[cig-commits] r19725 - in seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src: cuda specfem3D

danielpeter at geodynamics.org danielpeter at geodynamics.org
Sun Mar 4 22:13:17 PST 2012


Author: danielpeter
Date: 2012-03-04 22:13:16 -0800 (Sun, 04 Mar 2012)
New Revision: 19725

Modified:
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/cuda/specfem3D_gpu_cuda_method_stubs.c
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/assemble_MPI_central_cube.f90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/assemble_MPI_central_cube_block.f90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_acoustic.F90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_crust_mantle.f90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_crust_mantle_Dev.F90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_elastic.F90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_inner_core.f90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_inner_core_Dev.F90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_outer_core.f90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_outer_core_Dev.f90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/iterate_time.F90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/noise_tomography.f90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/prepare_timerun.f90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/read_mesh_databases.f90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/setup_sources_receivers.f90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/specfem3D_par.F90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/write_movie_output.f90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/write_movie_volume.f90
Log:
updates routines for central cube debugging

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/cuda/specfem3D_gpu_cuda_method_stubs.c
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/cuda/specfem3D_gpu_cuda_method_stubs.c	2012-03-05 02:50:35 UTC (rev 19724)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/cuda/specfem3D_gpu_cuda_method_stubs.c	2012-03-05 06:13:16 UTC (rev 19725)
@@ -359,6 +359,7 @@
                                         int* NOISE_TOMOGRAPHY,
                                         int* SAVE_FORWARD_f,
                                         int* ABSORBING_CONDITIONS_f,
+          int* OCEANS_f,
                                         int* GRAVITY_f,
                                         int* ROTATION_f,
                                         int* ATTENUATION_f,
@@ -576,6 +577,10 @@
            int* nspec_inner,
            int* NSPEC2D_TOP_IC) {} 
 
+void FC_FUNC_(prepare_oceans_device,
+              PREPARE_OCEANS_DEVICE)(long* Mesh_pointer_f,
+             realw* h_rmass_ocean_load) {} 
+
 void FC_FUNC_(prepare_fields_elastic_device,
               PREPARE_FIELDS_ELASTIC_DEVICE)(long* Mesh_pointer_f,
                                              int* size,
@@ -648,15 +653,6 @@
 void FC_FUNC_(transfer_fields_oc_to_device,
               TRANSFER_FIELDS_OC_TO_DEVICE)(int* size, realw* displ, realw* veloc, realw* accel,long* Mesh_pointer_f) {} 
 
-void FC_FUNC_(transfer_coupling_fields_fluid_cmb_icb_to_device,
-              TRANSFER_COUPLING_FIELDS_FLUID_CMB_ICB_TO_DEVICE)(int* size_oc, int* size_cm, int* size_ic, realw* displ_cm, realw* displ_ic, realw* accel, long* Mesh_pointer_f) {} 
-
-void FC_FUNC_(transfer_coupling_fields_cmb_icb_fluid_to_device,
-              TRANSFER_COUPLING_FIELDS_CMB_ICB_FLUID_TO_DEVICE)(int* size_oc, int* size_cm, int* size_ic, realw* displ_cm, realw* displ_ic, realw* accel_cm, realw* accel_ic, realw* accel_oc, long* Mesh_pointer_f) {} 
-
-void FC_FUNC_(transfer_coupling_fields_cmb_ocean_to_device,
-        TRANSFER_COUPLING_FIELDS_CMB_OCEAN_TO_DEVICE)(int* size, realw* accel, long* Mesh_pointer_f) {} 
-
 void FC_FUNC_(transfer_b_fields_cm_to_device,
               TRANSFER_FIELDS_B_CM_TO_DEVICE)(int* size, realw* b_displ, realw* b_veloc, realw* b_accel,
                                               long* Mesh_pointer_f) {} 
@@ -669,15 +665,6 @@
               TRANSFER_FIELDS_B_OC_TO_DEVICE)(int* size, realw* b_displ, realw* b_veloc, realw* b_accel,
                                               long* Mesh_pointer_f) {} 
 
-void FC_FUNC_(transfer_coupling_b_fields_fluid_cmb_icb_to_device,
-              TRANSFER_COUPLING_B_FIELDS_FLUID_CMB_ICB_TO_DEVICE)(int* size_oc, int* size_cm, int* size_ic, realw* b_displ_cm, realw* b_displ_ic, realw* b_accel, long* Mesh_pointer_f) {} 
-
-void FC_FUNC_(transfer_coupling_b_fields_cmb_icb_fluid_to_device,
-              TRANSFER_COUPLING_B_FIELDS_CMB_ICB_FLUID_TO_DEVICE)(int* size_oc, int* size_cm, int* size_ic, realw* b_displ_cm, realw* b_displ_ic, realw* b_accel_cm, realw* b_accel_ic, realw* b_accel_oc, long* Mesh_pointer_f) {} 
-
-void FC_FUNC_(transfer_coupling_b_fields_cmb_ocean_to_device,
-        TRANSFER_COUPLING_B_FIELDS_CMB_OCEAN_TO_DEVICE)(int* size, realw* b_accel, long* Mesh_pointer_f) {} 
-
 void FC_FUNC_(transfer_fields_cm_from_device,
               TRANSFER_FIELDS_CM_FROM_DEVICE)(int* size, realw* displ, realw* veloc, realw* accel,long* Mesh_pointer_f) {} 
 
@@ -687,15 +674,6 @@
 void FC_FUNC_(transfer_fields_oc_from_device,
               TRANSFER_FIELDS_OC_FROM_DEVICE)(int* size, realw* displ, realw* veloc, realw* accel,long* Mesh_pointer_f) {} 
 
-void FC_FUNC_(transfer_coupling_fields_fluid_cmb_icb_from_device,
-              TRANSFER_COUPLING_FIELDS_FLUID_CMB_ICB_FROM_DEVICE)(int* size_oc, int* size_cm, int* size_ic, realw* displ_cm, realw* displ_ic, realw* accel, long* Mesh_pointer_f) {} 
-
-void FC_FUNC_(transfer_coupling_fields_cmb_icb_fluid_from_device,
-              TRANSFER_COUPLING_FIELDS_CMB_ICB_FLUID_FROM_DEVICE)(int* size_oc, int* size_cm, int* size_ic, realw* displ_cm, realw* displ_ic, realw* accel_cm, realw* accel_ic, realw* accel_oc, long* Mesh_pointer_f) {} 
-
-void FC_FUNC_(transfer_coupling_fields_cmb_ocean_from_device,
-        TRANSFER_COUPLING_FIELDS_CMB_OCEAN_FROM_DEVICE)(int* size, realw* accel, long* Mesh_pointer_f) {} 
-
 void FC_FUNC_(transfer_b_fields_cm_from_device,
               TRANSFER_B_FIELDS_CM_FROM_DEVICE)(int* size, realw* b_displ, realw* b_veloc, realw* b_accel,
                                                 long* Mesh_pointer_f) {} 
@@ -708,15 +686,6 @@
               TRANSFER_B_FIELDS_OC_FROM_DEVICE)(int* size, realw* b_displ, realw* b_veloc, realw* b_accel,
                                                 long* Mesh_pointer_f) {} 
 
-void FC_FUNC_(transfer_coupling_b_fields_fluid_cmb_icb_from_device,
-              TRANSFER_COUPLING_B_FIELDS_FLUID_CMB_icb_FROM_DEVICE)(int* size_oc, int* size_cm, int* size_ic, realw* b_displ_cm, realw* b_displ_ic, realw* b_accel, long* Mesh_pointer_f) {} 
-
-void FC_FUNC_(transfer_coupling_b_fields_cmb_icb_fluid_from_device,
-              TRANSFER_COUPLING_B_FIELDS_CMB_ICB_FLUID_FROM_DEVICE)(int* size_oc, int* size_cm, int* size_ic, realw* b_displ_cm, realw* b_displ_ic, realw* b_accel_cm, realw* b_accel_ic, realw* b_accel_oc, long* Mesh_pointer_f) {} 
-
-void FC_FUNC_(transfer_coupling_b_fields_cmb_ocean_from_device,
-        TRANSFER_COUPLING_B_FIELDS_CMB_OCEAN_FROM_DEVICE)(int* size, realw* b_accel, long* Mesh_pointer_f) {} 
-
 void FC_FUNC_(transfer_accel_cm_to_device,
               TRANSFER_ACCEL_CM_TO_DEVICE)(int* size, realw* accel,long* Mesh_pointer_f) {} 
 
@@ -745,7 +714,7 @@
               TRANSFER_ACCEL_CM_FROM_DEVICE)(int* size, realw* accel,long* Mesh_pointer_f) {} 
 
 void FC_FUNC_(transfer_b_accel_cm_from_device,
-              TRNASFER_B_ACCEL_CM_FROM_DEVICE)(int* size, realw* b_accel,long* Mesh_pointer_f) {} 
+              TRANSFER_B_ACCEL_CM_FROM_DEVICE)(int* size, realw* b_accel,long* Mesh_pointer_f) {} 
 
 void FC_FUNC_(transfer_accel_ic_from_device,
               TRANSFER_ACCEL_IC_FROM_DEVICE)(int* size, realw* accel,long* Mesh_pointer_f) {} 

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/assemble_MPI_central_cube.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/assemble_MPI_central_cube.f90	2012-03-05 02:50:35 UTC (rev 19724)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/assemble_MPI_central_cube.f90	2012-03-05 06:13:16 UTC (rev 19725)
@@ -52,9 +52,10 @@
   integer, intent(inout) :: iphase_comm_CC
   integer, dimension(nb_msgs_theor_in_cube), intent(in) :: sender_from_slices_to_cube
 
-  double precision, dimension(npoin2D_cube_from_slices,ndim_assemble), intent(inout) :: buffer_slices
+  double precision, dimension(npoin2D_cube_from_slices,ndim_assemble), intent(inout) :: &
+    buffer_slices
   double precision, dimension(npoin2D_cube_from_slices,ndim_assemble,nb_msgs_theor_in_cube), intent(inout) :: &
-                                                                                      buffer_all_cube_from_slices
+    buffer_all_cube_from_slices
 
   ! note: these parameters are "saved" now as global parameters
   ! MPI status of messages to be received

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/assemble_MPI_central_cube_block.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/assemble_MPI_central_cube_block.f90	2012-03-05 02:50:35 UTC (rev 19724)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/assemble_MPI_central_cube_block.f90	2012-03-05 06:13:16 UTC (rev 19725)
@@ -45,8 +45,10 @@
 ! for matching with central cube in inner core
   integer ichunk, nb_msgs_theor_in_cube, npoin2D_cube_from_slices
   integer, dimension(nb_msgs_theor_in_cube) :: sender_from_slices_to_cube
-  double precision, dimension(npoin2D_cube_from_slices,NDIM) :: buffer_slices,buffer_slices2
-  double precision, dimension(nb_msgs_theor_in_cube,npoin2D_cube_from_slices,NDIM) :: buffer_all_cube_from_slices
+  double precision, dimension(npoin2D_cube_from_slices,NDIM) :: &
+    buffer_slices,buffer_slices2
+  double precision, dimension(nb_msgs_theor_in_cube,npoin2D_cube_from_slices,NDIM) :: &
+    buffer_all_cube_from_slices
   integer, dimension(nb_msgs_theor_in_cube,npoin2D_cube_from_slices):: ibool_central_cube
   integer receiver_cube_from_slices
 

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_acoustic.F90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_acoustic.F90	2012-03-05 02:50:35 UTC (rev 19724)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_acoustic.F90	2012-03-05 06:13:16 UTC (rev 19725)
@@ -85,15 +85,17 @@
       if( USE_DEVILLE_PRODUCTS_VAL ) then
         ! uses Deville et al. (2002) routine
         call compute_forces_outer_core_Dev(time,deltat,two_omega_earth, &
+                                      NSPEC_OUTER_CORE_ROTATION,NGLOB_OUTER_CORE, &
                                       A_array_rotation,B_array_rotation, &
-                                      displ_outer_core,accel_outer_core,div_displ_outer_core, &
-                                      phase_is_inner)
+                                      displ_outer_core,accel_outer_core, &
+                                      div_displ_outer_core,phase_is_inner)
       else
         ! div_displ_outer_core is initialized to zero in the following subroutine.
         call compute_forces_outer_core(time,deltat,two_omega_earth, &
+                                      NSPEC_OUTER_CORE_ROTATION,NGLOB_OUTER_CORE, &
                                       A_array_rotation,B_array_rotation, &
-                                      displ_outer_core,accel_outer_core,div_displ_outer_core, &
-                                      phase_is_inner)
+                                      displ_outer_core,accel_outer_core, &
+                                      div_displ_outer_core,phase_is_inner)
       endif
 
       ! adjoint / kernel runs
@@ -101,14 +103,16 @@
         if( USE_DEVILLE_PRODUCTS_VAL ) then
           ! uses Deville et al. (2002) routine
           call compute_forces_outer_core_Dev(b_time,b_deltat,b_two_omega_earth, &
+                                      NSPEC_OUTER_CORE_ROT_ADJOINT,NGLOB_OUTER_CORE_ADJOINT, &
                                       b_A_array_rotation,b_B_array_rotation, &
-                                      b_displ_outer_core,b_accel_outer_core,b_div_displ_outer_core, &
-                                      phase_is_inner)
+                                      b_displ_outer_core,b_accel_outer_core, &
+                                      b_div_displ_outer_core,phase_is_inner)
         else
           call compute_forces_outer_core(b_time,b_deltat,b_two_omega_earth, &
+                                      NSPEC_OUTER_CORE_ROT_ADJOINT,NGLOB_OUTER_CORE_ADJOINT, &  
                                       b_A_array_rotation,b_B_array_rotation, &
-                                      b_displ_outer_core,b_accel_outer_core,b_div_displ_outer_core, &
-                                      phase_is_inner)
+                                      b_displ_outer_core,b_accel_outer_core, &
+                                      b_div_displ_outer_core,phase_is_inner)
         endif
       endif
 
@@ -267,11 +271,11 @@
   ! (multiply by the inverse of the mass matrix and update velocity)
   if(.NOT. GPU_MODE) then
     ! on CPU
-    call compute_forces_ac_update_veloc(veloc_outer_core,accel_outer_core, &
+    call compute_forces_ac_update_veloc(NGLOB_OUTER_CORE,veloc_outer_core,accel_outer_core, &
                                        deltatover2,rmass_outer_core)
     ! adjoint / kernel runs
     if (SIMULATION_TYPE == 3) &
-      call compute_forces_ac_update_veloc(b_veloc_outer_core,b_accel_outer_core, &
+      call compute_forces_ac_update_veloc(NGLOB_OUTER_CORE_ADJOINT,b_veloc_outer_core,b_accel_outer_core, &
                                          b_deltatover2,rmass_outer_core)
   else
     ! on GPU
@@ -283,9 +287,10 @@
 
 !=====================================================================
 
-  subroutine compute_forces_ac_update_veloc(veloc_outer_core,accel_outer_core,deltatover2,rmass_outer_core)
+  subroutine compute_forces_ac_update_veloc(NGLOB,veloc_outer_core,accel_outer_core, &
+                                          deltatover2,rmass_outer_core)
 
-  use specfem_par,only: CUSTOM_REAL,NGLOB_OUTER_CORE
+  use constants,only: CUSTOM_REAL
 
 #ifdef _HANDOPT
   use specfem_par,only: imodulo_NGLOB_OUTER_CORE
@@ -293,11 +298,13 @@
 
   implicit none
 
+  integer :: NGLOB
+  
   ! velocity potential
-  real(kind=CUSTOM_REAL), dimension(NGLOB_OUTER_CORE) :: veloc_outer_core,accel_outer_core
+  real(kind=CUSTOM_REAL), dimension(NGLOB) :: veloc_outer_core,accel_outer_core
 
   ! mass matrix
-  real(kind=CUSTOM_REAL), dimension(NGLOB_OUTER_CORE) :: rmass_outer_core
+  real(kind=CUSTOM_REAL), dimension(NGLOB) :: rmass_outer_core
 
   real(kind=CUSTOM_REAL) :: deltatover2
 
@@ -316,7 +323,7 @@
       veloc_outer_core(i) = veloc_outer_core(i) + deltatover2*accel_outer_core(i)
     enddo
   endif
-  do i=imodulo_NGLOB_OUTER_CORE+1,NGLOB_OUTER_CORE,3
+  do i=imodulo_NGLOB_OUTER_CORE+1,NGLOB,3
     accel_outer_core(i) = accel_outer_core(i)*rmass_outer_core(i)
     veloc_outer_core(i) = veloc_outer_core(i) + deltatover2*accel_outer_core(i)
 
@@ -328,7 +335,7 @@
   enddo
 #else
 ! way 1:
-  do i=1,NGLOB_OUTER_CORE
+  do i=1,NGLOB
     accel_outer_core(i) = accel_outer_core(i)*rmass_outer_core(i)
     veloc_outer_core(i) = veloc_outer_core(i) + deltatover2*accel_outer_core(i)
   enddo

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_crust_mantle.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_crust_mantle.f90	2012-03-05 02:50:35 UTC (rev 19724)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_crust_mantle.f90	2012-03-05 06:13:16 UTC (rev 19725)
@@ -25,9 +25,10 @@
 !
 !=====================================================================
 
-  subroutine compute_forces_crust_mantle(displ_crust_mantle,accel_crust_mantle, &
+  subroutine compute_forces_crust_mantle(NSPEC,NGLOB,NSPEC_ATT, &
+                                        displ_crust_mantle,accel_crust_mantle, &
                                         phase_is_inner, &
-                                        R_xx,R_yy,R_xy,R_xz,R_yz, &
+                                        R_xx,R_yy,R_xy,R_xz,R_yz, &                                        
                                         epsilondev_xx,epsilondev_yy,epsilondev_xy, &
                                         epsilondev_xz,epsilondev_yz, &
                                         epsilon_trace_over_3, &
@@ -70,37 +71,38 @@
     nspec_outer => nspec_outer_crust_mantle, &
     nspec_inner => nspec_inner_crust_mantle
 
-  use specfem_par_innercore,only: &
-    ibool_inner_core,idoubling_inner_core, &
-    iboolleft_xi_inner_core,iboolright_xi_inner_core,iboolleft_eta_inner_core,iboolright_eta_inner_core, &
-    npoin2D_faces_inner_core,npoin2D_xi_inner_core,npoin2D_eta_inner_core, &
-    iboolfaces_inner_core,iboolcorner_inner_core, &
-    nb_msgs_theor_in_cube,sender_from_slices_to_cube, &
-    npoin2D_cube_from_slices, &
-    ibool_central_cube, &
-    receiver_cube_from_slices,ibelm_bottom_inner_core,NSPEC2D_BOTTOM_IC
+!  use specfem_par_innercore,only: &
+!    ibool_inner_core,idoubling_inner_core, &
+!    iboolleft_xi_inner_core,iboolright_xi_inner_core,iboolleft_eta_inner_core,iboolright_eta_inner_core, &
+!    npoin2D_faces_inner_core,npoin2D_xi_inner_core,npoin2D_eta_inner_core, &
+!    iboolfaces_inner_core,iboolcorner_inner_core, &
+!    nb_msgs_theor_in_cube,sender_from_slices_to_cube, &
+!    npoin2D_cube_from_slices, &
+!    ibool_central_cube, &
+!    receiver_cube_from_slices,ibelm_bottom_inner_core,NSPEC2D_BOTTOM_IC
 
   implicit none
 
+  integer :: NSPEC,NGLOB,NSPEC_ATT
+
   ! displacement and acceleration
-  real(kind=CUSTOM_REAL), dimension(NDIM,NGLOB_CRUST_MANTLE) :: displ_crust_mantle,accel_crust_mantle
+  real(kind=CUSTOM_REAL), dimension(NDIM,NGLOB) :: displ_crust_mantle,accel_crust_mantle
 
 
   ! memory variables for attenuation
   ! memory variables R_ij are stored at the local rather than global level
   ! to allow for optimization of cache access by compiler
 !  real(kind=CUSTOM_REAL), dimension(5,N_SLS,NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE_ATTENUAT) :: R_memory
-  real(kind=CUSTOM_REAL), dimension(N_SLS,NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE_ATTENUAT) :: &
+  real(kind=CUSTOM_REAL), dimension(N_SLS,NGLLX,NGLLY,NGLLZ,NSPEC_ATT) :: &
     R_xx,R_yy,R_xy,R_xz,R_yz
 
-
+  
 !  real(kind=CUSTOM_REAL), dimension(5,NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE) :: epsilondev
-  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE) :: &
+  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC) :: &
     epsilondev_xx,epsilondev_yy,epsilondev_xy,epsilondev_xz,epsilondev_yz
 
+  real(kind=CUSTOM_REAL),dimension(NGLLX,NGLLY,NGLLZ,NSPEC) :: epsilon_trace_over_3
 
-  real(kind=CUSTOM_REAL),dimension(NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE) :: epsilon_trace_over_3
-
   ! variable sized array variables for one_minus_sum_beta and factor_common
   integer vx, vy, vz, vnspec
   ! [alpha,beta,gamma]val reduced to N_SLS and factor_common to N_SLS*NUM_NODES

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_crust_mantle_Dev.F90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_crust_mantle_Dev.F90	2012-03-05 02:50:35 UTC (rev 19724)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_crust_mantle_Dev.F90	2012-03-05 06:13:16 UTC (rev 19725)
@@ -34,9 +34,10 @@
 !          depending on compilers, it can further decrease the computation time by ~ 30%.
 !          the original routines are commented with "! way 1", the hand-optimized routines with  "! way 2"
 
-  subroutine compute_forces_crust_mantle_Dev( displ_crust_mantle,accel_crust_mantle, &
+  subroutine compute_forces_crust_mantle_Dev( NSPEC,NGLOB,NSPEC_ATT, &
+                                              displ_crust_mantle,accel_crust_mantle, &
                                               phase_is_inner, &
-                                              R_xx,R_yy,R_xy,R_xz,R_yz, &
+                                              R_xx,R_yy,R_xy,R_xz,R_yz, &                                              
                                               epsilondev_xx,epsilondev_yy,epsilondev_xy, &
                                               epsilondev_xz,epsilondev_yz, &
                                               epsilon_trace_over_3, &
@@ -80,34 +81,36 @@
     nspec_outer => nspec_outer_crust_mantle, &
     nspec_inner => nspec_inner_crust_mantle
 
-  use specfem_par_innercore,only: &
-    ibool_inner_core,idoubling_inner_core, &
-    iboolleft_xi_inner_core,iboolright_xi_inner_core,iboolleft_eta_inner_core,iboolright_eta_inner_core, &
-    npoin2D_faces_inner_core,npoin2D_xi_inner_core,npoin2D_eta_inner_core, &
-    iboolfaces_inner_core,iboolcorner_inner_core, &
-    nb_msgs_theor_in_cube,sender_from_slices_to_cube, &
-    npoin2D_cube_from_slices, &
-    ibool_central_cube, &
-    receiver_cube_from_slices,ibelm_bottom_inner_core,NSPEC2D_BOTTOM_IC
+!  use specfem_par_innercore,only: &
+!    ibool_inner_core,idoubling_inner_core, &
+!    iboolleft_xi_inner_core,iboolright_xi_inner_core,iboolleft_eta_inner_core,iboolright_eta_inner_core, &
+!    npoin2D_faces_inner_core,npoin2D_xi_inner_core,npoin2D_eta_inner_core, &
+!    iboolfaces_inner_core,iboolcorner_inner_core, &
+!    nb_msgs_theor_in_cube,sender_from_slices_to_cube, &
+!    npoin2D_cube_from_slices, &
+!    ibool_central_cube, &
+!    receiver_cube_from_slices,ibelm_bottom_inner_core,NSPEC2D_BOTTOM_IC
 
   implicit none
 
+  integer :: NSPEC,NGLOB,NSPEC_ATT
+  
   ! displacement and acceleration
-  real(kind=CUSTOM_REAL), dimension(NDIM,NGLOB_CRUST_MANTLE) :: displ_crust_mantle,accel_crust_mantle
+  real(kind=CUSTOM_REAL), dimension(NDIM,NGLOB) :: displ_crust_mantle,accel_crust_mantle
 
   ! attenuation
   ! memory variables for attenuation
   ! memory variables R_ij are stored at the local rather than global level
   ! to allow for optimization of cache access by compiler
 !  real(kind=CUSTOM_REAL), dimension(5,N_SLS,NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE_ATTENUAT) :: R_memory
-  real(kind=CUSTOM_REAL), dimension(N_SLS,NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE_ATTENUAT) :: &
+  real(kind=CUSTOM_REAL), dimension(N_SLS,NGLLX,NGLLY,NGLLZ,NSPEC_ATT) :: &
     R_xx,R_yy,R_xy,R_xz,R_yz
-
+  
 !  real(kind=CUSTOM_REAL), dimension(5,NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE) :: epsilondev
-  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE) :: &
+  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC) :: &
     epsilondev_xx,epsilondev_yy,epsilondev_xy,epsilondev_xz,epsilondev_yz
 
-  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE) :: epsilon_trace_over_3
+  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC) :: epsilon_trace_over_3
 
   integer :: vx,vy,vz,vnspec
 

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_elastic.F90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_elastic.F90	2012-03-05 02:50:35 UTC (rev 19724)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_elastic.F90	2012-03-05 06:13:16 UTC (rev 19725)
@@ -76,10 +76,12 @@
       if( USE_DEVILLE_PRODUCTS_VAL ) then
         ! uses Deville (2002) optimizations
         ! crust/mantle region
-        call compute_forces_crust_mantle_Dev( displ_crust_mantle,accel_crust_mantle, &
+        call compute_forces_crust_mantle_Dev( NSPEC_CRUST_MANTLE_STR_OR_ATT,NGLOB_CRUST_MANTLE, &
+                                      NSPEC_CRUST_MANTLE_ATTENUAT, &      
+                                      displ_crust_mantle,accel_crust_mantle, &
                                       phase_is_inner, &
                                       R_xx_crust_mantle,R_yy_crust_mantle,R_xy_crust_mantle, &
-                                      R_xz_crust_mantle,R_yz_crust_mantle, &
+                                      R_xz_crust_mantle,R_yz_crust_mantle, &                                      
                                       epsilondev_xx_crust_mantle,epsilondev_yy_crust_mantle,epsilondev_xy_crust_mantle, &
                                       epsilondev_xz_crust_mantle,epsilondev_yz_crust_mantle, &
                                       eps_trace_over_3_crust_mantle, &
@@ -87,7 +89,9 @@
                                       size(factor_common_crust_mantle,2), size(factor_common_crust_mantle,3), &
                                       size(factor_common_crust_mantle,4), size(factor_common_crust_mantle,5) )
         ! inner core region
-        call compute_forces_inner_core_Dev( displ_inner_core,accel_inner_core, &
+        call compute_forces_inner_core_Dev( NSPEC_INNER_CORE_STR_OR_ATT,NGLOB_INNER_CORE, &
+                                      NSPEC_INNER_CORE_ATTENUATION, &
+                                      displ_inner_core,accel_inner_core, &
                                       phase_is_inner, &
                                       R_xx_inner_core,R_yy_inner_core,R_xy_inner_core,R_xz_inner_core,R_yz_inner_core, &
                                       epsilondev_xx_inner_core,epsilondev_yy_inner_core,epsilondev_xy_inner_core, &
@@ -101,7 +105,9 @@
       else
         ! no Deville optimization
         ! crust/mantle region
-        call compute_forces_crust_mantle( displ_crust_mantle,accel_crust_mantle, &
+        call compute_forces_crust_mantle(  NSPEC_CRUST_MANTLE_STR_OR_ATT,NGLOB_CRUST_MANTLE, &
+                                      NSPEC_CRUST_MANTLE_ATTENUAT, &              
+                                      displ_crust_mantle,accel_crust_mantle, &
                                       phase_is_inner, &
                                       R_xx_crust_mantle,R_yy_crust_mantle,R_xy_crust_mantle, &
                                       R_xz_crust_mantle,R_yz_crust_mantle, &
@@ -112,7 +118,9 @@
                                       size(factor_common_crust_mantle,2), size(factor_common_crust_mantle,3), &
                                       size(factor_common_crust_mantle,4), size(factor_common_crust_mantle,5) )
         ! inner core region
-        call compute_forces_inner_core( displ_inner_core,accel_inner_core, &
+        call compute_forces_inner_core( NSPEC_INNER_CORE_STR_OR_ATT,NGLOB_INNER_CORE, &
+                                      NSPEC_INNER_CORE_ATTENUATION, &
+                                      displ_inner_core,accel_inner_core, &
                                       phase_is_inner, &
                                       R_xx_inner_core,R_yy_inner_core,R_xy_inner_core,R_xz_inner_core,R_yz_inner_core, &
                                       epsilondev_xx_inner_core,epsilondev_yy_inner_core,epsilondev_xy_inner_core, &
@@ -130,7 +138,9 @@
         if( USE_DEVILLE_PRODUCTS_VAL ) then
           ! uses Deville (2002) optimizations
           ! crust/mantle region
-          call compute_forces_crust_mantle_Dev( b_displ_crust_mantle,b_accel_crust_mantle, &
+          call compute_forces_crust_mantle_Dev( NSPEC_CRUST_MANTLE_ADJOINT,NGLOB_CRUST_MANTLE_ADJOINT, &
+                                      NSPEC_CRUST_MANTLE_STR_AND_ATT, &
+                                      b_displ_crust_mantle,b_accel_crust_mantle, &
                                       phase_is_inner, &
                                       b_R_xx_crust_mantle,b_R_yy_crust_mantle,b_R_xy_crust_mantle, &
                                       b_R_xz_crust_mantle,b_R_yz_crust_mantle, &
@@ -142,7 +152,9 @@
                                       size(factor_common_crust_mantle,2), size(factor_common_crust_mantle,3), &
                                       size(factor_common_crust_mantle,4), size(factor_common_crust_mantle,5) )
           ! inner core region
-          call compute_forces_inner_core_Dev( b_displ_inner_core,b_accel_inner_core, &
+          call compute_forces_inner_core_Dev( NSPEC_INNER_CORE_ADJOINT,NGLOB_INNER_CORE_ADJOINT, &
+                                        NSPEC_INNER_CORE_STR_AND_ATT, &
+                                        b_displ_inner_core,b_accel_inner_core, &
                                         phase_is_inner, &
                                         b_R_xx_inner_core,b_R_yy_inner_core,b_R_xy_inner_core, &
                                         b_R_xz_inner_core,b_R_yz_inner_core, &
@@ -157,7 +169,9 @@
         else
           ! no Deville optimization
           ! crust/mantle region
-          call compute_forces_crust_mantle( b_displ_crust_mantle,b_accel_crust_mantle, &
+          call compute_forces_crust_mantle( NSPEC_CRUST_MANTLE_ADJOINT,NGLOB_CRUST_MANTLE_ADJOINT, &
+                                        NSPEC_CRUST_MANTLE_STR_AND_ATT, &
+                                        b_displ_crust_mantle,b_accel_crust_mantle, &
                                         phase_is_inner, &
                                         b_R_xx_crust_mantle,b_R_yy_crust_mantle,b_R_xy_crust_mantle, &
                                         b_R_xz_crust_mantle,b_R_yz_crust_mantle, &
@@ -170,7 +184,9 @@
                                         size(factor_common_crust_mantle,4), size(factor_common_crust_mantle,5) )
 
           ! inner core region
-          call compute_forces_inner_core( b_displ_inner_core,b_accel_inner_core, &
+          call compute_forces_inner_core( NSPEC_INNER_CORE_ADJOINT,NGLOB_INNER_CORE_ADJOINT, &
+                                        NSPEC_INNER_CORE_STR_AND_ATT, &
+                                        b_displ_inner_core,b_accel_inner_core, &
                                         phase_is_inner, &
                                         b_R_xx_inner_core,b_R_yy_inner_core,b_R_xy_inner_core, &
                                         b_R_xz_inner_core,b_R_yz_inner_core, &
@@ -225,7 +241,7 @@
        case( 2 )
           ! second step of noise tomography, i.e., read the surface movie saved at every timestep
           ! use the movie to drive the ensemble forward wavefield
-          call noise_read_add_surface_movie(accel_crust_mantle,NSTEP-it+1)
+          call noise_read_add_surface_movie(NGLOB_CRUST_MANTLE,accel_crust_mantle,NSTEP-it+1)
           ! be careful, since ensemble forward sources are reversals of generating wavefield "eta"
           ! hence the "NSTEP-it+1", i.e., start reading from the last timestep
           ! note the ensemble forward sources are generally distributed on the surface of the earth
@@ -236,7 +252,7 @@
           ! use the movie to reconstruct the ensemble forward wavefield
           ! the ensemble adjoint wavefield is done as usual
           ! note instead of "NSTEP-it+1", now we us "it", since reconstruction is a reversal of reversal
-          call noise_read_add_surface_movie(b_accel_crust_mantle,it)
+          call noise_read_add_surface_movie(NGLOB_CRUST_MANTLE_ADJOINT,b_accel_crust_mantle,it)
        end select
 
 
@@ -464,12 +480,12 @@
   ! updates (only) acceleration w/ rotation in the crust/mantle region (touches oceans)
   if(.NOT. GPU_MODE) then
     ! on CPU
-    call compute_forces_el_update_accel(veloc_crust_mantle,accel_crust_mantle, &
-                                      two_omega_earth,rmass_crust_mantle)
+    call compute_forces_el_update_accel(NGLOB_CRUST_MANTLE,veloc_crust_mantle,accel_crust_mantle, &
+                                       two_omega_earth,rmass_crust_mantle)
     ! adjoint / kernel runs
     if (SIMULATION_TYPE == 3) &
-      call compute_forces_el_update_accel(b_veloc_crust_mantle,b_accel_crust_mantle, &
-                                      b_two_omega_earth,rmass_crust_mantle)
+      call compute_forces_el_update_accel(NGLOB_CRUST_MANTLE_ADJOINT,b_veloc_crust_mantle,b_accel_crust_mantle, &
+                                         b_two_omega_earth,rmass_crust_mantle)
   else
     ! on GPU
     call kernel_3_a_cuda(Mesh_pointer, &
@@ -501,14 +517,14 @@
   ! (updates velocity)
   if(.NOT. GPU_MODE ) then
     ! on CPU
-    call compute_forces_el_update_veloc(veloc_crust_mantle,accel_crust_mantle, &
-                                      veloc_inner_core,accel_inner_core, &
-                                      deltatover2,two_omega_earth,rmass_inner_core)
+    call compute_forces_el_update_veloc(NGLOB_CRUST_MANTLE,veloc_crust_mantle,accel_crust_mantle, &
+                                       NGLOB_INNER_CORE,veloc_inner_core,accel_inner_core, &
+                                       deltatover2,two_omega_earth,rmass_inner_core)
     ! adjoint / kernel runs
     if (SIMULATION_TYPE == 3) &
-      call compute_forces_el_update_veloc(b_veloc_crust_mantle,b_accel_crust_mantle, &
-                                        b_veloc_inner_core,b_accel_inner_core, &
-                                        b_deltatover2,b_two_omega_earth,rmass_inner_core)
+      call compute_forces_el_update_veloc(NGLOB_CRUST_MANTLE_ADJOINT,b_veloc_crust_mantle,b_accel_crust_mantle, &
+                                         NGLOB_INNER_CORE_ADJOINT,b_veloc_inner_core,b_accel_inner_core, &
+                                         b_deltatover2,b_two_omega_earth,rmass_inner_core)
   else
     ! on GPU
     call kernel_3_b_cuda(Mesh_pointer, &
@@ -520,10 +536,10 @@
 
 !=====================================================================
 
-  subroutine compute_forces_el_update_accel(veloc_crust_mantle,accel_crust_mantle, &
+  subroutine compute_forces_el_update_accel(NGLOB,veloc_crust_mantle,accel_crust_mantle, &
                                            two_omega_earth,rmass_crust_mantle)
 
-  use specfem_par,only: CUSTOM_REAL,NGLOB_CRUST_MANTLE,NDIM
+  use constants,only: CUSTOM_REAL,NDIM
 
 #ifdef _HANDOPT
   use specfem_par,only: imodulo_NGLOB_CRUST_MANTLE4
@@ -531,12 +547,14 @@
 
   implicit none
 
+  integer :: NGLOB
+  
   ! velocity & acceleration
   ! crust/mantle region
-  real(kind=CUSTOM_REAL), dimension(NDIM,NGLOB_CRUST_MANTLE) :: veloc_crust_mantle,accel_crust_mantle
+  real(kind=CUSTOM_REAL), dimension(NDIM,NGLOB) :: veloc_crust_mantle,accel_crust_mantle
 
   ! mass matrix
-  real(kind=CUSTOM_REAL), dimension(NGLOB_CRUST_MANTLE) :: rmass_crust_mantle
+  real(kind=CUSTOM_REAL), dimension(NGLOB) :: rmass_crust_mantle
 
   real(kind=CUSTOM_REAL) :: two_omega_earth
 
@@ -556,7 +574,7 @@
       accel_crust_mantle(3,i) = accel_crust_mantle(3,i)*rmass_crust_mantle(i)
     enddo
   endif
-  do i=imodulo_NGLOB_CRUST_MANTLE4+1,NGLOB_CRUST_MANTLE,4
+  do i=imodulo_NGLOB_CRUST_MANTLE4+1,NGLOB,4
     accel_crust_mantle(1,i) = accel_crust_mantle(1,i)*rmass_crust_mantle(i) &
              + two_omega_earth*veloc_crust_mantle(2,i)
     accel_crust_mantle(2,i) = accel_crust_mantle(2,i)*rmass_crust_mantle(i) &
@@ -583,7 +601,7 @@
   enddo
 #else
 ! way 1:
-  do i=1,NGLOB_CRUST_MANTLE
+  do i=1,NGLOB
     accel_crust_mantle(1,i) = accel_crust_mantle(1,i)*rmass_crust_mantle(i) &
              + two_omega_earth*veloc_crust_mantle(2,i)
     accel_crust_mantle(2,i) = accel_crust_mantle(2,i)*rmass_crust_mantle(i) &
@@ -597,11 +615,11 @@
 
 !=====================================================================
 
-  subroutine compute_forces_el_update_veloc(veloc_crust_mantle,accel_crust_mantle, &
-                                            veloc_inner_core,accel_inner_core, &
+  subroutine compute_forces_el_update_veloc(NGLOB_CM,veloc_crust_mantle,accel_crust_mantle, &
+                                            NGLOB_IC,veloc_inner_core,accel_inner_core, &
                                             deltatover2,two_omega_earth,rmass_inner_core)
 
-  use specfem_par,only: CUSTOM_REAL,NGLOB_CRUST_MANTLE,NGLOB_INNER_CORE,NDIM
+  use constants,only: CUSTOM_REAL,NDIM
 
 #ifdef _HANDOPT
   use specfem_par,only: imodulo_NGLOB_CRUST_MANTLE4,imodulo_NGLOB_INNER_CORE
@@ -609,14 +627,16 @@
 
   implicit none
 
+  integer :: NGLOB_CM,NGLOB_IC
+  
   ! acceleration & velocity
   ! crust/mantle region
-  real(kind=CUSTOM_REAL), dimension(NDIM,NGLOB_CRUST_MANTLE) :: veloc_crust_mantle,accel_crust_mantle
+  real(kind=CUSTOM_REAL), dimension(NDIM,NGLOB_CM) :: veloc_crust_mantle,accel_crust_mantle
   ! inner core
-  real(kind=CUSTOM_REAL), dimension(NDIM,NGLOB_INNER_CORE) :: veloc_inner_core,accel_inner_core
+  real(kind=CUSTOM_REAL), dimension(NDIM,NGLOB_IC) :: veloc_inner_core,accel_inner_core
 
   ! mass matrix
-  real(kind=CUSTOM_REAL), dimension(NGLOB_INNER_CORE) :: rmass_inner_core
+  real(kind=CUSTOM_REAL), dimension(NGLOB_IC) :: rmass_inner_core
 
   real(kind=CUSTOM_REAL) :: deltatover2,two_omega_earth
 
@@ -640,7 +660,7 @@
       veloc_crust_mantle(:,i) = veloc_crust_mantle(:,i) + deltatover2*accel_crust_mantle(:,i)
     enddo
   endif
-  do i=imodulo_NGLOB_CRUST_MANTLE4+1,NGLOB_CRUST_MANTLE,4
+  do i=imodulo_NGLOB_CRUST_MANTLE4+1,NGLOB_CM,4
     veloc_crust_mantle(:,i) = veloc_crust_mantle(:,i) + deltatover2*accel_crust_mantle(:,i)
     veloc_crust_mantle(:,i+1) = veloc_crust_mantle(:,i+1) + deltatover2*accel_crust_mantle(:,i+1)
     veloc_crust_mantle(:,i+2) = veloc_crust_mantle(:,i+2) + deltatover2*accel_crust_mantle(:,i+2)
@@ -659,7 +679,7 @@
       veloc_inner_core(:,i) = veloc_inner_core(:,i) + deltatover2*accel_inner_core(:,i)
     enddo
   endif
-  do i=imodulo_NGLOB_INNER_CORE+1,NGLOB_INNER_CORE,3
+  do i=imodulo_NGLOB_INNER_CORE+1,NGLOB_IC,3
     accel_inner_core(1,i) = accel_inner_core(1,i)*rmass_inner_core(i) &
            + two_omega_earth*veloc_inner_core(2,i)
     accel_inner_core(2,i) = accel_inner_core(2,i)*rmass_inner_core(i) &
@@ -687,11 +707,11 @@
 #else
 ! way 1:
   ! mantle
-  do i=1,NGLOB_CRUST_MANTLE
+  do i=1,NGLOB_CM
     veloc_crust_mantle(:,i) = veloc_crust_mantle(:,i) + deltatover2*accel_crust_mantle(:,i)
   enddo
   ! inner core
-  do i=1,NGLOB_INNER_CORE
+  do i=1,NGLOB_IC
     accel_inner_core(1,i) = accel_inner_core(1,i)*rmass_inner_core(i) &
            + two_omega_earth*veloc_inner_core(2,i)
     accel_inner_core(2,i) = accel_inner_core(2,i)*rmass_inner_core(i) &

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_inner_core.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_inner_core.f90	2012-03-05 02:50:35 UTC (rev 19724)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_inner_core.f90	2012-03-05 06:13:16 UTC (rev 19725)
@@ -25,9 +25,10 @@
 !
 !=====================================================================
 
-  subroutine compute_forces_inner_core( displ_inner_core,accel_inner_core, &
+  subroutine compute_forces_inner_core( NSPEC,NGLOB,NSPEC_ATT, &
+                                        displ_inner_core,accel_inner_core, &
                                         phase_is_inner, &
-                                        R_xx,R_yy,R_xy,R_xz,R_yz, &
+                                        R_xx,R_yy,R_xy,R_xz,R_yz, &                                        
                                         epsilondev_xx,epsilondev_yy,epsilondev_xy, &
                                         epsilondev_xz,epsilondev_yz, &
                                         epsilon_trace_over_3,&
@@ -68,15 +69,17 @@
     nspec_outer => nspec_outer_inner_core, &
     nspec_inner => nspec_inner_inner_core
 
-  use specfem_par_crustmantle,only: &
-    iboolleft_xi_crust_mantle,iboolright_xi_crust_mantle,iboolleft_eta_crust_mantle,iboolright_eta_crust_mantle, &
-    npoin2D_faces_crust_mantle,npoin2D_xi_crust_mantle,npoin2D_eta_crust_mantle, &
-    iboolfaces_crust_mantle,iboolcorner_crust_mantle
+!  use specfem_par_crustmantle,only: &
+!    iboolleft_xi_crust_mantle,iboolright_xi_crust_mantle,iboolleft_eta_crust_mantle,iboolright_eta_crust_mantle, &
+!    npoin2D_faces_crust_mantle,npoin2D_xi_crust_mantle,npoin2D_eta_crust_mantle, &
+!    iboolfaces_crust_mantle,iboolcorner_crust_mantle
 
   implicit none
 
+  integer :: NSPEC,NGLOB,NSPEC_ATT
+
   ! displacement and acceleration
-  real(kind=CUSTOM_REAL), dimension(NDIM,NGLOB_INNER_CORE) :: displ_inner_core,accel_inner_core
+  real(kind=CUSTOM_REAL), dimension(NDIM,NGLOB) :: displ_inner_core,accel_inner_core
 
   ! for attenuation
   ! memory variables R_ij are stored at the local rather than global level
@@ -87,15 +90,14 @@
   real(kind=CUSTOM_REAL), dimension(N_SLS) :: alphaval,betaval,gammaval
 
 !  real(kind=CUSTOM_REAL), dimension(5,N_SLS,NGLLX,NGLLY,NGLLZ,NSPEC_INNER_CORE_ATTENUATION) :: R_memory
-  real(kind=CUSTOM_REAL), dimension(N_SLS,NGLLX,NGLLY,NGLLZ,NSPEC_INNER_CORE_ATTENUATION) :: &
+  real(kind=CUSTOM_REAL), dimension(N_SLS,NGLLX,NGLLY,NGLLZ,NSPEC_ATT) :: &
     R_xx,R_yy,R_xy,R_xz,R_yz
-
-
+  
 !  real(kind=CUSTOM_REAL), dimension(5,NGLLX,NGLLY,NGLLZ,NSPEC_INNER_CORE) :: epsilondev
-  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC_INNER_CORE) :: &
+  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC) :: &
     epsilondev_xx,epsilondev_yy,epsilondev_xy,epsilondev_xz,epsilondev_yz
 
-  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC_INNER_CORE) :: epsilon_trace_over_3
+  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC) :: epsilon_trace_over_3
 
   ! inner/outer element run flag
   logical :: phase_is_inner

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_inner_core_Dev.F90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_inner_core_Dev.F90	2012-03-05 02:50:35 UTC (rev 19724)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_inner_core_Dev.F90	2012-03-05 06:13:16 UTC (rev 19725)
@@ -34,9 +34,10 @@
 !          depending on compilers, it can further decrease the computation time by ~ 30%.
 !          the original routines are commented with "! way 1", the hand-optimized routines with  "! way 2"
 
-  subroutine compute_forces_inner_core_Dev( displ_inner_core,accel_inner_core, &
+  subroutine compute_forces_inner_core_Dev( NSPEC,NGLOB,NSPEC_ATT, &
+                                            displ_inner_core,accel_inner_core, &
                                             phase_is_inner, &
-                                            R_xx,R_yy,R_xy,R_xz,R_yz, &
+                                            R_xx,R_yy,R_xy,R_xz,R_yz, &                                            
                                             epsilondev_xx,epsilondev_yy,epsilondev_xy, &
                                             epsilondev_xz,epsilondev_yz, &
                                             epsilon_trace_over_3,&
@@ -79,15 +80,17 @@
     nspec_inner => nspec_inner_inner_core
 
 
-  use specfem_par_crustmantle,only: &
-    iboolleft_xi_crust_mantle,iboolright_xi_crust_mantle,iboolleft_eta_crust_mantle,iboolright_eta_crust_mantle, &
-    npoin2D_faces_crust_mantle,npoin2D_xi_crust_mantle,npoin2D_eta_crust_mantle, &
-    iboolfaces_crust_mantle,iboolcorner_crust_mantle
+!  use specfem_par_crustmantle,only: &
+!    iboolleft_xi_crust_mantle,iboolright_xi_crust_mantle,iboolleft_eta_crust_mantle,iboolright_eta_crust_mantle, &
+!    npoin2D_faces_crust_mantle,npoin2D_xi_crust_mantle,npoin2D_eta_crust_mantle, &
+!    iboolfaces_crust_mantle,iboolcorner_crust_mantle
 
   implicit none
 
+  integer :: NSPEC,NGLOB,NSPEC_ATT
+
   ! displacement and acceleration
-  real(kind=CUSTOM_REAL), dimension(NDIM,NGLOB_INNER_CORE) :: displ_inner_core,accel_inner_core
+  real(kind=CUSTOM_REAL), dimension(NDIM,NGLOB) :: displ_inner_core,accel_inner_core
 
   ! for attenuation
   ! memory variables R_ij are stored at the local rather than global level
@@ -98,14 +101,14 @@
   real(kind=CUSTOM_REAL), dimension(N_SLS) :: alphaval,betaval,gammaval
 
 !  real(kind=CUSTOM_REAL), dimension(5,N_SLS,NGLLX,NGLLY,NGLLZ,NSPEC_INNER_CORE_ATTENUATION) :: R_memory
-  real(kind=CUSTOM_REAL), dimension(N_SLS,NGLLX,NGLLY,NGLLZ,NSPEC_INNER_CORE_ATTENUATION) :: &
+  real(kind=CUSTOM_REAL), dimension(N_SLS,NGLLX,NGLLY,NGLLZ,NSPEC_ATT) :: &
     R_xx,R_yy,R_xy,R_xz,R_yz
-
+  
 !  real(kind=CUSTOM_REAL), dimension(5,NGLLX,NGLLY,NGLLZ,NSPEC_INNER_CORE) :: epsilondev
-  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC_INNER_CORE) :: &
+  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC) :: &
     epsilondev_xx,epsilondev_yy,epsilondev_xy,epsilondev_xz,epsilondev_yz
 
-  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC_INNER_CORE) :: epsilon_trace_over_3
+  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC) :: epsilon_trace_over_3
 
   ! inner/outer element run flag
   logical :: phase_is_inner

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_outer_core.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_outer_core.f90	2012-03-05 02:50:35 UTC (rev 19724)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_outer_core.f90	2012-03-05 06:13:16 UTC (rev 19725)
@@ -26,9 +26,10 @@
 !=====================================================================
 
   subroutine compute_forces_outer_core(time,deltat,two_omega_earth, &
+                                      NSPEC,NGLOB, &
                                       A_array_rotation,B_array_rotation, &
-                                      displfluid,accelfluid,div_displfluid, &
-                                      phase_is_inner)
+                                      displfluid,accelfluid, &
+                                      div_displfluid,phase_is_inner)
 
   use constants
 
@@ -57,13 +58,15 @@
 
   implicit none
 
+  integer :: NSPEC,NGLOB
+
   ! for the Euler scheme for rotation
   real(kind=CUSTOM_REAL) time,deltat,two_omega_earth
-  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC_OUTER_CORE_ROTATION) :: &
+  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC) :: &
     A_array_rotation,B_array_rotation
 
   ! displacement and acceleration
-  real(kind=CUSTOM_REAL), dimension(NGLOB_OUTER_CORE) :: displfluid,accelfluid
+  real(kind=CUSTOM_REAL), dimension(NGLOB) :: displfluid,accelfluid
 
   ! divergence of displacement
   real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC_OUTER_CORE_ADJOINT) :: div_displfluid

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_outer_core_Dev.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_outer_core_Dev.f90	2012-03-05 02:50:35 UTC (rev 19724)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/compute_forces_outer_core_Dev.f90	2012-03-05 06:13:16 UTC (rev 19725)
@@ -26,9 +26,10 @@
 !=====================================================================
 
   subroutine compute_forces_outer_core_Dev(time,deltat,two_omega_earth, &
+                                          NSPEC,NGLOB, &
                                           A_array_rotation,B_array_rotation, &
-                                          displfluid,accelfluid,div_displfluid, &
-                                          phase_is_inner)
+                                          displfluid,accelfluid, &
+                                          div_displfluid,phase_is_inner)
 
 ! this routine is optimized for NGLLX = NGLLY = NGLLZ = 5 using the Deville et al. (2002) inlined matrix-matrix products
 
@@ -59,13 +60,15 @@
 
   implicit none
 
+  integer :: NSPEC,NGLOB
+
   ! for the Euler scheme for rotation
   real(kind=CUSTOM_REAL) time,deltat,two_omega_earth
-  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC_OUTER_CORE_ROTATION) :: &
+  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC) :: &
     A_array_rotation,B_array_rotation
 
   ! displacement and acceleration
-  real(kind=CUSTOM_REAL), dimension(NGLOB_OUTER_CORE) :: displfluid,accelfluid
+  real(kind=CUSTOM_REAL), dimension(NGLOB) :: displfluid,accelfluid
 
   ! divergence of displacement
   real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC_OUTER_CORE_ADJOINT) :: div_displfluid

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/iterate_time.F90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/iterate_time.F90	2012-03-05 02:50:35 UTC (rev 19724)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/iterate_time.F90	2012-03-05 06:13:16 UTC (rev 19725)
@@ -431,15 +431,15 @@
     endif
 
     ! updates integral values
-    Iepsilondev_crust_mantle(1,:,:,:,:) = Iepsilondev_crust_mantle(1,:,:,:,:)  &
+    Iepsilondev_xx_crust_mantle(:,:,:,:) = Iepsilondev_xx_crust_mantle(:,:,:,:)  &
                                               + deltat*epsilondev_xx_crust_mantle(:,:,:,:)
-    Iepsilondev_crust_mantle(2,:,:,:,:) = Iepsilondev_crust_mantle(2,:,:,:,:)  &
+    Iepsilondev_yy_crust_mantle(:,:,:,:) = Iepsilondev_yy_crust_mantle(:,:,:,:)  &
                                               + deltat*epsilondev_yy_crust_mantle(:,:,:,:)
-    Iepsilondev_crust_mantle(3,:,:,:,:) = Iepsilondev_crust_mantle(3,:,:,:,:)  &
+    Iepsilondev_xy_crust_mantle(:,:,:,:) = Iepsilondev_xy_crust_mantle(:,:,:,:)  &
                                               + deltat*epsilondev_xy_crust_mantle(:,:,:,:)
-    Iepsilondev_crust_mantle(4,:,:,:,:) = Iepsilondev_crust_mantle(4,:,:,:,:)  &
+    Iepsilondev_xz_crust_mantle(:,:,:,:) = Iepsilondev_xz_crust_mantle(:,:,:,:)  &
                                               + deltat*epsilondev_xz_crust_mantle(:,:,:,:)
-    Iepsilondev_crust_mantle(5,:,:,:,:) = Iepsilondev_crust_mantle(5,:,:,:,:)  &
+    Iepsilondev_yz_crust_mantle(:,:,:,:) = Iepsilondev_yz_crust_mantle(:,:,:,:)  &
                                               + deltat*epsilondev_yz_crust_mantle(:,:,:,:)
 
     Ieps_trace_over_3_crust_mantle(:,:,:,:) = Ieps_trace_over_3_crust_mantle(:,:,:,:) &

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/noise_tomography.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/noise_tomography.f90	2012-03-05 02:50:35 UTC (rev 19724)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/noise_tomography.f90	2012-03-05 06:13:16 UTC (rev 19725)
@@ -516,13 +516,15 @@
 ! by this modification, the efficiency is greatly improved
 ! and now, it should be OK to run NOISE_TOMOGRAPHY on a cluster with global storage
 
-  subroutine noise_read_add_surface_movie(accel,it_index)
+  subroutine noise_read_add_surface_movie(NGLOB,accel,it_index)
 
   use specfem_par
   use specfem_par_crustmantle
   implicit none
 
-  real(kind=CUSTOM_REAL), dimension(NDIM,NGLOB_CRUST_MANTLE),intent(inout) :: accel
+  integer :: NGLOB
+  real(kind=CUSTOM_REAL), dimension(NDIM,NGLOB),intent(inout) :: accel
+
   integer,intent(in) :: it_index
 
   ! local parameters

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/prepare_timerun.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/prepare_timerun.f90	2012-03-05 02:50:35 UTC (rev 19724)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/prepare_timerun.f90	2012-03-05 06:13:16 UTC (rev 19725)
@@ -937,7 +937,11 @@
 
   if (COMPUTE_AND_STORE_STRAIN) then
     if(MOVIE_VOLUME .and. (MOVIE_VOLUME_TYPE == 2 .or. MOVIE_VOLUME_TYPE == 3)) then
-      Iepsilondev_crust_mantle(:,:,:,:,:) = 0._CUSTOM_REAL
+      Iepsilondev_xx_crust_mantle(:,:,:,:) = 0._CUSTOM_REAL
+      Iepsilondev_yy_crust_mantle(:,:,:,:) = 0._CUSTOM_REAL
+      Iepsilondev_xy_crust_mantle(:,:,:,:) = 0._CUSTOM_REAL
+      Iepsilondev_xz_crust_mantle(:,:,:,:) = 0._CUSTOM_REAL
+      Iepsilondev_yz_crust_mantle(:,:,:,:) = 0._CUSTOM_REAL
       Ieps_trace_over_3_crust_mantle(:,:,:,:)=0._CUSTOM_REAL
     endif
   endif

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/read_mesh_databases.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/read_mesh_databases.f90	2012-03-05 02:50:35 UTC (rev 19724)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/read_mesh_databases.f90	2012-03-05 06:13:16 UTC (rev 19725)
@@ -82,6 +82,7 @@
   endif
 
   ! user output
+  call sync_all()
   if( myrank == 0 ) then
     ! elapsed time since beginning of mesh generation
     tCPU = MPI_WTIME() - time_start
@@ -90,6 +91,7 @@
     write(IMAIN,*)
   endif
 
+  
   ! frees temporary allocated arrays
   deallocate(is_on_a_slice_edge_crust_mantle, &
             is_on_a_slice_edge_outer_core, &
@@ -805,8 +807,9 @@
   integer :: max_nibool
   real(kind=CUSTOM_REAL),dimension(:),allocatable :: test_flag
   real(kind=CUSTOM_REAL),dimension(:),allocatable :: test_flag_cc
+  integer,dimension(:),allocatable :: dummy_i
   integer :: i,j,k,ispec,iglob
-
+  
   ! estimates initial maximum ibool array
   max_nibool = npoin2D_max_all_CM_IC * NUMFACES_SHARED &
                + non_zero_nb_msgs_theor_in_cube*npoin2D_cube_from_slices
@@ -849,6 +852,9 @@
   !                      xstore_crust_mantle,ystore_crust_mantle,zstore_crust_mantle, &
   !                      test_flag,filename)
 
+  allocate(dummy_i(NSPEC_CRUST_MANTLE),stat=ier)
+  if( ier /= 0 ) call exit_mpi(myrank,'error allocating dummy_i')
+  
   ! determines neighbor rank for shared faces
   call rmd_get_MPI_interfaces(myrank,NGLOB_CRUST_MANTLE,NSPEC_CRUST_MANTLE, &
                             test_flag,my_neighbours,nibool_neighbours,ibool_neighbours, &
@@ -856,22 +862,26 @@
                             max_nibool,MAX_NEIGHBOURS, &
                             ibool_crust_mantle,&
                             is_on_a_slice_edge_crust_mantle, &
-                            IREGION_CRUST_MANTLE,.false.)
+                            IREGION_CRUST_MANTLE,.false.,dummy_i,INCLUDE_CENTRAL_CUBE)
 
   deallocate(test_flag)
-
+  deallocate(dummy_i)
+  
   ! stores MPI interfaces informations
   allocate(my_neighbours_crust_mantle(num_interfaces_crust_mantle), &
           nibool_interfaces_crust_mantle(num_interfaces_crust_mantle), &
           stat=ier)
   if( ier /= 0 ) call exit_mpi(myrank,'error allocating array my_neighbours_crust_mantle etc.')
-
+  my_neighbours_crust_mantle = -1
+  nibool_interfaces_crust_mantle = 0
+  
   ! copies interfaces arrays
   if( num_interfaces_crust_mantle > 0 ) then
     allocate(ibool_interfaces_crust_mantle(max_nibool_interfaces_crust_mantle,num_interfaces_crust_mantle), &
            stat=ier)
     if( ier /= 0 ) call exit_mpi(myrank,'error allocating array ibool_interfaces_crust_mantle')
-
+    ibool_interfaces_crust_mantle = 0
+    
     ! ranks of neighbour processes
     my_neighbours_crust_mantle(:) = my_neighbours(1:num_interfaces_crust_mantle)
     ! number of global ibool entries on each interface
@@ -893,7 +903,30 @@
   !                      nibool_interfaces_crust_mantle(1),filename)
   !endif
 
+  ! checks addressing
+  call rmd_test_MPI_neighbours(num_interfaces_crust_mantle,my_neighbours_crust_mantle,nibool_interfaces_crust_mantle)
 
+  ! allocates MPI buffers
+  ! crust mantle
+  allocate(buffer_send_vector_crust_mantle(NDIM,max_nibool_interfaces_crust_mantle,num_interfaces_crust_mantle), &
+          buffer_recv_vector_crust_mantle(NDIM,max_nibool_interfaces_crust_mantle,num_interfaces_crust_mantle), &
+          request_send_vector_crust_mantle(num_interfaces_crust_mantle), &
+          request_recv_vector_crust_mantle(num_interfaces_crust_mantle), &
+          stat=ier)
+  if( ier /= 0 ) call exit_mpi(myrank,'error allocating array buffer_send_vector_crust_mantle etc.')
+  if( SIMULATION_TYPE == 3 ) then
+    allocate(b_buffer_send_vector_crust_mantle(NDIM,max_nibool_interfaces_crust_mantle,num_interfaces_crust_mantle), &
+            b_buffer_recv_vector_crust_mantle(NDIM,max_nibool_interfaces_crust_mantle,num_interfaces_crust_mantle), &
+            b_request_send_vector_crust_mantle(num_interfaces_crust_mantle), &
+            b_request_recv_vector_crust_mantle(num_interfaces_crust_mantle), &
+            stat=ier)
+    if( ier /= 0 ) call exit_mpi(myrank,'error allocating array b_buffer_send_vector_crust_mantle etc.')
+  endif
+
+  ! checks with assembly of test fields  
+  call rmd_test_MPI_cm()
+
+  
 ! outer core region
   if( myrank == 0 ) write(IMAIN,*) 'outer core mpi:'
 
@@ -929,6 +962,8 @@
   !                      xstore_outer_core,ystore_outer_core,zstore_outer_core, &
   !                      test_flag,filename)
 
+  allocate(dummy_i(NSPEC_OUTER_CORE),stat=ier)
+  if( ier /= 0 ) call exit_mpi(myrank,'error allocating dummy_i')
 
   ! determines neighbor rank for shared faces
   call rmd_get_MPI_interfaces(myrank,NGLOB_OUTER_CORE,NSPEC_OUTER_CORE, &
@@ -937,22 +972,26 @@
                             max_nibool,MAX_NEIGHBOURS, &
                             ibool_outer_core,&
                             is_on_a_slice_edge_outer_core, &
-                            IREGION_OUTER_CORE,.false.)
+                            IREGION_OUTER_CORE,.false.,dummy_i,INCLUDE_CENTRAL_CUBE)
 
   deallocate(test_flag)
-
+  deallocate(dummy_i)
+  
   ! stores MPI interfaces informations
   allocate(my_neighbours_outer_core(num_interfaces_outer_core), &
           nibool_interfaces_outer_core(num_interfaces_outer_core), &
           stat=ier)
   if( ier /= 0 ) call exit_mpi(myrank,'error allocating array my_neighbours_outer_core etc.')
-
+  my_neighbours_outer_core = -1
+  nibool_interfaces_outer_core = 0
+  
   ! copies interfaces arrays
   if( num_interfaces_outer_core > 0 ) then
     allocate(ibool_interfaces_outer_core(max_nibool_interfaces_outer_core,num_interfaces_outer_core), &
            stat=ier)
     if( ier /= 0 ) call exit_mpi(myrank,'error allocating array ibool_interfaces_outer_core')
-
+    ibool_interfaces_outer_core = 0
+    
     ! ranks of neighbour processes
     my_neighbours_outer_core(:) = my_neighbours(1:num_interfaces_outer_core)
     ! number of global ibool entries on each interface
@@ -974,6 +1013,30 @@
   !                      nibool_interfaces_outer_core(1),filename)
   !endif
 
+  ! checks addressing
+  call rmd_test_MPI_neighbours(num_interfaces_outer_core,my_neighbours_outer_core,nibool_interfaces_outer_core)
+
+  ! allocates MPI buffers
+  ! outer core
+  allocate(buffer_send_scalar_outer_core(max_nibool_interfaces_outer_core,num_interfaces_outer_core), &
+          buffer_recv_scalar_outer_core(max_nibool_interfaces_outer_core,num_interfaces_outer_core), &
+          request_send_scalar_outer_core(num_interfaces_outer_core), &
+          request_recv_scalar_outer_core(num_interfaces_outer_core), &
+          stat=ier)
+  if( ier /= 0 ) call exit_mpi(myrank,'error allocating array buffer_send_vector_outer_core etc.')
+  if( SIMULATION_TYPE == 3 ) then
+    allocate(b_buffer_send_scalar_outer_core(max_nibool_interfaces_outer_core,num_interfaces_outer_core), &
+            b_buffer_recv_scalar_outer_core(max_nibool_interfaces_outer_core,num_interfaces_outer_core), &
+            b_request_send_scalar_outer_core(num_interfaces_outer_core), &
+            b_request_recv_scalar_outer_core(num_interfaces_outer_core), &
+            stat=ier)
+    if( ier /= 0 ) call exit_mpi(myrank,'error allocating array b_buffer_send_vector_outer_core etc.')
+  endif
+
+  ! checks with assembly of test fields  
+  call rmd_test_MPI_oc()
+
+
 ! inner core
   if( myrank == 0 ) write(IMAIN,*) 'inner core mpi:'
 
@@ -986,6 +1049,15 @@
   do ispec=1,NSPEC_INNER_CORE
     ! suppress fictitious elements in central cube
     if(idoubling_inner_core(ispec) == IFLAG_IN_FICTITIOUS_CUBE) cycle
+    
+    ! suppress central cube, will be handled afterwards
+    if( INCLUDE_CENTRAL_CUBE ) then
+      if(idoubling_inner_core(ispec) == IFLAG_MIDDLE_CENTRAL_CUBE .or. &
+        idoubling_inner_core(ispec) == IFLAG_BOTTOM_CENTRAL_CUBE .or. &
+        idoubling_inner_core(ispec) == IFLAG_TOP_CENTRAL_CUBE) cycle
+    endif
+    
+    ! sets flags  
     do k = 1,NGLLZ
       do j = 1,NGLLY
         do i = 1,NGLLX
@@ -1021,29 +1093,53 @@
                         xstore_inner_core,ystore_inner_core,zstore_inner_core, &
                         test_flag,filename)
 
-!  ! gets new interfaces for inner_core without central cube yet
-!  ! determines neighbor rank for shared faces
-!  call rmd_get_MPI_interfaces(myrank,NGLOB_INNER_CORE,NSPEC_INNER_CORE, &
-!                            test_flag,my_neighbours,nibool_neighbours,ibool_neighbours, &
-!                            num_interfaces_inner_core,max_nibool_interfaces_inner_core, &
-!                            max_nibool,MAX_NEIGHBOURS, &
-!                            ibool_inner_core,&
-!                            is_on_a_slice_edge_inner_core, &
-!                            IREGION_INNER_CORE,.false.,idoubling_inner_core)
+  ! in sequential order, for testing purpose
+  do i=0,NPROCTOT_VAL - 1  
+    if( myrank == i ) then
 
+    ! gets new interfaces for inner_core without central cube yet
+    ! determines neighbor rank for shared faces
+    call rmd_get_MPI_interfaces(myrank,NGLOB_INNER_CORE,NSPEC_INNER_CORE, &
+                            test_flag,my_neighbours,nibool_neighbours,ibool_neighbours, &
+                            num_interfaces_inner_core,max_nibool_interfaces_inner_core, &
+                            max_nibool,MAX_NEIGHBOURS, &
+                            ibool_inner_core,&
+                            is_on_a_slice_edge_inner_core, &
+                            IREGION_INNER_CORE,.false.,idoubling_inner_core,INCLUDE_CENTRAL_CUBE)
+
+    endif
+    call sync_all()
+  enddo
+
+  ! intermediate check
+  call rmd_test_MPI_neighbours(num_interfaces_inner_core, &
+                              my_neighbours(1:num_interfaces_inner_core), &
+                              nibool_neighbours(1:num_interfaces_inner_core))
+
+
   ! including central cube
   if(INCLUDE_CENTRAL_CUBE) then
     if( myrank == 0 ) write(IMAIN,*) 'inner core with central cube mpi:'
 
+    ! test_flag is a scalar, not a vector
+    ndim_assemble = 1
+
     allocate(test_flag_cc(NGLOB_INNER_CORE), &
             stat=ier)
     if( ier /= 0 ) call exit_mpi(myrank,'error allocating test_flag_cc inner core')
 
     ! re-sets flag to rank id (+1 to avoid problems with zero rank)
-    test_flag_cc(:) = 0.0
+    test_flag_cc = 0.0
     do ispec=1,NSPEC_INNER_CORE
       ! suppress fictitious elements in central cube
       if(idoubling_inner_core(ispec) == IFLAG_IN_FICTITIOUS_CUBE) cycle
+      
+      ! only takes central cube
+      if(idoubling_inner_core(ispec) /= IFLAG_MIDDLE_CENTRAL_CUBE .and. &
+        idoubling_inner_core(ispec) /= IFLAG_BOTTOM_CENTRAL_CUBE .and. &
+        idoubling_inner_core(ispec) /= IFLAG_TOP_CENTRAL_CUBE) cycle
+
+      ! sets flags    
       do k = 1,NGLLZ
         do j = 1,NGLLY
           do i = 1,NGLLX
@@ -1054,8 +1150,6 @@
       enddo
     enddo
 
-    ! test_flag is a scalar, not a vector
-    ndim_assemble = 1
     ! use central cube buffers to assemble the inner core mass matrix with the central cube
     call assemble_MPI_central_cube_block(ichunk,nb_msgs_theor_in_cube, sender_from_slices_to_cube, &
                  npoin2D_cube_from_slices, buffer_all_cube_from_slices, &
@@ -1068,59 +1162,77 @@
 
 
     ! removes own myrank id (+1)
-    test_flag_cc(:) = test_flag_cc(:) - ( myrank + 1.0)
-    where( test_flag_cc(:) < 0.0 ) test_flag_cc(:) = 0.0
+    test_flag_cc = test_flag_cc - ( myrank + 1.0)
+    where( test_flag_cc < 0.0 ) test_flag_cc = 0.0
 
+    ! debug: saves array
     write(filename,'(a,i6.6)') trim(OUTPUT_FILES)//'/MPI_test_flag_inner_core_B_proc',myrank
     call write_VTK_glob_points(NGLOB_INNER_CORE, &
                         xstore_inner_core,ystore_inner_core,zstore_inner_core, &
                         test_flag_cc,filename)
 
-!    ! adds additional inner core points
-!    call rmd_get_MPI_interfaces(myrank,NGLOB_INNER_CORE,NSPEC_INNER_CORE, &
-!                            test_flag,my_neighbours,nibool_neighbours,ibool_neighbours, &
-!                            num_interfaces_inner_core,max_nibool_interfaces_inner_core, &
-!                            max_nibool,MAX_NEIGHBOURS, &
-!                            ibool_inner_core,&
-!                            is_on_a_slice_edge_inner_core, &
-!                            IREGION_INNER_CORE,.true.,idoubling_inner_core)
+    ! in sequential order, for testing purpose
+    do i=0,NPROCTOT_VAL - 1  
+      if( myrank == i ) then
+        ! adds additional inner core points
+        call rmd_get_MPI_interfaces(myrank,NGLOB_INNER_CORE,NSPEC_INNER_CORE, &
+                            test_flag_cc,my_neighbours,nibool_neighbours,ibool_neighbours, &
+                            num_interfaces_inner_core,max_nibool_interfaces_inner_core, &
+                            max_nibool,MAX_NEIGHBOURS, &
+                            ibool_inner_core,&
+                            is_on_a_slice_edge_inner_core, &
+                            IREGION_INNER_CORE,.true.,idoubling_inner_core,INCLUDE_CENTRAL_CUBE)
+      endif
+      call sync_all()
+    enddo
 
-    ! adds both together
-    test_flag(:) = test_flag(:) + test_flag_cc(:)
+!    ! adds both together
+!    test_flag(:) = test_flag(:) + test_flag_cc(:)
+!
+!    ! debug: saves array
+!    write(filename,'(a,i6.6)') trim(OUTPUT_FILES)//'/MPI_test_flag_inner_core_C_proc',myrank
+!    call write_VTK_glob_points(NGLOB_INNER_CORE, &
+!                              xstore_inner_core,ystore_inner_core,zstore_inner_core, &
+!                              test_flag,filename)
 
     deallocate(test_flag_cc)
 
-    ! debug: saves array
-    write(filename,'(a,i6.6)') trim(OUTPUT_FILES)//'/MPI_test_flag_inner_core_C_proc',myrank
-    call write_VTK_glob_points(NGLOB_INNER_CORE, &
-                              xstore_inner_core,ystore_inner_core,zstore_inner_core, &
-                              test_flag,filename)
-
   endif
 
-  ! gets new interfaces for inner_core without central cube yet
-  ! determines neighbor rank for shared faces
-  call rmd_get_MPI_interfaces(myrank,NGLOB_INNER_CORE,NSPEC_INNER_CORE, &
-                            test_flag,my_neighbours,nibool_neighbours,ibool_neighbours, &
-                            num_interfaces_inner_core,max_nibool_interfaces_inner_core, &
-                            max_nibool,MAX_NEIGHBOURS, &
-                            ibool_inner_core,&
-                            is_on_a_slice_edge_inner_core, &
-                            IREGION_INNER_CORE,.false.,idoubling_inner_core)
-
+!  ! in sequential order, for testing purpose
+!  do i=0,NPROCTOT_VAL - 1  
+!    if( myrank == i ) then
+!      ! gets new interfaces for inner_core without central cube yet
+!      ! determines neighbor rank for shared faces
+!      call rmd_get_MPI_interfaces(myrank,NGLOB_INNER_CORE,NSPEC_INNER_CORE, &
+!                            test_flag,my_neighbours,nibool_neighbours,ibool_neighbours, &
+!                            num_interfaces_inner_core,max_nibool_interfaces_inner_core, &
+!                            max_nibool,MAX_NEIGHBOURS, &
+!                            ibool_inner_core,&
+!                            is_on_a_slice_edge_inner_core, &
+!                            IREGION_INNER_CORE,.false.,idoubling_inner_core,INCLUDE_CENTRAL_CUBE)
+!
+!    endif
+!    call sync_all()
+!  enddo
+  
   deallocate(test_flag)
+  call sync_all()
 
   ! stores MPI interfaces informations
   allocate(my_neighbours_inner_core(num_interfaces_inner_core), &
           nibool_interfaces_inner_core(num_interfaces_inner_core), &
           stat=ier)
   if( ier /= 0 ) call exit_mpi(myrank,'error allocating array my_neighbours_inner_core etc.')
-
+  my_neighbours_inner_core = -1
+  nibool_interfaces_inner_core = 0
+  
   ! copies interfaces arrays
   if( num_interfaces_inner_core > 0 ) then
     allocate(ibool_interfaces_inner_core(max_nibool_interfaces_inner_core,num_interfaces_inner_core), &
            stat=ier)
     if( ier /= 0 ) call exit_mpi(myrank,'error allocating array ibool_interfaces_inner_core')
+    ibool_interfaces_inner_core = 0
 
     ! ranks of neighbour processes
     my_neighbours_inner_core(:) = my_neighbours(1:num_interfaces_inner_core)
@@ -1135,55 +1247,23 @@
   endif
 
   ! debug: saves 1. MPI interface
-  if( myrank == 0 .and. num_interfaces_inner_core >= 1 ) then
-    write(filename,'(a,i6.6)') trim(OUTPUT_FILES)//'/MPI_1_points_inner_core_proc',myrank
-    call write_VTK_data_points(NGLOB_INNER_CORE, &
+  !if( myrank == 4 .or. myrank == 13 ) then
+    do i=1,num_interfaces_inner_core
+      write(filename,'(a,i6.6,a,i2.2)') trim(OUTPUT_FILES)//'/MPI_points_inner_core_proc',myrank, &
+                      '_',my_neighbours_inner_core(i)
+      call write_VTK_data_points(NGLOB_INNER_CORE, &
                         xstore_inner_core,ystore_inner_core,zstore_inner_core, &
-                        ibool_interfaces_inner_core(1:nibool_interfaces_inner_core(1),1), &
-                        nibool_interfaces_inner_core(1),filename)
-    !print*,'saved: ',trim(filename)//'.vtk'
-  endif
-
-  ! synchronizes MPI processes
+                        ibool_interfaces_inner_core(1:nibool_interfaces_inner_core(i),i), &
+                        nibool_interfaces_inner_core(i),filename)
+      !print*,'saved: ',trim(filename)//'.vtk'
+    enddo
+  !endif
   call sync_all()
+  
+  ! checks addressing
+  call rmd_test_MPI_neighbours(num_interfaces_inner_core,my_neighbours_inner_core,nibool_interfaces_inner_core)
 
-  ! frees temporary array
-  deallocate(ibool_neighbours)
-
-
   ! allocates MPI buffers
-  ! crust mantle
-  allocate(buffer_send_vector_crust_mantle(NDIM,max_nibool_interfaces_crust_mantle,num_interfaces_crust_mantle), &
-          buffer_recv_vector_crust_mantle(NDIM,max_nibool_interfaces_crust_mantle,num_interfaces_crust_mantle), &
-          request_send_vector_crust_mantle(num_interfaces_crust_mantle), &
-          request_recv_vector_crust_mantle(num_interfaces_crust_mantle), &
-          stat=ier)
-  if( ier /= 0 ) call exit_mpi(myrank,'error allocating array buffer_send_vector_crust_mantle etc.')
-  if( SIMULATION_TYPE == 3 ) then
-    allocate(b_buffer_send_vector_crust_mantle(NDIM,max_nibool_interfaces_crust_mantle,num_interfaces_crust_mantle), &
-            b_buffer_recv_vector_crust_mantle(NDIM,max_nibool_interfaces_crust_mantle,num_interfaces_crust_mantle), &
-            b_request_send_vector_crust_mantle(num_interfaces_crust_mantle), &
-            b_request_recv_vector_crust_mantle(num_interfaces_crust_mantle), &
-            stat=ier)
-    if( ier /= 0 ) call exit_mpi(myrank,'error allocating array b_buffer_send_vector_crust_mantle etc.')
-  endif
-
-  ! outer core
-  allocate(buffer_send_scalar_outer_core(max_nibool_interfaces_outer_core,num_interfaces_outer_core), &
-          buffer_recv_scalar_outer_core(max_nibool_interfaces_outer_core,num_interfaces_outer_core), &
-          request_send_scalar_outer_core(num_interfaces_outer_core), &
-          request_recv_scalar_outer_core(num_interfaces_outer_core), &
-          stat=ier)
-  if( ier /= 0 ) call exit_mpi(myrank,'error allocating array buffer_send_vector_outer_core etc.')
-  if( SIMULATION_TYPE == 3 ) then
-    allocate(b_buffer_send_scalar_outer_core(max_nibool_interfaces_outer_core,num_interfaces_outer_core), &
-            b_buffer_recv_scalar_outer_core(max_nibool_interfaces_outer_core,num_interfaces_outer_core), &
-            b_request_send_scalar_outer_core(num_interfaces_outer_core), &
-            b_request_recv_scalar_outer_core(num_interfaces_outer_core), &
-            stat=ier)
-    if( ier /= 0 ) call exit_mpi(myrank,'error allocating array b_buffer_send_vector_outer_core etc.')
-  endif
-
   ! inner core
   allocate(buffer_send_vector_inner_core(NDIM,max_nibool_interfaces_inner_core,num_interfaces_inner_core), &
           buffer_recv_vector_inner_core(NDIM,max_nibool_interfaces_inner_core,num_interfaces_inner_core), &
@@ -1200,6 +1280,15 @@
     if( ier /= 0 ) call exit_mpi(myrank,'error allocating array b_buffer_send_vector_inner_core etc.')
   endif
 
+  ! checks with assembly of test fields  
+  call rmd_test_MPI_ic()
+
+  ! synchronizes MPI processes
+  call sync_all()
+
+  ! frees temporary array
+  deallocate(ibool_neighbours)
+
   end subroutine read_mesh_databases_MPIinter
 
 
@@ -1213,7 +1302,7 @@
                                     max_nibool,MAX_NEIGHBOURS, &
                                     ibool,&
                                     is_on_a_slice_edge, &
-                                    IREGION,add_central_cube,idoubling)
+                                    IREGION,add_central_cube,idoubling,INCLUDE_CENTRAL_CUBE)
 
   use constants
 
@@ -1238,8 +1327,10 @@
 
   integer,intent(in) :: IREGION
   logical,intent(in) :: add_central_cube
-  integer,dimension(NSPEC),optional:: idoubling
+  integer,dimension(NSPEC),intent(in) :: idoubling
 
+  logical,intent(in) :: INCLUDE_CENTRAL_CUBE
+  
   ! local parameters
   integer :: ispec,iglob,j,k
   integer :: iface,iedge,icorner
@@ -1249,6 +1340,8 @@
   integer,dimension(NGLOB) :: work_test_flag
   logical,dimension(NSPEC) :: work_ispec_is_outer
 
+  integer,parameter :: MID = (NGLLX+1)/2
+  
   ! initializes
   if( add_central_cube) then
     ! adds points to existing inner_core interfaces
@@ -1292,22 +1385,22 @@
       select case( iface )
       case( 1 )
         ! face I == 1
-        iglob = ibool(1,2,2,ispec)
+        iglob = ibool(1,MID,MID,ispec)
       case( 2 )
         ! face I == NGLLX
-        iglob = ibool(NGLLX,2,2,ispec)
+        iglob = ibool(NGLLX,MID,MID,ispec)
       case( 3 )
         ! face J == 1
-        iglob = ibool(2,1,2,ispec)
+        iglob = ibool(MID,1,MID,ispec)
       case( 4 )
         ! face J == NGLLY
-        iglob = ibool(2,NGLLY,2,ispec)
+        iglob = ibool(MID,NGLLY,MID,ispec)
       case( 5 )
         ! face K == 1
-        iglob = ibool(2,2,1,ispec)
+        iglob = ibool(MID,MID,1,ispec)
       case( 6 )
         ! face K == NGLLZ
-        iglob = ibool(2,2,NGLLZ,ispec)
+        iglob = ibool(MID,MID,NGLLZ,ispec)
       end select
 
       ! checks assembled flag on global point
@@ -1374,24 +1467,25 @@
             end select
 
             ! checks that we take each global point (on edges and corners) only once
-            if( work_test_flag(iglob) <= 0 ) cycle ! continues to next point
-
-            ! increases number of total points on this interface
-            nibool_neighbours(icurrent) = nibool_neighbours(icurrent) + 1
-            if( nibool_neighbours(icurrent) > max_nibool) &
-              call exit_mpi(myrank,'interface face exceeds max_nibool range')
-
-            ! stores interface iglob index
-            ibool_neighbours( nibool_neighbours(icurrent),icurrent ) = iglob
-
-            ! re-sets flag
-            work_test_flag(iglob) = work_test_flag(iglob) - ( rank + 1 )
+            call rmd_add_interface_point(iglob,rank,icurrent, &
+                                        nibool_neighbours,MAX_NEIGHBOURS, &
+                                        ibool_neighbours,max_nibool, &
+                                        work_test_flag,NGLOB,myrank, &
+                                        .true.,add_central_cube)
             ! debug
             if( work_test_flag(iglob) < 0 ) then
-              print*,'error face flag:',myrank,'ispec=',ispec,'rank=',rank
-              print*,'  flag=',work_test_flag(iglob),'iface jk=',iface,j,k
-              call exit_mpi(myrank,'error face flag')
+              if( IREGION == IREGION_INNER_CORE .and. INCLUDE_CENTRAL_CUBE ) then              
+                ! we might have missed an interface point on an edge, just re-set to missing value
+                print*,'warning face flag:',myrank,'ispec=',ispec,'rank=',rank
+                print*,'  flag=',work_test_flag(iglob),'iface jk=',iface,j,k,'missed iglob=',iglob                
+                !work_test_flag(iglob) = 0
+              else
+                print*,'error face flag:',myrank,'ispec=',ispec,'rank=',rank
+                print*,'  flag=',work_test_flag(iglob),'iface jk=',iface,j,k,'iglob=',iglob
+                call exit_mpi(myrank,'error face flag')
+              endif
             endif
+            
           enddo
         enddo
       endif
@@ -1407,40 +1501,40 @@
       select case( iedge )
       case( 1 )
         ! face I == 1, J == 1
-        iglob = ibool(1,1,2,ispec)
+        iglob = ibool(1,1,MID,ispec)
       case( 2 )
         ! face I == 1, J == NGLLY
-        iglob = ibool(1,NGLLY,2,ispec)
+        iglob = ibool(1,NGLLY,MID,ispec)
       case( 3 )
         ! face I == 1, K == 1
-        iglob = ibool(1,2,1,ispec)
+        iglob = ibool(1,MID,1,ispec)
       case( 4 )
         ! face I == 1, K == NGLLZ
-        iglob = ibool(1,2,NGLLZ,ispec)
+        iglob = ibool(1,MID,NGLLZ,ispec)
       case( 5 )
         ! face I == NGLLX, J == 1
-        iglob = ibool(NGLLX,1,2,ispec)
+        iglob = ibool(NGLLX,1,MID,ispec)
       case( 6 )
         ! face I == NGLLX, J == NGLLY
-        iglob = ibool(NGLLX,NGLLY,2,ispec)
+        iglob = ibool(NGLLX,NGLLY,MID,ispec)
       case( 7 )
         ! face I == NGLLX, K == 1
-        iglob = ibool(NGLLX,2,1,ispec)
+        iglob = ibool(NGLLX,MID,1,ispec)
       case( 8 )
         ! face I == NGLLX, K == NGLLZ
-        iglob = ibool(NGLLX,2,NGLLZ,ispec)
+        iglob = ibool(NGLLX,MID,NGLLZ,ispec)
       case( 9 )
         ! face J == 1, K == 1
-        iglob = ibool(2,1,1,ispec)
+        iglob = ibool(MID,1,1,ispec)
       case( 10 )
         ! face J == 1, K == NGLLZ
-        iglob = ibool(2,1,NGLLZ,ispec)
+        iglob = ibool(MID,1,NGLLZ,ispec)
       case( 11 )
         ! face J == NGLLY, K == 1
-        iglob = ibool(2,NGLLY,1,ispec)
+        iglob = ibool(MID,NGLLY,1,ispec)
       case( 12 )
         ! face J == NGLLY, K == NGLLZ
-        iglob = ibool(2,NGLLY,NGLLZ,ispec)
+        iglob = ibool(MID,NGLLY,NGLLZ,ispec)
       end select
 
       ! checks assembled flag on global point
@@ -1524,21 +1618,25 @@
           end select
 
           ! checks that we take each global point (on edges and corners) only once
-          if( work_test_flag(iglob) <= 0 ) cycle ! continues to next point
+          call rmd_add_interface_point(iglob,rank,icurrent, &
+                                        nibool_neighbours,MAX_NEIGHBOURS, &
+                                        ibool_neighbours,max_nibool, &
+                                        work_test_flag,NGLOB,myrank, &
+                                        .true.,add_central_cube)
 
-          ! increases number of total points on this interface
-          nibool_neighbours(icurrent) = nibool_neighbours(icurrent) + 1
-          if( nibool_neighbours(icurrent) > max_nibool) &
-            call exit_mpi(myrank,'interface edge exceeds max_nibool range')
-
-          ! stores interface iglob index
-          ibool_neighbours( nibool_neighbours(icurrent),icurrent ) = iglob
-
-          ! re-sets flag
-          work_test_flag(iglob) = work_test_flag(iglob) - ( rank + 1 )
-
           ! debug
-          if( work_test_flag(iglob) < 0 ) call exit_mpi(myrank,'error edge flag')
+          if( work_test_flag(iglob) < 0 ) then
+            if( IREGION == IREGION_INNER_CORE .and. INCLUDE_CENTRAL_CUBE ) then              
+              ! we might have missed an interface point on an edge, just re-set to missing value
+              print*,'warning edge flag:',myrank,'ispec=',ispec,'rank=',rank
+              print*,'  flag=',work_test_flag(iglob),'iedge jk=',iedge,k,'missed iglob=',iglob                
+              !work_test_flag(iglob) = 0
+            else
+              print*,'error edge flag:',myrank,'ispec=',ispec,'rank=',rank
+              print*,'  flag=',work_test_flag(iglob),'iedge jk=',iedge,k,'iglob=',iglob
+              call exit_mpi(myrank,'error edge flag')
+            endif
+          endif
 
         enddo
       endif
@@ -1628,18 +1726,14 @@
         if( icurrent == 0 ) &
           call exit_mpi(myrank,'could not find current interface for this neighbor, please check my_neighbours')
 
-        ! adds this corner as interface point and removes neighbor flag from face
-        ! increases number of total points on this interface
-        nibool_neighbours(icurrent) = nibool_neighbours(icurrent) + 1
-        if( nibool_neighbours(icurrent) > max_nibool) &
-          call exit_mpi(myrank,'interface corner exceeds max_nibool range')
+        ! adds this corner as interface point and removes neighbor flag from face,
+        ! checks that we take each global point (on edges and corners) only once
+        call rmd_add_interface_point(iglob,rank,icurrent, &
+                                    nibool_neighbours,MAX_NEIGHBOURS, &
+                                    ibool_neighbours,max_nibool, &
+                                    work_test_flag,NGLOB,myrank, &
+                                    .false.,add_central_cube)
 
-        ! stores interface iglob index
-        ibool_neighbours( nibool_neighbours(icurrent),icurrent ) = iglob
-
-        ! re-sets flag
-        work_test_flag(iglob) = work_test_flag(iglob) - ( rank + 1 )
-
         ! debug
         if( work_test_flag(iglob) < 0 ) call exit_mpi(myrank,'error corner flag')
 
@@ -1659,30 +1753,23 @@
   npoin = count( work_ispec_is_outer )
 
   ! debug: user output
-  if( myrank == 0 ) then
-    write(IMAIN,*) '  interfaces : ',iinterface
-    write(IMAIN,*) '  my_neighbours: ',my_neighbours(1:iinterface)
-    write(IMAIN,*) '  nibool_neighbours: ',nibool_neighbours(1:iinterface)
-    write(IMAIN,*) '  test flag min/max: ',minval(work_test_flag),maxval(work_test_flag)
-    write(IMAIN,*) '  outer elements: ',npoin
-    write(IMAIN,*)
+  if( add_central_cube ) then
+    print*, 'rank',myrank,'interfaces : ',iinterface
+    do j=1,iinterface
+      print*, '  my_neighbours: ',my_neighbours(j),nibool_neighbours(j)
+    enddo
+    print*, '  test flag min/max: ',minval(work_test_flag),maxval(work_test_flag)
+    print*, '  outer elements: ',npoin
+    print*
   endif
-  call sync_all()
-
+  
   ! checks if all points were recognized
-  if( maxval(work_test_flag) > 0 ) then
+  if( minval(work_test_flag) < 0 .or. maxval(work_test_flag) > 0 ) then
     print*,'error mpi interface rank: ',myrank
     print*,'  work_test_flag min/max :',minval(work_test_flag),maxval(work_test_flag)
     call exit_mpi(myrank,'error: mpi points remain unrecognized, please check mesh interfaces')
   endif
 
-  ! checks if all points were taken only once
-  if( minval(work_test_flag) < 0 ) then
-    print*,'error mpi interface rank: ',myrank
-    print*,'  work_test_flag min/max :',minval(work_test_flag),maxval(work_test_flag)
-    call exit_mpi(myrank,'error: mpi points counted more than once, please check mesh interfaces')
-  endif
-
   ! sets interfaces infos
   num_interfaces = iinterface
   max_nibool_interfaces = maxval( nibool_neighbours(1:num_interfaces) )
@@ -1698,14 +1785,36 @@
     ! debug: checks if unique set of iglob values
     do j=1,npoin-1
       if( ibool_neighbours(j,iinterface) == ibool_neighbours(j+1,iinterface) ) then
-        print*,'error mpi interface rank:',myrank
-        print*,'  interface: ',my_neighbours(iinterface),'point: ',j,'of',npoin
-        call exit_mpi(myrank,'error: mpi points not unique on interface')
+        if( IREGION == IREGION_INNER_CORE .and. INCLUDE_CENTRAL_CUBE ) then
+          ! missing points might have been counted more than once  
+          if( ibool_neighbours(j,iinterface) > 0 ) then
+            print*,'warning mpi interface rank:',myrank
+            print*,'  interface: ',my_neighbours(iinterface),'point: ',j,'of',npoin,'iglob=',ibool_neighbours(j,iinterface)
+            ! decrease number of points          
+            nibool_neighbours(iinterface) = nibool_neighbours(iinterface) - 1
+            if( nibool_neighbours(iinterface) <= 0 ) then
+              print*,'error zero mpi interface rank:',myrank,'interface=',my_neighbours(iinterface)
+              call exit_mpi(myrank,'error: zero mpi points on interface')
+            endif
+            ! shift values
+            do k = j+1,npoin-1
+              ii = ibool_neighbours(k+1,iinterface)
+              ibool_neighbours(k,iinterface) = ii
+            enddo
+            ! re-sets values  
+            ibool_neighbours(npoin,iinterface) = 0
+            npoin = nibool_neighbours(iinterface)
+            max_nibool_interfaces = maxval( nibool_neighbours(1:num_interfaces) )
+          endif
+        else
+          print*,'error mpi interface rank:',myrank
+          print*,'  interface: ',my_neighbours(iinterface),'point: ',j,'of',npoin,'iglob=',ibool_neighbours(j,iinterface)
+          call exit_mpi(myrank,'error: mpi points not unique on interface')
+        endif
       endif
     enddo
   enddo
 
-
   ! re-sets flags for outer elements
   is_on_a_slice_edge(:) = work_ispec_is_outer(:)
 
@@ -1715,6 +1824,487 @@
 !-------------------------------------------------------------------------------------------------
 !
 
+  subroutine rmd_add_interface_point(iglob,rank,icurrent, &
+                                    nibool_neighbours,MAX_NEIGHBOURS, &
+                                    ibool_neighbours,max_nibool, &
+                                    work_test_flag,NGLOB,myrank, &
+                                    is_face_edge,add_central_cube)
+
+! adds the global point iglob to MPI interface number icurrent (shared with
+! process 'rank') unless it is already stored there, and removes this
+! neighbour's (rank+1) contribution from the work_test_flag counter.
+!
+! arguments:
+!   iglob             - global point index to add
+!   rank              - neighbour process rank of this interface
+!   icurrent          - interface index into the neighbour arrays
+!   nibool_neighbours - number of points stored per interface (updated here)
+!   ibool_neighbours  - global point indices per interface (updated here)
+!   work_test_flag    - per-point sum of (rank+1) contributions left to process
+!   is_face_edge      - point lies on a face/edge (tolerant flag handling)
+!   add_central_cube  - central-cube pass for the inner core: tolerates points
+!                       already added by a previous (non-cube) pass
+
+  use constants
+
+  implicit none
+
+  integer,intent(in) :: iglob,rank,icurrent
+  integer,intent(in) :: myrank
+  
+  integer,intent(in) :: MAX_NEIGHBOURS,max_nibool
+  integer, dimension(MAX_NEIGHBOURS),intent(inout) :: nibool_neighbours
+  integer, dimension(max_nibool,MAX_NEIGHBOURS),intent(inout) :: ibool_neighbours
+
+  integer,intent(in) :: NGLOB
+  ! NOTE(review): no intent attribute given; the array is updated in place
+  ! (effectively intent(inout)) - consider declaring that explicitly
+  integer,dimension(NGLOB) :: work_test_flag
+  
+  logical,intent(in) :: is_face_edge,add_central_cube
+  
+  ! local parameters
+  integer :: i
+  logical :: is_done
+
+  ! let's check and be sure for central cube
+  !if( work_test_flag(iglob) <= 0 ) cycle ! continues to next point
+
+  ! checks that we take each global point (on edges and corners) only once
+  ! (linear search over the points already stored on this interface)
+  is_done = .false.
+  do i=1,nibool_neighbours(icurrent)
+    if( ibool_neighbours(i,icurrent) == iglob ) then
+      is_done = .true.
+      exit
+    endif
+  enddo
+  
+  ! checks if anything to do  
+  if( is_done ) then
+    ! special handling for central cube: removes rank if already added in inner core
+    if( add_central_cube ) then
+      if( is_face_edge .and. work_test_flag(iglob) < (rank + 1) ) then
+        ! re-sets if we missed this rank number
+        work_test_flag(iglob) = work_test_flag(iglob) + (rank + 1)
+      endif
+      ! re-sets flag
+      work_test_flag(iglob) = work_test_flag(iglob) - ( rank + 1 )      
+      if( is_face_edge .and. work_test_flag(iglob) < 0 ) then
+        ! re-sets to zero if we missed this rank number
+        if( work_test_flag(iglob) == - (rank + 1 ) ) work_test_flag(iglob) = 0
+      endif      
+    endif
+    return
+  endif
+
+  ! checks if flag was set correctly
+  if( work_test_flag(iglob) <= 0 ) then
+    ! we might have missed an interface point on an edge, just re-set to missing value
+    ! (warning only - the point is still added below)
+    print*,'warning flag:',myrank,'rank=',rank,'interface=',icurrent
+    print*,'  flag=',work_test_flag(iglob),'missed iglob=',iglob                
+  endif
+  ! we might have missed an interface point on an edge, just re-set to missing value
+  if( is_face_edge ) then
+    if( work_test_flag(iglob) < (rank + 1) ) then
+      ! re-sets if we missed this rank number
+      work_test_flag(iglob) = work_test_flag(iglob) + (rank + 1)
+    endif
+  endif
+  
+  ! adds point
+  ! increases number of total points on this interface
+  nibool_neighbours(icurrent) = nibool_neighbours(icurrent) + 1
+  if( nibool_neighbours(icurrent) > max_nibool) &
+      call exit_mpi(myrank,'interface face exceeds max_nibool range')
+
+  ! stores interface iglob index
+  ibool_neighbours( nibool_neighbours(icurrent),icurrent ) = iglob
+
+  ! re-sets flag
+  work_test_flag(iglob) = work_test_flag(iglob) - ( rank + 1 )
+
+  ! checks
+  if( is_face_edge .and. work_test_flag(iglob) < 0 ) then
+    ! re-sets to zero if we missed this rank number
+    if( work_test_flag(iglob) == - (rank + 1 ) ) work_test_flag(iglob) = 0
+  endif
+
+  end subroutine rmd_add_interface_point
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  subroutine rmd_test_MPI_neighbours(num_interfaces,my_neighbours,nibool_interfaces)
+
+! checks that MPI interface addressing is symmetric: for every interface that
+! process iproc declares with neighbour ineighbour, the neighbour must declare
+! an interface back to iproc with the same number of shared points.
+! collective routine: must be called by all processes; aborts on inconsistency.
+
+  use specfem_par
+  implicit none
+
+  include 'mpif.h'
+
+  ! interface description of the calling process
+  integer,intent(in) :: num_interfaces
+  integer,dimension(num_interfaces),intent(in) :: my_neighbours,nibool_interfaces
+
+  ! local parameters
+  integer,dimension(:),allocatable :: dummy_i
+  integer,dimension(:,:),allocatable :: test_interfaces
+  integer,dimension(:,:),allocatable :: test_interfaces_nibool
+  integer :: msg_status(MPI_STATUS_SIZE)
+  integer :: ineighbour,iproc,inum,i,j,ier,ipoints,max_num
+  logical :: is_okay
+
+  ! checks neighbors
+  ! gets maximum number of interfaces over all processes
+  ! (reduction result is significant on the master process only, which is
+  !  the only one that uses max_num below)
+  call MPI_REDUCE(num_interfaces,max_num,1,MPI_INTEGER,MPI_MAX,0,MPI_COMM_WORLD,ier)
+
+  ! master gathers infos
+  if( myrank == 0 ) then
+    ! neighbour ranks per process (second index is the process rank)
+    allocate(test_interfaces(max_num,0:NPROCTOT_VAL-1),stat=ier)
+    if( ier /= 0 ) call exit_mpi(myrank,'error allocating test_interfaces')
+    test_interfaces = -1
+
+    ! number of shared points per interface and process
+    allocate(test_interfaces_nibool(max_num,0:NPROCTOT_VAL-1),stat=ier)
+    if( ier /= 0 ) call exit_mpi(myrank,'error allocating test_interfaces_nibool')
+    test_interfaces_nibool = 0
+
+    ! used to store number of interfaces per proc
+    allocate(dummy_i(0:NPROCTOT_VAL-1),stat=ier)
+    if( ier /= 0 ) call exit_mpi(myrank,'error allocating dummy_i for test interfaces')
+    dummy_i = 0
+
+    ! sets infos for master process
+    test_interfaces(1:num_interfaces,0) = my_neighbours(1:num_interfaces)
+    test_interfaces_nibool(1:num_interfaces,0) = nibool_interfaces(1:num_interfaces)
+    dummy_i(0) = num_interfaces
+
+    ! collects from other processes
+    do iproc=1,NPROCTOT_VAL-1
+      ! gets number of interfaces
+      call MPI_RECV(inum,1,MPI_INTEGER,iproc,itag,MPI_COMM_WORLD,msg_status,ier)
+      dummy_i(iproc) = inum
+      if( inum > 0 ) then
+        call MPI_RECV(test_interfaces(1:inum,iproc),inum, &
+                      MPI_INTEGER,iproc,itag,MPI_COMM_WORLD,msg_status,ier)
+        call MPI_RECV(test_interfaces_nibool(1:inum,iproc),inum, &
+                      MPI_INTEGER,iproc,itag,MPI_COMM_WORLD,msg_status,ier)
+      endif
+    enddo
+  else
+    ! sends infos to master process
+    call MPI_SEND(num_interfaces,1,MPI_INTEGER,0,itag,MPI_COMM_WORLD,ier)
+    if( num_interfaces > 0 ) then
+      call MPI_SEND(my_neighbours(1:num_interfaces),num_interfaces, &
+                    MPI_INTEGER,0,itag,MPI_COMM_WORLD,ier)
+      call MPI_SEND(nibool_interfaces(1:num_interfaces),num_interfaces, &
+                    MPI_INTEGER,0,itag,MPI_COMM_WORLD,ier)
+    endif
+  endif
+  call sync_all()
+
+  ! master checks if addressing is okay
+  if( myrank == 0 ) then
+    do iproc=0,NPROCTOT_VAL-1
+      do i=1,dummy_i(iproc)
+        ! neighbour rank must be a valid process rank
+        ineighbour = test_interfaces(i,iproc)
+        if( ineighbour < 0 .or. ineighbour > NPROCTOT_VAL-1 ) then
+          print*,'error neighbour:',iproc,ineighbour
+          call exit_mpi(myrank,'error ineighbour')
+        endif
+        ! every declared interface must carry at least one point
+        ipoints = test_interfaces_nibool(i,iproc)
+        if( ipoints <= 0 ) then
+          print*,'error neighbour points:',iproc,ipoints
+          call exit_mpi(myrank,'error ineighbour points')
+        endif
+        ! looks up corresponding entry in the neighbour's interface list
+        is_okay = .false.
+        do j=1,dummy_i(ineighbour)
+          if( test_interfaces(j,ineighbour) == iproc ) then
+            ! checks if same number of interface points with this neighbour
+            if( test_interfaces_nibool(j,ineighbour) == ipoints ) then
+              is_okay = .true.
+            else
+              print*,'error neighbour points:',iproc,ipoints,'ineighbour found points: ', &
+                ineighbour,test_interfaces_nibool(j,ineighbour)
+              call exit_mpi(myrank,'error ineighbour points differ')
+            endif
+            exit
+          endif
+        enddo
+        if( .not. is_okay ) then
+          print*,'error neighbour:',iproc,'ineighbour not found: ',ineighbour
+          print*,'iproc ',iproc,' interfaces:'
+          print*,test_interfaces(1:dummy_i(iproc),iproc)
+          print*,'ineighbour ',ineighbour,' interfaces:'
+          print*,test_interfaces(1:dummy_i(ineighbour),ineighbour)
+          call exit_mpi(myrank,'error ineighbour not found')
+        endif
+      enddo
+    enddo
+
+    ! user output
+    write(IMAIN,*) '  mpi addressing maximum interfaces:',maxval(dummy_i)
+    write(IMAIN,*) '  mpi addressing : all interfaces okay'
+    write(IMAIN,*)
+
+    ! frees all temporary arrays (test_interfaces_nibool was previously leaked)
+    deallocate(dummy_i)
+    deallocate(test_interfaces)
+    deallocate(test_interfaces_nibool)
+  endif
+  call sync_all()
+
+  end subroutine rmd_test_MPI_neighbours
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  subroutine rmd_test_MPI_cm()
+
+! checks the crust-mantle MPI interface setup by assembling a test field:
+! first counts total and unique interface points defined by the interface
+! arrays, then assembles a rank-flag field across partitions and verifies
+! that the number of points receiving MPI contributions matches the number
+! of unique interface points.
+! collective routine: must be called by all processes; aborts on mismatch.
+
+  use specfem_par
+  use specfem_par_crustmantle
+  use specfem_par_outercore
+  use specfem_par_innercore
+  implicit none
+
+  include 'mpif.h'
+
+  ! local parameters
+  real(kind=CUSTOM_REAL),dimension(:,:),allocatable :: test_flag_vector
+  integer :: i,j,iglob,ier
+  integer :: inum,icount
+
+  ! crust mantle
+  allocate(test_flag_vector(NDIM,NGLOB_CRUST_MANTLE),stat=ier)
+  ! uses exit_mpi() rather than a plain stop so all MPI processes terminate
+  ! (consistent with the error handling elsewhere in this file)
+  if( ier /= 0 ) call exit_mpi(myrank,'error allocating array test_flag etc.')
+
+  ! points defined by interfaces: flags each interface point once
+  test_flag_vector = 0.0
+  do i=1,num_interfaces_crust_mantle
+    do j=1,nibool_interfaces_crust_mantle(i)
+      iglob = ibool_interfaces_crust_mantle(j,i)
+      test_flag_vector(1,iglob) = 1.0_CUSTOM_REAL
+    enddo
+  enddo
+  ! total number of interface entries (points on several interfaces counted repeatedly)
+  i = sum(nibool_interfaces_crust_mantle)
+  call MPI_REDUCE(i,inum,1,MPI_INTEGER,MPI_SUM,0,MPI_COMM_WORLD,ier)
+  ! number of unique interface points (each flagged global point counted once)
+  i = nint( sum(test_flag_vector) )
+  call MPI_REDUCE(i,icount,1,MPI_INTEGER,MPI_SUM,0,MPI_COMM_WORLD,ier)
+  if( myrank == 0 ) then
+    write(IMAIN,*) '  total MPI interface points : ',inum
+    write(IMAIN,*) '  unique MPI interface points: ',icount
+  endif
+
+  ! initializes for assembly: each process contributes (myrank + 1)
+  test_flag_vector = myrank + 1.0_CUSTOM_REAL
+  call sync_all()
+
+  ! adds contributions from different partitions to flag arrays
+  ! custom_real arrays
+  ! send
+  call assemble_MPI_vector_ext_mesh_s(NPROCTOT_VAL,NGLOB_CRUST_MANTLE, &
+                      test_flag_vector, &
+                      buffer_send_vector_crust_mantle,buffer_recv_vector_crust_mantle, &
+                      num_interfaces_crust_mantle,max_nibool_interfaces_crust_mantle, &
+                      nibool_interfaces_crust_mantle,ibool_interfaces_crust_mantle,&
+                      my_neighbours_crust_mantle, &
+                      request_send_vector_crust_mantle,request_recv_vector_crust_mantle)
+  ! receive
+  call assemble_MPI_vector_ext_mesh_w(NPROCTOT_VAL,NGLOB_CRUST_MANTLE, &
+                              test_flag_vector, &
+                              buffer_recv_vector_crust_mantle,num_interfaces_crust_mantle,&
+                              max_nibool_interfaces_crust_mantle, &
+                              nibool_interfaces_crust_mantle,ibool_interfaces_crust_mantle, &
+                              request_send_vector_crust_mantle,request_recv_vector_crust_mantle)
+  call sync_all()
+
+  ! checks number of interface points
+  i = 0
+  do iglob=1,NGLOB_CRUST_MANTLE
+    ! only counts flags with MPI contributions: assembled value exceeds the
+    ! process' own contribution myrank+1 (0.5 margin guards against round-off)
+    if( test_flag_vector(1,iglob) > myrank+1.0+0.5 ) i = i + 1
+  enddo
+  deallocate(test_flag_vector)
+  call MPI_REDUCE(i,inum,1,MPI_INTEGER,MPI_SUM,0,MPI_COMM_WORLD,ier)
+
+  ! points defined by interfaces
+  if( myrank == 0 ) then
+    write(IMAIN,*) '  total assembled MPI interface points:',inum
+    write(IMAIN,*)
+
+    ! checks that assembled contributions cover exactly the unique interface points
+    if( inum /= icount ) then
+      print*,'error crust mantle : total mpi points:',myrank,'total: ',inum,icount
+      call exit_mpi(myrank,'error MPI assembly crust mantle')
+    endif
+  endif
+
+  call sync_all()
+
+  end subroutine rmd_test_MPI_cm
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  subroutine rmd_test_MPI_oc()
+
+! test routine: checks the MPI assembly of a scalar array on the outer core
+! interfaces. every process flags its interface points, assembles the array
+! across partitions, and rank 0 verifies that the number of points which
+! received MPI contributions equals the number of unique interface points.
+
+  use specfem_par
+  use specfem_par_crustmantle
+  use specfem_par_outercore
+  use specfem_par_innercore
+  implicit none
+
+  include 'mpif.h'
+  
+  ! local parameters
+  real(kind=CUSTOM_REAL),dimension(:),allocatable :: test_flag
+  integer :: i,j,iglob,ier
+  integer :: inum,icount
+
+  ! outer core
+  allocate(test_flag(NGLOB_OUTER_CORE),stat=ier)
+  if( ier /= 0 ) stop 'error allocating array test_flag etc.'
+  
+  ! points defined by interfaces
+  ! flags every global point lying on an MPI interface; a point shared by
+  ! several interfaces is flagged only once
+  test_flag = 0.0
+  do i=1,num_interfaces_outer_core
+    do j=1,nibool_interfaces_outer_core(i)
+      iglob = ibool_interfaces_outer_core(j,i)
+      test_flag(iglob) = 1.0_CUSTOM_REAL
+    enddo
+  enddo
+  ! total count (with multiplicity) versus unique count of interface points,
+  ! both gathered on rank 0
+  i = sum(nibool_interfaces_outer_core)
+  call MPI_REDUCE(i,inum,1,MPI_INTEGER,MPI_SUM,0,MPI_COMM_WORLD,ier)      
+  i = nint( sum(test_flag) )
+  call MPI_REDUCE(i,icount,1,MPI_INTEGER,MPI_SUM,0,MPI_COMM_WORLD,ier)      
+  if( myrank == 0 ) then
+    write(IMAIN,*) '  total MPI interface points : ',inum
+    write(IMAIN,*) '  unique MPI interface points: ',icount
+  endif  
+  
+  ! initializes for assembly
+  ! each process fills the array with its own (rank+1) value so that
+  ! contributions added by neighbouring partitions become detectable
+  test_flag = myrank + 1.0_CUSTOM_REAL
+  call sync_all()
+
+  ! adds contributions from different partitions to flag arrays
+  ! custom_real arrays
+  ! send
+  call assemble_MPI_scalar_ext_mesh_s(NPROCTOT_VAL,NGLOB_OUTER_CORE, &
+                                test_flag, &
+                                buffer_send_scalar_outer_core,buffer_recv_scalar_outer_core, &
+                                num_interfaces_outer_core,max_nibool_interfaces_outer_core, &
+                                nibool_interfaces_outer_core,ibool_interfaces_outer_core,&
+                                my_neighbours_outer_core, &
+                                request_send_scalar_outer_core,request_recv_scalar_outer_core)
+  ! receive
+  call assemble_MPI_scalar_ext_mesh_w(NPROCTOT_VAL,NGLOB_OUTER_CORE, &
+                                test_flag, &
+                                buffer_recv_scalar_outer_core,num_interfaces_outer_core,&
+                                max_nibool_interfaces_outer_core, &
+                                nibool_interfaces_outer_core,ibool_interfaces_outer_core, &
+                                request_send_scalar_outer_core,request_recv_scalar_outer_core)
+  call sync_all()
+  
+  ! checks number of interface points
+  i = 0
+  do iglob=1,NGLOB_OUTER_CORE
+    ! only counts flags with MPI contributions
+    ! (an assembled point exceeds its own rank+1 value once any neighbour
+    ! contribution, itself at least 1, has been added; 0.5 guards roundoff)
+    if( test_flag(iglob) > myrank+1.0+0.5 ) i = i + 1
+  enddo
+  call MPI_REDUCE(i,inum,1,MPI_INTEGER,MPI_SUM,0,MPI_COMM_WORLD,ier)    
+  deallocate(test_flag)
+
+  ! output  
+  if( myrank == 0 ) then
+    write(IMAIN,*) '  total assembled MPI interface points:',inum
+    write(IMAIN,*)
+
+    ! checks
+    ! note: icount is only meaningful here on rank 0, the root of the
+    ! MPI_REDUCE above
+    if( inum /= icount ) then
+      print*,'error outer core : total mpi points:',myrank,'total: ',inum,icount
+      call exit_mpi(myrank,'error MPI assembly outer_core')
+    endif
+  endif
+  
+  call sync_all()  
+
+  end subroutine rmd_test_MPI_oc
+
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  subroutine rmd_test_MPI_ic()
+
+! test routine: checks the MPI assembly of a vector array on the inner core
+! interfaces. every process flags its interface points, assembles the array
+! across partitions, and rank 0 verifies that the number of points which
+! received MPI contributions equals the number of unique interface points.
+
+  use specfem_par
+  use specfem_par_crustmantle
+  use specfem_par_outercore
+  use specfem_par_innercore
+  implicit none
+
+  include 'mpif.h'
+  
+  ! local parameters
+  real(kind=CUSTOM_REAL),dimension(:,:),allocatable :: test_flag_vector  
+  integer :: i,j,iglob,ier
+  integer :: inum,icount
+
+  ! inner core
+  allocate(test_flag_vector(NDIM,NGLOB_INNER_CORE),stat=ier)
+  if( ier /= 0 ) stop 'error allocating array test_flag etc.'
+
+  ! points defined by interfaces
+  ! flags (first component only) every global point lying on an MPI interface;
+  ! a point shared by several interfaces is flagged only once
+  test_flag_vector = 0.0
+  do i=1,num_interfaces_inner_core
+    do j=1,nibool_interfaces_inner_core(i)
+      iglob = ibool_interfaces_inner_core(j,i)
+      test_flag_vector(1,iglob) = 1.0_CUSTOM_REAL
+    enddo
+  enddo
+  ! total count (with multiplicity) versus unique count of interface points,
+  ! both gathered on rank 0
+  i = sum(nibool_interfaces_inner_core)
+  call MPI_REDUCE(i,inum,1,MPI_INTEGER,MPI_SUM,0,MPI_COMM_WORLD,ier)      
+  i = nint( sum(test_flag_vector) )
+  call MPI_REDUCE(i,icount,1,MPI_INTEGER,MPI_SUM,0,MPI_COMM_WORLD,ier)      
+  if( myrank == 0 ) then
+    write(IMAIN,*) '  total MPI interface points : ',inum
+    write(IMAIN,*) '  unique MPI interface points: ',icount    
+  endif  
+  
+  ! initializes for assembly
+  ! each process fills the array with its own (rank+1) value so that
+  ! contributions added by neighbouring partitions become detectable
+  test_flag_vector = myrank + 1.0_CUSTOM_REAL
+  call sync_all()
+
+  ! adds contributions from different partitions to flag arrays
+  ! custom_real arrays
+  ! send
+  call assemble_MPI_vector_ext_mesh_s(NPROCTOT_VAL,NGLOB_INNER_CORE, &
+                      test_flag_vector, &
+                      buffer_send_vector_inner_core,buffer_recv_vector_inner_core, &
+                      num_interfaces_inner_core,max_nibool_interfaces_inner_core, &
+                      nibool_interfaces_inner_core,ibool_interfaces_inner_core,&
+                      my_neighbours_inner_core, &
+                      request_send_vector_inner_core,request_recv_vector_inner_core)
+  ! receive
+  call assemble_MPI_vector_ext_mesh_w(NPROCTOT_VAL,NGLOB_INNER_CORE, &
+                              test_flag_vector, &
+                              buffer_recv_vector_inner_core,num_interfaces_inner_core,&
+                              max_nibool_interfaces_inner_core, &
+                              nibool_interfaces_inner_core,ibool_interfaces_inner_core, &
+                              request_send_vector_inner_core,request_recv_vector_inner_core)
+  call sync_all()
+  
+  ! checks number of interface points
+  i = 0
+  do iglob=1,NGLOB_INNER_CORE
+    ! only counts flags with MPI contributions
+    ! (an assembled point exceeds its own rank+1 value once any neighbour
+    ! contribution, itself at least 1, has been added; 0.5 guards roundoff)
+    if( test_flag_vector(1,iglob) > myrank+1.0+0.5 ) i = i + 1
+  enddo
+  deallocate(test_flag_vector)
+  call MPI_REDUCE(i,inum,1,MPI_INTEGER,MPI_SUM,0,MPI_COMM_WORLD,ier)    
+
+  if( myrank == 0 ) then
+    write(IMAIN,*) '  total assembled MPI interface points:',inum
+    write(IMAIN,*)
+
+    ! checks
+    ! note: icount is only meaningful here on rank 0, the root of the
+    ! MPI_REDUCE above
+    if( inum /= icount ) then
+      print*,'error inner core : total mpi points:',myrank,'total: ',inum,icount
+      call exit_mpi(myrank,'error MPI assembly inner core')
+    endif
+  endif
+  
+  call sync_all()  
+
+  end subroutine rmd_test_MPI_ic
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
   subroutine read_mesh_databases_InnerOuter()
 
 ! sets up inner/outer elements for non-blocking MPI communication

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/setup_sources_receivers.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/setup_sources_receivers.f90	2012-03-05 02:50:35 UTC (rev 19724)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/setup_sources_receivers.f90	2012-03-05 06:13:16 UTC (rev 19725)
@@ -456,14 +456,16 @@
     if( ier /= 0 ) call exit_MPI(myrank,'error allocating sourcearrays')
 
     ! stores source arrays
-    call setup_sources_receivers_srcarr(NSOURCES,myrank, &
-                      ispec_selected_source,islice_selected_source, &
-                      xi_source,eta_source,gamma_source, &
-                      Mxx,Myy,Mzz,Mxy,Mxz,Myz, &
-                      xix_crust_mantle,xiy_crust_mantle,xiz_crust_mantle, &
-                      etax_crust_mantle,etay_crust_mantle,etaz_crust_mantle, &
-                      gammax_crust_mantle,gammay_crust_mantle,gammaz_crust_mantle, &
-                      xigll,yigll,zigll,sourcearrays)
+    call setup_sources_receivers_srcarr()
+!                      NSOURCES,myrank, &
+!                      ispec_selected_source,islice_selected_source, &
+!                      xi_source,eta_source,gamma_source, &
+!                      Mxx,Myy,Mzz,Mxy,Mxz,Myz, &
+!                      xix_crust_mantle,xiy_crust_mantle,xiz_crust_mantle, &
+!                      etax_crust_mantle,etay_crust_mantle,etaz_crust_mantle, &
+!                      gammax_crust_mantle,gammay_crust_mantle,gammaz_crust_mantle, &
+!                      xigll,yigll,zigll,sourcearrays)
+
   endif
 
   ! adjoint source arrays

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/specfem3D_par.F90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/specfem3D_par.F90	2012-03-05 02:50:35 UTC (rev 19724)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/specfem3D_par.F90	2012-03-05 06:13:16 UTC (rev 19725)
@@ -817,8 +817,13 @@
   real(kind=CUSTOM_REAL), dimension(:,:,:), allocatable :: nu_3dmovie
   logical, dimension(NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE_STRAIN_ONLY) :: mask_3dmovie
 
-  real(kind=CUSTOM_REAL), dimension(5,NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE_STR_OR_ATT) :: &
-    Iepsilondev_crust_mantle
+!  real(kind=CUSTOM_REAL), dimension(5,NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE_STR_OR_ATT) :: &
+!    Iepsilondev_crust_mantle
+  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE_STR_OR_ATT) :: &
+    Iepsilondev_xx_crust_mantle,Iepsilondev_yy_crust_mantle,Iepsilondev_xy_crust_mantle, &
+    Iepsilondev_xz_crust_mantle,Iepsilondev_yz_crust_mantle
+
+
   real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE_STRAIN_ONLY) :: &
     Ieps_trace_over_3_crust_mantle
 

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/write_movie_output.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/write_movie_output.f90	2012-03-05 02:50:35 UTC (rev 19724)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/write_movie_output.f90	2012-03-05 06:13:16 UTC (rev 19725)
@@ -95,7 +95,9 @@
         ! output the Time Integral of Strain, or \mu*TIS
         call  write_movie_volume_strains(myrank,npoints_3dmovie, &
                     LOCAL_PATH,MOVIE_VOLUME_TYPE,MOVIE_COARSE, &
-                    it,Ieps_trace_over_3_crust_mantle,Iepsilondev_crust_mantle, &
+                    it,Ieps_trace_over_3_crust_mantle, &
+                    Iepsilondev_xx_crust_mantle,Iepsilondev_yy_crust_mantle,Iepsilondev_xy_crust_mantle, &
+                    Iepsilondev_xz_crust_mantle,Iepsilondev_yz_crust_mantle, &
                     muvstore_crust_mantle_3dmovie, &
                     mask_3dmovie,nu_3dmovie)
 

Modified: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/write_movie_volume.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/write_movie_volume.f90	2012-03-05 02:50:35 UTC (rev 19724)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/specfem3D/write_movie_volume.f90	2012-03-05 06:13:16 UTC (rev 19725)
@@ -53,7 +53,7 @@
   integer :: npoints_3dmovie,nspecel_3dmovie
   integer, dimension(NGLOB_CRUST_MANTLE) :: num_ibool_3dmovie
   logical, dimension(NGLOB_CRUST_MANTLE) :: mask_ibool_3dmovie
-  logical, dimension(NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE) :: mask_3dmovie
+  logical, dimension(NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE_STRAIN_ONLY) :: mask_3dmovie
 
 ! variables
   integer :: ipoints_3dmovie,ispecel_3dmovie,ispec,iglob,i,j,k,NIT
@@ -125,9 +125,9 @@
   integer :: npoints_3dmovie
   integer, dimension(NGLOB_CRUST_MANTLE) :: num_ibool_3dmovie
   real(kind=CUSTOM_REAL), dimension(NGLOB_CRUST_MANTLE) :: xstore_crust_mantle,ystore_crust_mantle,zstore_crust_mantle
-  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE) :: muvstore_crust_mantle_3dmovie
+  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE_STR_OR_ATT) :: muvstore_crust_mantle_3dmovie
   character(len=150) :: prname
-  logical, dimension(NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE) :: mask_3dmovie
+  logical, dimension(NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE_STRAIN_ONLY) :: mask_3dmovie
   logical, dimension(NGLOB_CRUST_MANTLE) :: mask_ibool_3dmovie
   logical :: MOVIE_COARSE
   integer, dimension(NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE) :: ibool_crust_mantle
@@ -255,7 +255,8 @@
 
 ! ---------------------------------------------
 
-  subroutine write_movie_volume_strains(myrank,npoints_3dmovie,LOCAL_PATH,MOVIE_VOLUME_TYPE,MOVIE_COARSE, &
+  subroutine write_movie_volume_strains(myrank,npoints_3dmovie, &
+                                        LOCAL_PATH,MOVIE_VOLUME_TYPE,MOVIE_COARSE, &
                                         it,eps_trace_over_3_crust_mantle, &
                                         epsilondev_xx_crust_mantle,epsilondev_yy_crust_mantle,epsilondev_xy_crust_mantle, &
                                         epsilondev_xz_crust_mantle,epsilondev_yz_crust_mantle, &
@@ -270,15 +271,15 @@
 
   ! input
   integer :: myrank,npoints_3dmovie,MOVIE_VOLUME_TYPE,it
-  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE) :: eps_trace_over_3_crust_mantle
+  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE_STRAIN_ONLY) :: eps_trace_over_3_crust_mantle
 
 !  real(kind=CUSTOM_REAL), dimension(5,NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE) :: epsilondev_crust_mantle
-  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE) :: &
+  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE_STR_OR_ATT) :: &
     epsilondev_xx_crust_mantle,epsilondev_yy_crust_mantle,epsilondev_xy_crust_mantle, &
     epsilondev_xz_crust_mantle,epsilondev_yz_crust_mantle
 
-  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE) :: muvstore_crust_mantle_3dmovie
-  logical, dimension(NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE) :: mask_3dmovie
+  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE_STR_OR_ATT) :: muvstore_crust_mantle_3dmovie
+  logical, dimension(NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE_STRAIN_ONLY) :: mask_3dmovie
   logical :: MOVIE_COARSE
   real(kind=CUSTOM_REAL), dimension(3,3,npoints_3dmovie) :: nu_3dmovie
   character(len=150) LOCAL_PATH,outputname
@@ -400,7 +401,7 @@
   real(kind=CUSTOM_REAL), dimension(3,3,npoints_3dmovie) :: nu_3dmovie
   double precision :: scalingval
   real(kind=CUSTOM_REAL), dimension(3) :: vector_local,vector_local_new
-  logical, dimension(NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE) :: mask_3dmovie
+  logical, dimension(NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE_STRAIN_ONLY) :: mask_3dmovie
   logical :: MOVIE_COARSE
   character(len=150) LOCAL_PATH
 
@@ -495,7 +496,7 @@
   include "OUTPUT_FILES/values_from_mesher.h"
 
   integer :: myrank,it
-  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE) :: eps_trace_over_3_crust_mantle
+  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE_STRAIN_ONLY) :: eps_trace_over_3_crust_mantle
   real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC_OUTER_CORE_ADJOINT) :: div_displ_outer_core
 
   real(kind=CUSTOM_REAL), dimension(NGLOB_OUTER_CORE) :: accel_outer_core
@@ -505,7 +506,7 @@
 
   real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC_INNER_CORE_STRAIN_ONLY) :: eps_trace_over_3_inner_core
 !  real(kind=CUSTOM_REAL), dimension(5,NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE) :: epsilondev_crust_mantle
-  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE) :: &
+  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC_CRUST_MANTLE_STR_OR_ATT) :: &
     epsilondev_xx_crust_mantle,epsilondev_yy_crust_mantle,epsilondev_xy_crust_mantle, &
     epsilondev_xz_crust_mantle,epsilondev_yz_crust_mantle
 



More information about the CIG-COMMITS mailing list