[cig-commits] r22469 - in seismo/3D/SPECFEM3D_GLOBE: branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D trunk/src/meshfem3D

dkomati1 at geodynamics.org dkomati1 at geodynamics.org
Sun Jun 30 18:33:15 PDT 2013


Author: dkomati1
Date: 2013-06-30 18:33:15 -0700 (Sun, 30 Jun 2013)
New Revision: 22469

Added:
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/Makefile
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/create_regions_mesh.F90
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/rules.mk
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/assemble_MPI_central_cube_mesh.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/assemble_MPI_scalar_mesh.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/assemble_MPI_vector_mesh.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/compute_area.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/compute_volumes.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/create_MPI_interfaces.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/create_addressing.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/create_central_cube_buffers.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/create_meshes.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/create_regions_mesh_adios.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/finalize_mesher.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/fix_non_blocking_flags.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/get_MPI_interfaces.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/get_absorb_adios.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/heap_sort.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/initialize_layers.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/initialize_mesher.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/meshfem3D_par.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/save_arrays_solver_adios.F90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/setup_MPI_interfaces.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/setup_color_perm.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/setup_counters.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/setup_inner_outer.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/setup_model.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/test_MPI_interfaces.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/write_AVS_DX_global_chunks_data_adios.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/write_AVS_DX_global_data_adios.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/write_AVS_DX_global_faces_data_adios.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/write_AVS_DX_surface_data_adios.f90
Removed:
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/Makefile.in
   seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/create_regions_mesh.f90
Log:
done adding new files in "meshfem3D"


Added: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/Makefile
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/Makefile	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/Makefile	2013-07-01 01:33:15 UTC (rev 22469)
@@ -0,0 +1,56 @@
+#=====================================================================
+#
+#          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
+#          --------------------------------------------------
+#
+#          Main authors: Dimitri Komatitsch and Jeroen Tromp
+#                        Princeton University, USA
+#             and University of Pau / CNRS / INRIA, France
+# (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
+#                            April 2011
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+#=====================================================================
+
+DIR = meshfem3D
+
+# The rest of this file is generic
+#######################################
+
+####
+#### targets
+####
+
+default:
+	$(MAKE) -C ../.. $(DIR)
+
+all:
+	$(MAKE) -C ../.. all
+
+clean:
+	$(MAKE) -C ../.. CLEAN=$(DIR) clean
+
+cleanall:
+	$(MAKE) -C ../.. clean
+
+backup:
+	mkdir -p bak
+	cp *f90 *h Makefile bak
+
+bak: backup
+
+.PHONY: default all clean cleanall backup bak
+

Deleted: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/Makefile.in
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/Makefile.in	2013-07-01 01:24:15 UTC (rev 22468)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/Makefile.in	2013-07-01 01:33:15 UTC (rev 22469)
@@ -1,330 +0,0 @@
-#=====================================================================
-#
-#          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
-#          --------------------------------------------------
-#
-#          Main authors: Dimitri Komatitsch and Jeroen Tromp
-#                        Princeton University, USA
-#             and University of Pau / CNRS / INRIA, France
-# (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
-#                            April 2011
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-#=====================================================================
-
-# @configure_input@
-
-FC = @FC@
-FCFLAGS = #@FCFLAGS@
-MPIFC = @MPIFC@
-MPILIBS = @MPILIBS@
-FLAGS_CHECK = @FLAGS_CHECK@
-FLAGS_NO_CHECK = @FLAGS_NO_CHECK@
-FCFLAGS_f90 = @FCFLAGS_f90@ -I../../setup
-
-FCCOMPILE_CHECK =@FCENV@ ${FC} ${FCFLAGS} $(FLAGS_CHECK)
-FCCOMPILE_NO_CHECK =@FCENV@ ${FC} ${FCFLAGS} $(FLAGS_NO_CHECK)
-MPIFCCOMPILE_CHECK =@FCENV@ ${MPIFC} ${FCFLAGS} $(FLAGS_CHECK)
-MPIFCCOMPILE_NO_CHECK =@FCENV@ ${MPIFC} ${FCFLAGS} $(FLAGS_NO_CHECK)
-
-CC = @CC@
-CFLAGS = @CFLAGS@
-CPPFLAGS = -I../../setup @CPPFLAGS@
-
-#AR = ar
-#ARFLAGS = cru
-#RANLIB = ranlib
-AR = @AR@
-ARFLAGS = @ARFLAGS@
-RANLIB = @RANLIB@
-
-## compilation directories
-# E : executables directory
-E = ../../bin
-# O : objects directory
-O = ../../obj
-# SHARED : shared directoy
-SHARED = ../shared
-# S : source file directory
-S = .
-# root directory
-S_TOP = ../..
-## setup file directory
-SETUP = ../../setup
-## output file directory
-OUTPUT = ../../OUTPUT_FILES
-
-#######################################
-
-libspecfem_a_OBJECTS_COMMON = \
-	$O/auto_ner.shared.o \
-	$O/broadcast_compute_parameters.sharedmpi.o \
-	$O/calendar.shared.o \
-	$O/count_elements.shared.o \
-	$O/count_number_of_sources.shared.o \
-	$O/count_points.shared.o \
-	$O/create_name_database.shared.o \
-	$O/define_all_layers.shared.o \
-	$O/euler_angles.shared.o \
-	$O/force_ftz.cc.o \
-	$O/get_model_parameters.shared.o \
-	$O/get_timestep_and_layers.shared.o \
-	$O/get_value_parameters.shared.o \
-	$O/gll_library.shared.o \
-	$O/hex_nodes.shared.o \
-	$O/intgrl.shared.o \
-	$O/lagrange_poly.shared.o \
-	$O/make_ellipticity.shared.o \
-	$O/make_gravity.shared.o \
-	$O/memory_eval.shared.o \
-	$O/model_prem.shared.o \
-	$O/model_topo_bathy.sharedmpi.o \
-	$O/param_reader.cc.o \
-	$O/read_compute_parameters.shared.o \
-	$O/read_parameter_file.shared.o \
-	$O/read_value_parameters.shared.o \
-	$O/reduce.shared.o \
-	$O/rthetaphi_xyz.shared.o \
-	$O/save_header_file.shared.o \
-	$O/spline_routines.shared.o \
-	$O/write_VTK_file.sharedmpi.o \
-	$(EMPTY_MACRO)
-
-
-libspecfem_a_OBJECTS_MESHER = \
-	$O/add_missing_nodes.o \
-	$O/add_topography.o \
-	$O/add_topography_410_650.o \
-	$O/add_topography_cmb.o \
-	$O/add_topography_icb.o \
-	$O/assemble_MPI_central_cube_mesh.mpi.o \
-	$O/assemble_MPI_scalar_mesh.mpi.o \
-	$O/assemble_MPI_vector_mesh.o \
-	$O/calc_jacobian.o \
-	$O/compute_coordinates_grid.o \
-	$O/compute_element_properties.o \
-	$O/compute_volumes.o \
-	$O/create_central_cube.o \
-	$O/create_central_cube_buffers.mpi.o \
-	$O/create_chunk_buffers.o \
-	$O/create_doubling_elements.o \
-	$O/create_mass_matrices.o \
-	$O/create_regions_mesh.o \
-	$O/create_regular_elements.o \
-	$O/define_superbrick.o \
-	$O/fix_non_blocking_flags.o \
-	$O/get_absorb.o \
-	$O/get_ellipticity.o \
-	$O/get_global.o \
-	$O/get_jacobian_boundaries.o \
-	$O/get_jacobian_discontinuities.o \
-	$O/get_model.o \
-	$O/get_MPI_1D_buffers.o \
-	$O/get_MPI_cutplanes_eta.o \
-	$O/get_MPI_cutplanes_xi.o \
-	$O/get_MPI_interfaces.mpi.o \
-	$O/get_perm_color.o \
-	$O/get_shape2D.o \
-	$O/get_shape3D.o \
-	$O/heap_sort.o \
-	$O/initialize_layers.o \
-	$O/lgndr.o \
-	$O/model_1dref.o \
-	$O/model_1066a.o \
-	$O/model_ak135.o \
-	$O/model_sea1d.o \
-	$O/model_aniso_inner_core.o \
-	$O/model_aniso_mantle.mpi.o \
-	$O/model_atten3D_QRFSI12.mpi.o \
-	$O/model_attenuation.mpi.o \
-	$O/model_crust.mpi.o \
-	$O/model_crustmaps.mpi.o \
-	$O/model_eucrust.mpi.o \
-	$O/model_epcrust.mpi.o \
-	$O/model_gapp2.mpi.o \
-	$O/model_gll.mpi.o \
-	$O/model_heterogen_mantle.mpi.o \
-	$O/model_iasp91.o \
-	$O/model_jp1d.o \
-	$O/model_jp3d.mpi.o \
-	$O/model_ppm.mpi.o \
-	$O/model_s20rts.mpi.o \
-	$O/model_s40rts.mpi.o \
-	$O/model_s362ani.mpi.o \
-	$O/model_sea99_s.mpi.o \
-	$O/moho_stretching.o \
-	$O/save_arrays_solver.o \
-	$O/setup_color_perm.o \
-	$O/setup_inner_outer.o \
-	$O/setup_MPI_interfaces.o \
-	$O/sort_array_coordinates.o \
-	$O/stretching_function.o \
-	$O/test_MPI_interfaces.o \
-	$O/write_AVS_DX_global_chunks_data.o \
-	$O/write_AVS_DX_global_data.o \
-	$O/write_AVS_DX_global_faces_data.o \
-	$O/write_AVS_DX_surface_data.o \
-	$(EMPTY_MACRO)
-
-MESHER_ARRAY_OBJECTS = \
-	$O/meshfem3D_par.o \
-	$O/meshfem3D_models.o \
-	$O/compute_area.o \
-	$O/create_addressing.o \
-	$O/create_meshes.o \
-	$O/create_MPI_interfaces.mpi.o \
-	$O/finalize_mesher.o \
-	$O/initialize_mesher.o \
-	$O/meshfem3D.mpi.o \
-	$O/setup_counters.o \
-	$O/setup_model.o \
-	$(EMPTY_MACRO)
-
-LIBSPECFEM_MESHER = $O/libspecfem_mesher.a
-#
-# using ADIOS files
-ADIOS_OBJECTS= \
-	$O/get_absorb_adios.adios.o  \
-	$O/adios_manager.shared_adios.o  \
-	$O/adios_helpers.shared_adios.o  \
-	$O/write_AVS_DX_global_data_adios.adios.o \
-	$O/write_AVS_DX_global_faces_data_adios.adios.o \
-	$O/write_AVS_DX_global_chunks_data_adios.adios.o \
-	$O/write_AVS_DX_surface_data_adios.adios.o \
-	$O/create_regions_mesh_adios.adios.o \
-	$O/save_arrays_solver_adios.adios.o
-ADIOS_STUBS = \
-	$O/adios_empty_stubs.noadios.o
-
-@COND_ADIOS_FALSE@ADIOS_OBJECTS = $(ADIOS_STUBS)
-
-# ADIOS
-# with configure: ./configure --with-cuda ADIOS_LIB=.. ADIOS_INC=.. MPI_INC=..
-@COND_ADIOS_TRUE@ADIOS_LIBS = -ladiosf -lmxml
-@COND_ADIOS_FALSE@ADIOS_LIBS = 
-
-ADIOS_LIB_LOCATION = @ADIOS_LIB@ @MXML_LIB@
-ADIOS_LINK = $(ADIOS_LIB_LOCATION) $(ADIOS_LIBS)
-ADIOS_INC = @ADIOS_INC@ -I../../setup -I../../ 
-
-#@COND_ADIOS_TRUE at CPPFLAGS = -I../../setup @CPPFLAGS@
-@COND_ADIOS_TRUE@MPIFCCOMPILE_CHECK =@FCENV@ ${MPIFC} ${FCFLAGS} $(ADIOS_INC) $(FLAGS_CHECK)
-@COND_ADIOS_TRUE@MPIFCCOMPILE_NO_CHECK =@FCENV@ ${MPIFC} ${FCFLAGS} $(ADIOS_INC) $(FLAGS_NO_CHECK)
-
-#######################################
-
-####
-#### targets
-####
-
-# default targets
-DEFAULT = \
-	xmeshfem3D \
-	$(EMPTY_MACRO)
-
-default: $(DEFAULT)
-
-all: clean default
-
-backup:
-	mkdir -p bak
-	cp *f90 *h Makefile bak
-
-bak: backup
-
-#######################################
-
-####
-#### rules for executables
-####
-
-# rules for the main programs
-XMESHFEM_OBJECTS = $(MESHER_ARRAY_OBJECTS) $(ADIOS_OBJECTS) $O/exit_mpi.sharedmpi.o $(LIBSPECFEM_MESHER)
-
-xmeshfem3D: $(XMESHFEM_OBJECTS)
-## use MPI here
-	${MPIFCCOMPILE_CHECK} -o ${E}/xmeshfem3D $(XMESHFEM_OBJECTS) $(MPILIBS) $(ADIOS_LINK)
-
-
-clean:
-	rm -f $O/* *.o work.pc* *.mod ${E}/xmeshfem3D  \
-      PI*
-
-#######################################
-
-###
-### rule for the archive library
-###
-
-$O/libspecfem_mesher.a: $(libspecfem_a_OBJECTS_MESHER) $(libspecfem_a_OBJECTS_COMMON)
-	-rm -f $O/libspecfem_mesher.a
-	$(AR) $(ARFLAGS) $O/libspecfem_mesher.a $(libspecfem_a_OBJECTS_MESHER) $(libspecfem_a_OBJECTS_COMMON)
-	$(RANLIB) $O/libspecfem_mesher.a
-
-
-#######################################
-
-####
-#### rule for each .o file below
-####
-
-##
-## shared
-##
-$O/%.shared.o: ${SHARED}/%.f90 ${SETUP}/constants.h
-	${FCCOMPILE_CHECK} ${FCFLAGS_f90} -c -o $@ $<
-
-$O/%.sharedmpi.o: ${SHARED}/%.f90 ${SETUP}/constants.h
-	${MPIFCCOMPILE_CHECK} ${FCFLAGS_f90} -c -o $@ $<
-
-$O/%.cc.o: ${SHARED}/%.c ${SETUP}/config.h
-	${CC} -c $(CPPFLAGS) $(CFLAGS) -o $@ $< 
-
-#######################################
-
-###
-### ADIOS compilation
-###
-
-$O/%.adios.o: %.F90 ${SETUP}/config.h
-	${MPIFC} -c $(ADIOS_INC) $(FCFLAGS) $(MPI_INC) -o $@ $<
-
-$O/%.adios.o: %.f90 ${SETUP}/config.h
-	${MPIFC} -c $(ADIOS_INC) $(FCFLAGS) $(MPI_INC) -o $@ $<
-
-$O/%.noadios.o: %.F90 
-	${FC} -c -o $@ $<
-
-$O/%.noadios.o: %.f90 
-	${FC} -c -o $@ $<
-
-$O/%.shared_adios.o: ${SHARED}/%.f90 ${SETUP}/constants.h
-	${MPIFC} -c $(ADIOS_INC) $(FCFLAGS) $(MPI_INC) -o $@ $<
-
-$O/%.shared_adios.o: ${SHARED}/%.F90 ${SETUP}/constants.h
-	${MPIFC} -c $(ADIOS_INC) $(FCFLAGS) $(MPI_INC) -o $@ $<
-#######################################
-
-###
-### meshfem3D objects
-###
-
-$O/%.o: $S/%.f90 ${SETUP}/constants.h
-	${FCCOMPILE_CHECK} ${FCFLAGS_f90} -c -o $@ $<
-
-$O/%.mpi.o: $S/%.f90 ${SETUP}/constants.h
-	${MPIFCCOMPILE_NO_CHECK} ${FCFLAGS_f90} -c -o $@ $<
-

Copied: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/create_regions_mesh.F90 (from rev 22463, seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/create_regions_mesh.f90)
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/create_regions_mesh.F90	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/create_regions_mesh.F90	2013-07-01 01:33:15 UTC (rev 22469)
@@ -0,0 +1,1289 @@
+!=====================================================================
+!
+!          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
+!          --------------------------------------------------
+!
+!          Main authors: Dimitri Komatitsch and Jeroen Tromp
+!                        Princeton University, USA
+!             and University of Pau / CNRS / INRIA, France
+! (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
+!                            April 2011
+!
+! This program is free software; you can redistribute it and/or modify
+! it under the terms of the GNU General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or
+! (at your option) any later version.
+!
+! This program is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+! GNU General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License along
+! with this program; if not, write to the Free Software Foundation, Inc.,
+! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+!
+!=====================================================================
+
+
+  subroutine create_regions_mesh(iregion_code, &
+                          nspec,nglob_theor,npointot, &
+                          NEX_PER_PROC_XI,NEX_PER_PROC_ETA, &
+                          NSPEC2DMAX_XMIN_XMAX,NSPEC2DMAX_YMIN_YMAX, &
+                          NSPEC2D_BOTTOM,NSPEC2D_TOP, &
+                          offset_proc_xi,offset_proc_eta, &
+                          ipass)
+
+! creates the different regions of the mesh
+
+  use meshfem3D_par,only: &
+    ibool,idoubling,xstore,ystore,zstore, &
+    IMAIN,volume_total,myrank,LOCAL_PATH, &
+    IREGION_CRUST_MANTLE,IREGION_OUTER_CORE,IREGION_INNER_CORE, &
+    IFLAG_IN_FICTITIOUS_CUBE, &
+    NCHUNKS,SAVE_MESH_FILES,ABSORBING_CONDITIONS, &
+    R_CENTRAL_CUBE,RICB,RCMB, &
+    MAX_NUMBER_OF_MESH_LAYERS,MAX_NUM_REGIONS,NB_SQUARE_CORNERS, &
+    NGLOB1D_RADIAL_CORNER, &
+    NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX, &
+    ADIOS_FOR_ARRAYS_SOLVER
+
+  use meshfem3D_models_par,only: &
+    SAVE_BOUNDARY_MESH,SUPPRESS_CRUSTAL_MESH,REGIONAL_MOHO_MESH, &
+    OCEANS
+
+  use create_MPI_interfaces_par, only: &
+    NGLOB1D_RADIAL_MAX,iboolcorner,iboolfaces, &
+    iboolleft_xi,iboolright_xi,iboolleft_eta,iboolright_eta, &
+    ibool1D_leftxi_lefteta,ibool1D_rightxi_lefteta, &
+    ibool1D_leftxi_righteta,ibool1D_rightxi_righteta, &
+    xyz1D_leftxi_lefteta,xyz1D_rightxi_lefteta, &
+    xyz1D_leftxi_righteta,xyz1D_rightxi_righteta
+
+  use create_regions_mesh_par
+  use create_regions_mesh_par2
+
+  use MPI_crust_mantle_par,only: &
+    xstore_crust_mantle,ystore_crust_mantle,zstore_crust_mantle
+  use MPI_outer_core_par,only: &
+    xstore_outer_core,ystore_outer_core,zstore_outer_core
+  use MPI_inner_core_par,only: &
+    xstore_inner_core,ystore_inner_core,zstore_inner_core
+
+  implicit none
+
+  ! code for the four regions of the mesh
+  integer :: iregion_code
+
+  ! correct number of spectral elements in each block depending on chunk type
+  integer :: nspec
+  integer :: nglob_theor,npointot
+
+  integer :: NEX_PER_PROC_XI,NEX_PER_PROC_ETA
+  integer :: NSPEC2DMAX_XMIN_XMAX,NSPEC2DMAX_YMIN_YMAX
+  integer :: NSPEC2D_BOTTOM,NSPEC2D_TOP
+
+  integer :: offset_proc_xi,offset_proc_eta
+
+  ! now perform two passes in this part to be able to save memory
+  integer,intent(in) :: ipass
+
+  ! local parameters
+  integer :: ier
+  integer :: nglob
+  ! check area and volume of the final mesh
+  double precision :: area_local_bottom,area_local_top
+  double precision :: volume_local
+
+  ! user output
+  if(myrank == 0 ) then
+    write(IMAIN,*)
+    select case(ipass)
+    case(1)
+      write(IMAIN,*) 'first pass'
+    case(2)
+      write(IMAIN,*) 'second pass'
+    case default
+      call exit_MPI(myrank,'error ipass value in create_regions_mesh')
+    end select
+    call flush_IMAIN()
+  endif
+
+  ! create the name for the database of the current slice and region
+  call create_name_database(prname,myrank,iregion_code,LOCAL_PATH)
+
+  ! initializes arrays
+  call sync_all()
+  if( myrank == 0) then
+    write(IMAIN,*)
+    write(IMAIN,*) '  ...allocating arrays '
+    call flush_IMAIN()
+  endif
+  call crm_allocate_arrays(iregion_code,nspec,ipass, &
+                          NSPEC2DMAX_XMIN_XMAX,NSPEC2DMAX_YMIN_YMAX, &
+                          NSPEC2D_BOTTOM,NSPEC2D_TOP)
+
+
+  ! initialize number of layers
+  call sync_all()
+  if( myrank == 0) then
+    write(IMAIN,*)
+    write(IMAIN,*) '  ...setting up layers '
+    call flush_IMAIN()
+  endif
+  call crm_setup_layers(iregion_code,nspec,ipass,NEX_PER_PROC_ETA)
+
+  !  creates mesh elements
+  call sync_all()
+  if( myrank == 0) then
+    write(IMAIN,*)
+    write(IMAIN,*) '  ...creating mesh elements '
+    call flush_IMAIN()
+  endif
+  call crm_create_elements(iregion_code,nspec,ipass, &
+                          NEX_PER_PROC_XI,NEX_PER_PROC_ETA, &
+                          offset_proc_xi,offset_proc_eta)
+
+
+  ! only create global addressing and the MPI buffers in the first pass
+  select case(ipass)
+  case( 1 )
+    ! creates ibool index array for projection from local to global points
+    call sync_all()
+    if( myrank == 0) then
+      write(IMAIN,*)
+      write(IMAIN,*) '  ...creating global addressing'
+      call flush_IMAIN()
+    endif
+    call crm_setup_indexing(nspec,nglob_theor,npointot)
+
+
+    ! create MPI buffers
+    call sync_all()
+    if( myrank == 0) then
+      write(IMAIN,*)
+      write(IMAIN,*) '  ...creating MPI buffers'
+      call flush_IMAIN()
+    endif
+    call crm_setup_mpi_buffers(npointot,nspec,iregion_code)
+
+
+    ! sets up Stacey absorbing boundary indices
+    if(NCHUNKS /= 6) then
+      call get_absorb(myrank,prname,iregion_code, iboun,nspec,nimin,nimax,&
+          njmin,njmax, nkmin_xi,nkmin_eta, NSPEC2DMAX_XMIN_XMAX, &
+          NSPEC2DMAX_YMIN_YMAX, NSPEC2D_BOTTOM)
+    endif
+
+  ! only create mass matrix and save all the final arrays in the second pass
+  case( 2 )
+    ! precomputes jacobian for 2d absorbing boundary surfaces
+    call sync_all()
+    if( myrank == 0) then
+      write(IMAIN,*)
+      write(IMAIN,*) '  ...precomputing jacobian'
+      call flush_IMAIN()
+    endif
+    call get_jacobian_boundaries(myrank,iboun,nspec,xstore,ystore,zstore, &
+              dershape2D_x,dershape2D_y,dershape2D_bottom,dershape2D_top, &
+              ibelm_xmin,ibelm_xmax,ibelm_ymin,ibelm_ymax,ibelm_bottom,ibelm_top, &
+              nspec2D_xmin,nspec2D_xmax,nspec2D_ymin,nspec2D_ymax, &
+              jacobian2D_xmin,jacobian2D_xmax, &
+              jacobian2D_ymin,jacobian2D_ymax, &
+              jacobian2D_bottom,jacobian2D_top, &
+              normal_xmin,normal_xmax, &
+              normal_ymin,normal_ymax, &
+              normal_bottom,normal_top, &
+              NSPEC2D_BOTTOM,NSPEC2D_TOP, &
+              NSPEC2DMAX_XMIN_XMAX,NSPEC2DMAX_YMIN_YMAX,&
+              xigll,yigll,zigll)
+
+    ! create chunk buffers if more than one chunk
+    call sync_all()
+    if( myrank == 0) then
+      write(IMAIN,*)
+      write(IMAIN,*) '  ...creating chunk buffers'
+      call flush_IMAIN()
+    endif
+    call create_chunk_buffers(iregion_code,nspec,ibool,idoubling, &
+                              xstore,ystore,zstore,nglob_theor, &
+                              NGLOB1D_RADIAL_CORNER,NGLOB1D_RADIAL_MAX, &
+                              NGLOB2DMAX_XMIN_XMAX(iregion_code),NGLOB2DMAX_YMIN_YMAX(iregion_code))
+
+    ! only deallocates after second pass
+    deallocate(ibool1D_leftxi_lefteta,ibool1D_rightxi_lefteta, &
+              ibool1D_leftxi_righteta,ibool1D_rightxi_righteta, &
+              xyz1D_leftxi_lefteta,xyz1D_rightxi_lefteta, &
+              xyz1D_leftxi_righteta,xyz1D_rightxi_righteta)
+
+    ! setup mpi communication interfaces
+    call sync_all()
+    if( myrank == 0) then
+      write(IMAIN,*)
+      write(IMAIN,*) '  ...preparing MPI interfaces'
+      call flush_IMAIN()
+    endif
+    ! creates MPI interface arrays
+    call create_MPI_interfaces(iregion_code)
+
+    ! sets up MPI interface arrays
+    call setup_MPI_interfaces(iregion_code)
+
+    ! only deallocates after second pass
+    deallocate(iboolleft_xi,iboolright_xi,iboolleft_eta,iboolright_eta)
+    deallocate(iboolfaces)
+    deallocate(iboolcorner)
+
+    ! sets up inner/outer element arrays
+    call sync_all()
+    if( myrank == 0) then
+      write(IMAIN,*)
+      write(IMAIN,*) '  ...element inner/outer separation '
+      call flush_IMAIN()
+    endif
+    call setup_inner_outer(iregion_code)
+
+    ! sets up mesh coloring
+    call sync_all()
+    if( myrank == 0) then
+      write(IMAIN,*)
+      write(IMAIN,*) '  ...element mesh coloring '
+      call flush_IMAIN()
+    endif
+    call setup_color_perm(iregion_code)
+
+    ! frees allocated mesh memory
+    select case( iregion_code )
+    case( IREGION_CRUST_MANTLE )
+      deallocate(xstore_crust_mantle,ystore_crust_mantle,zstore_crust_mantle)
+    case( IREGION_OUTER_CORE )
+      deallocate(xstore_outer_core,ystore_outer_core,zstore_outer_core)
+    case( IREGION_INNER_CORE )
+      deallocate(xstore_inner_core,ystore_inner_core,zstore_inner_core)
+    end select
+
+    !uncomment: adds model smoothing for point profile models
+    !    if( THREE_D_MODEL == THREE_D_MODEL_PPM ) then
+    !     call smooth_model(myrank, nproc_xi,nproc_eta,&
+    !        rho_vp,rho_vs,nspec_stacey, &
+    !        iregion_code,xixstore,xiystore,xizstore, &
+    !        etaxstore,etaystore,etazstore, &
+    !        gammaxstore,gammaystore,gammazstore, &
+    !        xstore,ystore,zstore,rhostore,dvpstore, &
+    !        kappavstore,kappahstore,muvstore,muhstore,eta_anisostore,&
+    !        nspec,HETEROGEN_3D_MANTLE, &
+    !        NEX_XI,NCHUNKS,ABSORBING_CONDITIONS )
+
+
+    ! creates mass matrix
+    call sync_all()
+    if( myrank == 0) then
+      write(IMAIN,*)
+      write(IMAIN,*) '  ...creating mass matrix'
+      call flush_IMAIN()
+    endif
+
+    ! allocates mass matrices in this slice (will be fully assembled in the solver)
+    !
+    ! in the case of stacey boundary conditions, add C*deltat/2 contribution to the mass matrix
+    ! on Stacey edges for the crust_mantle and outer_core regions but not for the inner_core region
+    ! thus the mass matrix must be replaced by three mass matrices including the "C" damping matrix
+    !
+    ! if absorbing_conditions are not set or if NCHUNKS=6, only one mass matrix is needed
+    ! for the sake of performance, only "rmassz" array will be filled and "rmassx" & "rmassy" will be obsolete
+
+    ! copy the theoretical number of points for the second pass
+    nglob = nglob_theor
+
+    if(NCHUNKS /= 6 .and. ABSORBING_CONDITIONS) then
+      select case(iregion_code)
+      case( IREGION_CRUST_MANTLE )
+        nglob_xy = nglob
+      case( IREGION_INNER_CORE, IREGION_OUTER_CORE )
+        nglob_xy = 1
+      endselect
+    else
+       nglob_xy = 1
+    endif
+
+    allocate(rmassx(nglob_xy), &
+            rmassy(nglob_xy), &
+            stat=ier)
+    if(ier /= 0) stop 'error in allocate 21'
+
+    allocate(rmassz(nglob),stat=ier)
+    if(ier /= 0) stop 'error in allocate 22'
+
+    ! allocates ocean load mass matrix as well if oceans
+    if(OCEANS .and. iregion_code == IREGION_CRUST_MANTLE) then
+      nglob_oceans = nglob
+    else
+      ! allocate dummy array if no oceans
+      nglob_oceans = 1
+    endif
+    allocate(rmass_ocean_load(nglob_oceans),stat=ier)
+    if(ier /= 0) stop 'error in allocate 22'
+
+    ! creating mass matrices in this slice (will be fully assembled in the solver)
+    call create_mass_matrices(myrank,nspec,idoubling,ibool, &
+                            iregion_code,xstore,ystore,zstore, &
+                            NSPEC2D_TOP,NSPEC2D_BOTTOM)
+
+    ! save the binary files
+    call sync_all()
+    if( myrank == 0) then
+      write(IMAIN,*)
+      write(IMAIN,*) '  ...saving binary files'
+      call flush_IMAIN()
+    endif
+    ! saves mesh and model parameters
+    if (ADIOS_FOR_ARRAYS_SOLVER) then
+      call save_arrays_solver_adios(myrank,nspec,nglob,idoubling,ibool, &
+          iregion_code,xstore,ystore,zstore,  &
+          NSPEC2DMAX_XMIN_XMAX, NSPEC2DMAX_YMIN_YMAX, &
+          NSPEC2D_TOP,NSPEC2D_BOTTOM)
+    else
+      call save_arrays_solver(myrank,nspec,nglob,idoubling,ibool, &
+          iregion_code,xstore,ystore,zstore, NSPEC2D_TOP,NSPEC2D_BOTTOM)
+    endif
+
+    ! frees memory
+    deallocate(rmassx,rmassy,rmassz)
+    deallocate(rmass_ocean_load)
+    ! Stacey
+    if( NCHUNKS /= 6 ) deallocate(nimin,nimax,njmin,njmax,nkmin_xi,nkmin_eta)
+
+    ! saves MPI interface infos
+    call save_arrays_solver_MPI(iregion_code)
+
+    ! frees MPI arrays memory
+    call crm_free_MPI_arrays(iregion_code)
+
+    ! boundary mesh for MOHO, 400 and 670 discontinuities
+    if (SAVE_BOUNDARY_MESH .and. iregion_code == IREGION_CRUST_MANTLE) then
+      ! user output
+      call sync_all()
+      if( myrank == 0) then
+        write(IMAIN,*)
+        write(IMAIN,*) '  ...saving boundary mesh files'
+        call flush_IMAIN()
+      endif
+      ! saves boundary file
+      if (ADIOS_FOR_ARRAYS_SOLVER) then
+        call save_arrays_solver_boundary_adios()
+      else
+        call save_arrays_solver_boundary()
+      endif
+
+    endif
+
+    ! compute volume, bottom and top area of that part of the slice
+    call compute_volumes(volume_local,area_local_bottom,area_local_top, &
+        nspec,wxgll,wygll,wzgll,xixstore,xiystore,xizstore, &
+        etaxstore,etaystore,etazstore,gammaxstore,gammaystore,gammazstore, &
+        NSPEC2D_BOTTOM,jacobian2D_bottom,NSPEC2D_TOP,jacobian2D_top)
+
+    ! computes total area and volume
+    call compute_area(myrank,NCHUNKS,iregion_code, area_local_bottom, &
+        area_local_top, volume_local,volume_total, RCMB,RICB,R_CENTRAL_CUBE)
+
+    ! create AVS or DX mesh data for the slices
+    if(SAVE_MESH_FILES) then
+      ! user output
+      call sync_all()
+      if( myrank == 0) then
+        write(IMAIN,*)
+        write(IMAIN,*) '  ...saving AVS mesh files'
+        call flush_IMAIN()
+      endif
+      call crm_save_mesh_files(nspec,npointot,iregion_code)
+    endif
+
+  case default
+    stop 'there cannot be more than two passes in mesh creation'
+
+  end select  ! end of test if first or second pass
+
+
+  ! deallocate these arrays after each pass
+  ! because they have a different size in each pass to save memory
+  deallocate(xixstore,xiystore,xizstore)
+  deallocate(etaxstore,etaystore,etazstore)
+  deallocate(gammaxstore,gammaystore,gammazstore)
+
+  ! deallocate arrays
+  deallocate(rhostore,dvpstore,kappavstore,kappahstore)
+  deallocate(muvstore,muhstore)
+  deallocate(eta_anisostore)
+  deallocate(ispec_is_tiso)
+  deallocate(c11store,c12store,c13store,c14store,c15store,c16store,c22store, &
+            c23store,c24store,c25store,c26store,c33store,c34store,c35store, &
+            c36store,c44store,c45store,c46store,c55store,c56store,c66store)
+  deallocate(iboun)
+  deallocate(ibelm_xmin,ibelm_xmax,ibelm_ymin,ibelm_ymax)
+  deallocate(ibelm_bottom,ibelm_top)
+  deallocate(jacobian2D_xmin,jacobian2D_xmax,jacobian2D_ymin,jacobian2D_ymax)
+  deallocate(jacobian2D_bottom,jacobian2D_top)
+  deallocate(normal_xmin,normal_xmax,normal_ymin,normal_ymax)
+  deallocate(normal_bottom,normal_top)
+  deallocate(iMPIcut_xi,iMPIcut_eta)
+
+  deallocate(rho_vp,rho_vs)
+  deallocate(Qmu_store)
+  deallocate(tau_e_store)
+
+  deallocate(ibelm_moho_top,ibelm_moho_bot)
+  deallocate(ibelm_400_top,ibelm_400_bot)
+  deallocate(ibelm_670_top,ibelm_670_bot)
+  deallocate(normal_moho,normal_400,normal_670)
+
+  end subroutine create_regions_mesh
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  subroutine crm_allocate_arrays(iregion_code,nspec,ipass, &
+                                NSPEC2DMAX_XMIN_XMAX,NSPEC2DMAX_YMIN_YMAX, &
+                                NSPEC2D_BOTTOM,NSPEC2D_TOP)
+
+! allocates the mesh arrays held in module create_regions_mesh_par2 for the
+! current region: material stores, boundary locators, 2-D jacobians/normals,
+! and the MPI buffer index arrays. Array sizes depend on the region
+! (iregion_code), on run-time options (attenuation, anisotropy, Stacey
+! absorbing conditions, boundary-mesh output) and on the meshing pass:
+! some arrays are allocated with dummy size 1 during the first (counting)
+! pass to save memory and only get their true size in the second pass.
+
+  use constants
+
+  use meshfem3D_par,only: &
+    NCHUNKS,NUMCORNERS_SHARED,NUMFACES_SHARED, &
+    NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX, &
+    NGLOB1D_RADIAL,NGLOB1D_RADIAL_CORNER
+
+  use meshfem3D_models_par,only: &
+    ATTENUATION,ANISOTROPIC_INNER_CORE,ANISOTROPIC_3D_MANTLE, &
+    SAVE_BOUNDARY_MESH,AM_V
+
+  use create_regions_mesh_par2
+  use create_MPI_interfaces_par
+
+  implicit none
+
+  ! region id (crust/mantle, outer core or inner core) and number of
+  ! spectral elements in this region
+  integer,intent(in) :: iregion_code,nspec
+  ! meshing pass: 1 = first (counting) pass, 2 = final pass storing arrays
+  integer,intent(in) :: ipass
+
+  ! maximum numbers of 2-D boundary elements on the side/bottom/top surfaces
+  integer,intent(in) :: NSPEC2DMAX_XMIN_XMAX,NSPEC2DMAX_YMIN_YMAX
+  integer,intent(in) :: NSPEC2D_BOTTOM,NSPEC2D_TOP
+
+  ! local parameters
+  integer :: ier
+
+  ! New Attenuation definition on all GLL points
+  ! Attenuation
+  if (ATTENUATION) then
+    T_c_source = AM_V%QT_c_source
+    tau_s(:)   = AM_V%Qtau_s(:)
+    nspec_att = nspec
+
+    ! note: to save memory, one can set USE_3D_ATTENUATION_ARRAYS to .false. which
+    !          will create attenuation arrays storing only 1 point per element
+    !          (otherwise, 3D attenuation arrays will be defined for all GLL points)
+    if( USE_3D_ATTENUATION_ARRAYS ) then
+      ! attenuation arrays are fully 3D
+      allocate(Qmu_store(NGLLX,NGLLY,NGLLZ,nspec_att), &
+              tau_e_store(N_SLS,NGLLX,NGLLY,NGLLZ,nspec_att),stat=ier)
+    else
+      ! save some memory in case of 1D attenuation models
+      allocate(Qmu_store(1,1,1,nspec_att), &
+              tau_e_store(N_SLS,1,1,1,nspec_att),stat=ier)
+    endif
+  else
+    ! allocates dummy size arrays
+    nspec_att = 1
+    allocate(Qmu_store(1,1,1,nspec_att), &
+            tau_e_store(N_SLS,1,1,1,nspec_att),stat=ier)
+  end if
+  if(ier /= 0) stop 'error in allocate 1'
+
+  ! array with model density
+  allocate(rhostore(NGLLX,NGLLY,NGLLZ,nspec), &
+          dvpstore(NGLLX,NGLLY,NGLLZ,nspec),stat=ier)
+  if(ier /= 0) stop 'error in allocate 6'
+
+  ! for anisotropy
+  allocate(kappavstore(NGLLX,NGLLY,NGLLZ,nspec), &
+          muvstore(NGLLX,NGLLY,NGLLZ,nspec), &
+          kappahstore(NGLLX,NGLLY,NGLLZ,nspec), &
+          muhstore(NGLLX,NGLLY,NGLLZ,nspec), &
+          eta_anisostore(NGLLX,NGLLY,NGLLZ,nspec), &
+          ispec_is_tiso(nspec),stat=ier)
+  if(ier /= 0) stop 'error in allocate 7'
+
+  ! Stacey absorbing boundaries
+  ! (only needed for regional meshes, i.e. fewer than 6 chunks)
+  if(NCHUNKS /= 6) then
+    nspec_stacey = nspec
+  else
+    nspec_stacey = 1
+  endif
+  allocate(rho_vp(NGLLX,NGLLY,NGLLZ,nspec_stacey), &
+          rho_vs(NGLLX,NGLLY,NGLLZ,nspec_stacey),stat=ier)
+  if(ier /= 0) stop 'error in allocate 8'
+
+  ! anisotropy
+  ! full cijkl stores only where the corresponding anisotropy flag applies
+  if((ANISOTROPIC_INNER_CORE .and. iregion_code == IREGION_INNER_CORE) .or. &
+     (ANISOTROPIC_3D_MANTLE .and. iregion_code == IREGION_CRUST_MANTLE)) then
+    nspec_ani = nspec
+  else
+    nspec_ani = 1
+  endif
+  allocate(c11store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
+          c12store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
+          c13store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
+          c14store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
+          c15store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
+          c16store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
+          c22store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
+          c23store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
+          c24store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
+          c25store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
+          c26store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
+          c33store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
+          c34store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
+          c35store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
+          c36store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
+          c44store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
+          c45store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
+          c46store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
+          c55store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
+          c56store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
+          c66store(NGLLX,NGLLY,NGLLZ,nspec_ani),stat=ier)
+  if(ier /= 0) stop 'error in allocate 9'
+
+  ! boundary locator
+  allocate(iboun(6,nspec),stat=ier)
+  if(ier /= 0) stop 'error in allocate 10'
+
+  ! boundary parameters locator
+  allocate(ibelm_xmin(NSPEC2DMAX_XMIN_XMAX), &
+          ibelm_xmax(NSPEC2DMAX_XMIN_XMAX), &
+          ibelm_ymin(NSPEC2DMAX_YMIN_YMAX), &
+          ibelm_ymax(NSPEC2DMAX_YMIN_YMAX), &
+          ibelm_bottom(NSPEC2D_BOTTOM), &
+          ibelm_top(NSPEC2D_TOP),stat=ier)
+  if(ier /= 0) stop 'error in allocate 11'
+
+  ! 2-D jacobians and normals
+  allocate(jacobian2D_xmin(NGLLY,NGLLZ,NSPEC2DMAX_XMIN_XMAX), &
+          jacobian2D_xmax(NGLLY,NGLLZ,NSPEC2DMAX_XMIN_XMAX), &
+          jacobian2D_ymin(NGLLX,NGLLZ,NSPEC2DMAX_YMIN_YMAX), &
+          jacobian2D_ymax(NGLLX,NGLLZ,NSPEC2DMAX_YMIN_YMAX), &
+          jacobian2D_bottom(NGLLX,NGLLY,NSPEC2D_BOTTOM), &
+          jacobian2D_top(NGLLX,NGLLY,NSPEC2D_TOP),stat=ier)
+  if(ier /= 0) stop 'error in allocate 12'
+
+  allocate(normal_xmin(NDIM,NGLLY,NGLLZ,NSPEC2DMAX_XMIN_XMAX), &
+          normal_xmax(NDIM,NGLLY,NGLLZ,NSPEC2DMAX_XMIN_XMAX), &
+          normal_ymin(NDIM,NGLLX,NGLLZ,NSPEC2DMAX_YMIN_YMAX), &
+          normal_ymax(NDIM,NGLLX,NGLLZ,NSPEC2DMAX_YMIN_YMAX), &
+          normal_bottom(NDIM,NGLLX,NGLLY,NSPEC2D_BOTTOM), &
+          normal_top(NDIM,NGLLX,NGLLY,NSPEC2D_TOP),stat=ier)
+  if(ier /= 0) stop 'error in allocate 13'
+
+  ! Stacey
+  ! these index arrays persist across passes, so they are only allocated once
+  if( ipass == 1 .and. NCHUNKS /= 6 ) then
+    allocate(nimin(2,NSPEC2DMAX_YMIN_YMAX), &
+            nimax(2,NSPEC2DMAX_YMIN_YMAX), &
+            njmin(2,NSPEC2DMAX_XMIN_XMAX), &
+            njmax(2,NSPEC2DMAX_XMIN_XMAX), &
+            nkmin_xi(2,NSPEC2DMAX_XMIN_XMAX), &
+            nkmin_eta(2,NSPEC2DMAX_YMIN_YMAX),stat=ier)
+    if(ier /= 0) stop 'error in allocate 14'
+  endif
+
+  ! MPI cut-planes parameters along xi and along eta
+  allocate(iMPIcut_xi(2,nspec), &
+          iMPIcut_eta(2,nspec),stat=ier)
+  if(ier /= 0) stop 'error in allocate 15'
+
+  ! MPI buffer indices
+  !
+  ! define maximum size for message buffers
+  ! use number of elements found in the mantle since it is the largest region
+  NGLOB2DMAX_XY = max(NGLOB2DMAX_XMIN_XMAX(IREGION_CRUST_MANTLE),NGLOB2DMAX_YMIN_YMAX(IREGION_CRUST_MANTLE))
+  ! 1-D buffers
+  NGLOB1D_RADIAL_MAX = maxval(NGLOB1D_RADIAL_CORNER(iregion_code,:))
+
+  if( ipass == 1 ) then
+    allocate(iboolleft_xi(NGLOB2DMAX_XMIN_XMAX(iregion_code)), &
+            iboolright_xi(NGLOB2DMAX_XMIN_XMAX(iregion_code)), &
+            iboolleft_eta(NGLOB2DMAX_YMIN_YMAX(iregion_code)), &
+            iboolright_eta(NGLOB2DMAX_YMIN_YMAX(iregion_code)), &
+            stat=ier)
+    if(ier /= 0) stop 'error in allocate 15b'
+
+    allocate(ibool1D_leftxi_lefteta(NGLOB1D_RADIAL_MAX), &
+            ibool1D_rightxi_lefteta(NGLOB1D_RADIAL_MAX), &
+            ibool1D_leftxi_righteta(NGLOB1D_RADIAL_MAX), &
+            ibool1D_rightxi_righteta(NGLOB1D_RADIAL_MAX), &
+            stat=ier)
+    if(ier /= 0) stop 'error in allocate 15c'
+
+    allocate(xyz1D_leftxi_lefteta(NGLOB1D_RADIAL_MAX,NDIM), &
+            xyz1D_rightxi_lefteta(NGLOB1D_RADIAL_MAX,NDIM), &
+            xyz1D_leftxi_righteta(NGLOB1D_RADIAL_MAX,NDIM), &
+            xyz1D_rightxi_righteta(NGLOB1D_RADIAL_MAX,NDIM), &
+            stat=ier)
+    ! bug fix: label was a duplicate of the previous check ('15c'),
+    ! which made the abort message ambiguous
+    if(ier /= 0) stop 'error in allocate 15d'
+
+    allocate(iboolcorner(NGLOB1D_RADIAL(iregion_code),NUMCORNERS_SHARED), &
+            iboolfaces(NGLOB2DMAX_XY,NUMFACES_SHARED), &
+            stat=ier)
+    ! bug fix: label was a duplicate of an earlier check ('15b')
+    if(ier /= 0) stop 'error in allocate 15e'
+
+  endif
+
+
+  ! store and save the final arrays only in the second pass
+  ! therefore in the first pass some arrays can be allocated with a dummy size
+  if(ipass == 1) then
+    nspec_actually = 1
+  else
+    nspec_actually = nspec
+  endif
+  allocate(xixstore(NGLLX,NGLLY,NGLLZ,nspec_actually), &
+          xiystore(NGLLX,NGLLY,NGLLZ,nspec_actually), &
+          xizstore(NGLLX,NGLLY,NGLLZ,nspec_actually), &
+          etaxstore(NGLLX,NGLLY,NGLLZ,nspec_actually), &
+          etaystore(NGLLX,NGLLY,NGLLZ,nspec_actually), &
+          etazstore(NGLLX,NGLLY,NGLLZ,nspec_actually), &
+          gammaxstore(NGLLX,NGLLY,NGLLZ,nspec_actually), &
+          gammaystore(NGLLX,NGLLY,NGLLZ,nspec_actually), &
+          gammazstore(NGLLX,NGLLY,NGLLZ,nspec_actually),stat=ier)
+  if(ier /= 0) stop 'error in allocate 16'
+
+  ! boundary mesh
+  if (ipass == 2 .and. SAVE_BOUNDARY_MESH .and. iregion_code == IREGION_CRUST_MANTLE) then
+    NSPEC2D_MOHO = NSPEC2D_TOP
+    NSPEC2D_400 = NSPEC2D_MOHO / 4
+    NSPEC2D_670 = NSPEC2D_400
+  else
+    NSPEC2D_MOHO = 1
+    NSPEC2D_400 = 1
+    NSPEC2D_670 = 1
+  endif
+  allocate(ibelm_moho_top(NSPEC2D_MOHO),ibelm_moho_bot(NSPEC2D_MOHO), &
+          ibelm_400_top(NSPEC2D_400),ibelm_400_bot(NSPEC2D_400), &
+          ibelm_670_top(NSPEC2D_670),ibelm_670_bot(NSPEC2D_670), &
+          normal_moho(NDIM,NGLLX,NGLLY,NSPEC2D_MOHO), &
+          normal_400(NDIM,NGLLX,NGLLY,NSPEC2D_400), &
+          normal_670(NDIM,NGLLX,NGLLY,NSPEC2D_670), &
+          jacobian2D_moho(NGLLX,NGLLY,NSPEC2D_MOHO), &
+          jacobian2D_400(NGLLX,NGLLY,NSPEC2D_400), &
+          jacobian2D_670(NGLLX,NGLLY,NSPEC2D_670),stat=ier)
+  if(ier /= 0) stop 'error in allocate 17'
+
+  end subroutine crm_allocate_arrays
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  subroutine crm_setup_layers(iregion_code,nspec,ipass, &
+                              NEX_PER_PROC_ETA)
+
+! prepares the element layering of the current region:
+! - initializes GLL points, shape functions and per-layer bookkeeping
+!   (initialize_layers)
+! - builds perm_layer, the order in which layers are meshed (bottom to top,
+!   with the two anisotropic layers placed first for the crust/mantle region)
+! - builds stretch_tab with the top/bottom radii of the crustal element
+!   layers, stretched to honor a 3D crustal model when CASE_3D is set
+
+  use meshfem3D_par,only: &
+    ibool,idoubling,is_on_a_slice_edge, &
+    xstore,ystore,zstore, &
+    myrank,NGLLX,NGLLY,NGLLZ, &
+    IREGION_CRUST_MANTLE,IREGION_OUTER_CORE,IREGION_INNER_CORE, &
+    R670,RMOHO,R400,RMIDDLE_CRUST,MAX_NUMBER_OF_MESH_LAYERS, &
+    ner,r_top,r_bottom
+
+  use meshfem3D_models_par,only: &
+    CASE_3D,SUPPRESS_CRUSTAL_MESH,ONE_CRUST,REGIONAL_MOHO_MESH
+
+  use create_regions_mesh_par
+  use create_regions_mesh_par2
+
+  implicit none
+
+  integer,intent(in) :: iregion_code,nspec
+  integer,intent(in) :: ipass
+  ! NOTE(review): no intent declared; value is passed through to initialize_layers
+  integer :: NEX_PER_PROC_ETA
+
+  ! local parameters
+  integer :: i,ier
+
+  ! initializes element layers
+  ! (sets ifirst_region/ilast_region and the aniso layer indices used below)
+  call initialize_layers(myrank,ipass,xigll,yigll,zigll,wxgll,wygll,wzgll, &
+                        shape3D,dershape3D,shape2D_x,shape2D_y,shape2D_bottom,shape2D_top, &
+                        dershape2D_x,dershape2D_y,dershape2D_bottom,dershape2D_top, &
+                        iaddx,iaddy,iaddz,nspec,xstore,ystore,zstore,ibool,idoubling, &
+                        iboun,iMPIcut_xi,iMPIcut_eta,ispec2D_moho_top,ispec2D_moho_bot, &
+                        ispec2D_400_top,ispec2D_400_bot,ispec2D_670_top,ispec2D_670_bot, &
+                        NEX_PER_PROC_ETA,nex_eta_moho,RMOHO,R400,R670,r_moho,r_400,r_670, &
+                        ONE_CRUST,NUMBER_OF_MESH_LAYERS,layer_shift, &
+                        iregion_code,ifirst_region,ilast_region, &
+                        first_layer_aniso,last_layer_aniso,nb_layer_above_aniso,is_on_a_slice_edge)
+
+  ! to consider anisotropic elements first and to build the mesh from the bottom to the top of the region
+  allocate (perm_layer(ifirst_region:ilast_region),stat=ier)
+  if(ier /= 0) stop 'error in allocate 18'
+  ! default ordering: layers in reverse index order (bottom to top)
+  perm_layer = (/ (i, i=ilast_region,ifirst_region,-1) /)
+
+  if(iregion_code == IREGION_CRUST_MANTLE) then
+    ! slots 1 and 2 hold the two anisotropic layers so they are meshed first;
+    ! all remaining layers follow from slot 3 on, still bottom to top
+    cpt=3
+    perm_layer(1)=first_layer_aniso
+    perm_layer(2)=last_layer_aniso
+    do i = ilast_region,ifirst_region,-1
+      if (i/=first_layer_aniso .and. i/=last_layer_aniso) then
+        perm_layer(cpt) = i
+        cpt=cpt+1
+      endif
+    enddo
+  endif
+
+  ! crustal layer stretching: element layer's top and bottom radii will get stretched when in crust
+  ! (number of element layers in crust can vary for different resolutions and 1chunk simulations)
+  allocate(stretch_tab(2,ner(1)),stat=ier)
+  if(ier /= 0) stop 'error in allocate 19'
+  if (CASE_3D .and. iregion_code == IREGION_CRUST_MANTLE .and. .not. SUPPRESS_CRUSTAL_MESH) then
+    ! stretching function determines top and bottom of each element layer in the
+    ! crust region (between r_top(1) and r_bottom(1)), where ner(1) is the
+    ! number of element layers in this crust region
+
+    ! differentiate between regional meshes or global meshes
+    if( REGIONAL_MOHO_MESH ) then
+      call stretching_function_regional(r_top(1),r_bottom(1),ner(1),stretch_tab)
+    else
+      call stretching_function(r_top(1),r_bottom(1),ner(1),stretch_tab)
+    endif
+
+    ! RMIDDLE_CRUST so far is only used for 1D - models with two layers in the crust
+    ! (i.e. ONE_CRUST is set to .false.), those models do not use CASE_3D
+
+    ! all 3D models use this stretching function to honor a 3D crustal model
+    ! for those models, we set RMIDDLE_CRUST to the bottom of the first element layer
+    ! this value will be used in moho_stretching.f90 to decide whether or not elements
+    ! have to be stretched under oceanic crust.
+    !
+    ! note: stretch_tab uses (dimensionalized) radii from r_top and r_bottom
+    !(with stretch_tab( index_radius(1=top,2=bottom), index_layer( 1=first layer, 2=second layer, 3= ...) )
+    RMIDDLE_CRUST = stretch_tab(2,1)
+  endif
+
+  end subroutine crm_setup_layers
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  subroutine crm_create_elements(iregion_code,nspec,ipass, &
+                                NEX_PER_PROC_XI,NEX_PER_PROC_ETA, &
+                                offset_proc_xi,offset_proc_eta)
+
+! creates the different regions of the mesh
+!
+! builds all spectral elements of the region layer by layer, following the
+! ordering in perm_layer (set up in crm_setup_layers): regular elements,
+! mesh-doubling elements where a layer carries a doubling, and the central
+! cube for the inner core. Afterwards it flags elements lying on a slice
+! edge and, on the second pass, sanity-checks the count of transverse
+! isotropic elements. The running element counter ispec must end up equal
+! to nspec.
+
+  use meshfem3D_par,only: &
+    idoubling,is_on_a_slice_edge, &
+    xstore,ystore,zstore, &
+    IMAIN,myrank, &
+    IREGION_CRUST_MANTLE,IREGION_OUTER_CORE,IREGION_INNER_CORE,IFLAG_IN_FICTITIOUS_CUBE, &
+    NPROC_XI,NPROC_ETA,NCHUNKS, &
+    INCLUDE_CENTRAL_CUBE,R_CENTRAL_CUBE, &
+    MAX_NUMBER_OF_MESH_LAYERS,MAX_NUM_REGIONS,NB_SQUARE_CORNERS, &
+    rmins,rmaxs,iproc_xi,iproc_eta,ichunk,NEX_XI, &
+    rotation_matrix,ANGULAR_WIDTH_XI_RAD,ANGULAR_WIDTH_ETA_RAD, &
+    ratio_sampling_array,doubling_index,this_region_has_a_doubling, &
+    ratio_divide_central_cube,CUT_SUPERBRICK_XI,CUT_SUPERBRICK_ETA, &
+    ner,r_top,r_bottom
+
+  use meshfem3D_models_par,only: &
+    SAVE_BOUNDARY_MESH,SUPPRESS_CRUSTAL_MESH,REGIONAL_MOHO_MESH, &
+    TRANSVERSE_ISOTROPY
+
+  use create_regions_mesh_par
+  use create_regions_mesh_par2
+
+  implicit none
+
+  integer,intent(in) :: iregion_code,nspec
+  integer,intent(in) :: ipass
+
+  ! NOTE(review): no intent declared on the remaining dummies; they are
+  ! passed through to the element-creation routines below
+  integer :: NEX_PER_PROC_XI,NEX_PER_PROC_ETA
+
+  integer :: offset_proc_xi,offset_proc_eta
+
+  ! local parameters
+  integer :: ispec,nspec_tiso
+  ! parameters needed to store the radii of the grid points in the spherically symmetric Earth
+  double precision :: rmin,rmax
+  integer :: ner_without_doubling,ilayer,ilayer_loop
+  ! timing
+  double precision, external :: wtime
+  double precision :: time_start,tCPU
+  integer,dimension(8) :: tval
+
+  ! initializes flags for transverse isotropic elements
+  ispec_is_tiso(:) = .false.
+
+  ! get MPI starting time
+  time_start = wtime()
+
+  ! loop on all the layers in this region of the mesh
+  ispec = 0 ! counts all the elements in this region of the mesh
+  do ilayer_loop = ifirst_region,ilast_region
+
+    ! perm_layer maps the loop index to the actual layer (aniso layers first)
+    ilayer = perm_layer(ilayer_loop)
+
+    ! user output
+    if(myrank == 0 ) then
+      write(IMAIN,*) '  creating layer ',ilayer_loop-ifirst_region+1, &
+                                   'out of ',ilast_region-ifirst_region+1
+    endif
+
+    ! determine the radii that define the shell
+    rmin = rmins(ilayer)
+    rmax = rmaxs(ilayer)
+
+    ! record the first element after the two anisotropic layers, and the first
+    ! element of the layers above the anisotropic zone (module variables;
+    ! presumably used later for element ordering -- confirm in caller)
+    if(iregion_code == IREGION_CRUST_MANTLE .and. ilayer_loop==3) then
+      FIRST_ELT_NON_ANISO = ispec+1
+    endif
+    if(iregion_code == IREGION_CRUST_MANTLE &
+      .and. ilayer_loop==(ilast_region-nb_layer_above_aniso+1)) then
+      FIRST_ELT_ABOVE_ANISO = ispec+1
+    endif
+
+    ner_without_doubling = ner(ilayer)
+
+    ! if there is a doubling at the top of this region, we implement it in the last two layers of elements
+    ! and therefore we suppress two layers of regular elements here
+    USE_ONE_LAYER_SB = .false.
+    if(this_region_has_a_doubling(ilayer)) then
+      if (ner(ilayer) == 1) then
+        ner_without_doubling = ner_without_doubling - 1
+        USE_ONE_LAYER_SB = .true.
+      else
+        ner_without_doubling = ner_without_doubling - 2
+        USE_ONE_LAYER_SB = .false.
+      endif
+    endif
+
+    ! regular mesh elements
+    ! (ispec is incremented inside; on pass 2 the material/jacobian arrays are filled)
+    call create_regular_elements(myrank,ilayer,ichunk,ispec,ipass, &
+                    ifirst_region,ilast_region,iregion_code, &
+                    nspec,NCHUNKS,NUMBER_OF_MESH_LAYERS, &
+                    NPROC_XI,NPROC_ETA,NEX_PER_PROC_XI,NEX_PER_PROC_ETA, &
+                    ner_without_doubling,ner,ratio_sampling_array,r_top,r_bottom, &
+                    xstore,ystore,zstore, &
+                    iaddx,iaddy,iaddz,xigll,yigll,zigll, &
+                    shape3D,dershape2D_bottom, &
+                    INCLUDE_CENTRAL_CUBE, &
+                    rmin,rmax,r_moho,r_400,r_670, &
+                    rhostore,dvpstore,kappavstore,kappahstore,muvstore,muhstore,eta_anisostore, &
+                    nspec_ani,c11store,c12store,c13store,c14store,c15store,c16store,c22store, &
+                    c23store,c24store,c25store,c26store,c33store,c34store,c35store, &
+                    c36store,c44store,c45store,c46store,c55store,c56store,c66store, &
+                    nspec_actually,xixstore,xiystore,xizstore,etaxstore,etaystore,etazstore,&
+                    gammaxstore,gammaystore,gammazstore,&
+                    nspec_stacey,rho_vp,rho_vs,iboun,iMPIcut_xi,iMPIcut_eta, &
+                    ANGULAR_WIDTH_XI_RAD,ANGULAR_WIDTH_ETA_RAD,iproc_xi,iproc_eta, &
+                    nspec_att,Qmu_store,tau_e_store,tau_s,T_c_source, &
+                    size(tau_e_store,2),size(tau_e_store,3),size(tau_e_store,4), &
+                    rotation_matrix,idoubling,doubling_index,USE_ONE_LAYER_SB, &
+                    stretch_tab, &
+                    NSPEC2D_MOHO,NSPEC2D_400,NSPEC2D_670,nex_eta_moho, &
+                    ibelm_moho_top,ibelm_moho_bot,ibelm_400_top,ibelm_400_bot,ibelm_670_top,ibelm_670_bot, &
+                    normal_moho,normal_400,normal_670,jacobian2D_moho,jacobian2D_400,jacobian2D_670, &
+                    ispec2D_moho_top,ispec2D_moho_bot,ispec2D_400_top,&
+                    ispec2D_400_bot,ispec2D_670_top,ispec2D_670_bot,&
+                    ispec_is_tiso)
+
+
+    ! mesh doubling elements
+    if( this_region_has_a_doubling(ilayer) ) &
+      call create_doubling_elements(myrank,ilayer,ichunk,ispec,ipass, &
+                    ifirst_region,ilast_region,iregion_code, &
+                    nspec,NCHUNKS,NUMBER_OF_MESH_LAYERS, &
+                    NPROC_XI,NPROC_ETA,NEX_PER_PROC_XI,NEX_PER_PROC_ETA, &
+                    ner,ratio_sampling_array,r_top,r_bottom, &
+                    xstore,ystore,zstore,xigll,yigll,zigll, &
+                    shape3D,dershape2D_bottom, &
+                    INCLUDE_CENTRAL_CUBE, &
+                    rmin,rmax,r_moho,r_400,r_670, &
+                    rhostore,dvpstore,kappavstore,kappahstore,muvstore,muhstore,eta_anisostore, &
+                    nspec_ani,c11store,c12store,c13store,c14store,c15store,c16store,c22store, &
+                    c23store,c24store,c25store,c26store,c33store,c34store,c35store, &
+                    c36store,c44store,c45store,c46store,c55store,c56store,c66store, &
+                    nspec_actually,xixstore,xiystore,xizstore,etaxstore,etaystore,etazstore,&
+                    gammaxstore,gammaystore,gammazstore,&
+                    nspec_stacey,rho_vp,rho_vs,iboun,iMPIcut_xi,iMPIcut_eta, &
+                    ANGULAR_WIDTH_XI_RAD,ANGULAR_WIDTH_ETA_RAD,iproc_xi,iproc_eta, &
+                    nspec_att,Qmu_store,tau_e_store,tau_s,T_c_source, &
+                    size(tau_e_store,2),size(tau_e_store,3),size(tau_e_store,4), &
+                    rotation_matrix,idoubling,doubling_index,USE_ONE_LAYER_SB, &
+                    NSPEC2D_MOHO,NSPEC2D_400,NSPEC2D_670,nex_eta_moho, &
+                    ibelm_moho_top,ibelm_moho_bot,ibelm_400_top,ibelm_400_bot,ibelm_670_top,ibelm_670_bot, &
+                    normal_moho,normal_400,normal_670,jacobian2D_moho,jacobian2D_400,jacobian2D_670, &
+                    ispec2D_moho_top,ispec2D_moho_bot,ispec2D_400_top,&
+                    ispec2D_400_bot,ispec2D_670_top,ispec2D_670_bot, &
+                    CUT_SUPERBRICK_XI,CUT_SUPERBRICK_ETA,offset_proc_xi,offset_proc_eta, &
+                    ispec_is_tiso)
+
+    ! user output
+    if(myrank == 0 ) then
+      ! time estimate
+      tCPU = wtime() - time_start
+
+      ! outputs current time on system
+      call date_and_time(VALUES=tval)
+
+      ! debug: outputs remaining time (poor estimation)
+      !tCPU = (1.0-(ilayer_loop-ifirst_region+1.0)/(ilast_region-ifirst_region+1.0)) &
+      !          /(ilayer_loop-ifirst_region+1.0)/(ilast_region-ifirst_region+1.0)*tCPU*10.0
+
+      ! user output: percentage of layers done and wall-clock time of day
+      write(IMAIN,'(a,f5.1,a,a,i2.2,a,i2.2,a,i2.2,a)') &
+        "    ",(ilayer_loop-ifirst_region+1.0)/(ilast_region-ifirst_region+1.0) * 100.0,"%", &
+        "    time: ",tval(5),"h ",tval(6),"min ",tval(7),"sec"
+
+      ! flushes I/O buffer
+      call flush_IMAIN()
+    endif
+
+  enddo !ilayer_loop
+
+  deallocate(stretch_tab)
+  deallocate(perm_layer)
+  deallocate(jacobian2D_moho,jacobian2D_400,jacobian2D_670)
+
+  if(myrank == 0 ) write(IMAIN,*)
+
+  ! define central cube in inner core
+  if(INCLUDE_CENTRAL_CUBE .and. iregion_code == IREGION_INNER_CORE) then
+    ! user output
+    if(myrank == 0 ) write(IMAIN,*) '  creating central cube'
+
+    call create_central_cube(myrank,ichunk,ispec,iaddx,iaddy,iaddz,ipass, &
+                        nspec,NEX_XI,NEX_PER_PROC_XI,NEX_PER_PROC_ETA,R_CENTRAL_CUBE, &
+                        iproc_xi,iproc_eta,NPROC_XI,NPROC_ETA,ratio_divide_central_cube, &
+                        iMPIcut_xi,iMPIcut_eta,iboun, &
+                        idoubling,iregion_code,xstore,ystore,zstore, &
+                        shape3D,rmin,rmax,rhostore,dvpstore,&
+                        kappavstore,kappahstore,muvstore,muhstore,eta_anisostore, &
+                        xixstore,xiystore,xizstore,etaxstore,etaystore,etazstore,&
+                        gammaxstore,gammaystore,gammazstore,nspec_actually, &
+                        c11store,c12store,c13store,c14store,c15store,c16store,c22store, &
+                        c23store,c24store,c25store,c26store,c33store,c34store,c35store, &
+                        c36store,c44store,c45store,c46store,c55store,c56store,c66store, &
+                        nspec_ani,nspec_stacey,nspec_att,Qmu_store,tau_e_store,tau_s,T_c_source,&
+                        size(tau_e_store,2),size(tau_e_store,3),size(tau_e_store,4), &
+                        rho_vp,rho_vs,xigll,yigll,zigll, &
+                        ispec_is_tiso)
+  endif
+
+  ! check total number of spectral elements created
+  if(ispec /= nspec) call exit_MPI(myrank,'ispec should equal nspec')
+
+  ! if any of these flags is true, the element is on a communication edge
+  ! this is not enough because it can also be in contact by an edge or a corner but not a full face
+  ! therefore we will have to fix array "is_on_a_slice_edge" later in the solver to take this into account
+  is_on_a_slice_edge(:) = &
+      iMPIcut_xi(1,:) .or. iMPIcut_xi(2,:) .or. &
+      iMPIcut_eta(1,:) .or. iMPIcut_eta(2,:) .or. &
+      iboun(1,:) .or. iboun(2,:) .or. &
+      iboun(3,:) .or. iboun(4,:) .or. &
+      iboun(5,:) .or. iboun(6,:)
+
+  ! no need to count fictitious elements on the edges
+  ! for which communications cannot be overlapped with calculations
+  where(idoubling == IFLAG_IN_FICTITIOUS_CUBE) is_on_a_slice_edge = .false.
+
+  ! checks transverse isotropic elements
+  if( ipass == 2 ) then
+    ! count number of anisotropic elements in current region
+    ! should be zero in all the regions except in the mantle
+    nspec_tiso = count(ispec_is_tiso(:))
+
+    ! checks number of anisotropic elements found in the mantle
+    if(iregion_code /= IREGION_CRUST_MANTLE .and. nspec_tiso /= 0 ) &
+      call exit_MPI(myrank,'found anisotropic elements outside of the mantle')
+    if( TRANSVERSE_ISOTROPY ) then
+      if(iregion_code == IREGION_CRUST_MANTLE .and. nspec_tiso == 0) &
+        call exit_MPI(myrank,'found no anisotropic elements in the mantle')
+    endif
+  endif
+
+  end subroutine crm_create_elements
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  subroutine crm_setup_indexing(nspec,nglob_theor,npointot)
+
+! creates the global addressing array ibool: assigns a unique global point
+! number to every GLL point of every element (identifying colocated points
+! via the sort in get_global), checks the resulting point count against the
+! theoretical value nglob_theor, then renumbers ibool with an indirect
+! addressing that reduces cache misses in the solver
+!
+! note: assumes npointot >= nspec*NGLLX*NGLLY*NGLLZ (xp/yp/zp are indexed
+! up to that product below)
+
+  use constants,only: NGLLX,NGLLY,NGLLZ,ZERO
+
+  use meshfem3d_par,only: &
+    ibool,xstore,ystore,zstore,  &
+    myrank
+
+  use create_regions_mesh_par2
+
+  implicit none
+
+  ! number of spectral elements in each block
+  integer,intent(in) :: nspec,npointot,nglob_theor
+
+  ! local parameters
+  ! variables for creating array ibool
+  double precision, dimension(:), allocatable :: xp,yp,zp
+  integer, dimension(:), allocatable :: locval
+  logical, dimension(:), allocatable :: ifseg
+
+  integer :: nglob
+  integer :: ieoff,ilocnum,ier
+  integer :: i,j,k,ispec
+  character(len=150) :: errmsg
+
+  ! allocate memory for arrays
+  allocate(locval(npointot), &
+          ifseg(npointot), &
+          xp(npointot), &
+          yp(npointot), &
+          zp(npointot),stat=ier)
+  if(ier /= 0) stop 'error in allocate 20'
+
+  locval(:) = 0
+  ifseg(:) = .false.
+  xp(:) = ZERO
+  yp(:) = ZERO
+  zp(:) = ZERO
+
+  ! we need to create a copy of the x, y and z arrays because sorting in get_global will swap
+  ! these arrays and therefore destroy them
+  do ispec=1,nspec
+    ieoff = NGLLX * NGLLY * NGLLZ * (ispec-1)
+    ilocnum = 0
+    do k=1,NGLLZ
+      do j=1,NGLLY
+        do i=1,NGLLX
+          ilocnum = ilocnum + 1
+          xp(ilocnum+ieoff) = xstore(i,j,k,ispec)
+          yp(ilocnum+ieoff) = ystore(i,j,k,ispec)
+          zp(ilocnum+ieoff) = zstore(i,j,k,ispec)
+        enddo
+      enddo
+    enddo
+  enddo
+
+  ! builds ibool and returns the number of distinct global points in nglob
+  call get_global(nspec,xp,yp,zp,ibool,locval,ifseg,nglob,npointot)
+
+  deallocate(xp,yp,zp)
+  deallocate(locval,ifseg)
+
+  ! check that number of points found equals theoretical value
+  if(nglob /= nglob_theor) then
+    write(errmsg,*) 'incorrect total number of points found: myrank,nglob,nglob_theor = ',&
+      myrank,nglob,nglob_theor
+    call exit_MPI(myrank,errmsg)
+  endif
+  if(minval(ibool) /= 1 .or. maxval(ibool) /= nglob_theor) &
+    call exit_MPI(myrank,'incorrect global numbering')
+
+  ! creates a new indirect addressing to reduce cache misses in memory access in the solver
+  ! this is *critical* to improve performance in the solver
+  call get_global_indirect_addressing(nspec,nglob_theor,ibool)
+
+  ! checks again: renumbering must preserve the 1..nglob_theor range
+  if(minval(ibool) /= 1 .or. maxval(ibool) /= nglob_theor) &
+    call exit_MPI(myrank,'incorrect global numbering after sorting')
+
+  end subroutine crm_setup_indexing
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  subroutine crm_setup_mpi_buffers(npointot,nspec,iregion_code)
+
+! sets up MPI cutplane arrays
+!
+! fills the MPI buffer index arrays of this region (allocated during the
+! first pass in crm_allocate_arrays): the 2-D cut-plane buffers along xi
+! and eta, and the 1-D buffers along the four radial slice corners
+
+  use meshfem3d_par,only: &
+    ibool,idoubling, &
+    xstore,ystore,zstore, &
+    myrank,NGLLX,NGLLY,NGLLZ, &
+    NSPEC1D_RADIAL_CORNER,NGLOB1D_RADIAL_CORNER, &
+    NSPEC2D_XI_FACE,NSPEC2D_ETA_FACE, &
+    NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX
+
+  use create_MPI_interfaces_par
+  use create_regions_mesh_par2
+
+  implicit none
+
+  ! number of spectral elements in each block
+  integer,intent(in) :: nspec,npointot
+
+  integer,intent(in) :: iregion_code
+
+  ! local parameters
+  ! scratch flag per global point, reused by each get_MPI_* call below
+  logical, dimension(:), allocatable :: mask_ibool
+  integer :: ier
+
+  ! arrays mask_ibool(npointot) used to save memory
+  ! allocate memory for arrays
+  allocate(mask_ibool(npointot), &
+          stat=ier)
+  if(ier /= 0) stop 'error in allocate 20b'
+
+  ! initializes
+  npoin2D_xi_all(:) = 0
+  npoin2D_eta_all(:) = 0
+  iboolleft_xi(:) = 0
+  iboolleft_eta(:) = 0
+  iboolright_xi(:) = 0
+  iboolright_eta(:) = 0
+
+  ! gets MPI buffer indices
+  ! 2-D cut planes perpendicular to xi ...
+  call get_MPI_cutplanes_xi(myrank,prname,nspec,iMPIcut_xi,ibool, &
+                  xstore,ystore,zstore,mask_ibool,npointot, &
+                  NSPEC2D_ETA_FACE,iregion_code,npoin2D_xi, &
+                  iboolleft_xi,iboolright_xi, &
+                  npoin2D_xi_all,NGLOB2DMAX_XMIN_XMAX(iregion_code))
+
+  ! ... and perpendicular to eta
+  call get_MPI_cutplanes_eta(myrank,prname,nspec,iMPIcut_eta,ibool, &
+                  xstore,ystore,zstore,mask_ibool,npointot, &
+                  NSPEC2D_XI_FACE,iregion_code,npoin2D_eta, &
+                  iboolleft_eta,iboolright_eta, &
+                  npoin2D_eta_all,NGLOB2DMAX_YMIN_YMAX(iregion_code))
+
+  ! 1-D buffers along the four corner edges of the slice
+  call get_MPI_1D_buffers(myrank,prname,nspec,iMPIcut_xi,iMPIcut_eta, &
+                  ibool,idoubling, &
+                  xstore,ystore,zstore,mask_ibool,npointot, &
+                  NSPEC1D_RADIAL_CORNER,NGLOB1D_RADIAL_CORNER,iregion_code, &
+                  ibool1D_leftxi_lefteta,ibool1D_rightxi_lefteta, &
+                  ibool1D_leftxi_righteta,ibool1D_rightxi_righteta, &
+                  xyz1D_leftxi_lefteta,xyz1D_rightxi_lefteta, &
+                  xyz1D_leftxi_righteta,xyz1D_rightxi_righteta, &
+                  NGLOB1D_RADIAL_MAX)
+
+  deallocate(mask_ibool)
+
+  end subroutine crm_setup_mpi_buffers
+
+
+!
+!-------------------------------------------------------------------------------
+!
+
+subroutine crm_save_mesh_files(nspec,npointot,iregion_code)
+
+! writes mesh data files for visualization (AVS or OpenDX format),
+! either through ADIOS when ADIOS_FOR_AVS_DX is set, or as direct
+! per-process file output otherwise
+
+  use meshfem3d_par,only: &
+    ibool,idoubling, &
+    xstore,ystore,zstore, &
+    myrank,NGLLX,NGLLY,NGLLZ, &
+    RICB,RCMB,RTOPDDOUBLEPRIME,R600,R670,R220,R771,R400,R120,R80,RMOHO, &
+    RMIDDLE_CRUST,ROCEAN, &
+    ADIOS_FOR_AVS_DX
+
+
+  use meshfem3D_models_par,only: &
+    ELLIPTICITY,ISOTROPIC_3D_MANTLE, &
+    nspl,rspl,espl,espl2
+
+  use create_regions_mesh_par2
+
+  ! Modules for temporary AVS/DX data
+  use AVS_DX_global_mod
+
+  implicit none
+
+  ! number of spectral elements in each block
+  integer,intent(in) :: nspec,npointot,iregion_code
+
+  ! local parameters
+  ! arrays used for AVS or DX files
+  integer, dimension(:), allocatable :: num_ibool_AVS_DX
+  logical, dimension(:), allocatable :: mask_ibool
+  integer :: ier
+  ! note: unused ADIOS-related locals removed (reg_name, outputname,
+  ! group_name, comm, sizeprocs, adios_* handles, avs_dx_global_vars);
+  ! the ADIOS path is handled entirely inside crm_save_mesh_files_adios()
+
+  ! arrays num_ibool_AVS_DX and mask_ibool used to save memory
+  ! allocate memory for arrays
+  allocate(num_ibool_AVS_DX(npointot), &
+          mask_ibool(npointot), &
+          stat=ier)
+  if(ier /= 0) stop 'error in allocate 21'
+
+  if (ADIOS_FOR_AVS_DX) then
+    call crm_save_mesh_files_adios(nspec,npointot,iregion_code, &
+        num_ibool_AVS_DX, mask_ibool)
+  else
+    call write_AVS_DX_global_data(myrank,prname,nspec,ibool,idoubling, &
+        xstore,ystore,zstore, num_ibool_AVS_DX,mask_ibool,npointot)
+
+    call write_AVS_DX_global_faces_data(myrank,prname,nspec,iMPIcut_xi, &
+        iMPIcut_eta,ibool, idoubling,xstore,ystore,zstore,num_ibool_AVS_DX, &
+        mask_ibool,npointot, rhostore,kappavstore,muvstore,nspl,rspl, &
+        espl,espl2, ELLIPTICITY,ISOTROPIC_3D_MANTLE, RICB,RCMB, &
+        RTOPDDOUBLEPRIME,R600,R670,R220,R771,R400,R120,R80,RMOHO, &
+        RMIDDLE_CRUST,ROCEAN,iregion_code)
+
+    ! note: the two calls below also belong to the non-ADIOS branch;
+    ! they were previously indented at the outer level, which obscured this
+    call write_AVS_DX_global_chunks_data(myrank,prname,nspec,iboun,ibool, &
+          idoubling,xstore,ystore,zstore,num_ibool_AVS_DX,mask_ibool, &
+          npointot,rhostore,kappavstore,muvstore,nspl,rspl,espl,espl2, &
+          ELLIPTICITY,ISOTROPIC_3D_MANTLE, &
+          RICB,RCMB,RTOPDDOUBLEPRIME,R600,R670,R220,R771,R400,R120,R80,RMOHO, &
+          RMIDDLE_CRUST,ROCEAN,iregion_code)
+
+    call write_AVS_DX_surface_data(myrank,prname,nspec,iboun,ibool, &
+          idoubling,xstore,ystore,zstore,num_ibool_AVS_DX,mask_ibool,npointot, &
+          rhostore,kappavstore,muvstore,nspl,rspl,espl,espl2, &
+          ELLIPTICITY,ISOTROPIC_3D_MANTLE, &
+          RICB,RCMB,RTOPDDOUBLEPRIME,R600,R670,R220,R771,R400,R120,R80,RMOHO, &
+          RMIDDLE_CRUST,ROCEAN,iregion_code)
+  endif
+
+  ! Output material information for all GLL points
+  ! Can be used to check the mesh
+  !    call write_AVS_DX_global_data_gll(prname,nspec,xstore,ystore,zstore,&
+  !                rhostore,kappavstore,muvstore,Qmu_store,ATTENUATION)
+  deallocate(num_ibool_AVS_DX,mask_ibool)
+
+end subroutine crm_save_mesh_files
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  subroutine crm_free_MPI_arrays(iregion_code)
+
+! releases the per-region MPI interface arrays once they are no longer needed
+
+  use create_MPI_interfaces_par
+
+  use MPI_crust_mantle_par
+  use MPI_outer_core_par
+  use MPI_inner_core_par
+
+  implicit none
+
+  integer,intent(in):: iregion_code
+
+  ! deallocates the inner-element phase lists and mesh-coloring arrays
+  ! of whichever region was just meshed
+  if( iregion_code == IREGION_CRUST_MANTLE ) then
+    deallocate(num_elem_colors_crust_mantle)
+    deallocate(phase_ispec_inner_crust_mantle)
+  else if( iregion_code == IREGION_OUTER_CORE ) then
+    deallocate(num_elem_colors_outer_core)
+    deallocate(phase_ispec_inner_outer_core)
+  else if( iregion_code == IREGION_INNER_CORE ) then
+    deallocate(num_elem_colors_inner_core)
+    deallocate(phase_ispec_inner_inner_core)
+  endif
+
+  end subroutine crm_free_MPI_arrays
+

Deleted: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/create_regions_mesh.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/create_regions_mesh.f90	2013-07-01 01:24:15 UTC (rev 22468)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/create_regions_mesh.f90	2013-07-01 01:33:15 UTC (rev 22469)
@@ -1,1289 +0,0 @@
-!=====================================================================
-!
-!          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
-!          --------------------------------------------------
-!
-!          Main authors: Dimitri Komatitsch and Jeroen Tromp
-!                        Princeton University, USA
-!             and University of Pau / CNRS / INRIA, France
-! (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
-!                            April 2011
-!
-! This program is free software; you can redistribute it and/or modify
-! it under the terms of the GNU General Public License as published by
-! the Free Software Foundation; either version 2 of the License, or
-! (at your option) any later version.
-!
-! This program is distributed in the hope that it will be useful,
-! but WITHOUT ANY WARRANTY; without even the implied warranty of
-! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-! GNU General Public License for more details.
-!
-! You should have received a copy of the GNU General Public License along
-! with this program; if not, write to the Free Software Foundation, Inc.,
-! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-!
-!=====================================================================
-
-
-  subroutine create_regions_mesh(iregion_code, &
-                          nspec,nglob_theor,npointot, &
-                          NEX_PER_PROC_XI,NEX_PER_PROC_ETA, &
-                          NSPEC2DMAX_XMIN_XMAX,NSPEC2DMAX_YMIN_YMAX, &
-                          NSPEC2D_BOTTOM,NSPEC2D_TOP, &
-                          offset_proc_xi,offset_proc_eta, &
-                          ipass)
-
-! creates the different regions of the mesh
-
-  use meshfem3D_par,only: &
-    ibool,idoubling,xstore,ystore,zstore, &
-    IMAIN,volume_total,myrank,LOCAL_PATH, &
-    IREGION_CRUST_MANTLE,IREGION_OUTER_CORE,IREGION_INNER_CORE, &
-    IFLAG_IN_FICTITIOUS_CUBE, &
-    NCHUNKS,SAVE_MESH_FILES,ABSORBING_CONDITIONS, &
-    R_CENTRAL_CUBE,RICB,RCMB, &
-    MAX_NUMBER_OF_MESH_LAYERS,MAX_NUM_REGIONS,NB_SQUARE_CORNERS, &
-    NGLOB1D_RADIAL_CORNER, &
-    NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX, &
-    ADIOS_FOR_ARRAYS_SOLVER
-
-  use meshfem3D_models_par,only: &
-    SAVE_BOUNDARY_MESH,SUPPRESS_CRUSTAL_MESH,REGIONAL_MOHO_MESH, &
-    OCEANS
-
-  use create_MPI_interfaces_par, only: &
-    NGLOB1D_RADIAL_MAX,iboolcorner,iboolfaces, &
-    iboolleft_xi,iboolright_xi,iboolleft_eta,iboolright_eta, &
-    ibool1D_leftxi_lefteta,ibool1D_rightxi_lefteta, &
-    ibool1D_leftxi_righteta,ibool1D_rightxi_righteta, &
-    xyz1D_leftxi_lefteta,xyz1D_rightxi_lefteta, &
-    xyz1D_leftxi_righteta,xyz1D_rightxi_righteta
-
-  use create_regions_mesh_par
-  use create_regions_mesh_par2
-
-  use MPI_crust_mantle_par,only: &
-    xstore_crust_mantle,ystore_crust_mantle,zstore_crust_mantle
-  use MPI_outer_core_par,only: &
-    xstore_outer_core,ystore_outer_core,zstore_outer_core
-  use MPI_inner_core_par,only: &
-    xstore_inner_core,ystore_inner_core,zstore_inner_core
-
-  implicit none
-
-  ! code for the four regions of the mesh
-  integer :: iregion_code
-
-  ! correct number of spectral elements in each block depending on chunk type
-  integer :: nspec
-  integer :: nglob_theor,npointot
-
-  integer :: NEX_PER_PROC_XI,NEX_PER_PROC_ETA
-  integer :: NSPEC2DMAX_XMIN_XMAX,NSPEC2DMAX_YMIN_YMAX
-  integer :: NSPEC2D_BOTTOM,NSPEC2D_TOP
-
-  integer :: offset_proc_xi,offset_proc_eta
-
-  ! now perform two passes in this part to be able to save memory
-  integer,intent(in) :: ipass
-
-  ! local parameters
-  integer :: ier
-  integer :: nglob
-  ! check area and volume of the final mesh
-  double precision :: area_local_bottom,area_local_top
-  double precision :: volume_local
-
-  ! user output
-  if(myrank == 0 ) then
-    write(IMAIN,*)
-    select case(ipass)
-    case(1)
-      write(IMAIN,*) 'first pass'
-    case(2)
-      write(IMAIN,*) 'second pass'
-    case default
-      call exit_MPI(myrank,'error ipass value in create_regions_mesh')
-    end select
-    call flush_IMAIN()
-  endif
-
-  ! create the name for the database of the current slide and region
-  call create_name_database(prname,myrank,iregion_code,LOCAL_PATH)
-
-  ! initializes arrays
-  call sync_all()
-  if( myrank == 0) then
-    write(IMAIN,*)
-    write(IMAIN,*) '  ...allocating arrays '
-    call flush_IMAIN()
-  endif
-  call crm_allocate_arrays(iregion_code,nspec,ipass, &
-                          NSPEC2DMAX_XMIN_XMAX,NSPEC2DMAX_YMIN_YMAX, &
-                          NSPEC2D_BOTTOM,NSPEC2D_TOP)
-
-
-  ! initialize number of layers
-  call sync_all()
-  if( myrank == 0) then
-    write(IMAIN,*)
-    write(IMAIN,*) '  ...setting up layers '
-    call flush_IMAIN()
-  endif
-  call crm_setup_layers(iregion_code,nspec,ipass,NEX_PER_PROC_ETA)
-
-  !  creates mesh elements
-  call sync_all()
-  if( myrank == 0) then
-    write(IMAIN,*)
-    write(IMAIN,*) '  ...creating mesh elements '
-    call flush_IMAIN()
-  endif
-  call crm_create_elements(iregion_code,nspec,ipass, &
-                          NEX_PER_PROC_XI,NEX_PER_PROC_ETA, &
-                          offset_proc_xi,offset_proc_eta)
-
-
-  ! only create global addressing and the MPI buffers in the first pass
-  select case(ipass)
-  case( 1 )
-    ! creates ibool index array for projection from local to global points
-    call sync_all()
-    if( myrank == 0) then
-      write(IMAIN,*)
-      write(IMAIN,*) '  ...creating global addressing'
-      call flush_IMAIN()
-    endif
-    call crm_setup_indexing(nspec,nglob_theor,npointot)
-
-
-    ! create MPI buffers
-    call sync_all()
-    if( myrank == 0) then
-      write(IMAIN,*)
-      write(IMAIN,*) '  ...creating MPI buffers'
-      call flush_IMAIN()
-    endif
-    call crm_setup_mpi_buffers(npointot,nspec,iregion_code)
-
-
-    ! sets up Stacey absorbing boundary indices
-    if(NCHUNKS /= 6) then
-      call get_absorb(myrank,prname,iregion_code, iboun,nspec,nimin,nimax,&
-          njmin,njmax, nkmin_xi,nkmin_eta, NSPEC2DMAX_XMIN_XMAX, &
-          NSPEC2DMAX_YMIN_YMAX, NSPEC2D_BOTTOM)
-    endif
-
-  ! only create mass matrix and save all the final arrays in the second pass
-  case( 2 )
-    ! precomputes jacobian for 2d absorbing boundary surfaces
-    call sync_all()
-    if( myrank == 0) then
-      write(IMAIN,*)
-      write(IMAIN,*) '  ...precomputing jacobian'
-      call flush_IMAIN()
-    endif
-    call get_jacobian_boundaries(myrank,iboun,nspec,xstore,ystore,zstore, &
-              dershape2D_x,dershape2D_y,dershape2D_bottom,dershape2D_top, &
-              ibelm_xmin,ibelm_xmax,ibelm_ymin,ibelm_ymax,ibelm_bottom,ibelm_top, &
-              nspec2D_xmin,nspec2D_xmax,nspec2D_ymin,nspec2D_ymax, &
-              jacobian2D_xmin,jacobian2D_xmax, &
-              jacobian2D_ymin,jacobian2D_ymax, &
-              jacobian2D_bottom,jacobian2D_top, &
-              normal_xmin,normal_xmax, &
-              normal_ymin,normal_ymax, &
-              normal_bottom,normal_top, &
-              NSPEC2D_BOTTOM,NSPEC2D_TOP, &
-              NSPEC2DMAX_XMIN_XMAX,NSPEC2DMAX_YMIN_YMAX,&
-              xigll,yigll,zigll)
-
-    ! create chunk buffers if more than one chunk
-    call sync_all()
-    if( myrank == 0) then
-      write(IMAIN,*)
-      write(IMAIN,*) '  ...creating chunk buffers'
-      call flush_IMAIN()
-    endif
-    call create_chunk_buffers(iregion_code,nspec,ibool,idoubling, &
-                              xstore,ystore,zstore,nglob_theor, &
-                              NGLOB1D_RADIAL_CORNER,NGLOB1D_RADIAL_MAX, &
-                              NGLOB2DMAX_XMIN_XMAX(iregion_code),NGLOB2DMAX_YMIN_YMAX(iregion_code))
-
-    ! only deallocates after second pass
-    deallocate(ibool1D_leftxi_lefteta,ibool1D_rightxi_lefteta, &
-              ibool1D_leftxi_righteta,ibool1D_rightxi_righteta, &
-              xyz1D_leftxi_lefteta,xyz1D_rightxi_lefteta, &
-              xyz1D_leftxi_righteta,xyz1D_rightxi_righteta)
-
-    ! setup mpi communication interfaces
-    call sync_all()
-    if( myrank == 0) then
-      write(IMAIN,*)
-      write(IMAIN,*) '  ...preparing MPI interfaces'
-      call flush_IMAIN()
-    endif
-    ! creates MPI interface arrays
-    call create_MPI_interfaces(iregion_code)
-
-    ! sets up MPI interface arrays
-    call setup_MPI_interfaces(iregion_code)
-
-    ! only deallocates after second pass
-    deallocate(iboolleft_xi,iboolright_xi,iboolleft_eta,iboolright_eta)
-    deallocate(iboolfaces)
-    deallocate(iboolcorner)
-
-    ! sets up inner/outer element arrays
-    call sync_all()
-    if( myrank == 0) then
-      write(IMAIN,*)
-      write(IMAIN,*) '  ...element inner/outer separation '
-      call flush_IMAIN()
-    endif
-    call setup_inner_outer(iregion_code)
-
-    ! sets up mesh coloring
-    call sync_all()
-    if( myrank == 0) then
-      write(IMAIN,*)
-      write(IMAIN,*) '  ...element mesh coloring '
-      call flush_IMAIN()
-    endif
-    call setup_color_perm(iregion_code)
-
-    ! frees allocated mesh memory
-    select case( iregion_code )
-    case( IREGION_CRUST_MANTLE )
-      deallocate(xstore_crust_mantle,ystore_crust_mantle,zstore_crust_mantle)
-    case( IREGION_OUTER_CORE )
-      deallocate(xstore_outer_core,ystore_outer_core,zstore_outer_core)
-    case( IREGION_INNER_CORE )
-      deallocate(xstore_inner_core,ystore_inner_core,zstore_inner_core)
-    end select
-
-    !uncomment: adds model smoothing for point profile models
-    !    if( THREE_D_MODEL == THREE_D_MODEL_PPM ) then
-    !     call smooth_model(myrank, nproc_xi,nproc_eta,&
-    !        rho_vp,rho_vs,nspec_stacey, &
-    !        iregion_code,xixstore,xiystore,xizstore, &
-    !        etaxstore,etaystore,etazstore, &
-    !        gammaxstore,gammaystore,gammazstore, &
-    !        xstore,ystore,zstore,rhostore,dvpstore, &
-    !        kappavstore,kappahstore,muvstore,muhstore,eta_anisostore,&
-    !        nspec,HETEROGEN_3D_MANTLE, &
-    !        NEX_XI,NCHUNKS,ABSORBING_CONDITIONS )
-
-
-    ! creates mass matrix
-    call sync_all()
-    if( myrank == 0) then
-      write(IMAIN,*)
-      write(IMAIN,*) '  ...creating mass matrix'
-      call flush_IMAIN()
-    endif
-
-    ! allocates mass matrices in this slice (will be fully assembled in the solver)
-    !
-    ! in the case of stacey boundary conditions, add C*deltat/2 contribution to the mass matrix
-    ! on Stacey edges for the crust_mantle and outer_core regions but not for the inner_core region
-    ! thus the mass matrix must be replaced by three mass matrices including the "C" damping matrix
-    !
-    ! if absorbing_conditions are not set or if NCHUNKS=6, only one mass matrix is needed
-    ! for the sake of performance, only "rmassz" array will be filled and "rmassx" & "rmassy" will be obsolete
-
-    ! copy the theoretical number of points for the second pass
-    nglob = nglob_theor
-
-    if(NCHUNKS /= 6 .and. ABSORBING_CONDITIONS) then
-      select case(iregion_code)
-      case( IREGION_CRUST_MANTLE )
-        nglob_xy = nglob
-      case( IREGION_INNER_CORE, IREGION_OUTER_CORE )
-        nglob_xy = 1
-      endselect
-    else
-       nglob_xy = 1
-    endif
-
-    allocate(rmassx(nglob_xy), &
-            rmassy(nglob_xy), &
-            stat=ier)
-    if(ier /= 0) stop 'error in allocate 21'
-
-    allocate(rmassz(nglob),stat=ier)
-    if(ier /= 0) stop 'error in allocate 22'
-
-    ! allocates ocean load mass matrix as well if oceans
-    if(OCEANS .and. iregion_code == IREGION_CRUST_MANTLE) then
-      nglob_oceans = nglob
-    else
-      ! allocate dummy array if no oceans
-      nglob_oceans = 1
-    endif
-    allocate(rmass_ocean_load(nglob_oceans),stat=ier)
-    if(ier /= 0) stop 'error in allocate 22'
-
-    ! creating mass matrices in this slice (will be fully assembled in the solver)
-    call create_mass_matrices(myrank,nspec,idoubling,ibool, &
-                            iregion_code,xstore,ystore,zstore, &
-                            NSPEC2D_TOP,NSPEC2D_BOTTOM)
-
-    ! save the binary files
-    call sync_all()
-    if( myrank == 0) then
-      write(IMAIN,*)
-      write(IMAIN,*) '  ...saving binary files'
-      call flush_IMAIN()
-    endif
-    ! saves mesh and model parameters
-    if (ADIOS_FOR_ARRAYS_SOLVER) then
-      call save_arrays_solver_adios(myrank,nspec,nglob,idoubling,ibool, &
-          iregion_code,xstore,ystore,zstore,  &
-          NSPEC2DMAX_XMIN_XMAX, NSPEC2DMAX_YMIN_YMAX, &
-          NSPEC2D_TOP,NSPEC2D_BOTTOM)
-    else
-      call save_arrays_solver(myrank,nspec,nglob,idoubling,ibool, &
-          iregion_code,xstore,ystore,zstore, NSPEC2D_TOP,NSPEC2D_BOTTOM)
-    endif
-
-    ! frees memory
-    deallocate(rmassx,rmassy,rmassz)
-    deallocate(rmass_ocean_load)
-    ! Stacey
-    if( NCHUNKS /= 6 ) deallocate(nimin,nimax,njmin,njmax,nkmin_xi,nkmin_eta)
-
-    ! saves MPI interface infos
-    call save_arrays_solver_MPI(iregion_code)
-
-    ! frees MPI arrays memory
-    call crm_free_MPI_arrays(iregion_code)
-
-    ! boundary mesh for MOHO, 400 and 670 discontinuities
-    if (SAVE_BOUNDARY_MESH .and. iregion_code == IREGION_CRUST_MANTLE) then
-      ! user output
-      call sync_all()
-      if( myrank == 0) then
-        write(IMAIN,*)
-        write(IMAIN,*) '  ...saving boundary mesh files'
-        call flush_IMAIN()
-      endif
-      ! saves boundary file
-      if (ADIOS_FOR_ARRAYS_SOLVER) then
-        call save_arrays_solver_boundary_adios()
-      else
-        call save_arrays_solver_boundary()
-      endif
-
-    endif
-
-    ! compute volume, bottom and top area of that part of the slice
-    call compute_volumes(volume_local,area_local_bottom,area_local_top, &
-        nspec,wxgll,wygll,wzgll,xixstore,xiystore,xizstore, &
-        etaxstore,etaystore,etazstore,gammaxstore,gammaystore,gammazstore, &
-        NSPEC2D_BOTTOM,jacobian2D_bottom,NSPEC2D_TOP,jacobian2D_top)
-
-    ! computes total area and volume
-    call compute_area(myrank,NCHUNKS,iregion_code, area_local_bottom, &
-        area_local_top, volume_local,volume_total, RCMB,RICB,R_CENTRAL_CUBE)
-
-    ! create AVS or DX mesh data for the slices
-    if(SAVE_MESH_FILES) then
-      ! user output
-      call sync_all()
-      if( myrank == 0) then
-        write(IMAIN,*)
-        write(IMAIN,*) '  ...saving AVS mesh files'
-        call flush_IMAIN()
-      endif
-      call crm_save_mesh_files(nspec,npointot,iregion_code)
-    endif
-
-  case default
-    stop 'there cannot be more than two passes in mesh creation'
-
-  end select  ! end of test if first or second pass
-
-
-  ! deallocate these arrays after each pass
-  ! because they have a different size in each pass to save memory
-  deallocate(xixstore,xiystore,xizstore)
-  deallocate(etaxstore,etaystore,etazstore)
-  deallocate(gammaxstore,gammaystore,gammazstore)
-
-  ! deallocate arrays
-  deallocate(rhostore,dvpstore,kappavstore,kappahstore)
-  deallocate(muvstore,muhstore)
-  deallocate(eta_anisostore)
-  deallocate(ispec_is_tiso)
-  deallocate(c11store,c12store,c13store,c14store,c15store,c16store,c22store, &
-            c23store,c24store,c25store,c26store,c33store,c34store,c35store, &
-            c36store,c44store,c45store,c46store,c55store,c56store,c66store)
-  deallocate(iboun)
-  deallocate(ibelm_xmin,ibelm_xmax,ibelm_ymin,ibelm_ymax)
-  deallocate(ibelm_bottom,ibelm_top)
-  deallocate(jacobian2D_xmin,jacobian2D_xmax,jacobian2D_ymin,jacobian2D_ymax)
-  deallocate(jacobian2D_bottom,jacobian2D_top)
-  deallocate(normal_xmin,normal_xmax,normal_ymin,normal_ymax)
-  deallocate(normal_bottom,normal_top)
-  deallocate(iMPIcut_xi,iMPIcut_eta)
-
-  deallocate(rho_vp,rho_vs)
-  deallocate(Qmu_store)
-  deallocate(tau_e_store)
-
-  deallocate(ibelm_moho_top,ibelm_moho_bot)
-  deallocate(ibelm_400_top,ibelm_400_bot)
-  deallocate(ibelm_670_top,ibelm_670_bot)
-  deallocate(normal_moho,normal_400,normal_670)
-
-  end subroutine create_regions_mesh
-
-!
-!-------------------------------------------------------------------------------------------------
-!
-
-  subroutine crm_allocate_arrays(iregion_code,nspec,ipass, &
-                                NSPEC2DMAX_XMIN_XMAX,NSPEC2DMAX_YMIN_YMAX, &
-                                NSPEC2D_BOTTOM,NSPEC2D_TOP)
-
-  use constants
-
-  use meshfem3D_par,only: &
-    NCHUNKS,NUMCORNERS_SHARED,NUMFACES_SHARED, &
-    NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX, &
-    NGLOB1D_RADIAL,NGLOB1D_RADIAL_CORNER
-
-  use meshfem3D_models_par,only: &
-    ATTENUATION,ANISOTROPIC_INNER_CORE,ANISOTROPIC_3D_MANTLE, &
-    SAVE_BOUNDARY_MESH,AM_V
-
-  use create_regions_mesh_par2
-  use create_MPI_interfaces_par
-
-  implicit none
-
-  integer,intent(in) :: iregion_code,nspec
-  integer,intent(in) :: ipass
-
-  integer,intent(in) :: NSPEC2DMAX_XMIN_XMAX,NSPEC2DMAX_YMIN_YMAX
-  integer,intent(in) :: NSPEC2D_BOTTOM,NSPEC2D_TOP
-
-  ! local parameters
-  integer :: ier
-
-  ! New Attenuation definition on all GLL points
-  ! Attenuation
-  if (ATTENUATION) then
-    T_c_source = AM_V%QT_c_source
-    tau_s(:)   = AM_V%Qtau_s(:)
-    nspec_att = nspec
-
-    ! note: to save memory, one can set USE_3D_ATTENUATION_ARRAYS to .false. which
-    !          will create attenuation arrays storing only 1 point per element
-    !          (otherwise, 3D attenuation arrays will be defined for all GLL points)
-    if( USE_3D_ATTENUATION_ARRAYS ) then
-      ! attenuation arrays are fully 3D
-      allocate(Qmu_store(NGLLX,NGLLY,NGLLZ,nspec_att), &
-              tau_e_store(N_SLS,NGLLX,NGLLY,NGLLZ,nspec_att),stat=ier)
-    else
-      ! save some memory in case of 1D attenuation models
-      allocate(Qmu_store(1,1,1,nspec_att), &
-              tau_e_store(N_SLS,1,1,1,nspec_att),stat=ier)
-    endif
-  else
-    ! allocates dummy size arrays
-    nspec_att = 1
-    allocate(Qmu_store(1,1,1,nspec_att), &
-            tau_e_store(N_SLS,1,1,1,nspec_att),stat=ier)
-  end if
-  if(ier /= 0) stop 'error in allocate 1'
-
-  ! array with model density
-  allocate(rhostore(NGLLX,NGLLY,NGLLZ,nspec), &
-          dvpstore(NGLLX,NGLLY,NGLLZ,nspec),stat=ier)
-  if(ier /= 0) stop 'error in allocate 6'
-
-  ! for anisotropy
-  allocate(kappavstore(NGLLX,NGLLY,NGLLZ,nspec), &
-          muvstore(NGLLX,NGLLY,NGLLZ,nspec), &
-          kappahstore(NGLLX,NGLLY,NGLLZ,nspec), &
-          muhstore(NGLLX,NGLLY,NGLLZ,nspec), &
-          eta_anisostore(NGLLX,NGLLY,NGLLZ,nspec), &
-          ispec_is_tiso(nspec),stat=ier)
-  if(ier /= 0) stop 'error in allocate 7'
-
-  ! Stacey absorbing boundaries
-  if(NCHUNKS /= 6) then
-    nspec_stacey = nspec
-  else
-    nspec_stacey = 1
-  endif
-  allocate(rho_vp(NGLLX,NGLLY,NGLLZ,nspec_stacey), &
-          rho_vs(NGLLX,NGLLY,NGLLZ,nspec_stacey),stat=ier)
-  if(ier /= 0) stop 'error in allocate 8'
-
-  ! anisotropy
-  if((ANISOTROPIC_INNER_CORE .and. iregion_code == IREGION_INNER_CORE) .or. &
-     (ANISOTROPIC_3D_MANTLE .and. iregion_code == IREGION_CRUST_MANTLE)) then
-    nspec_ani = nspec
-  else
-    nspec_ani = 1
-  endif
-  allocate(c11store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
-          c12store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
-          c13store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
-          c14store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
-          c15store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
-          c16store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
-          c22store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
-          c23store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
-          c24store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
-          c25store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
-          c26store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
-          c33store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
-          c34store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
-          c35store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
-          c36store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
-          c44store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
-          c45store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
-          c46store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
-          c55store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
-          c56store(NGLLX,NGLLY,NGLLZ,nspec_ani), &
-          c66store(NGLLX,NGLLY,NGLLZ,nspec_ani),stat=ier)
-  if(ier /= 0) stop 'error in allocate 9'
-
-  ! boundary locator
-  allocate(iboun(6,nspec),stat=ier)
-  if(ier /= 0) stop 'error in allocate 10'
-
-  ! boundary parameters locator
-  allocate(ibelm_xmin(NSPEC2DMAX_XMIN_XMAX), &
-          ibelm_xmax(NSPEC2DMAX_XMIN_XMAX), &
-          ibelm_ymin(NSPEC2DMAX_YMIN_YMAX), &
-          ibelm_ymax(NSPEC2DMAX_YMIN_YMAX), &
-          ibelm_bottom(NSPEC2D_BOTTOM), &
-          ibelm_top(NSPEC2D_TOP),stat=ier)
-  if(ier /= 0) stop 'error in allocate 11'
-
-  ! 2-D jacobians and normals
-  allocate(jacobian2D_xmin(NGLLY,NGLLZ,NSPEC2DMAX_XMIN_XMAX), &
-          jacobian2D_xmax(NGLLY,NGLLZ,NSPEC2DMAX_XMIN_XMAX), &
-          jacobian2D_ymin(NGLLX,NGLLZ,NSPEC2DMAX_YMIN_YMAX), &
-          jacobian2D_ymax(NGLLX,NGLLZ,NSPEC2DMAX_YMIN_YMAX), &
-          jacobian2D_bottom(NGLLX,NGLLY,NSPEC2D_BOTTOM), &
-          jacobian2D_top(NGLLX,NGLLY,NSPEC2D_TOP),stat=ier)
-  if(ier /= 0) stop 'error in allocate 12'
-
-  allocate(normal_xmin(NDIM,NGLLY,NGLLZ,NSPEC2DMAX_XMIN_XMAX), &
-          normal_xmax(NDIM,NGLLY,NGLLZ,NSPEC2DMAX_XMIN_XMAX), &
-          normal_ymin(NDIM,NGLLX,NGLLZ,NSPEC2DMAX_YMIN_YMAX), &
-          normal_ymax(NDIM,NGLLX,NGLLZ,NSPEC2DMAX_YMIN_YMAX), &
-          normal_bottom(NDIM,NGLLX,NGLLY,NSPEC2D_BOTTOM), &
-          normal_top(NDIM,NGLLX,NGLLY,NSPEC2D_TOP),stat=ier)
-  if(ier /= 0) stop 'error in allocate 13'
-
-  ! Stacey
-  if( ipass == 1 .and. NCHUNKS /= 6 ) then
-    allocate(nimin(2,NSPEC2DMAX_YMIN_YMAX), &
-            nimax(2,NSPEC2DMAX_YMIN_YMAX), &
-            njmin(2,NSPEC2DMAX_XMIN_XMAX), &
-            njmax(2,NSPEC2DMAX_XMIN_XMAX), &
-            nkmin_xi(2,NSPEC2DMAX_XMIN_XMAX), &
-            nkmin_eta(2,NSPEC2DMAX_YMIN_YMAX),stat=ier)
-    if(ier /= 0) stop 'error in allocate 14'
-  endif
-
-  ! MPI cut-planes parameters along xi and along eta
-  allocate(iMPIcut_xi(2,nspec), &
-          iMPIcut_eta(2,nspec),stat=ier)
-  if(ier /= 0) stop 'error in allocate 15'
-
-  ! MPI buffer indices
-  !
-  ! define maximum size for message buffers
-  ! use number of elements found in the mantle since it is the largest region
-  NGLOB2DMAX_XY = max(NGLOB2DMAX_XMIN_XMAX(IREGION_CRUST_MANTLE),NGLOB2DMAX_YMIN_YMAX(IREGION_CRUST_MANTLE))
-  ! 1-D buffers
-  NGLOB1D_RADIAL_MAX = maxval(NGLOB1D_RADIAL_CORNER(iregion_code,:))
-
-  if( ipass == 1 ) then
-    allocate(iboolleft_xi(NGLOB2DMAX_XMIN_XMAX(iregion_code)), &
-            iboolright_xi(NGLOB2DMAX_XMIN_XMAX(iregion_code)), &
-            iboolleft_eta(NGLOB2DMAX_YMIN_YMAX(iregion_code)), &
-            iboolright_eta(NGLOB2DMAX_YMIN_YMAX(iregion_code)), &
-            stat=ier)
-    if(ier /= 0) stop 'error in allocate 15b'
-
-    allocate(ibool1D_leftxi_lefteta(NGLOB1D_RADIAL_MAX), &
-            ibool1D_rightxi_lefteta(NGLOB1D_RADIAL_MAX), &
-            ibool1D_leftxi_righteta(NGLOB1D_RADIAL_MAX), &
-            ibool1D_rightxi_righteta(NGLOB1D_RADIAL_MAX), &
-            stat=ier)
-    if(ier /= 0) stop 'error in allocate 15c'
-
-    allocate(xyz1D_leftxi_lefteta(NGLOB1D_RADIAL_MAX,NDIM), &
-            xyz1D_rightxi_lefteta(NGLOB1D_RADIAL_MAX,NDIM), &
-            xyz1D_leftxi_righteta(NGLOB1D_RADIAL_MAX,NDIM), &
-            xyz1D_rightxi_righteta(NGLOB1D_RADIAL_MAX,NDIM), &
-            stat=ier)
-    if(ier /= 0) stop 'error in allocate 15c'
-
-    allocate(iboolcorner(NGLOB1D_RADIAL(iregion_code),NUMCORNERS_SHARED), &
-            iboolfaces(NGLOB2DMAX_XY,NUMFACES_SHARED), &
-            stat=ier)
-    if(ier /= 0) stop 'error in allocate 15b'
-
-  endif
-
-
-  ! store and save the final arrays only in the second pass
-  ! therefore in the first pass some arrays can be allocated with a dummy size
-  if(ipass == 1) then
-    nspec_actually = 1
-  else
-    nspec_actually = nspec
-  endif
-  allocate(xixstore(NGLLX,NGLLY,NGLLZ,nspec_actually), &
-          xiystore(NGLLX,NGLLY,NGLLZ,nspec_actually), &
-          xizstore(NGLLX,NGLLY,NGLLZ,nspec_actually), &
-          etaxstore(NGLLX,NGLLY,NGLLZ,nspec_actually), &
-          etaystore(NGLLX,NGLLY,NGLLZ,nspec_actually), &
-          etazstore(NGLLX,NGLLY,NGLLZ,nspec_actually), &
-          gammaxstore(NGLLX,NGLLY,NGLLZ,nspec_actually), &
-          gammaystore(NGLLX,NGLLY,NGLLZ,nspec_actually), &
-          gammazstore(NGLLX,NGLLY,NGLLZ,nspec_actually),stat=ier)
-  if(ier /= 0) stop 'error in allocate 16'
-
-  ! boundary mesh
-  if (ipass == 2 .and. SAVE_BOUNDARY_MESH .and. iregion_code == IREGION_CRUST_MANTLE) then
-    NSPEC2D_MOHO = NSPEC2D_TOP
-    NSPEC2D_400 = NSPEC2D_MOHO / 4
-    NSPEC2D_670 = NSPEC2D_400
-  else
-    NSPEC2D_MOHO = 1
-    NSPEC2D_400 = 1
-    NSPEC2D_670 = 1
-  endif
-  allocate(ibelm_moho_top(NSPEC2D_MOHO),ibelm_moho_bot(NSPEC2D_MOHO), &
-          ibelm_400_top(NSPEC2D_400),ibelm_400_bot(NSPEC2D_400), &
-          ibelm_670_top(NSPEC2D_670),ibelm_670_bot(NSPEC2D_670), &
-          normal_moho(NDIM,NGLLX,NGLLY,NSPEC2D_MOHO), &
-          normal_400(NDIM,NGLLX,NGLLY,NSPEC2D_400), &
-          normal_670(NDIM,NGLLX,NGLLY,NSPEC2D_670), &
-          jacobian2D_moho(NGLLX,NGLLY,NSPEC2D_MOHO), &
-          jacobian2D_400(NGLLX,NGLLY,NSPEC2D_400), &
-          jacobian2D_670(NGLLX,NGLLY,NSPEC2D_670),stat=ier)
-  if(ier /= 0) stop 'error in allocate 17'
-
-  end subroutine crm_allocate_arrays
-
-!
-!-------------------------------------------------------------------------------------------------
-!
-
-  subroutine crm_setup_layers(iregion_code,nspec,ipass, &
-                              NEX_PER_PROC_ETA)
-
-  use meshfem3D_par,only: &
-    ibool,idoubling,is_on_a_slice_edge, &
-    xstore,ystore,zstore, &
-    myrank,NGLLX,NGLLY,NGLLZ, &
-    IREGION_CRUST_MANTLE,IREGION_OUTER_CORE,IREGION_INNER_CORE, &
-    R670,RMOHO,R400,RMIDDLE_CRUST,MAX_NUMBER_OF_MESH_LAYERS, &
-    ner,r_top,r_bottom
-
-  use meshfem3D_models_par,only: &
-    CASE_3D,SUPPRESS_CRUSTAL_MESH,ONE_CRUST,REGIONAL_MOHO_MESH
-
-  use create_regions_mesh_par
-  use create_regions_mesh_par2
-
-  implicit none
-
-  integer,intent(in) :: iregion_code,nspec
-  integer,intent(in) :: ipass
-  integer :: NEX_PER_PROC_ETA
-
-  ! local parameters
-  integer :: i,ier
-
-  ! initializes element layers
-  call initialize_layers(myrank,ipass,xigll,yigll,zigll,wxgll,wygll,wzgll, &
-                        shape3D,dershape3D,shape2D_x,shape2D_y,shape2D_bottom,shape2D_top, &
-                        dershape2D_x,dershape2D_y,dershape2D_bottom,dershape2D_top, &
-                        iaddx,iaddy,iaddz,nspec,xstore,ystore,zstore,ibool,idoubling, &
-                        iboun,iMPIcut_xi,iMPIcut_eta,ispec2D_moho_top,ispec2D_moho_bot, &
-                        ispec2D_400_top,ispec2D_400_bot,ispec2D_670_top,ispec2D_670_bot, &
-                        NEX_PER_PROC_ETA,nex_eta_moho,RMOHO,R400,R670,r_moho,r_400,r_670, &
-                        ONE_CRUST,NUMBER_OF_MESH_LAYERS,layer_shift, &
-                        iregion_code,ifirst_region,ilast_region, &
-                        first_layer_aniso,last_layer_aniso,nb_layer_above_aniso,is_on_a_slice_edge)
-
-  ! to consider anisotropic elements first and to build the mesh from the bottom to the top of the region
-  allocate (perm_layer(ifirst_region:ilast_region),stat=ier)
-  if(ier /= 0) stop 'error in allocate 18'
-  perm_layer = (/ (i, i=ilast_region,ifirst_region,-1) /)
-
-  if(iregion_code == IREGION_CRUST_MANTLE) then
-    cpt=3
-    perm_layer(1)=first_layer_aniso
-    perm_layer(2)=last_layer_aniso
-    do i = ilast_region,ifirst_region,-1
-      if (i/=first_layer_aniso .and. i/=last_layer_aniso) then
-        perm_layer(cpt) = i
-        cpt=cpt+1
-      endif
-    enddo
-  endif
-
-  ! crustal layer stretching: element layer's top and bottom radii will get stretched when in crust
-  ! (number of element layers in crust can vary for different resolutions and 1chunk simulations)
-  allocate(stretch_tab(2,ner(1)),stat=ier)
-  if(ier /= 0) stop 'error in allocate 19'
-  if (CASE_3D .and. iregion_code == IREGION_CRUST_MANTLE .and. .not. SUPPRESS_CRUSTAL_MESH) then
-    ! stretching function determines top and bottom of each element layer in the
-    ! crust region (between r_top(1) and r_bottom(1)), where ner(1) is the
-    ! number of element layers in this crust region
-
-    ! differentiate between regional meshes or global meshes
-    if( REGIONAL_MOHO_MESH ) then
-      call stretching_function_regional(r_top(1),r_bottom(1),ner(1),stretch_tab)
-    else
-      call stretching_function(r_top(1),r_bottom(1),ner(1),stretch_tab)
-    endif
-
-    ! RMIDDLE_CRUST so far is only used for 1D - models with two layers in the crust
-    ! (i.e. ONE_CRUST is set to .false.), those models do not use CASE_3D
-
-    ! all 3D models use this stretching function to honor a 3D crustal model
-    ! for those models, we set RMIDDLE_CRUST to the bottom of the first element layer
-    ! this value will be used in moho_stretching.f90 to decide whether or not elements
-    ! have to be stretched under oceanic crust.
-    !
-    ! note: stretch_tab uses (dimensionalized) radii from r_top and r_bottom
-    !(with stretch_tab( index_radius(1=top,2=bottom), index_layer( 1=first layer, 2=second layer, 3= ...) )
-    RMIDDLE_CRUST = stretch_tab(2,1)
-  endif
-
-  end subroutine crm_setup_layers
-
-!
-!-------------------------------------------------------------------------------------------------
-!
-
-  subroutine crm_create_elements(iregion_code,nspec,ipass, &
-                                NEX_PER_PROC_XI,NEX_PER_PROC_ETA, &
-                                offset_proc_xi,offset_proc_eta)
-
-! creates the different regions of the mesh
-
-  use meshfem3D_par,only: &
-    idoubling,is_on_a_slice_edge, &
-    xstore,ystore,zstore, &
-    IMAIN,myrank, &
-    IREGION_CRUST_MANTLE,IREGION_OUTER_CORE,IREGION_INNER_CORE,IFLAG_IN_FICTITIOUS_CUBE, &
-    NPROC_XI,NPROC_ETA,NCHUNKS, &
-    INCLUDE_CENTRAL_CUBE,R_CENTRAL_CUBE, &
-    MAX_NUMBER_OF_MESH_LAYERS,MAX_NUM_REGIONS,NB_SQUARE_CORNERS, &
-    rmins,rmaxs,iproc_xi,iproc_eta,ichunk,NEX_XI, &
-    rotation_matrix,ANGULAR_WIDTH_XI_RAD,ANGULAR_WIDTH_ETA_RAD, &
-    ratio_sampling_array,doubling_index,this_region_has_a_doubling, &
-    ratio_divide_central_cube,CUT_SUPERBRICK_XI,CUT_SUPERBRICK_ETA, &
-    ner,r_top,r_bottom
-
-  use meshfem3D_models_par,only: &
-    SAVE_BOUNDARY_MESH,SUPPRESS_CRUSTAL_MESH,REGIONAL_MOHO_MESH, &
-    TRANSVERSE_ISOTROPY
-
-  use create_regions_mesh_par
-  use create_regions_mesh_par2
-
-  implicit none
-
-  integer,intent(in) :: iregion_code,nspec
-  integer,intent(in) :: ipass
-
-  integer :: NEX_PER_PROC_XI,NEX_PER_PROC_ETA
-
-  integer :: offset_proc_xi,offset_proc_eta
-
-  ! local parameters
-  integer :: ispec,nspec_tiso
-  ! parameters needed to store the radii of the grid points in the spherically symmetric Earth
-  double precision :: rmin,rmax
-  integer :: ner_without_doubling,ilayer,ilayer_loop
-  ! timing
-  double precision, external :: wtime
-  double precision :: time_start,tCPU
-  integer,dimension(8) :: tval
-
-  ! initializes flags for transverse isotropic elements
-  ispec_is_tiso(:) = .false.
-
-  ! get MPI starting time
-  time_start = wtime()
-
-  ! loop on all the layers in this region of the mesh
-  ispec = 0 ! counts all the elements in this region of the mesh
-  do ilayer_loop = ifirst_region,ilast_region
-
-    ilayer = perm_layer(ilayer_loop)
-
-    ! user output
-    if(myrank == 0 ) then
-      write(IMAIN,*) '  creating layer ',ilayer_loop-ifirst_region+1, &
-                                   'out of ',ilast_region-ifirst_region+1
-    endif
-
-    ! determine the radii that define the shell
-    rmin = rmins(ilayer)
-    rmax = rmaxs(ilayer)
-
-    if(iregion_code == IREGION_CRUST_MANTLE .and. ilayer_loop==3) then
-      FIRST_ELT_NON_ANISO = ispec+1
-    endif
-    if(iregion_code == IREGION_CRUST_MANTLE &
-      .and. ilayer_loop==(ilast_region-nb_layer_above_aniso+1)) then
-      FIRST_ELT_ABOVE_ANISO = ispec+1
-    endif
-
-    ner_without_doubling = ner(ilayer)
-
-    ! if there is a doubling at the top of this region, we implement it in the last two layers of elements
-    ! and therefore we suppress two layers of regular elements here
-    USE_ONE_LAYER_SB = .false.
-    if(this_region_has_a_doubling(ilayer)) then
-      if (ner(ilayer) == 1) then
-        ner_without_doubling = ner_without_doubling - 1
-        USE_ONE_LAYER_SB = .true.
-      else
-        ner_without_doubling = ner_without_doubling - 2
-        USE_ONE_LAYER_SB = .false.
-      endif
-    endif
-
-    ! regular mesh elements
-    call create_regular_elements(myrank,ilayer,ichunk,ispec,ipass, &
-                    ifirst_region,ilast_region,iregion_code, &
-                    nspec,NCHUNKS,NUMBER_OF_MESH_LAYERS, &
-                    NPROC_XI,NPROC_ETA,NEX_PER_PROC_XI,NEX_PER_PROC_ETA, &
-                    ner_without_doubling,ner,ratio_sampling_array,r_top,r_bottom, &
-                    xstore,ystore,zstore, &
-                    iaddx,iaddy,iaddz,xigll,yigll,zigll, &
-                    shape3D,dershape2D_bottom, &
-                    INCLUDE_CENTRAL_CUBE, &
-                    rmin,rmax,r_moho,r_400,r_670, &
-                    rhostore,dvpstore,kappavstore,kappahstore,muvstore,muhstore,eta_anisostore, &
-                    nspec_ani,c11store,c12store,c13store,c14store,c15store,c16store,c22store, &
-                    c23store,c24store,c25store,c26store,c33store,c34store,c35store, &
-                    c36store,c44store,c45store,c46store,c55store,c56store,c66store, &
-                    nspec_actually,xixstore,xiystore,xizstore,etaxstore,etaystore,etazstore,&
-                    gammaxstore,gammaystore,gammazstore,&
-                    nspec_stacey,rho_vp,rho_vs,iboun,iMPIcut_xi,iMPIcut_eta, &
-                    ANGULAR_WIDTH_XI_RAD,ANGULAR_WIDTH_ETA_RAD,iproc_xi,iproc_eta, &
-                    nspec_att,Qmu_store,tau_e_store,tau_s,T_c_source, &
-                    size(tau_e_store,2),size(tau_e_store,3),size(tau_e_store,4), &
-                    rotation_matrix,idoubling,doubling_index,USE_ONE_LAYER_SB, &
-                    stretch_tab, &
-                    NSPEC2D_MOHO,NSPEC2D_400,NSPEC2D_670,nex_eta_moho, &
-                    ibelm_moho_top,ibelm_moho_bot,ibelm_400_top,ibelm_400_bot,ibelm_670_top,ibelm_670_bot, &
-                    normal_moho,normal_400,normal_670,jacobian2D_moho,jacobian2D_400,jacobian2D_670, &
-                    ispec2D_moho_top,ispec2D_moho_bot,ispec2D_400_top,&
-                    ispec2D_400_bot,ispec2D_670_top,ispec2D_670_bot,&
-                    ispec_is_tiso)
-
-
-    ! mesh doubling elements
-    if( this_region_has_a_doubling(ilayer) ) &
-      call create_doubling_elements(myrank,ilayer,ichunk,ispec,ipass, &
-                    ifirst_region,ilast_region,iregion_code, &
-                    nspec,NCHUNKS,NUMBER_OF_MESH_LAYERS, &
-                    NPROC_XI,NPROC_ETA,NEX_PER_PROC_XI,NEX_PER_PROC_ETA, &
-                    ner,ratio_sampling_array,r_top,r_bottom, &
-                    xstore,ystore,zstore,xigll,yigll,zigll, &
-                    shape3D,dershape2D_bottom, &
-                    INCLUDE_CENTRAL_CUBE, &
-                    rmin,rmax,r_moho,r_400,r_670, &
-                    rhostore,dvpstore,kappavstore,kappahstore,muvstore,muhstore,eta_anisostore, &
-                    nspec_ani,c11store,c12store,c13store,c14store,c15store,c16store,c22store, &
-                    c23store,c24store,c25store,c26store,c33store,c34store,c35store, &
-                    c36store,c44store,c45store,c46store,c55store,c56store,c66store, &
-                    nspec_actually,xixstore,xiystore,xizstore,etaxstore,etaystore,etazstore,&
-                    gammaxstore,gammaystore,gammazstore,&
-                    nspec_stacey,rho_vp,rho_vs,iboun,iMPIcut_xi,iMPIcut_eta, &
-                    ANGULAR_WIDTH_XI_RAD,ANGULAR_WIDTH_ETA_RAD,iproc_xi,iproc_eta, &
-                    nspec_att,Qmu_store,tau_e_store,tau_s,T_c_source, &
-                    size(tau_e_store,2),size(tau_e_store,3),size(tau_e_store,4), &
-                    rotation_matrix,idoubling,doubling_index,USE_ONE_LAYER_SB, &
-                    NSPEC2D_MOHO,NSPEC2D_400,NSPEC2D_670,nex_eta_moho, &
-                    ibelm_moho_top,ibelm_moho_bot,ibelm_400_top,ibelm_400_bot,ibelm_670_top,ibelm_670_bot, &
-                    normal_moho,normal_400,normal_670,jacobian2D_moho,jacobian2D_400,jacobian2D_670, &
-                    ispec2D_moho_top,ispec2D_moho_bot,ispec2D_400_top,&
-                    ispec2D_400_bot,ispec2D_670_top,ispec2D_670_bot, &
-                    CUT_SUPERBRICK_XI,CUT_SUPERBRICK_ETA,offset_proc_xi,offset_proc_eta, &
-                    ispec_is_tiso)
-
-    ! user output
-    if(myrank == 0 ) then
-      ! time estimate
-      tCPU = wtime() - time_start
-
-      ! outputs current time on system
-      call date_and_time(VALUES=tval)
-
-      ! debug: outputs remaining time (poor estimation)
-      !tCPU = (1.0-(ilayer_loop-ifirst_region+1.0)/(ilast_region-ifirst_region+1.0)) &
-      !          /(ilayer_loop-ifirst_region+1.0)/(ilast_region-ifirst_region+1.0)*tCPU*10.0
-
-      ! user output
-      write(IMAIN,'(a,f5.1,a,a,i2.2,a,i2.2,a,i2.2,a)') &
-        "    ",(ilayer_loop-ifirst_region+1.0)/(ilast_region-ifirst_region+1.0) * 100.0,"%", &
-        "    time: ",tval(5),"h ",tval(6),"min ",tval(7),"sec"
-
-      ! flushes I/O buffer
-      call flush_IMAIN()
-    endif
-
-  enddo !ilayer_loop
-
-  deallocate(stretch_tab)
-  deallocate(perm_layer)
-  deallocate(jacobian2D_moho,jacobian2D_400,jacobian2D_670)
-
-  if(myrank == 0 ) write(IMAIN,*)
-
-  ! define central cube in inner core
-  if(INCLUDE_CENTRAL_CUBE .and. iregion_code == IREGION_INNER_CORE) then
-    ! user output
-    if(myrank == 0 ) write(IMAIN,*) '  creating central cube'
-
-    call create_central_cube(myrank,ichunk,ispec,iaddx,iaddy,iaddz,ipass, &
-                        nspec,NEX_XI,NEX_PER_PROC_XI,NEX_PER_PROC_ETA,R_CENTRAL_CUBE, &
-                        iproc_xi,iproc_eta,NPROC_XI,NPROC_ETA,ratio_divide_central_cube, &
-                        iMPIcut_xi,iMPIcut_eta,iboun, &
-                        idoubling,iregion_code,xstore,ystore,zstore, &
-                        shape3D,rmin,rmax,rhostore,dvpstore,&
-                        kappavstore,kappahstore,muvstore,muhstore,eta_anisostore, &
-                        xixstore,xiystore,xizstore,etaxstore,etaystore,etazstore,&
-                        gammaxstore,gammaystore,gammazstore,nspec_actually, &
-                        c11store,c12store,c13store,c14store,c15store,c16store,c22store, &
-                        c23store,c24store,c25store,c26store,c33store,c34store,c35store, &
-                        c36store,c44store,c45store,c46store,c55store,c56store,c66store, &
-                        nspec_ani,nspec_stacey,nspec_att,Qmu_store,tau_e_store,tau_s,T_c_source,&
-                        size(tau_e_store,2),size(tau_e_store,3),size(tau_e_store,4), &
-                        rho_vp,rho_vs,xigll,yigll,zigll, &
-                        ispec_is_tiso)
-  endif
-
-  ! check total number of spectral elements created
-  if(ispec /= nspec) call exit_MPI(myrank,'ispec should equal nspec')
-
-  ! if any of these flags is true, the element is on a communication edge
-  ! this is not enough because it can also be in contact by an edge or a corner but not a full face
-  ! therefore we will have to fix array "is_on_a_slice_edge" later in the solver to take this into account
-  is_on_a_slice_edge(:) = &
-      iMPIcut_xi(1,:) .or. iMPIcut_xi(2,:) .or. &
-      iMPIcut_eta(1,:) .or. iMPIcut_eta(2,:) .or. &
-      iboun(1,:) .or. iboun(2,:) .or. &
-      iboun(3,:) .or. iboun(4,:) .or. &
-      iboun(5,:) .or. iboun(6,:)
-
-  ! no need to count fictitious elements on the edges
-  ! for which communications cannot be overlapped with calculations
-  where(idoubling == IFLAG_IN_FICTITIOUS_CUBE) is_on_a_slice_edge = .false.
-
-  ! checks transverse isotropic elements
-  if( ipass == 2 ) then
-    ! count number of anisotropic elements in current region
-    ! should be zero in all the regions except in the mantle
-    nspec_tiso = count(ispec_is_tiso(:))
-
-    ! checks number of anisotropic elements found in the mantle
-    if(iregion_code /= IREGION_CRUST_MANTLE .and. nspec_tiso /= 0 ) &
-      call exit_MPI(myrank,'found anisotropic elements outside of the mantle')
-    if( TRANSVERSE_ISOTROPY ) then
-      if(iregion_code == IREGION_CRUST_MANTLE .and. nspec_tiso == 0) &
-        call exit_MPI(myrank,'found no anisotropic elements in the mantle')
-    endif
-  endif
-
-  end subroutine crm_create_elements
-
-!
-!-------------------------------------------------------------------------------------------------
-!
-
-  subroutine crm_setup_indexing(nspec,nglob_theor,npointot)
-
-! creates global indexing array ibool
-
-  use constants,only: NGLLX,NGLLY,NGLLZ,ZERO
-
-  use meshfem3d_par,only: &
-    ibool,xstore,ystore,zstore,  &
-    myrank
-
-  use create_regions_mesh_par2
-
-  implicit none
-
-  ! number of spectral elements in each block
-  integer,intent(in) :: nspec,npointot,nglob_theor
-
-  ! local parameters
-  ! variables for creating array ibool
-  double precision, dimension(:), allocatable :: xp,yp,zp
-  integer, dimension(:), allocatable :: locval
-  logical, dimension(:), allocatable :: ifseg
-
-  integer :: nglob
-  integer :: ieoff,ilocnum,ier
-  integer :: i,j,k,ispec
-  character(len=150) :: errmsg
-
-  ! allocate memory for arrays
-  allocate(locval(npointot), &
-          ifseg(npointot), &
-          xp(npointot), &
-          yp(npointot), &
-          zp(npointot),stat=ier)
-  if(ier /= 0) stop 'error in allocate 20'
-
-  locval(:) = 0
-  ifseg(:) = .false.
-  xp(:) = ZERO
-  yp(:) = ZERO
-  zp(:) = ZERO
-
-  ! we need to create a copy of the x, y and z arrays because sorting in get_global will swap
-  ! these arrays and therefore destroy them
-  do ispec=1,nspec
-    ieoff = NGLLX * NGLLY * NGLLZ * (ispec-1)
-    ilocnum = 0
-    do k=1,NGLLZ
-      do j=1,NGLLY
-        do i=1,NGLLX
-          ilocnum = ilocnum + 1
-          xp(ilocnum+ieoff) = xstore(i,j,k,ispec)
-          yp(ilocnum+ieoff) = ystore(i,j,k,ispec)
-          zp(ilocnum+ieoff) = zstore(i,j,k,ispec)
-        enddo
-      enddo
-    enddo
-  enddo
-
-  call get_global(nspec,xp,yp,zp,ibool,locval,ifseg,nglob,npointot)
-
-  deallocate(xp,yp,zp)
-  deallocate(locval,ifseg)
-
-  ! check that number of points found equals theoretical value
-  if(nglob /= nglob_theor) then
-    write(errmsg,*) 'incorrect total number of points found: myrank,nglob,nglob_theor = ',&
-      myrank,nglob,nglob_theor
-    call exit_MPI(myrank,errmsg)
-  endif
-  if(minval(ibool) /= 1 .or. maxval(ibool) /= nglob_theor) &
-    call exit_MPI(myrank,'incorrect global numbering')
-
-  ! creates a new indirect addressing to reduce cache misses in memory access in the solver
-  ! this is *critical* to improve performance in the solver
-  call get_global_indirect_addressing(nspec,nglob_theor,ibool)
-
-  ! checks again
-  if(minval(ibool) /= 1 .or. maxval(ibool) /= nglob_theor) &
-    call exit_MPI(myrank,'incorrect global numbering after sorting')
-
-  end subroutine crm_setup_indexing
-
-!
-!-------------------------------------------------------------------------------------------------
-!
-
-  subroutine crm_setup_mpi_buffers(npointot,nspec,iregion_code)
-
-! sets up MPI cutplane arrays
-
-  use meshfem3d_par,only: &
-    ibool,idoubling, &
-    xstore,ystore,zstore, &
-    myrank,NGLLX,NGLLY,NGLLZ, &
-    NSPEC1D_RADIAL_CORNER,NGLOB1D_RADIAL_CORNER, &
-    NSPEC2D_XI_FACE,NSPEC2D_ETA_FACE, &
-    NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX
-
-  use create_MPI_interfaces_par
-  use create_regions_mesh_par2
-
-  implicit none
-
-  ! number of spectral elements in each block
-  integer,intent(in) :: nspec,npointot
-
-  integer,intent(in) :: iregion_code
-
-  ! local parameters
-  logical, dimension(:), allocatable :: mask_ibool
-  integer :: ier
-
-  ! arrays mask_ibool(npointot) used to save memory
-  ! allocate memory for arrays
-  allocate(mask_ibool(npointot), &
-          stat=ier)
-  if(ier /= 0) stop 'error in allocate 20b'
-
-  ! initializes
-  npoin2D_xi_all(:) = 0
-  npoin2D_eta_all(:) = 0
-  iboolleft_xi(:) = 0
-  iboolleft_eta(:) = 0
-  iboolright_xi(:) = 0
-  iboolright_eta(:) = 0
-
-  ! gets MPI buffer indices
-  call get_MPI_cutplanes_xi(myrank,prname,nspec,iMPIcut_xi,ibool, &
-                  xstore,ystore,zstore,mask_ibool,npointot, &
-                  NSPEC2D_ETA_FACE,iregion_code,npoin2D_xi, &
-                  iboolleft_xi,iboolright_xi, &
-                  npoin2D_xi_all,NGLOB2DMAX_XMIN_XMAX(iregion_code))
-
-  call get_MPI_cutplanes_eta(myrank,prname,nspec,iMPIcut_eta,ibool, &
-                  xstore,ystore,zstore,mask_ibool,npointot, &
-                  NSPEC2D_XI_FACE,iregion_code,npoin2D_eta, &
-                  iboolleft_eta,iboolright_eta, &
-                  npoin2D_eta_all,NGLOB2DMAX_YMIN_YMAX(iregion_code))
-
-  call get_MPI_1D_buffers(myrank,prname,nspec,iMPIcut_xi,iMPIcut_eta, &
-                  ibool,idoubling, &
-                  xstore,ystore,zstore,mask_ibool,npointot, &
-                  NSPEC1D_RADIAL_CORNER,NGLOB1D_RADIAL_CORNER,iregion_code, &
-                  ibool1D_leftxi_lefteta,ibool1D_rightxi_lefteta, &
-                  ibool1D_leftxi_righteta,ibool1D_rightxi_righteta, &
-                  xyz1D_leftxi_lefteta,xyz1D_rightxi_lefteta, &
-                  xyz1D_leftxi_righteta,xyz1D_rightxi_righteta, &
-                  NGLOB1D_RADIAL_MAX)
-
-  deallocate(mask_ibool)
-
-  end subroutine crm_setup_mpi_buffers
-
-
-!
-!-------------------------------------------------------------------------------
-!
-
-subroutine crm_save_mesh_files(nspec,npointot,iregion_code)
-
-  use meshfem3d_par,only: &
-    ibool,idoubling, &
-    xstore,ystore,zstore, &
-    myrank,NGLLX,NGLLY,NGLLZ, &
-    RICB,RCMB,RTOPDDOUBLEPRIME,R600,R670,R220,R771,R400,R120,R80,RMOHO, &
-    RMIDDLE_CRUST,ROCEAN, &
-    ADIOS_FOR_AVS_DX
-
-
-  use meshfem3D_models_par,only: &
-    ELLIPTICITY,ISOTROPIC_3D_MANTLE, &
-    nspl,rspl,espl,espl2
-
-  use create_regions_mesh_par2
-
-  ! Modules for temporary AVS/DX data
-  use AVS_DX_global_mod
-
-  implicit none
-
-  ! number of spectral elements in each block
-  integer,intent(in) :: nspec,npointot,iregion_code
-
-  ! local parameters
-  ! arrays used for AVS or DX files
-  integer, dimension(:), allocatable :: num_ibool_AVS_DX
-  logical, dimension(:), allocatable :: mask_ibool
-  ! structures used for ADIOS AVS/DX files
-  type(avs_dx_global_t) :: avs_dx_global_vars
-
-  character(len=150) :: reg_name, outputname, group_name
-  integer :: comm, sizeprocs, ier
-  integer(kind=8) :: adios_group, group_size_inc, adios_totalsize, adios_handle
-
-  ! arrays num_ibool_AVS_DX and mask_ibool used to save memory
-  ! allocate memory for arrays
-  allocate(num_ibool_AVS_DX(npointot), &
-          mask_ibool(npointot), &
-          stat=ier)
-  if(ier /= 0) stop 'error in allocate 21'
-
-  if (ADIOS_FOR_AVS_DX) then 
-    call crm_save_mesh_files_adios(nspec,npointot,iregion_code, &
-        num_ibool_AVS_DX, mask_ibool)
-  else
-    call write_AVS_DX_global_data(myrank,prname,nspec,ibool,idoubling, &
-        xstore,ystore,zstore, num_ibool_AVS_DX,mask_ibool,npointot)
-
-    call write_AVS_DX_global_faces_data(myrank,prname,nspec,iMPIcut_xi, &
-        iMPIcut_eta,ibool, idoubling,xstore,ystore,zstore,num_ibool_AVS_DX, &
-        mask_ibool,npointot, rhostore,kappavstore,muvstore,nspl,rspl, &
-        espl,espl2, ELLIPTICITY,ISOTROPIC_3D_MANTLE, RICB,RCMB, &
-        RTOPDDOUBLEPRIME,R600,R670,R220,R771,R400,R120,R80,RMOHO, &
-        RMIDDLE_CRUST,ROCEAN,iregion_code)
-
-  call write_AVS_DX_global_chunks_data(myrank,prname,nspec,iboun,ibool, &
-          idoubling,xstore,ystore,zstore,num_ibool_AVS_DX,mask_ibool, &
-          npointot,rhostore,kappavstore,muvstore,nspl,rspl,espl,espl2, &
-          ELLIPTICITY,ISOTROPIC_3D_MANTLE, &
-          RICB,RCMB,RTOPDDOUBLEPRIME,R600,R670,R220,R771,R400,R120,R80,RMOHO, &
-          RMIDDLE_CRUST,ROCEAN,iregion_code)
-
-  call write_AVS_DX_surface_data(myrank,prname,nspec,iboun,ibool, &
-          idoubling,xstore,ystore,zstore,num_ibool_AVS_DX,mask_ibool,npointot, &
-          rhostore,kappavstore,muvstore,nspl,rspl,espl,espl2, &
-          ELLIPTICITY,ISOTROPIC_3D_MANTLE, &
-          RICB,RCMB,RTOPDDOUBLEPRIME,R600,R670,R220,R771,R400,R120,R80,RMOHO, &
-          RMIDDLE_CRUST,ROCEAN,iregion_code)
-  endif
-
-  ! Output material information for all GLL points
-  ! Can be use to check the mesh
-  !    call write_AVS_DX_global_data_gll(prname,nspec,xstore,ystore,zstore,&
-  !                rhostore,kappavstore,muvstore,Qmu_store,ATTENUATION)
-  deallocate(num_ibool_AVS_DX,mask_ibool)
-
-end subroutine crm_save_mesh_files
-
-!
-!-------------------------------------------------------------------------------------------------
-!
-
-  subroutine crm_free_MPI_arrays(iregion_code)
-
-  use create_MPI_interfaces_par
-
-  use MPI_crust_mantle_par
-  use MPI_outer_core_par
-  use MPI_inner_core_par
-
-  implicit none
-
-  integer,intent(in):: iregion_code
-
-  ! free memory
-  select case( iregion_code )
-  case( IREGION_CRUST_MANTLE )
-    ! crust mantle
-    deallocate(phase_ispec_inner_crust_mantle)
-    deallocate(num_elem_colors_crust_mantle)
-  case( IREGION_OUTER_CORE )
-    ! outer core
-    deallocate(phase_ispec_inner_outer_core)
-    deallocate(num_elem_colors_outer_core)
-  case( IREGION_INNER_CORE )
-    ! inner core
-    deallocate(phase_ispec_inner_inner_core)
-    deallocate(num_elem_colors_inner_core)
-  end select
-
-  end subroutine crm_free_MPI_arrays
-

Added: seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/rules.mk
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/rules.mk	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/branches/SPECFEM3D_GLOBE_SUNFLOWER/src/meshfem3D/rules.mk	2013-07-01 01:33:15 UTC (rev 22469)
@@ -0,0 +1,344 @@
+#=====================================================================
+#
+#          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
+#          --------------------------------------------------
+#
+#          Main authors: Dimitri Komatitsch and Jeroen Tromp
+#                        Princeton University, USA
+#             and University of Pau / CNRS / INRIA, France
+# (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
+#                            April 2011
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+#=====================================================================
+
+#######################################
+
+meshfem3D_TARGETS = \
+	$E/xmeshfem3D \
+	$(EMPTY_MACRO)
+
+meshfem3D_OBJECTS = \
+	$O/meshfem3D.o \
+	$O/meshfem3D_models.o \
+	$O/add_missing_nodes.o \
+	$O/add_topography.o \
+	$O/add_topography_410_650.o \
+	$O/add_topography_cmb.o \
+	$O/add_topography_icb.o \
+	$O/calc_jacobian.o \
+	$O/compute_coordinates_grid.o \
+	$O/compute_element_properties.o \
+	$O/create_central_cube.o \
+	$O/create_chunk_buffers.o \
+	$O/create_doubling_elements.o \
+	$O/create_mass_matrices.o \
+	$O/create_regions_mesh.o \
+	$O/get_perm_color.o \
+	$O/create_regular_elements.o \
+	$O/define_superbrick.o \
+	$O/get_absorb.o \
+	$O/get_ellipticity.o \
+	$O/get_global.o \
+	$O/get_jacobian_boundaries.o \
+	$O/get_jacobian_discontinuities.o \
+	$O/get_model.o \
+	$O/get_MPI_1D_buffers.o \
+	$O/get_MPI_cutplanes_eta.o \
+	$O/get_MPI_cutplanes_xi.o \
+	$O/get_shape2D.o \
+	$O/get_shape3D.o \
+	$O/lgndr.o \
+	$O/model_sea99_s.o \
+	$O/model_1066a.o \
+	$O/model_ak135.o \
+	$O/model_aniso_inner_core.o \
+	$O/model_aniso_mantle.o \
+	$O/model_atten3D_QRFSI12.o \
+	$O/model_attenuation.o \
+	$O/model_crust.o \
+	$O/model_eucrust.o \
+	$O/model_epcrust.o \
+	$O/model_crustmaps.o \
+	$O/model_gll.o \
+	$O/model_heterogen_mantle.o \
+	$O/model_iasp91.o \
+	$O/model_jp1d.o \
+	$O/model_jp3d.o \
+	$O/model_ppm.o \
+	$O/model_gapp2.o \
+	$O/model_1dref.o \
+	$O/model_s20rts.o \
+	$O/model_s40rts.o \
+	$O/model_s362ani.o \
+	$O/model_sea1d.o \
+	$O/moho_stretching.o \
+	$O/save_arrays_solver.o \
+	$O/sort_array_coordinates.o \
+	$O/stretching_function.o \
+	$O/write_AVS_DX_global_chunks_data.o \
+	$O/write_AVS_DX_global_data.o \
+	$O/write_AVS_DX_global_faces_data.o \
+	$O/write_AVS_DX_surface_data.o \
+	$(EMPTY_MACRO)
+
+meshfem3D_MODULES = \
+	$(FC_MODDIR)/gapp2_mantle_model_constants.$(FC_MODEXT) \
+	$(FC_MODDIR)/meshfem3d_models_par.$(FC_MODEXT) \
+	$(FC_MODDIR)/module_ppm.$(FC_MODEXT) \
+	$(EMPTY_MACRO)
+
+# These files come from the shared directory
+meshfem3D_SHARED_OBJECTS = \
+	$O/auto_ner.o \
+	$O/broadcast_compute_parameters.o \
+	$O/calendar.o \
+	$O/count_number_of_sources.o \
+	$O/create_name_database.o \
+	$O/euler_angles.o \
+	$O/exit_mpi.o \
+	$O/force_ftz.o \
+	$O/get_model_parameters.o \
+	$O/get_value_parameters.o \
+	$O/gll_library.o \
+	$O/hex_nodes.o \
+	$O/intgrl.o \
+	$O/lagrange_poly.o \
+	$O/make_ellipticity.o \
+	$O/make_gravity.o \
+	$O/memory_eval.o \
+	$O/model_prem.o \
+	$O/model_topo_bathy.o \
+	$O/param_reader.o \
+	$O/read_compute_parameters.o \
+	$O/read_parameter_file.o \
+	$O/read_value_parameters.o \
+	$O/reduce.o \
+	$O/rthetaphi_xyz.o \
+	$O/save_header_file.o \
+	$O/spline_routines.o \
+	$(EMPTY_MACRO)
+
+#######################################
+
+####
+#### rules for executables
+####
+
+${E}/xmeshfem3D: $(meshfem3D_OBJECTS) $(meshfem3D_SHARED_OBJECTS)
+## use MPI here
+	${MPIFCCOMPILE_CHECK} -o ${E}/xmeshfem3D $(meshfem3D_OBJECTS) $(meshfem3D_SHARED_OBJECTS) $(MPILIBS)
+
+#######################################
+
+## compilation directories
+S := ${S_TOP}/src/meshfem3D
+$(meshfem3D_OBJECTS): S = ${S_TOP}/src/meshfem3D
+
+####
+#### rule for each .o file below
+####
+
+###
+### meshfem3D objects
+###
+
+$O/add_missing_nodes.o: ${SETUP}/constants.h $S/add_missing_nodes.f90
+	${FCCOMPILE_CHECK} -c -o $O/add_missing_nodes.o ${FCFLAGS_f90} $S/add_missing_nodes.f90
+
+$O/add_topography.o: ${SETUP}/constants.h $S/add_topography.f90
+	${FCCOMPILE_CHECK} -c -o $O/add_topography.o ${FCFLAGS_f90} $S/add_topography.f90
+
+$O/add_topography_410_650.o: ${SETUP}/constants.h $S/add_topography_410_650.f90
+	${FCCOMPILE_CHECK} -c -o $O/add_topography_410_650.o ${FCFLAGS_f90} $S/add_topography_410_650.f90
+
+$O/add_topography_cmb.o: ${SETUP}/constants.h $S/add_topography_cmb.f90
+	${FCCOMPILE_CHECK} -c -o $O/add_topography_cmb.o ${FCFLAGS_f90} $S/add_topography_cmb.f90
+
+$O/add_topography_icb.o: ${SETUP}/constants.h $S/add_topography_icb.f90
+	${FCCOMPILE_CHECK} -c -o $O/add_topography_icb.o ${FCFLAGS_f90} $S/add_topography_icb.f90
+
+$O/calc_jacobian.o: ${SETUP}/constants.h $S/calc_jacobian.f90
+	${FCCOMPILE_CHECK} -c -o $O/calc_jacobian.o ${FCFLAGS_f90} $S/calc_jacobian.f90
+
+$O/compute_coordinates_grid.o: ${SETUP}/constants.h $S/compute_coordinates_grid.f90
+	${FCCOMPILE_CHECK} -c -o $O/compute_coordinates_grid.o ${FCFLAGS_f90} $S/compute_coordinates_grid.f90
+
+$O/compute_element_properties.o: ${SETUP}/constants.h $S/compute_element_properties.f90 $O/meshfem3D_models.o
+	${FCCOMPILE_CHECK} -c -o $O/compute_element_properties.o ${FCFLAGS_f90} $S/compute_element_properties.f90
+
+$O/create_central_cube.o: ${SETUP}/constants.h $S/create_central_cube.f90 $O/meshfem3D_models.o
+	${FCCOMPILE_CHECK} -c -o $O/create_central_cube.o ${FCFLAGS_f90} $S/create_central_cube.f90
+
+$O/create_doubling_elements.o: ${SETUP}/constants.h $S/create_doubling_elements.f90 $O/meshfem3D_models.o
+	${FCCOMPILE_CHECK} -c -o $O/create_doubling_elements.o ${FCFLAGS_f90} $S/create_doubling_elements.f90
+
+$O/create_mass_matrices.o: ${SETUP}/constants.h $S/create_mass_matrices.f90 $O/meshfem3D_models.o
+	${FCCOMPILE_CHECK} -c -o $O/create_mass_matrices.o ${FCFLAGS_f90} $S/create_mass_matrices.f90
+
+$O/create_regions_mesh.o: ${SETUP}/constants.h $S/create_regions_mesh.F90 $O/meshfem3D_models.o
+	${MPIFCCOMPILE_CHECK} -c -o $O/create_regions_mesh.o ${FCFLAGS_f90} $S/create_regions_mesh.F90
+
+$O/get_perm_color.o: ${SETUP}/constants.h $S/get_perm_color.f90
+	${FCCOMPILE_CHECK} -c -o $O/get_perm_color.o ${FCFLAGS_f90} $S/get_perm_color.f90
+
+$O/create_regular_elements.o: ${SETUP}/constants.h $S/create_regular_elements.f90 $O/meshfem3D_models.o
+	${FCCOMPILE_CHECK} -c -o $O/create_regular_elements.o ${FCFLAGS_f90} $S/create_regular_elements.f90
+
+$O/define_superbrick.o: ${SETUP}/constants.h $S/define_superbrick.f90
+	${FCCOMPILE_CHECK} -c -o $O/define_superbrick.o ${FCFLAGS_f90} $S/define_superbrick.f90
+
+$O/get_absorb.o: ${SETUP}/constants.h $S/get_absorb.f90
+	${FCCOMPILE_CHECK} -c -o $O/get_absorb.o ${FCFLAGS_f90} $S/get_absorb.f90
+
+$O/get_ellipticity.o: ${SETUP}/constants.h $S/get_ellipticity.f90
+	${FCCOMPILE_CHECK} -c -o $O/get_ellipticity.o ${FCFLAGS_f90} $S/get_ellipticity.f90
+
+$O/get_global.o: ${SETUP}/constants.h $S/get_global.f90
+	${FCCOMPILE_CHECK} -c -o $O/get_global.o ${FCFLAGS_f90} $S/get_global.f90
+
+$O/get_jacobian_boundaries.o: ${SETUP}/constants.h $S/get_jacobian_boundaries.f90
+	${FCCOMPILE_CHECK} -c -o $O/get_jacobian_boundaries.o ${FCFLAGS_f90} $S/get_jacobian_boundaries.f90
+
+$O/get_jacobian_discontinuities.o: ${SETUP}/constants.h $S/get_jacobian_discontinuities.f90
+	${FCCOMPILE_CHECK} -c -o $O/get_jacobian_discontinuities.o ${FCFLAGS_f90} $S/get_jacobian_discontinuities.f90
+
+$O/get_model.o: ${SETUP}/constants.h $S/get_model.f90 $O/meshfem3D_models.o
+	${FCCOMPILE_CHECK} -c -o $O/get_model.o ${FCFLAGS_f90} $S/get_model.f90
+
+$O/get_MPI_1D_buffers.o: ${SETUP}/constants.h $S/get_MPI_1D_buffers.f90
+	${FCCOMPILE_CHECK} -c -o $O/get_MPI_1D_buffers.o ${FCFLAGS_f90} $S/get_MPI_1D_buffers.f90
+
+$O/get_MPI_cutplanes_xi.o: ${SETUP}/constants.h $S/get_MPI_cutplanes_xi.f90
+	${FCCOMPILE_CHECK} -c -o $O/get_MPI_cutplanes_xi.o ${FCFLAGS_f90} $S/get_MPI_cutplanes_xi.f90
+
+$O/get_MPI_cutplanes_eta.o: ${SETUP}/constants.h $S/get_MPI_cutplanes_eta.f90
+	${FCCOMPILE_CHECK} -c -o $O/get_MPI_cutplanes_eta.o ${FCFLAGS_f90} $S/get_MPI_cutplanes_eta.f90
+
+$O/get_shape2D.o: ${SETUP}/constants.h $S/get_shape2D.f90
+	${FCCOMPILE_CHECK} -c -o $O/get_shape2D.o ${FCFLAGS_f90} $S/get_shape2D.f90
+
+$O/get_shape3D.o: ${SETUP}/constants.h $S/get_shape3D.f90
+	${FCCOMPILE_CHECK} -c -o $O/get_shape3D.o ${FCFLAGS_f90} $S/get_shape3D.f90
+
+$O/lgndr.o: ${SETUP}/constants.h $S/lgndr.f90
+	${FCCOMPILE_CHECK} -c -o $O/lgndr.o ${FCFLAGS_f90} $S/lgndr.f90
+
+$O/model_iasp91.o: ${SETUP}/constants.h $S/model_iasp91.f90
+	${FCCOMPILE_CHECK} -c -o $O/model_iasp91.o ${FCFLAGS_f90} $S/model_iasp91.f90
+
+$O/model_1066a.o: ${SETUP}/constants.h $S/model_1066a.f90
+	${FCCOMPILE_CHECK} -c -o $O/model_1066a.o ${FCFLAGS_f90} $S/model_1066a.f90
+
+$O/model_ak135.o: ${SETUP}/constants.h $S/model_ak135.f90
+	${FCCOMPILE_CHECK} -c -o $O/model_ak135.o ${FCFLAGS_f90} $S/model_ak135.f90
+
+$O/model_aniso_inner_core.o: ${SETUP}/constants.h $S/model_aniso_inner_core.f90
+	${FCCOMPILE_CHECK} -c -o $O/model_aniso_inner_core.o ${FCFLAGS_f90} $S/model_aniso_inner_core.f90
+
+$O/model_1dref.o: ${SETUP}/constants.h $S/model_1dref.f90
+	${FCCOMPILE_CHECK} -c -o $O/model_1dref.o ${FCFLAGS_f90} $S/model_1dref.f90
+
+$O/model_jp1d.o: ${SETUP}/constants.h $S/model_jp1d.f90
+	${FCCOMPILE_CHECK} -c -o $O/model_jp1d.o ${FCFLAGS_f90} $S/model_jp1d.f90
+
+$O/model_sea1d.o: ${SETUP}/constants.h $S/model_sea1d.f90
+	${FCCOMPILE_CHECK} -c -o $O/model_sea1d.o ${FCFLAGS_f90} $S/model_sea1d.f90
+
+$O/moho_stretching.o: ${SETUP}/constants.h $S/moho_stretching.f90 $O/meshfem3D_models.o
+	${FCCOMPILE_CHECK} -c -o $O/moho_stretching.o ${FCFLAGS_f90} $S/moho_stretching.f90
+
+$O/save_arrays_solver.o: ${SETUP}/constants.h $S/save_arrays_solver.f90
+	${FCCOMPILE_CHECK} -c -o $O/save_arrays_solver.o ${FCFLAGS_f90} $S/save_arrays_solver.f90
+
+$O/sort_array_coordinates.o: ${SETUP}/constants.h $S/sort_array_coordinates.f90
+	${FCCOMPILE_CHECK} -c -o $O/sort_array_coordinates.o ${FCFLAGS_f90} $S/sort_array_coordinates.f90
+
+$O/stretching_function.o: ${SETUP}/constants.h $S/stretching_function.f90
+	${FCCOMPILE_CHECK} -c -o $O/stretching_function.o ${FCFLAGS_f90} $S/stretching_function.f90
+
+$O/write_AVS_DX_global_faces_data.o: ${SETUP}/constants.h $S/write_AVS_DX_global_faces_data.f90
+	${FCCOMPILE_CHECK} -c -o $O/write_AVS_DX_global_faces_data.o ${FCFLAGS_f90} $S/write_AVS_DX_global_faces_data.f90
+
+$O/write_AVS_DX_global_chunks_data.o: ${SETUP}/constants.h $S/write_AVS_DX_global_chunks_data.f90
+	${FCCOMPILE_CHECK} -c -o $O/write_AVS_DX_global_chunks_data.o ${FCFLAGS_f90} $S/write_AVS_DX_global_chunks_data.f90
+
+$O/write_AVS_DX_surface_data.o: ${SETUP}/constants.h $S/write_AVS_DX_surface_data.f90
+	${FCCOMPILE_CHECK} -c -o $O/write_AVS_DX_surface_data.o ${FCFLAGS_f90} $S/write_AVS_DX_surface_data.f90
+
+$O/write_AVS_DX_global_data.o: ${SETUP}/constants.h $S/write_AVS_DX_global_data.f90
+	${FCCOMPILE_CHECK} -c -o $O/write_AVS_DX_global_data.o ${FCFLAGS_f90} $S/write_AVS_DX_global_data.f90
+
+##
+## meshfem3D objects with mpi compilation
+##
+$O/create_chunk_buffers.o: ${SETUP}/constants.h $S/create_chunk_buffers.f90
+	${MPIFCCOMPILE_CHECK} -c -o $O/create_chunk_buffers.o ${FCFLAGS_f90} $S/create_chunk_buffers.f90
+
+$O/meshfem3D.o: ${SETUP}/constants.h $S/meshfem3D.f90 $O/meshfem3D_models.o
+	${MPIFCCOMPILE_CHECK} -c -o $O/meshfem3D.o ${FCFLAGS_f90} $S/meshfem3D.f90
+
+$O/meshfem3D_models.o: ${SETUP}/constants.h $S/meshfem3D_models.f90
+	${MPIFCCOMPILE_CHECK} -c -o $O/meshfem3D_models.o ${FCFLAGS_f90} $S/meshfem3D_models.f90
+
+$O/model_aniso_mantle.o: ${SETUP}/constants.h $S/model_aniso_mantle.f90
+	${MPIFCCOMPILE_CHECK} -c -o $O/model_aniso_mantle.o ${FCFLAGS_f90} $S/model_aniso_mantle.f90
+
+$O/model_atten3D_QRFSI12.o: ${SETUP}/constants.h $S/model_atten3D_QRFSI12.f90
+	${MPIFCCOMPILE_CHECK} -c -o $O/model_atten3D_QRFSI12.o ${FCFLAGS_f90} $S/model_atten3D_QRFSI12.f90
+
+$O/model_attenuation.o: ${SETUP}/constants.h $S/model_attenuation.f90 $O/model_ak135.o $O/model_1066a.o $O/model_1dref.o
+	${MPIFCCOMPILE_CHECK} -c -o $O/model_attenuation.o ${FCFLAGS_f90} $S/model_attenuation.f90
+
+$O/model_crust.o: ${SETUP}/constants.h $S/model_crust.f90
+	${MPIFCCOMPILE_CHECK} -c -o $O/model_crust.o ${FCFLAGS_f90} $S/model_crust.f90
+
+$O/model_eucrust.o: ${SETUP}/constants.h $S/model_eucrust.f90
+	${MPIFCCOMPILE_CHECK} -c -o $O/model_eucrust.o ${FCFLAGS_f90} $S/model_eucrust.f90
+
+$O/model_epcrust.o: ${SETUP}/constants.h $S/model_epcrust.f90
+	${MPIFCCOMPILE_CHECK} -c -o $O/model_epcrust.o ${FCFLAGS_f90} $S/model_epcrust.f90
+
+$O/model_crustmaps.o: ${SETUP}/constants.h $S/model_crustmaps.f90
+	${MPIFCCOMPILE_CHECK} -c -o $O/model_crustmaps.o ${FCFLAGS_f90} $S/model_crustmaps.f90
+
+$O/model_gll.o: ${SETUP}/constants.h $S/model_gll.f90 $O/meshfem3D_models.o
+	${MPIFCCOMPILE_CHECK} -c -o $O/model_gll.o ${FCFLAGS_f90} $S/model_gll.f90
+
+$O/model_heterogen_mantle.o: ${SETUP}/constants.h $S/model_heterogen_mantle.f90
+	${MPIFCCOMPILE_CHECK} -c -o $O/model_heterogen_mantle.o ${FCFLAGS_f90} $S/model_heterogen_mantle.f90
+
+$O/model_jp3d.o: ${SETUP}/constants.h $S/model_jp3d.f90
+	${MPIFCCOMPILE_CHECK} -c -o $O/model_jp3d.o ${FCFLAGS_f90} $S/model_jp3d.f90
+
+$O/model_ppm.o: ${SETUP}/constants.h $S/model_ppm.f90
+	${MPIFCCOMPILE_CHECK} -c -o $O/model_ppm.o ${FCFLAGS_f90} $S/model_ppm.f90
+
+$O/model_gapp2.o: ${SETUP}/constants.h $S/model_gapp2.f90
+	${MPIFCCOMPILE_CHECK} -c -o $O/model_gapp2.o ${FCFLAGS_f90} $S/model_gapp2.f90
+
+$O/model_s20rts.o: ${SETUP}/constants.h $S/model_s20rts.f90
+	${MPIFCCOMPILE_CHECK} -c -o $O/model_s20rts.o ${FCFLAGS_f90} $S/model_s20rts.f90
+
+$O/model_s40rts.o: ${SETUP}/constants.h $S/model_s40rts.f90
+	${MPIFCCOMPILE_CHECK} -c -o $O/model_s40rts.o ${FCFLAGS_f90} $S/model_s40rts.f90
+
+$O/model_s362ani.o: ${SETUP}/constants.h $S/model_s362ani.f90
+	${MPIFCCOMPILE_CHECK} -c -o $O/model_s362ani.o ${FCFLAGS_f90} $S/model_s362ani.f90
+
+$O/model_sea99_s.o: ${SETUP}/constants.h $S/model_sea99_s.f90
+	${MPIFCCOMPILE_CHECK} -c -o $O/model_sea99_s.o ${FCFLAGS_f90} $S/model_sea99_s.f90
+

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/assemble_MPI_central_cube_mesh.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/assemble_MPI_central_cube_mesh.f90	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/assemble_MPI_central_cube_mesh.f90	2013-07-01 01:33:15 UTC (rev 22469)
@@ -0,0 +1,330 @@
+!=====================================================================
+!
+!          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
+!          --------------------------------------------------
+!
+!          Main authors: Dimitri Komatitsch and Jeroen Tromp
+!                        Princeton University, USA
+!             and University of Pau / CNRS / INRIA, France
+! (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
+!                            April 2011
+!
+! This program is free software; you can redistribute it and/or modify
+! it under the terms of the GNU General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or
+! (at your option) any later version.
+!
+! This program is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+! GNU General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License along
+! with this program; if not, write to the Free Software Foundation, Inc.,
+! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+!
+!=====================================================================
+
+  subroutine assemble_MPI_central_cube_block(ichunk,nb_msgs_theor_in_cube, sender_from_slices_to_cube, &
+                                          npoin2D_cube_from_slices, &
+                                          buffer_all_cube_from_slices, buffer_slices, buffer_slices2, &
+                                          ibool_central_cube, &
+                                          receiver_cube_from_slices, ibool_inner_core, &
+                                          idoubling_inner_core, NSPEC_INNER_CORE, &
+                                          ibelm_bottom_inner_core, NSPEC2D_BOTTOM_INNER_CORE,NGLOB_INNER_CORE, &
+                                          vector_assemble,ndim_assemble, &
+                                          iproc_eta,addressing,NCHUNKS,NPROC_XI,NPROC_ETA)
+
+  ! this version of the routine is based on blocking MPI calls
+
+  implicit none
+
+  ! standard include of the MPI library
+  include 'mpif.h'
+  include 'constants.h'
+
+  ! for matching with central cube in inner core
+  integer ichunk, nb_msgs_theor_in_cube, npoin2D_cube_from_slices
+  integer, dimension(nb_msgs_theor_in_cube) :: sender_from_slices_to_cube
+  double precision, dimension(npoin2D_cube_from_slices,NDIM) :: &
+    buffer_slices,buffer_slices2
+  double precision, dimension(nb_msgs_theor_in_cube,npoin2D_cube_from_slices,NDIM) :: &
+    buffer_all_cube_from_slices
+  integer, dimension(nb_msgs_theor_in_cube,npoin2D_cube_from_slices):: ibool_central_cube
+  integer receiver_cube_from_slices
+
+  ! local to global mapping
+  integer NSPEC_INNER_CORE,NSPEC2D_BOTTOM_INNER_CORE, NGLOB_INNER_CORE
+  integer, dimension(NGLLX,NGLLY,NGLLZ,NSPEC_INNER_CORE) :: ibool_inner_core
+  integer, dimension(NSPEC_INNER_CORE) :: idoubling_inner_core
+  integer, dimension(NSPEC2D_BOTTOM_INNER_CORE) :: ibelm_bottom_inner_core
+
+  ! vector
+  integer ndim_assemble
+  real(kind=CUSTOM_REAL), dimension(ndim_assemble,NGLOB_INNER_CORE) :: vector_assemble
+
+  !for addressing of the slices
+  integer, intent(in) :: NCHUNKS,NPROC_XI,NPROC_ETA
+  integer, dimension(NCHUNKS,0:NPROC_XI-1,0:NPROC_ETA-1), intent(in) :: addressing
+  integer, intent(in) :: iproc_eta
+
+  integer ipoin,idimension, ispec2D, ispec
+  integer i,j,k
+  integer sender,receiver,imsg
+
+  real(kind=CUSTOM_REAL), dimension(NGLOB_INNER_CORE) :: array_central_cube
+
+  ! MPI status of messages to be received
+  integer msg_status(MPI_STATUS_SIZE), ier
+
+  ! mask
+  logical, dimension(NGLOB_INNER_CORE) :: mask
+
+  !---
+  !---  now use buffers to assemble mass matrix with central cube once and for all
+  !---
+
+  ! on chunks AB and AB_ANTIPODE, receive all the messages from slices
+  if(ichunk == CHUNK_AB .or. ichunk == CHUNK_AB_ANTIPODE) then
+
+    do imsg = 1,nb_msgs_theor_in_cube-1
+
+  ! receive buffers from slices
+    sender = sender_from_slices_to_cube(imsg)
+    call MPI_RECV(buffer_slices, &
+                ndim_assemble*npoin2D_cube_from_slices,MPI_DOUBLE_PRECISION,sender, &
+                itag,MPI_COMM_WORLD,msg_status,ier)
+
+  ! copy buffer into 2D array for each slice
+    buffer_all_cube_from_slices(imsg,:,1:ndim_assemble) = buffer_slices(:,1:ndim_assemble)
+
+    enddo
+  endif
+
+  ! send info to central cube from all the slices except those in CHUNK_AB & CHUNK_AB_ANTIPODE
+  if(ichunk /= CHUNK_AB .and. ichunk /= CHUNK_AB_ANTIPODE) then
+    ! for bottom elements in contact with central cube from the slices side
+    ipoin = 0
+    do ispec2D = 1,NSPEC2D_BOTTOM_INNER_CORE
+
+      ispec = ibelm_bottom_inner_core(ispec2D)
+
+      ! only for DOFs exactly on surface of central cube (bottom of these elements)
+      k = 1
+      do j = 1,NGLLY
+        do i = 1,NGLLX
+          ipoin = ipoin + 1
+          buffer_slices(ipoin,1:ndim_assemble) = dble(vector_assemble(1:ndim_assemble,ibool_inner_core(i,j,k,ispec)))
+        enddo
+      enddo
+    enddo
+
+    ! send buffer to central cube
+    receiver = receiver_cube_from_slices
+    call MPI_SEND(buffer_slices,ndim_assemble*npoin2D_cube_from_slices, &
+                 MPI_DOUBLE_PRECISION,receiver,itag,MPI_COMM_WORLD,ier)
+
+    ! in case NPROC_XI == 1, the other chunks exchange all bottom points with
+    ! CHUNK_AB **and** CHUNK_AB_ANTIPODE
+    if(NPROC_XI==1) then
+      call MPI_SEND(buffer_slices,ndim_assemble*npoin2D_cube_from_slices, &
+                   MPI_DOUBLE_PRECISION, &
+                   addressing(CHUNK_AB_ANTIPODE,0,iproc_eta), &
+                   itag,MPI_COMM_WORLD,ier)
+    endif
+
+  endif  ! end sending info to central cube
+
+
+  ! exchange of their bottom faces between chunks AB and AB_ANTIPODE
+  if(ichunk == CHUNK_AB .or. ichunk == CHUNK_AB_ANTIPODE) then
+
+    ipoin = 0
+    do ispec = NSPEC_INNER_CORE, 1, -1
+      if (idoubling_inner_core(ispec) == IFLAG_BOTTOM_CENTRAL_CUBE) then
+        k = 1
+        do j = 1,NGLLY
+          do i = 1,NGLLX
+            ipoin = ipoin + 1
+            buffer_slices(ipoin,1:ndim_assemble) = dble(vector_assemble(1:ndim_assemble,ibool_inner_core(i,j,k,ispec)))
+          enddo
+        enddo
+      endif
+    enddo
+
+    sender = sender_from_slices_to_cube(nb_msgs_theor_in_cube)
+
+    call MPI_SENDRECV(buffer_slices,ndim_assemble*npoin2D_cube_from_slices,MPI_DOUBLE_PRECISION,receiver_cube_from_slices, &
+        itag,buffer_slices2,ndim_assemble*npoin2D_cube_from_slices,&
+        MPI_DOUBLE_PRECISION,sender,itag,MPI_COMM_WORLD,msg_status,ier)
+
+   buffer_all_cube_from_slices(nb_msgs_theor_in_cube,:,1:ndim_assemble) = buffer_slices2(:,1:ndim_assemble)
+
+  endif
+
+  !--- now we need to assemble the contributions
+
+  if(ichunk == CHUNK_AB .or. ichunk == CHUNK_AB_ANTIPODE) then
+
+    do idimension = 1,ndim_assemble
+  ! erase contributions to central cube array
+      array_central_cube(:) = 0._CUSTOM_REAL
+
+  ! use indirect addressing to store contributions only once
+  ! distinguish between single and double precision for reals
+      do imsg = 1,nb_msgs_theor_in_cube-1
+        do ipoin = 1,npoin2D_cube_from_slices
+          if(NPROC_XI==1) then
+            if(ibool_central_cube(imsg,ipoin) > 0 ) then
+              if(CUSTOM_REAL == SIZE_REAL) then
+                array_central_cube(ibool_central_cube(imsg,ipoin)) = sngl(buffer_all_cube_from_slices(imsg,ipoin,idimension))
+              else
+                array_central_cube(ibool_central_cube(imsg,ipoin)) = buffer_all_cube_from_slices(imsg,ipoin,idimension)
+              endif
+            endif
+          else
+            if(CUSTOM_REAL == SIZE_REAL) then
+              array_central_cube(ibool_central_cube(imsg,ipoin)) = sngl(buffer_all_cube_from_slices(imsg,ipoin,idimension))
+            else
+              array_central_cube(ibool_central_cube(imsg,ipoin)) = buffer_all_cube_from_slices(imsg,ipoin,idimension)
+            endif
+          endif
+        enddo
+      enddo
+  ! add the contribution of AB or AB_ANTIPODE to sum with the external slices on the edges
+  ! use a mask to avoid taking the same point into account several times.
+      mask(:) = .false.
+      do ipoin = 1,npoin2D_cube_from_slices
+        if(NPROC_XI==1) then
+          if( ibool_central_cube(nb_msgs_theor_in_cube,ipoin) > 0 ) then
+            if (.not. mask(ibool_central_cube(nb_msgs_theor_in_cube,ipoin))) then
+              if(CUSTOM_REAL == SIZE_REAL) then
+                array_central_cube(ibool_central_cube(nb_msgs_theor_in_cube,ipoin)) = &
+                  array_central_cube(ibool_central_cube(nb_msgs_theor_in_cube,ipoin)) + &
+                  sngl(buffer_all_cube_from_slices(nb_msgs_theor_in_cube,ipoin,idimension))
+              else
+                array_central_cube(ibool_central_cube(nb_msgs_theor_in_cube,ipoin)) = &
+                  array_central_cube(ibool_central_cube(nb_msgs_theor_in_cube,ipoin)) + &
+                  buffer_all_cube_from_slices(nb_msgs_theor_in_cube,ipoin,idimension)
+              endif
+            endif
+            mask(ibool_central_cube(nb_msgs_theor_in_cube,ipoin)) = .true.
+          endif
+        else
+          if (.not. mask(ibool_central_cube(nb_msgs_theor_in_cube,ipoin))) then
+            if(CUSTOM_REAL == SIZE_REAL) then
+              array_central_cube(ibool_central_cube(nb_msgs_theor_in_cube,ipoin)) = &
+                array_central_cube(ibool_central_cube(nb_msgs_theor_in_cube,ipoin)) + &
+                sngl(buffer_all_cube_from_slices(nb_msgs_theor_in_cube,ipoin,idimension))
+            else
+              array_central_cube(ibool_central_cube(nb_msgs_theor_in_cube,ipoin)) = &
+                array_central_cube(ibool_central_cube(nb_msgs_theor_in_cube,ipoin)) + &
+                buffer_all_cube_from_slices(nb_msgs_theor_in_cube,ipoin,idimension)
+            endif
+          endif
+          mask(ibool_central_cube(nb_msgs_theor_in_cube,ipoin)) = .true.
+        endif
+      enddo
+
+  ! suppress degrees of freedom already assembled at top of cube on edges
+      do ispec = 1,NSPEC_INNER_CORE
+        if(idoubling_inner_core(ispec) == IFLAG_TOP_CENTRAL_CUBE) then
+          k = NGLLZ
+          do j = 1,NGLLY
+            do i = 1,NGLLX
+              array_central_cube(ibool_inner_core(i,j,k,ispec)) = 0._CUSTOM_REAL
+            enddo
+          enddo
+        endif
+      enddo
+
+  ! assemble contributions
+      vector_assemble(idimension,:) = vector_assemble(idimension,:) + array_central_cube(:)
+
+  ! copy sum back
+      do imsg = 1,nb_msgs_theor_in_cube-1
+        do ipoin = 1,npoin2D_cube_from_slices
+          if(NPROC_XI==1) then
+            if( ibool_central_cube(imsg,ipoin) > 0 ) then
+              buffer_all_cube_from_slices(imsg,ipoin,idimension) = &
+                      vector_assemble(idimension,ibool_central_cube(imsg,ipoin))
+            else
+              buffer_all_cube_from_slices(imsg,ipoin,idimension) = 0._CUSTOM_REAL
+            endif
+          else
+            buffer_all_cube_from_slices(imsg,ipoin,idimension) = &
+                    vector_assemble(idimension,ibool_central_cube(imsg,ipoin))
+          endif
+        enddo
+      enddo
+
+    enddo
+
+  endif
+
+  !----------
+
+  ! receive info from central cube on all the slices except those in CHUNK_AB & CHUNK_AB_ANTIPODE
+  if(ichunk /= CHUNK_AB .and. ichunk /= CHUNK_AB_ANTIPODE) then
+    ! receive buffer from the central cube
+    sender = receiver_cube_from_slices
+    call MPI_RECV(buffer_slices, &
+                ndim_assemble*npoin2D_cube_from_slices,MPI_DOUBLE_PRECISION,sender, &
+                itag,MPI_COMM_WORLD,msg_status,ier)
+
+    ! in case NPROC_XI == 1, the other chunks exchange all bottom points with
+    ! CHUNK_AB **and** CHUNK_AB_ANTIPODE
+    if(NPROC_XI==1) then
+      call MPI_RECV(buffer_slices2, &
+                  ndim_assemble*npoin2D_cube_from_slices,MPI_DOUBLE_PRECISION, &
+                  addressing(CHUNK_AB_ANTIPODE,0,iproc_eta), &
+                  itag,MPI_COMM_WORLD,msg_status,ier)
+
+      buffer_slices = buffer_slices + buffer_slices2
+    endif
+
+    ! for bottom elements in contact with central cube from the slices side
+    ipoin = 0
+    do ispec2D = 1,NSPEC2D_BOTTOM_INNER_CORE
+
+      ispec = ibelm_bottom_inner_core(ispec2D)
+
+      ! only for DOFs exactly on surface of central cube (bottom of these elements)
+      k = 1
+      do j = 1,NGLLY
+        do i = 1,NGLLX
+          ipoin = ipoin + 1
+
+          ! distinguish between single and double precision for reals
+          if(CUSTOM_REAL == SIZE_REAL) then
+            vector_assemble(1:ndim_assemble,ibool_inner_core(i,j,k,ispec)) = sngl(buffer_slices(ipoin,1:ndim_assemble))
+          else
+            vector_assemble(1:ndim_assemble,ibool_inner_core(i,j,k,ispec)) = buffer_slices(ipoin,1:ndim_assemble)
+          endif
+
+        enddo
+      enddo
+    enddo
+
+  endif  ! end receiving info from central cube
+
+  !------- send info back from central cube to slices
+
+  ! on chunk AB & CHUNK_AB_ANTIPODE, send all the messages to slices
+  if(ichunk == CHUNK_AB .or. ichunk == CHUNK_AB_ANTIPODE) then
+
+   do imsg = 1,nb_msgs_theor_in_cube-1
+
+  ! copy 2D array into buffer for each slice
+   buffer_slices(:,1:ndim_assemble) = buffer_all_cube_from_slices(imsg,:,1:ndim_assemble)
+
+  ! send buffers to slices
+    receiver = sender_from_slices_to_cube(imsg)
+    call MPI_SEND(buffer_slices,ndim_assemble*npoin2D_cube_from_slices, &
+              MPI_DOUBLE_PRECISION,receiver,itag,MPI_COMM_WORLD,ier)
+
+   enddo
+   endif
+
+  end subroutine assemble_MPI_central_cube_block
+

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/assemble_MPI_scalar_mesh.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/assemble_MPI_scalar_mesh.f90	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/assemble_MPI_scalar_mesh.f90	2013-07-01 01:33:15 UTC (rev 22469)
@@ -0,0 +1,539 @@
+!=====================================================================
+!
+!          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
+!          --------------------------------------------------
+!
+!          Main authors: Dimitri Komatitsch and Jeroen Tromp
+!                        Princeton University, USA
+!             and University of Pau / CNRS / INRIA, France
+! (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
+!                            April 2011
+!
+! This program is free software; you can redistribute it and/or modify
+! it under the terms of the GNU General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or
+! (at your option) any later version.
+!
+! This program is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+! GNU General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License along
+! with this program; if not, write to the Free Software Foundation, Inc.,
+! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+!
+!=====================================================================
+
+!----
+!---- assemble the contributions between slices and chunks using MPI
+!----
+
+  subroutine assemble_MPI_scalar_block(myrank,array_val,nglob, &
+            iproc_xi,iproc_eta,ichunk,addressing, &
+            iboolleft_xi,iboolright_xi,iboolleft_eta,iboolright_eta, &
+            npoin2D_faces,npoin2D_xi,npoin2D_eta, &
+            iboolfaces,iboolcorner, &
+            iprocfrom_faces,iprocto_faces,imsg_type, &
+            iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
+            buffer_send_faces_scalar,buffer_received_faces_scalar,npoin2D_max_all_CM_IC, &
+            buffer_send_chunkcorn_scalar,buffer_recv_chunkcorn_scalar, &
+            NUMMSGS_FACES,NUM_MSG_TYPES,NCORNERSCHUNKS, &
+            NPROC_XI,NPROC_ETA,NGLOB1D_RADIAL, &
+            NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX,NGLOB2DMAX_XY,NCHUNKS)
+
+! this version of the routine is based on blocking MPI calls
+
+  implicit none
+
+! standard include of the MPI library
+  include 'mpif.h'
+
+  include "constants.h"
+  include "precision.h"
+
+  integer myrank,nglob,NCHUNKS
+
+! array to assemble
+  real(kind=CUSTOM_REAL), dimension(nglob) :: array_val
+
+  integer iproc_xi,iproc_eta,ichunk
+  integer, dimension(NB_SQUARE_EDGES_ONEDIR) :: npoin2D_xi,npoin2D_eta
+  integer npoin2D_faces(NUMFACES_SHARED)
+
+  integer NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX,NGLOB2DMAX_XY
+  integer NPROC_XI,NPROC_ETA,NGLOB1D_RADIAL
+  integer NUMMSGS_FACES,NUM_MSG_TYPES,NCORNERSCHUNKS
+
+! for addressing of the slices
+  integer, dimension(NCHUNKS,0:NPROC_XI-1,0:NPROC_ETA-1) :: addressing
+
+! 2-D addressing and buffers for summation between slices
+  integer, dimension(NGLOB2DMAX_XMIN_XMAX) :: iboolleft_xi,iboolright_xi
+  integer, dimension(NGLOB2DMAX_YMIN_YMAX) :: iboolleft_eta,iboolright_eta
+
+! indirect addressing for each corner of the chunks
+  integer, dimension(NGLOB1D_RADIAL,NUMCORNERS_SHARED) :: iboolcorner
+  integer icount_corners
+
+  integer :: npoin2D_max_all_CM_IC
+  integer, dimension(NGLOB2DMAX_XY,NUMFACES_SHARED) :: iboolfaces
+  real(kind=CUSTOM_REAL), dimension(npoin2D_max_all_CM_IC) :: buffer_send_faces_scalar,buffer_received_faces_scalar
+
+! buffers for send and receive between corners of the chunks
+  real(kind=CUSTOM_REAL), dimension(NGLOB1D_RADIAL) :: buffer_send_chunkcorn_scalar,buffer_recv_chunkcorn_scalar
+
+! ---- arrays to assemble between chunks
+
+! communication pattern for faces between chunks
+  integer, dimension(NUMMSGS_FACES) :: iprocfrom_faces,iprocto_faces,imsg_type
+
+! communication pattern for corners between chunks
+  integer, dimension(NCORNERSCHUNKS) :: iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners
+
+! MPI status of messages to be received
+  integer msg_status(MPI_STATUS_SIZE)
+
+  integer ipoin,ipoin2D,ipoin1D
+  integer sender,receiver,ier
+  integer imsg,imsg_loop
+  integer icount_faces,npoin2D_chunks
+
+! $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
+
+! check flag to see if we need to assemble (might be turned off when debugging)
+  if (.not. ACTUALLY_ASSEMBLE_MPI_SLICES) return
+
+! here we have to assemble all the contributions between slices using MPI
+
+!----
+!---- assemble the contributions between slices using MPI
+!----
+
+!----
+!---- first assemble along xi using the 2-D topology
+!----
+
+! assemble along xi only if more than one slice
+  if(NPROC_XI > 1) then
+
+    ! slices copy the right face into the buffer
+    do ipoin=1,npoin2D_xi(2)
+      buffer_send_faces_scalar(ipoin) = array_val(iboolright_xi(ipoin))
+    enddo
+
+    ! send messages forward along each row
+    if(iproc_xi == 0) then
+      sender = MPI_PROC_NULL
+    else
+      sender = addressing(ichunk,iproc_xi - 1,iproc_eta)
+    endif
+    if(iproc_xi == NPROC_XI-1) then
+      receiver = MPI_PROC_NULL
+    else
+      receiver = addressing(ichunk,iproc_xi + 1,iproc_eta)
+    endif
+    call MPI_SENDRECV(buffer_send_faces_scalar,npoin2D_xi(2),CUSTOM_MPI_TYPE,receiver, &
+          itag2,buffer_received_faces_scalar,npoin2D_xi(1),CUSTOM_MPI_TYPE,sender, &
+          itag,MPI_COMM_WORLD,msg_status,ier)
+
+    ! all slices add the buffer received to the contributions on the left face
+    if(iproc_xi > 0) then
+      do ipoin=1,npoin2D_xi(1)
+        array_val(iboolleft_xi(ipoin)) = array_val(iboolleft_xi(ipoin)) + &
+                                buffer_received_faces_scalar(ipoin)
+      enddo
+    endif
+
+    ! the contributions are correctly assembled on the left side of each slice
+    ! now we have to send the result back to the sender
+    ! all slices copy the left face into the buffer
+    do ipoin=1,npoin2D_xi(1)
+      buffer_send_faces_scalar(ipoin) = array_val(iboolleft_xi(ipoin))
+    enddo
+
+    ! send messages backward along each row
+    if(iproc_xi == NPROC_XI-1) then
+      sender = MPI_PROC_NULL
+    else
+      sender = addressing(ichunk,iproc_xi + 1,iproc_eta)
+    endif
+    if(iproc_xi == 0) then
+      receiver = MPI_PROC_NULL
+    else
+      receiver = addressing(ichunk,iproc_xi - 1,iproc_eta)
+    endif
+    call MPI_SENDRECV(buffer_send_faces_scalar,npoin2D_xi(1),CUSTOM_MPI_TYPE,receiver, &
+          itag2,buffer_received_faces_scalar,npoin2D_xi(2),CUSTOM_MPI_TYPE,sender, &
+          itag,MPI_COMM_WORLD,msg_status,ier)
+
+    ! all slices copy the buffer received to the contributions on the right face
+    if(iproc_xi < NPROC_XI-1) then
+      do ipoin=1,npoin2D_xi(2)
+        array_val(iboolright_xi(ipoin)) = buffer_received_faces_scalar(ipoin)
+      enddo
+    endif
+
+  endif
+
+!----
+!---- then assemble along eta using the 2-D topology
+!----
+
+! assemble along eta only if more than one slice
+  if(NPROC_ETA > 1) then
+
+    ! slices copy the right face into the buffer
+    do ipoin=1,npoin2D_eta(2)
+      buffer_send_faces_scalar(ipoin) = array_val(iboolright_eta(ipoin))
+    enddo
+
+    ! send messages forward along each row
+    if(iproc_eta == 0) then
+      sender = MPI_PROC_NULL
+    else
+      sender = addressing(ichunk,iproc_xi,iproc_eta - 1)
+    endif
+    if(iproc_eta == NPROC_ETA-1) then
+      receiver = MPI_PROC_NULL
+    else
+      receiver = addressing(ichunk,iproc_xi,iproc_eta + 1)
+    endif
+    call MPI_SENDRECV(buffer_send_faces_scalar,npoin2D_eta(2),CUSTOM_MPI_TYPE,receiver, &
+      itag2,buffer_received_faces_scalar,npoin2D_eta(1),CUSTOM_MPI_TYPE,sender, &
+      itag,MPI_COMM_WORLD,msg_status,ier)
+
+    ! all slices add the buffer received to the contributions on the left face
+    if(iproc_eta > 0) then
+      do ipoin=1,npoin2D_eta(1)
+        array_val(iboolleft_eta(ipoin)) = array_val(iboolleft_eta(ipoin)) + &
+                                buffer_received_faces_scalar(ipoin)
+      enddo
+    endif
+
+    ! the contributions are correctly assembled on the left side of each slice
+    ! now we have to send the result back to the sender
+    ! all slices copy the left face into the buffer
+    do ipoin=1,npoin2D_eta(1)
+      buffer_send_faces_scalar(ipoin) = array_val(iboolleft_eta(ipoin))
+    enddo
+
+    ! send messages backward along each row
+    if(iproc_eta == NPROC_ETA-1) then
+      sender = MPI_PROC_NULL
+    else
+      sender = addressing(ichunk,iproc_xi,iproc_eta + 1)
+    endif
+    if(iproc_eta == 0) then
+      receiver = MPI_PROC_NULL
+    else
+      receiver = addressing(ichunk,iproc_xi,iproc_eta - 1)
+    endif
+    call MPI_SENDRECV(buffer_send_faces_scalar,npoin2D_eta(1),CUSTOM_MPI_TYPE,receiver, &
+      itag2,buffer_received_faces_scalar,npoin2D_eta(2),CUSTOM_MPI_TYPE,sender, &
+      itag,MPI_COMM_WORLD,msg_status,ier)
+
+    ! all slices copy the buffer received to the contributions on the right face
+    if(iproc_eta < NPROC_ETA-1) then
+      do ipoin=1,npoin2D_eta(2)
+        array_val(iboolright_eta(ipoin)) = buffer_received_faces_scalar(ipoin)
+      enddo
+    endif
+
+  endif
+
+!----
+!---- start MPI assembling phase between chunks
+!----
+
+! check flag to see if we need to assemble (might be turned off when debugging)
+! and do not assemble if only one chunk
+  if (.not. ACTUALLY_ASSEMBLE_MPI_CHUNKS .or. NCHUNKS == 1) return
+
+! ***************************************************************
+!  transmit messages in forward direction (iprocfrom -> iprocto)
+! ***************************************************************
+
+!---- put slices in receive mode
+!---- a given slice can belong to at most two faces
+
+! use three step scheme that can never deadlock
+! scheme for faces cannot deadlock even if NPROC_XI = NPROC_ETA = 1
+  do imsg_loop = 1,NUM_MSG_TYPES
+
+    icount_faces = 0
+    do imsg = 1,NUMMSGS_FACES
+      if(myrank==iprocfrom_faces(imsg) .or. &
+           myrank==iprocto_faces(imsg)) icount_faces = icount_faces + 1
+      if(myrank==iprocto_faces(imsg) .and. imsg_type(imsg) == imsg_loop) then
+        sender = iprocfrom_faces(imsg)
+        npoin2D_chunks = npoin2D_faces(icount_faces)
+        call MPI_RECV(buffer_received_faces_scalar, &
+                  npoin2D_chunks,CUSTOM_MPI_TYPE,sender, &
+                  itag,MPI_COMM_WORLD,msg_status,ier)
+        do ipoin2D=1,npoin2D_chunks
+          array_val(iboolfaces(ipoin2D,icount_faces)) = &
+             array_val(iboolfaces(ipoin2D,icount_faces)) + buffer_received_faces_scalar(ipoin2D)
+        enddo
+      endif
+    enddo
+
+    !---- put slices in send mode
+    !---- a given slice can belong to at most two faces
+    icount_faces = 0
+    do imsg = 1,NUMMSGS_FACES
+      if(myrank==iprocfrom_faces(imsg) .or. &
+           myrank==iprocto_faces(imsg)) icount_faces = icount_faces + 1
+      if(myrank==iprocfrom_faces(imsg) .and. imsg_type(imsg) == imsg_loop) then
+        receiver = iprocto_faces(imsg)
+        npoin2D_chunks = npoin2D_faces(icount_faces)
+        do ipoin2D=1,npoin2D_chunks
+          buffer_send_faces_scalar(ipoin2D) = array_val(iboolfaces(ipoin2D,icount_faces))
+        enddo
+        call MPI_SEND(buffer_send_faces_scalar,npoin2D_chunks, &
+                  CUSTOM_MPI_TYPE,receiver,itag,MPI_COMM_WORLD,ier)
+      endif
+    enddo
+
+    ! *********************************************************************
+    !  transmit messages back in opposite direction (iprocto -> iprocfrom)
+    ! *********************************************************************
+
+    !---- put slices in receive mode
+    !---- a given slice can belong to at most two faces
+
+    icount_faces = 0
+    do imsg = 1,NUMMSGS_FACES
+      if(myrank==iprocfrom_faces(imsg) .or. &
+           myrank==iprocto_faces(imsg)) icount_faces = icount_faces + 1
+      if(myrank==iprocfrom_faces(imsg) .and. imsg_type(imsg) == imsg_loop) then
+        sender = iprocto_faces(imsg)
+        npoin2D_chunks = npoin2D_faces(icount_faces)
+        call MPI_RECV(buffer_received_faces_scalar, &
+                  npoin2D_chunks,CUSTOM_MPI_TYPE,sender, &
+                  itag,MPI_COMM_WORLD,msg_status,ier)
+        do ipoin2D=1,npoin2D_chunks
+          array_val(iboolfaces(ipoin2D,icount_faces)) = buffer_received_faces_scalar(ipoin2D)
+        enddo
+      endif
+    enddo
+
+    !---- put slices in send mode
+    !---- a given slice can belong to at most two faces
+    icount_faces = 0
+    do imsg = 1,NUMMSGS_FACES
+      if(myrank==iprocfrom_faces(imsg) .or. &
+           myrank==iprocto_faces(imsg)) icount_faces = icount_faces + 1
+      if(myrank==iprocto_faces(imsg) .and. imsg_type(imsg) == imsg_loop) then
+        receiver = iprocfrom_faces(imsg)
+        npoin2D_chunks = npoin2D_faces(icount_faces)
+        do ipoin2D=1,npoin2D_chunks
+          buffer_send_faces_scalar(ipoin2D) = array_val(iboolfaces(ipoin2D,icount_faces))
+        enddo
+        call MPI_SEND(buffer_send_faces_scalar,npoin2D_chunks, &
+                  CUSTOM_MPI_TYPE,receiver,itag,MPI_COMM_WORLD,ier)
+      endif
+    enddo
+
+! end of anti-deadlocking loop
+  enddo
+
+!----
+!---- start MPI assembling corners
+!----
+
+! scheme for corners cannot deadlock even if NPROC_XI = NPROC_ETA = 1
+
+! ***************************************************************
+!  transmit messages in forward direction (two workers -> master)
+! ***************************************************************
+
+  icount_corners = 0
+
+  do imsg = 1,NCORNERSCHUNKS
+
+    if(myrank == iproc_master_corners(imsg) .or. &
+       myrank == iproc_worker1_corners(imsg) .or. &
+       (NCHUNKS /= 2 .and. myrank == iproc_worker2_corners(imsg))) icount_corners = icount_corners + 1
+
+    !---- receive messages from the two workers on the master
+    if(myrank==iproc_master_corners(imsg)) then
+
+      ! receive from worker #1 and add to local array
+      sender = iproc_worker1_corners(imsg)
+      call MPI_RECV(buffer_recv_chunkcorn_scalar,NGLOB1D_RADIAL, &
+            CUSTOM_MPI_TYPE,sender,itag,MPI_COMM_WORLD,msg_status,ier)
+      do ipoin1D=1,NGLOB1D_RADIAL
+        array_val(iboolcorner(ipoin1D,icount_corners)) = array_val(iboolcorner(ipoin1D,icount_corners)) + &
+                 buffer_recv_chunkcorn_scalar(ipoin1D)
+      enddo
+
+      ! receive from worker #2 and add to local array
+      if(NCHUNKS /= 2) then
+        sender = iproc_worker2_corners(imsg)
+        call MPI_RECV(buffer_recv_chunkcorn_scalar,NGLOB1D_RADIAL, &
+              CUSTOM_MPI_TYPE,sender,itag,MPI_COMM_WORLD,msg_status,ier)
+        do ipoin1D=1,NGLOB1D_RADIAL
+          array_val(iboolcorner(ipoin1D,icount_corners)) = array_val(iboolcorner(ipoin1D,icount_corners)) + &
+                   buffer_recv_chunkcorn_scalar(ipoin1D)
+        enddo
+      endif
+
+    endif
+
+    !---- send messages from the two workers to the master
+    if(myrank==iproc_worker1_corners(imsg) .or. &
+                (NCHUNKS /= 2 .and. myrank==iproc_worker2_corners(imsg))) then
+
+      receiver = iproc_master_corners(imsg)
+      do ipoin1D=1,NGLOB1D_RADIAL
+        buffer_send_chunkcorn_scalar(ipoin1D) = array_val(iboolcorner(ipoin1D,icount_corners))
+      enddo
+      call MPI_SEND(buffer_send_chunkcorn_scalar,NGLOB1D_RADIAL,CUSTOM_MPI_TYPE, &
+                receiver,itag,MPI_COMM_WORLD,ier)
+
+    endif
+
+    ! *********************************************************************
+    !  transmit messages back in opposite direction (master -> two workers)
+    ! *********************************************************************
+
+    !---- receive messages from the master on the two workers
+    if(myrank==iproc_worker1_corners(imsg) .or. &
+                (NCHUNKS /= 2 .and. myrank==iproc_worker2_corners(imsg))) then
+
+      ! receive from master and copy to local array
+      sender = iproc_master_corners(imsg)
+      call MPI_RECV(buffer_recv_chunkcorn_scalar,NGLOB1D_RADIAL, &
+            CUSTOM_MPI_TYPE,sender,itag,MPI_COMM_WORLD,msg_status,ier)
+      do ipoin1D=1,NGLOB1D_RADIAL
+        array_val(iboolcorner(ipoin1D,icount_corners)) = buffer_recv_chunkcorn_scalar(ipoin1D)
+      enddo
+
+    endif
+
+    !---- send messages from the master to the two workers
+    if(myrank==iproc_master_corners(imsg)) then
+
+      do ipoin1D=1,NGLOB1D_RADIAL
+        buffer_send_chunkcorn_scalar(ipoin1D) = array_val(iboolcorner(ipoin1D,icount_corners))
+      enddo
+
+      ! send to worker #1
+      receiver = iproc_worker1_corners(imsg)
+      call MPI_SEND(buffer_send_chunkcorn_scalar,NGLOB1D_RADIAL,CUSTOM_MPI_TYPE, &
+                receiver,itag,MPI_COMM_WORLD,ier)
+
+      ! send to worker #2
+      if(NCHUNKS /= 2) then
+        receiver = iproc_worker2_corners(imsg)
+        call MPI_SEND(buffer_send_chunkcorn_scalar,NGLOB1D_RADIAL,CUSTOM_MPI_TYPE, &
+                  receiver,itag,MPI_COMM_WORLD,ier)
+      endif
+
+    endif
+
+  enddo
+
+  end subroutine assemble_MPI_scalar_block
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  subroutine assemble_MPI_scalar(NPROC,NGLOB_AB,array_val, &
+                        num_interfaces,max_nibool_interfaces, &
+                        nibool_interfaces,ibool_interfaces, &
+                        my_neighbours)
+
+! blocking send/receive
+
+  implicit none
+
+  include "constants.h"
+
+  integer :: NPROC
+  integer :: NGLOB_AB
+
+  ! array to assemble
+  real(kind=CUSTOM_REAL), dimension(NGLOB_AB) :: array_val
+
+  integer :: num_interfaces,max_nibool_interfaces
+  integer, dimension(num_interfaces) :: nibool_interfaces,my_neighbours
+  integer, dimension(max_nibool_interfaces,num_interfaces) :: ibool_interfaces
+
+  ! local parameters
+  real(kind=CUSTOM_REAL), dimension(:,:), allocatable :: buffer_send_scalar
+  real(kind=CUSTOM_REAL), dimension(:,:), allocatable :: buffer_recv_scalar
+  integer, dimension(:), allocatable :: request_send_scalar
+  integer, dimension(:), allocatable :: request_recv_scalar
+
+
+  integer ipoin,iinterface,ier
+
+! here we have to assemble all the contributions between partitions using MPI
+
+  ! assemble only if more than one partition
+  if(NPROC > 1) then
+
+    allocate(buffer_send_scalar(max_nibool_interfaces,num_interfaces),stat=ier)
+    if( ier /= 0 ) stop 'error allocating array buffer_send_scalar'
+    allocate(buffer_recv_scalar(max_nibool_interfaces,num_interfaces),stat=ier)
+    if( ier /= 0 ) stop 'error allocating array buffer_recv_scalar'
+    allocate(request_send_scalar(num_interfaces),stat=ier)
+    if( ier /= 0 ) stop 'error allocating array request_send_scalar'
+    allocate(request_recv_scalar(num_interfaces),stat=ier)
+    if( ier /= 0 ) stop 'error allocating array request_recv_scalar'
+
+    ! partition border copy into the buffer
+    do iinterface = 1, num_interfaces
+      do ipoin = 1, nibool_interfaces(iinterface)
+        buffer_send_scalar(ipoin,iinterface) = array_val(ibool_interfaces(ipoin,iinterface))
+      enddo
+    enddo
+
+    ! send messages
+    do iinterface = 1, num_interfaces
+      ! non-blocking synchronous send request
+      call isend_cr(buffer_send_scalar(1:nibool_interfaces(iinterface),iinterface), &
+           nibool_interfaces(iinterface), &
+           my_neighbours(iinterface), &
+           itag, &
+           request_send_scalar(iinterface) &
+           )
+      ! receive request
+      call irecv_cr(buffer_recv_scalar(1:nibool_interfaces(iinterface),iinterface), &
+           nibool_interfaces(iinterface), &
+           my_neighbours(iinterface), &
+           itag, &
+           request_recv_scalar(iinterface) &
+           )
+    enddo
+
+    ! wait for communications completion (recv)
+    do iinterface = 1, num_interfaces
+      call wait_req(request_recv_scalar(iinterface))
+    enddo
+
+    ! adding contributions of neighbours
+    do iinterface = 1, num_interfaces
+      do ipoin = 1, nibool_interfaces(iinterface)
+        array_val(ibool_interfaces(ipoin,iinterface)) = &
+             array_val(ibool_interfaces(ipoin,iinterface)) + buffer_recv_scalar(ipoin,iinterface)
+      enddo
+    enddo
+
+    ! wait for communications completion (send)
+    do iinterface = 1, num_interfaces
+      call wait_req(request_send_scalar(iinterface))
+    enddo
+
+    deallocate(buffer_send_scalar)
+    deallocate(buffer_recv_scalar)
+    deallocate(request_send_scalar)
+    deallocate(request_recv_scalar)
+
+  endif
+
+  end subroutine assemble_MPI_scalar
+

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/assemble_MPI_vector_mesh.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/assemble_MPI_vector_mesh.f90	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/assemble_MPI_vector_mesh.f90	2013-07-01 01:33:15 UTC (rev 22469)
@@ -0,0 +1,125 @@
+!=====================================================================
+!
+!          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
+!          --------------------------------------------------
+!
+!          Main authors: Dimitri Komatitsch and Jeroen Tromp
+!                        Princeton University, USA
+!             and University of Pau / CNRS / INRIA, France
+! (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
+!                            April 2011
+!
+! This program is free software; you can redistribute it and/or modify
+! it under the terms of the GNU General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or
+! (at your option) any later version.
+!
+! This program is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+! GNU General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License along
+! with this program; if not, write to the Free Software Foundation, Inc.,
+! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+!
+!=====================================================================
+
+
+  subroutine assemble_MPI_vector(NPROC,NGLOB_AB,array_val, &
+                        num_interfaces,max_nibool_interfaces, &
+                        nibool_interfaces,ibool_interfaces, &
+                        my_neighbours)
+
+! assembles an NDIM-component vector field across MPI partition boundaries:
+! the contributions of all neighbouring partitions are summed in place at
+! the shared interface points of array_val.
+!
+! input:
+!   NPROC                 - total number of MPI partitions (routine is a no-op when NPROC == 1)
+!   NGLOB_AB              - number of global points in this partition
+!   num_interfaces        - number of MPI interfaces of this partition
+!   max_nibool_interfaces - maximum number of interface points over all interfaces
+!   nibool_interfaces     - number of interface points, per interface
+!   ibool_interfaces      - global indices of the interface points, per interface
+!   my_neighbours         - MPI ranks of the neighbouring partitions
+! input/output:
+!   array_val             - (NDIM,NGLOB_AB) field, fully assembled on return
+
+  implicit none
+
+  include "constants.h"
+
+  integer :: NPROC
+  integer :: NGLOB_AB
+
+! array to assemble
+  real(kind=CUSTOM_REAL), dimension(NDIM,NGLOB_AB) :: array_val
+
+  integer :: num_interfaces,max_nibool_interfaces
+  integer, dimension(num_interfaces) :: nibool_interfaces,my_neighbours
+  integer, dimension(max_nibool_interfaces,num_interfaces) :: ibool_interfaces
+
+  ! local parameters
+
+  ! send/receive temporary buffers
+  real(kind=CUSTOM_REAL), dimension(:,:,:), allocatable :: buffer_send_vector
+  real(kind=CUSTOM_REAL), dimension(:,:,:), allocatable :: buffer_recv_vector
+
+  ! requests
+  integer, dimension(:), allocatable :: request_send_vector
+  integer, dimension(:), allocatable :: request_recv_vector
+
+  integer ipoin,iinterface,ier
+
+
+! here we have to assemble all the contributions between partitions using MPI
+
+  ! assemble only if more than one partition
+  if(NPROC > 1) then
+
+    allocate(buffer_send_vector(NDIM,max_nibool_interfaces,num_interfaces),stat=ier)
+    if( ier /= 0 ) stop 'error allocating array buffer_send_vector'
+    allocate(buffer_recv_vector(NDIM,max_nibool_interfaces,num_interfaces),stat=ier)
+    if( ier /= 0 ) stop 'error allocating array buffer_recv_vector'
+    allocate(request_send_vector(num_interfaces),stat=ier)
+    if( ier /= 0 ) stop 'error allocating array request_send_vector'
+    allocate(request_recv_vector(num_interfaces),stat=ier)
+    if( ier /= 0 ) stop 'error allocating array request_recv_vector'
+
+    ! partition border copy into the buffer
+    do iinterface = 1, num_interfaces
+      do ipoin = 1, nibool_interfaces(iinterface)
+        buffer_send_vector(:,ipoin,iinterface) = &
+          array_val(:,ibool_interfaces(ipoin,iinterface))
+      enddo
+    enddo
+
+    ! send messages
+    do iinterface = 1, num_interfaces
+      ! non-blocking send request (NDIM components per interface point)
+      call isend_cr(buffer_send_vector(1,1,iinterface), &
+           NDIM*nibool_interfaces(iinterface), &
+           my_neighbours(iinterface), &
+           itag, &
+           request_send_vector(iinterface) &
+           )
+      ! non-blocking receive request
+      call irecv_cr(buffer_recv_vector(1,1,iinterface), &
+           NDIM*nibool_interfaces(iinterface), &
+           my_neighbours(iinterface), &
+           itag, &
+           request_recv_vector(iinterface) &
+           )
+    enddo
+
+    ! wait for communications completion (recv)
+    do iinterface = 1, num_interfaces
+      call wait_req(request_recv_vector(iinterface))
+    enddo
+
+    ! adding contributions of neighbours
+    do iinterface = 1, num_interfaces
+      do ipoin = 1, nibool_interfaces(iinterface)
+        array_val(:,ibool_interfaces(ipoin,iinterface)) = &
+             array_val(:,ibool_interfaces(ipoin,iinterface)) &
+             + buffer_recv_vector(:,ipoin,iinterface)
+      enddo
+    enddo
+
+    ! wait for communications completion (send)
+    ! (send buffers may only be deallocated once the sends have completed)
+    do iinterface = 1, num_interfaces
+      call wait_req(request_send_vector(iinterface))
+    enddo
+
+    deallocate(buffer_send_vector)
+    deallocate(buffer_recv_vector)
+    deallocate(request_send_vector)
+    deallocate(request_recv_vector)
+
+  endif
+
+  end subroutine assemble_MPI_vector

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/compute_area.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/compute_area.f90	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/compute_area.f90	2013-07-01 01:33:15 UTC (rev 22469)
@@ -0,0 +1,97 @@
+!=====================================================================
+!
+!          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
+!          --------------------------------------------------
+!
+!          Main authors: Dimitri Komatitsch and Jeroen Tromp
+!                        Princeton University, USA
+!             and University of Pau / CNRS / INRIA, France
+! (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
+!                            April 2011
+!
+! This program is free software; you can redistribute it and/or modify
+! it under the terms of the GNU General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or
+! (at your option) any later version.
+!
+! This program is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+! GNU General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License along
+! with this program; if not, write to the Free Software Foundation, Inc.,
+! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+!
+!=====================================================================
+
+
+  subroutine compute_area(myrank,NCHUNKS,iregion_code, &
+                                    area_local_bottom,area_local_top,&
+                                    volume_local,volume_total, &
+                                    RCMB,RICB,R_CENTRAL_CUBE)
+
+! sums the slice-local top/bottom surface areas and the slice-local volume
+! over all MPI processes, and on the master process prints the totals
+! together with the exact analytical values for comparison (comparison is
+! only available for the full sphere, NCHUNKS == 6, without topography).
+!
+! myrank         - MPI rank of this process (all output is done on rank 0)
+! NCHUNKS        - number of cubed-sphere chunks in the mesh
+! iregion_code   - region id (crust/mantle, outer core or inner core)
+! area_local_*   - slice-local bottom/top surface areas (input)
+! volume_local   - slice-local volume (input)
+! volume_total   - running total volume; updated on rank 0 only, accumulated
+!                  over successive calls (one call per region)
+! RCMB,RICB      - radii of the core-mantle and inner-core boundaries
+! R_CENTRAL_CUBE - radius of the central cube
+
+  use meshfem3D_models_par
+
+  implicit none
+
+  integer :: myrank,NCHUNKS,iregion_code
+
+  double precision :: area_local_bottom,area_local_top,volume_local
+  double precision :: volume_total
+  double precision :: RCMB,RICB,R_CENTRAL_CUBE
+
+  ! local parameters
+  double precision :: volume_total_region,area_total_bottom,area_total_top
+
+  ! use MPI reduction to compute total area and volume
+  volume_total_region = ZERO
+  area_total_bottom   = ZERO
+  area_total_top   = ZERO
+
+  call sum_all_dp(area_local_bottom,area_total_bottom)
+  call sum_all_dp(area_local_top,area_total_top)
+  call sum_all_dp(volume_local,volume_total_region)
+
+  if(myrank == 0) then
+    !   sum volume over all the regions
+    volume_total = volume_total + volume_total_region
+
+    !   check volume of chunk, and bottom and top area
+    write(IMAIN,*)
+    write(IMAIN,*) '   calculated top area: ',area_total_top
+
+    ! compare to exact theoretical value
+    ! (the 6 chunks together cover the full sphere surface 4*PI*r**2,
+    !  all radii are non-dimensionalized by R_EARTH)
+    if(NCHUNKS == 6 .and. .not. TOPOGRAPHY) then
+      select case(iregion_code)
+        case(IREGION_CRUST_MANTLE)
+          write(IMAIN,*) '            exact area: ',dble(NCHUNKS)*(4.0d0/6.0d0)*PI*R_UNIT_SPHERE**2
+        case(IREGION_OUTER_CORE)
+          write(IMAIN,*) '            exact area: ',dble(NCHUNKS)*(4.0d0/6.0d0)*PI*(RCMB/R_EARTH)**2
+        case(IREGION_INNER_CORE)
+          write(IMAIN,*) '            exact area: ',dble(NCHUNKS)*(4.0d0/6.0d0)*PI*(RICB/R_EARTH)**2
+        case default
+          call exit_MPI(myrank,'incorrect region code')
+      end select
+    endif
+
+    write(IMAIN,*)
+    write(IMAIN,*) 'calculated bottom area: ',area_total_bottom
+
+    ! compare to exact theoretical value
+    ! (the bottom surface of one region is the top surface of the next one down)
+    if(NCHUNKS == 6 .and. .not. TOPOGRAPHY) then
+      select case(iregion_code)
+        case(IREGION_CRUST_MANTLE)
+          write(IMAIN,*) '            exact area: ',dble(NCHUNKS)*(4.0d0/6.0d0)*PI*(RCMB/R_EARTH)**2
+        case(IREGION_OUTER_CORE)
+          write(IMAIN,*) '            exact area: ',dble(NCHUNKS)*(4.0d0/6.0d0)*PI*(RICB/R_EARTH)**2
+        case(IREGION_INNER_CORE)
+          ! the bottom of the inner core region is the flat-faced central
+          ! cube, hence only a "similar" reference area is printed, not an
+          ! exact spherical one
+          write(IMAIN,*) '            similar area (central cube): ',dble(NCHUNKS)*(2.*(R_CENTRAL_CUBE / R_EARTH)/sqrt(3.))**2
+        case default
+          call exit_MPI(myrank,'incorrect region code')
+      end select
+    endif
+
+  endif
+
+  end subroutine compute_area

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/compute_volumes.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/compute_volumes.f90	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/compute_volumes.f90	2013-07-01 01:33:15 UTC (rev 22469)
@@ -0,0 +1,111 @@
+!=====================================================================
+!
+!          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
+!          --------------------------------------------------
+!
+!          Main authors: Dimitri Komatitsch and Jeroen Tromp
+!                        Princeton University, USA
+!             and University of Pau / CNRS / INRIA, France
+! (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
+!                            April 2011
+!
+! This program is free software; you can redistribute it and/or modify
+! it under the terms of the GNU General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or
+! (at your option) any later version.
+!
+! This program is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+! GNU General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License along
+! with this program; if not, write to the Free Software Foundation, Inc.,
+! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+!
+!=====================================================================
+
+
+  subroutine compute_volumes(volume_local,area_local_bottom,area_local_top, &
+                            nspec,wxgll,wygll,wzgll,xixstore,xiystore,xizstore, &
+                            etaxstore,etaystore,etazstore,gammaxstore,gammaystore,gammazstore, &
+                            NSPEC2D_BOTTOM,jacobian2D_bottom,NSPEC2D_TOP,jacobian2D_top)
+
+! computes the volume of this mesh slice and the areas of its bottom and
+! top surfaces by GLL quadrature:
+!   volume = sum over all GLL points of  w_i * w_j * w_k * jacobian
+!   area   = sum over all surface GLL points of  w_i * w_j * 2D jacobian
+!
+! the 3D jacobian at each point is recovered on the fly as the reciprocal
+! of the determinant of the stored derivative matrix (xix..gammaz), so no
+! separate jacobian array needs to be kept in memory
+
+  implicit none
+
+  include "constants.h"
+
+  ! outputs: slice-local volume and bottom/top surface areas
+  double precision :: volume_local,area_local_bottom,area_local_top
+
+  integer :: nspec
+  ! GLL integration weights in each local direction
+  double precision :: wxgll(NGLLX),wygll(NGLLY),wzgll(NGLLZ)
+
+  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,nspec) :: &
+    xixstore,xiystore,xizstore,etaxstore,etaystore,etazstore,gammaxstore,gammaystore,gammazstore
+
+  integer :: NSPEC2D_BOTTOM,NSPEC2D_TOP
+  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NSPEC2D_BOTTOM) :: jacobian2D_bottom
+  real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NSPEC2D_TOP) :: jacobian2D_top
+
+  ! local parameters
+  double precision :: weight
+  real(kind=CUSTOM_REAL) :: xixl,xiyl,xizl,etaxl,etayl,etazl,gammaxl,gammayl,gammazl,jacobianl
+  integer :: i,j,k,ispec
+
+  ! initializes
+  volume_local = ZERO
+  area_local_bottom = ZERO
+  area_local_top = ZERO
+
+  ! calculates volume of all elements in mesh
+  do ispec = 1,nspec
+    do k = 1,NGLLZ
+      do j = 1,NGLLY
+        do i = 1,NGLLX
+
+          ! 3D quadrature weight at this GLL point
+          weight = wxgll(i)*wygll(j)*wzgll(k)
+
+          ! compute the jacobian
+          xixl = xixstore(i,j,k,ispec)
+          xiyl = xiystore(i,j,k,ispec)
+          xizl = xizstore(i,j,k,ispec)
+          etaxl = etaxstore(i,j,k,ispec)
+          etayl = etaystore(i,j,k,ispec)
+          etazl = etazstore(i,j,k,ispec)
+          gammaxl = gammaxstore(i,j,k,ispec)
+          gammayl = gammaystore(i,j,k,ispec)
+          gammazl = gammazstore(i,j,k,ispec)
+
+          ! jacobian = 1 / det(stored derivative matrix), expanded by cofactors
+          jacobianl = 1._CUSTOM_REAL / (xixl*(etayl*gammazl-etazl*gammayl) &
+                        - xiyl*(etaxl*gammazl-etazl*gammaxl) &
+                        + xizl*(etaxl*gammayl-etayl*gammaxl))
+
+          ! accumulate in double precision to limit round-off
+          volume_local = volume_local + dble(jacobianl)*weight
+
+        enddo
+      enddo
+    enddo
+  enddo
+
+  ! area of bottom surface
+  do ispec = 1,NSPEC2D_BOTTOM
+    do i=1,NGLLX
+      do j=1,NGLLY
+        weight=wxgll(i)*wygll(j)
+        area_local_bottom = area_local_bottom + dble(jacobian2D_bottom(i,j,ispec))*weight
+      enddo
+    enddo
+  enddo
+
+  ! area of top surface
+  do ispec = 1,NSPEC2D_TOP
+    do i=1,NGLLX
+      do j=1,NGLLY
+        weight=wxgll(i)*wygll(j)
+        area_local_top = area_local_top + dble(jacobian2D_top(i,j,ispec))*weight
+      enddo
+    enddo
+  enddo
+
+  end subroutine compute_volumes
+

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/create_MPI_interfaces.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/create_MPI_interfaces.f90	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/create_MPI_interfaces.f90	2013-07-01 01:33:15 UTC (rev 22469)
@@ -0,0 +1,670 @@
+!=====================================================================
+!
+!          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
+!          --------------------------------------------------
+!
+!          Main authors: Dimitri Komatitsch and Jeroen Tromp
+!                        Princeton University, USA
+!             and University of Pau / CNRS / INRIA, France
+! (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
+!                            April 2011
+!
+! This program is free software; you can redistribute it and/or modify
+! it under the terms of the GNU General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or
+! (at your option) any later version.
+!
+! This program is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+! GNU General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License along
+! with this program; if not, write to the Free Software Foundation, Inc.,
+! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+!
+!=====================================================================
+
+
+
+  subroutine create_MPI_interfaces(iregion_code)
+
+! driver routine: sets up the MPI communication interfaces (faces, edges and
+! corner points shared between slices) for the given mesh region, in three
+! steps: allocate addressing arrays, fill them in, then build the MPI buffers
+
+  implicit none
+
+  integer,intent(in):: iregion_code
+
+  ! sets up arrays
+  call cmi_allocate_addressing(iregion_code)
+
+  ! gets in arrays
+  call cmi_get_addressing(iregion_code)
+
+  ! reads "iboolleft_..txt", "iboolright_..txt" (and "list_messages_..txt", "buffer_...txt") files and sets up MPI buffers
+  call cmi_get_buffers(iregion_code)
+
+  end subroutine create_MPI_interfaces
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  subroutine cmi_allocate_addressing(iregion_code)
+
+! copies the size constants of the requested region (crust/mantle, outer
+! core or inner core) from the global header arrays into region-specific
+! variables, then allocates the MPI addressing and buffer arrays for that
+! region
+
+  use meshfem3D_par,only: &
+    NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX, &
+    NSPEC2DMAX_XMIN_XMAX,NSPEC2DMAX_YMIN_YMAX, &
+    NSPEC2D_BOTTOM,NSPEC2D_TOP,NSPEC,NGLOB, &
+    myrank,NGLOB1D_RADIAL,NUMCORNERS_SHARED,NGLLX,NGLLY,NGLLZ
+
+  use create_MPI_interfaces_par
+
+  use MPI_crust_mantle_par
+  use MPI_outer_core_par
+  use MPI_inner_core_par
+
+  implicit none
+
+  integer,intent(in):: iregion_code
+
+  ! local parameters
+  integer :: ier
+
+  ! parameters from header file
+  NGLOB1D_RADIAL_CM = NGLOB1D_RADIAL(IREGION_CRUST_MANTLE)
+  NGLOB1D_RADIAL_OC = NGLOB1D_RADIAL(IREGION_OUTER_CORE)
+  NGLOB1D_RADIAL_IC = NGLOB1D_RADIAL(IREGION_INNER_CORE)
+
+  ! initializes
+  ! (sizes of the regions not being meshed in this call stay zero)
+  NSPEC_CRUST_MANTLE = 0
+  NGLOB_CRUST_MANTLE = 0
+
+  NSPEC_OUTER_CORE = 0
+  NGLOB_OUTER_CORE = 0
+
+  NSPEC_INNER_CORE = 0
+  NGLOB_INNER_CORE = 0
+
+  select case( iregion_code )
+  case( IREGION_CRUST_MANTLE )
+    NGLOB2DMAX_XMIN_XMAX_CM = NGLOB2DMAX_XMIN_XMAX(IREGION_CRUST_MANTLE)
+    NGLOB2DMAX_YMIN_YMAX_CM = NGLOB2DMAX_YMIN_YMAX(IREGION_CRUST_MANTLE)
+
+    NSPEC2DMAX_XMIN_XMAX_CM = NSPEC2DMAX_XMIN_XMAX(IREGION_CRUST_MANTLE)
+    NSPEC2DMAX_YMIN_YMAX_CM = NSPEC2DMAX_YMIN_YMAX(IREGION_CRUST_MANTLE)
+    NSPEC2D_BOTTOM_CM = NSPEC2D_BOTTOM(IREGION_CRUST_MANTLE)
+    NSPEC2D_TOP_CM = NSPEC2D_TOP(IREGION_CRUST_MANTLE)
+
+    NSPEC_CRUST_MANTLE = NSPEC(IREGION_CRUST_MANTLE)
+    NGLOB_CRUST_MANTLE = NGLOB(IREGION_CRUST_MANTLE)
+
+  case( IREGION_OUTER_CORE )
+    NGLOB2DMAX_XMIN_XMAX_OC = NGLOB2DMAX_XMIN_XMAX(IREGION_OUTER_CORE)
+    NGLOB2DMAX_YMIN_YMAX_OC = NGLOB2DMAX_YMIN_YMAX(IREGION_OUTER_CORE)
+
+    NSPEC2DMAX_XMIN_XMAX_OC = NSPEC2DMAX_XMIN_XMAX(IREGION_OUTER_CORE)
+    NSPEC2DMAX_YMIN_YMAX_OC = NSPEC2DMAX_YMIN_YMAX(IREGION_OUTER_CORE)
+    NSPEC2D_BOTTOM_OC = NSPEC2D_BOTTOM(IREGION_OUTER_CORE)
+    NSPEC2D_TOP_OC = NSPEC2D_TOP(IREGION_OUTER_CORE)
+
+    NSPEC_OUTER_CORE = NSPEC(IREGION_OUTER_CORE)
+    NGLOB_OUTER_CORE = NGLOB(IREGION_OUTER_CORE)
+
+  case( IREGION_INNER_CORE )
+    NGLOB2DMAX_XMIN_XMAX_IC = NGLOB2DMAX_XMIN_XMAX(IREGION_INNER_CORE)
+    NGLOB2DMAX_YMIN_YMAX_IC = NGLOB2DMAX_YMIN_YMAX(IREGION_INNER_CORE)
+
+    NSPEC2DMAX_XMIN_XMAX_IC = NSPEC2DMAX_XMIN_XMAX(IREGION_INNER_CORE)
+    NSPEC2DMAX_YMIN_YMAX_IC = NSPEC2DMAX_YMIN_YMAX(IREGION_INNER_CORE)
+    NSPEC2D_BOTTOM_IC = NSPEC2D_BOTTOM(IREGION_INNER_CORE)
+    NSPEC2D_TOP_IC = NSPEC2D_TOP(IREGION_INNER_CORE)
+
+    NSPEC_INNER_CORE = NSPEC(IREGION_INNER_CORE)
+    NGLOB_INNER_CORE = NGLOB(IREGION_INNER_CORE)
+
+  case default
+    stop 'error iregion_code value not recognized'
+  end select
+
+  ! allocates arrays
+  ! NOTE(review): the chunk-corner buffers below are sized with the
+  ! crust/mantle (+ inner core) radial point counts regardless of
+  ! iregion_code - presumably a single buffer large enough for every
+  ! region, but this should be confirmed
+  allocate(buffer_send_chunkcorn_scalar(NGLOB1D_RADIAL_CM), &
+          buffer_recv_chunkcorn_scalar(NGLOB1D_RADIAL_CM))
+
+  allocate(buffer_send_chunkcorn_vector(NDIM,NGLOB1D_RADIAL_CM + NGLOB1D_RADIAL_IC), &
+          buffer_recv_chunkcorn_vector(NDIM,NGLOB1D_RADIAL_CM + NGLOB1D_RADIAL_IC))
+
+  ! region-specific addressing arrays and mesh point coordinates
+  select case( iregion_code )
+  case( IREGION_CRUST_MANTLE )
+    ! crust mantle
+    allocate(iboolcorner_crust_mantle(NGLOB1D_RADIAL_CM,NUMCORNERS_SHARED))
+    allocate(iboolleft_xi_crust_mantle(NGLOB2DMAX_XMIN_XMAX_CM), &
+            iboolright_xi_crust_mantle(NGLOB2DMAX_XMIN_XMAX_CM))
+    allocate(iboolleft_eta_crust_mantle(NGLOB2DMAX_YMIN_YMAX_CM), &
+            iboolright_eta_crust_mantle(NGLOB2DMAX_YMIN_YMAX_CM))
+    allocate(iboolfaces_crust_mantle(NGLOB2DMAX_XY,NUMFACES_SHARED))
+
+    ! crust mantle mesh
+    allocate(xstore_crust_mantle(NGLOB_CRUST_MANTLE), &
+            ystore_crust_mantle(NGLOB_CRUST_MANTLE), &
+            zstore_crust_mantle(NGLOB_CRUST_MANTLE), &
+            stat=ier)
+    if( ier /= 0 ) call exit_mpi(myrank,'error allocating temporary crust mantle arrays')
+
+  case( IREGION_OUTER_CORE )
+    ! outer core
+    allocate(iboolcorner_outer_core(NGLOB1D_RADIAL_OC,NUMCORNERS_SHARED))
+    allocate(iboolleft_xi_outer_core(NGLOB2DMAX_XMIN_XMAX_OC), &
+            iboolright_xi_outer_core(NGLOB2DMAX_XMIN_XMAX_OC))
+    allocate(iboolleft_eta_outer_core(NGLOB2DMAX_YMIN_YMAX_OC), &
+            iboolright_eta_outer_core(NGLOB2DMAX_YMIN_YMAX_OC))
+    allocate(iboolfaces_outer_core(NGLOB2DMAX_XY,NUMFACES_SHARED))
+
+    ! outer core mesh
+    allocate(xstore_outer_core(NGLOB_OUTER_CORE), &
+            ystore_outer_core(NGLOB_OUTER_CORE), &
+            zstore_outer_core(NGLOB_OUTER_CORE), &
+            stat=ier)
+    if( ier /= 0 ) call exit_mpi(myrank,'error allocating temporary outer core arrays')
+
+  case( IREGION_INNER_CORE )
+    ! inner core
+    allocate(iboolcorner_inner_core(NGLOB1D_RADIAL_IC,NUMCORNERS_SHARED))
+    allocate(iboolleft_xi_inner_core(NGLOB2DMAX_XMIN_XMAX_IC), &
+            iboolright_xi_inner_core(NGLOB2DMAX_XMIN_XMAX_IC))
+    allocate(iboolleft_eta_inner_core(NGLOB2DMAX_YMIN_YMAX_IC), &
+            iboolright_eta_inner_core(NGLOB2DMAX_YMIN_YMAX_IC))
+    allocate(iboolfaces_inner_core(NGLOB2DMAX_XY,NUMFACES_SHARED))
+
+    ! inner core mesh
+    allocate(xstore_inner_core(NGLOB_INNER_CORE), &
+            ystore_inner_core(NGLOB_INNER_CORE), &
+            zstore_inner_core(NGLOB_INNER_CORE), &
+            stat=ier)
+    if( ier /= 0 ) call exit_mpi(myrank,'error allocating temporary inner core arrays')
+
+  end select
+
+  ! synchronize processes
+  call sync_all()
+
+  end subroutine cmi_allocate_addressing
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  subroutine cmi_get_addressing(iregion_code)
+
+! reads the mesh point coordinates of the given region into the
+! region-specific x/y/z store arrays (via cmi_read_solver_data) and checks
+! that the global numbering in ibool is consistent: indices must span
+! exactly 1 .. NGLOB of the region
+
+  use meshfem3D_par,only: &
+    myrank
+
+  use meshfem3D_par,only: &
+    ibool
+
+  use create_MPI_interfaces_par
+  use MPI_crust_mantle_par
+  use MPI_outer_core_par
+  use MPI_inner_core_par
+  implicit none
+
+  integer,intent(in):: iregion_code
+
+  ! read coordinates of the mesh
+  select case( iregion_code )
+  case( IREGION_CRUST_MANTLE )
+    ! crust mantle
+!    ibool_crust_mantle(:,:,:,:) = -1
+    call cmi_read_solver_data(NSPEC_CRUST_MANTLE,NGLOB_CRUST_MANTLE, &
+                             xstore_crust_mantle,ystore_crust_mantle,zstore_crust_mantle)
+
+    ! check that the number of points in this slice is correct
+    if(minval(ibool(:,:,:,:)) /= 1 .or. &
+      maxval(ibool(:,:,:,:)) /= NGLOB_CRUST_MANTLE) &
+        call exit_MPI(myrank,'incorrect global numbering: iboolmax does not equal nglob in crust and mantle')
+
+  case( IREGION_OUTER_CORE )
+    ! outer core
+!    ibool_outer_core(:,:,:,:) = -1
+    call cmi_read_solver_data(NSPEC_OUTER_CORE,NGLOB_OUTER_CORE, &
+                             xstore_outer_core,ystore_outer_core,zstore_outer_core)
+
+    ! check that the number of points in this slice is correct
+    if(minval(ibool(:,:,:,:)) /= 1 .or. &
+       maxval(ibool(:,:,:,:)) /= NGLOB_OUTER_CORE) &
+      call exit_MPI(myrank,'incorrect global numbering: iboolmax does not equal nglob in outer core')
+
+  case( IREGION_INNER_CORE )
+    ! inner core
+!    ibool_inner_core(:,:,:,:) = -1
+    call cmi_read_solver_data(NSPEC_INNER_CORE,NGLOB_INNER_CORE, &
+                             xstore_inner_core,ystore_inner_core,zstore_inner_core)
+
+    ! check that the number of points in this slice is correct
+    if(minval(ibool(:,:,:,:)) /= 1 .or. &
+      maxval(ibool(:,:,:,:)) /= NGLOB_INNER_CORE) &
+      call exit_MPI(myrank,'incorrect global numbering: iboolmax does not equal nglob in inner core')
+
+  end select
+
+  ! synchronize processes
+  call sync_all()
+
+  end subroutine cmi_get_addressing
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  subroutine cmi_get_buffers(iregion_code)
+
+  use meshfem3D_par,only: myrank,&
+    NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX, &
+    NGLOB1D_RADIAL,NSPEC2D_BOTTOM, &
+    NSPEC2DMAX_XMIN_XMAX,NSPEC2DMAX_YMIN_YMAX, &
+    NPROC_XI,NPROC_ETA,NCHUNKS,OUTPUT_FILES,IIN,INCLUDE_CENTRAL_CUBE, &
+    iproc_xi,iproc_eta,ichunk,addressing
+
+  use meshfem3D_par,only: &
+    ibool,idoubling,is_on_a_slice_edge
+
+  use create_regions_mesh_par2,only: &
+    ibelm_xmin,ibelm_xmax,ibelm_ymin,ibelm_ymax,ibelm_bottom,ibelm_top, &
+    nspec2D_xmin,nspec2D_xmax,nspec2D_ymin,nspec2D_ymax
+
+  use create_MPI_interfaces_par
+
+  use MPI_crust_mantle_par
+  use MPI_outer_core_par
+  use MPI_inner_core_par
+
+  implicit none
+
+  integer,intent(in):: iregion_code
+
+  ! local parameters
+  integer :: ier
+  ! for central cube buffers
+  integer :: nspec2D_xmin_inner_core,nspec2D_xmax_inner_core, &
+            nspec2D_ymin_inner_core,nspec2D_ymax_inner_core
+  integer, dimension(:),allocatable :: ibelm_xmin_inner_core,ibelm_xmax_inner_core
+  integer, dimension(:),allocatable :: ibelm_ymin_inner_core,ibelm_ymax_inner_core
+  integer, dimension(:),allocatable :: ibelm_top_inner_core
+
+  ! debug file output
+  character(len=150) :: filename
+  logical,parameter :: DEBUG = .false.
+
+  ! gets 2-D addressing for summation between slices with MPI
+
+  select case( iregion_code )
+  case( IREGION_CRUST_MANTLE )
+    ! mantle and crust
+    if(myrank == 0) then
+      write(IMAIN,*)
+      write(IMAIN,*) 'crust/mantle region:'
+    endif
+    call cmi_read_buffer_data(IREGION_CRUST_MANTLE, &
+                            NGLOB2DMAX_XMIN_XMAX(IREGION_CRUST_MANTLE), &
+                            NGLOB2DMAX_YMIN_YMAX(IREGION_CRUST_MANTLE), &
+                            NGLOB1D_RADIAL(IREGION_CRUST_MANTLE), &
+                            iboolleft_xi_crust_mantle,iboolright_xi_crust_mantle, &
+                            iboolleft_eta_crust_mantle,iboolright_eta_crust_mantle, &
+                            npoin2D_xi_crust_mantle,npoin2D_eta_crust_mantle, &
+                            iboolfaces_crust_mantle,npoin2D_faces_crust_mantle, &
+                            iboolcorner_crust_mantle)
+
+    ! note: fix_... routines below update is_on_a_slice_edge_.. arrays:
+    !          assign flags for each element which is on a rim of the slice
+    !          thus, they include elements on top and bottom not shared with other MPI partitions
+    !
+    !          we will re-set these flags when setting up inner/outer elements, but will
+    !          use these arrays for now as initial guess for the search for elements which share a global point
+    !          between different MPI processes
+    call fix_non_blocking_slices(is_on_a_slice_edge, &
+            iboolright_xi_crust_mantle,iboolleft_xi_crust_mantle, &
+            iboolright_eta_crust_mantle,iboolleft_eta_crust_mantle, &
+            npoin2D_xi_crust_mantle,npoin2D_eta_crust_mantle, &
+            ibool, &
+            NSPEC_CRUST_MANTLE,NGLOB_CRUST_MANTLE,NGLOB2DMAX_XMIN_XMAX_CM,NGLOB2DMAX_YMIN_YMAX_CM)
+
+    ! debug: saves element flags
+    if( DEBUG ) then
+      write(filename,'(a,i6.6)') trim(OUTPUT_FILES)//'/MPI_is_on_a_slice_edge_crust_mantle_proc',myrank
+      call write_VTK_data_elem_l(NSPEC_CRUST_MANTLE,NGLOB_CRUST_MANTLE, &
+                                xstore_crust_mantle,ystore_crust_mantle,zstore_crust_mantle, &
+                                ibool,is_on_a_slice_edge,filename)
+    endif
+
+    ! added this to reduce the size of the buffers
+    ! size of buffers is the sum of two sizes because we handle two regions in the same MPI call
+    !npoin2D_max_all_CM_IC = max(maxval(npoin2D_xi_crust_mantle(:) + npoin2D_xi_inner_core(:)), &
+    !                            maxval(npoin2D_eta_crust_mantle(:) + npoin2D_eta_inner_core(:)))
+    npoin2D_max_all_CM_IC = max(maxval(npoin2D_xi_crust_mantle(:)), &
+                                maxval(npoin2D_eta_crust_mantle(:)))
+
+  case( IREGION_OUTER_CORE )
+    ! outer core
+    if(myrank == 0) then
+      write(IMAIN,*)
+      write(IMAIN,*) 'outer core region:'
+    endif
+    call cmi_read_buffer_data(IREGION_OUTER_CORE, &
+                            NGLOB2DMAX_XMIN_XMAX(IREGION_OUTER_CORE), &
+                            NGLOB2DMAX_YMIN_YMAX(IREGION_OUTER_CORE), &
+                            NGLOB1D_RADIAL(IREGION_OUTER_CORE), &
+                            iboolleft_xi_outer_core,iboolright_xi_outer_core, &
+                            iboolleft_eta_outer_core,iboolright_eta_outer_core, &
+                            npoin2D_xi_outer_core,npoin2D_eta_outer_core, &
+                            iboolfaces_outer_core,npoin2D_faces_outer_core, &
+                            iboolcorner_outer_core)
+
+    ! note: fix_... routines below update is_on_a_slice_edge_.. arrays:
+    !          assign flags for each element which is on a rim of the slice
+    !          thus, they include elements on top and bottom not shared with other MPI partitions
+    !
+    !          we will re-set these flags when setting up inner/outer elements, but will
+    !          use these arrays for now as initial guess for the search for elements which share a global point
+    !          between different MPI processes
+    call fix_non_blocking_slices(is_on_a_slice_edge, &
+            iboolright_xi_outer_core,iboolleft_xi_outer_core, &
+            iboolright_eta_outer_core,iboolleft_eta_outer_core, &
+            npoin2D_xi_outer_core,npoin2D_eta_outer_core, &
+            ibool, &
+            NSPEC_OUTER_CORE,NGLOB_OUTER_CORE,NGLOB2DMAX_XMIN_XMAX_OC,NGLOB2DMAX_YMIN_YMAX_OC)
+
+    ! debug: saves element flags
+    if( DEBUG ) then
+      write(filename,'(a,i6.6)') trim(OUTPUT_FILES)//'/MPI_is_on_a_slice_edge_outer_core_proc',myrank
+      call write_VTK_data_elem_l(NSPEC_OUTER_CORE,NGLOB_OUTER_CORE, &
+                                xstore_outer_core,ystore_outer_core,zstore_outer_core, &
+                                ibool,is_on_a_slice_edge,filename)
+    endif
+
+    ! added this to reduce the size of the buffers
+    ! size of buffers is the sum of two sizes because we handle two regions in the same MPI call
+    npoin2D_max_all_CM_IC = max(maxval(npoin2D_xi_outer_core(:)), &
+                                maxval(npoin2D_eta_outer_core(:)))
+
+  case( IREGION_INNER_CORE )
+    ! inner core
+    if(myrank == 0) then
+      write(IMAIN,*)
+      write(IMAIN,*) 'inner core region:'
+    endif
+    call cmi_read_buffer_data(IREGION_INNER_CORE, &
+                            NGLOB2DMAX_XMIN_XMAX(IREGION_INNER_CORE), &
+                            NGLOB2DMAX_YMIN_YMAX(IREGION_INNER_CORE), &
+                            NGLOB1D_RADIAL(IREGION_INNER_CORE), &
+                            iboolleft_xi_inner_core,iboolright_xi_inner_core, &
+                            iboolleft_eta_inner_core,iboolright_eta_inner_core, &
+                            npoin2D_xi_inner_core,npoin2D_eta_inner_core, &
+                            iboolfaces_inner_core,npoin2D_faces_inner_core, &
+                            iboolcorner_inner_core)
+
+    ! central cube buffers
+    if(INCLUDE_CENTRAL_CUBE) then
+
+      if(myrank == 0) then
+        write(IMAIN,*)
+        write(IMAIN,*) 'including central cube'
+      endif
+      call sync_all()
+
+      ! allocates boundary indexing arrays for central cube
+      allocate(ibelm_xmin_inner_core(NSPEC2DMAX_XMIN_XMAX_IC), &
+              ibelm_xmax_inner_core(NSPEC2DMAX_XMIN_XMAX_IC), &
+              ibelm_ymin_inner_core(NSPEC2DMAX_YMIN_YMAX_IC), &
+              ibelm_ymax_inner_core(NSPEC2DMAX_YMIN_YMAX_IC), &
+              ibelm_top_inner_core(NSPEC2D_TOP_IC), &
+              ibelm_bottom_inner_core(NSPEC2D_BOTTOM_IC), &
+              stat=ier)
+      if( ier /= 0 ) call exit_MPI(myrank,'error allocating central cube index arrays')
+
+      ! gets coupling arrays for inner core
+      nspec2D_xmin_inner_core = nspec2D_xmin
+      nspec2D_xmax_inner_core = nspec2D_xmax
+      nspec2D_ymin_inner_core = nspec2D_ymin
+      nspec2D_ymax_inner_core = nspec2D_ymax
+
+      ibelm_xmin_inner_core(:) = ibelm_xmin(:)
+      ibelm_xmax_inner_core(:) = ibelm_xmax(:)
+      ibelm_ymin_inner_core(:) = ibelm_ymin(:)
+      ibelm_ymax_inner_core(:) = ibelm_ymax(:)
+      ibelm_bottom_inner_core(:) = ibelm_bottom(:)
+      ibelm_top_inner_core(:) = ibelm_top(:)
+
+      ! compute number of messages to expect in cube as well as their size
+      call comp_central_cube_buffer_size(iproc_xi,iproc_eta,ichunk, &
+                  NPROC_XI,NPROC_ETA,NSPEC2D_BOTTOM(IREGION_INNER_CORE), &
+                  nb_msgs_theor_in_cube,npoin2D_cube_from_slices)
+
+      ! this value is used for dynamic memory allocation, therefore make sure it is never zero
+      if(nb_msgs_theor_in_cube > 0) then
+        non_zero_nb_msgs_theor_in_cube = nb_msgs_theor_in_cube
+      else
+        non_zero_nb_msgs_theor_in_cube = 1
+      endif
+
+      ! allocate buffers for cube and slices
+      allocate(sender_from_slices_to_cube(non_zero_nb_msgs_theor_in_cube), &
+              buffer_all_cube_from_slices(non_zero_nb_msgs_theor_in_cube,npoin2D_cube_from_slices,NDIM), &
+              buffer_slices(npoin2D_cube_from_slices,NDIM), &
+              buffer_slices2(npoin2D_cube_from_slices,NDIM), &
+              ibool_central_cube(non_zero_nb_msgs_theor_in_cube,npoin2D_cube_from_slices),stat=ier)
+      if( ier /= 0 ) call exit_MPI(myrank,'error allocating cube buffers')
+
+      ! handles the communications with the central cube if it was included in the mesh
+      ! create buffers to assemble with the central cube
+      call create_central_cube_buffers(myrank,iproc_xi,iproc_eta,ichunk, &
+                 NPROC_XI,NPROC_ETA,NCHUNKS, &
+                 NSPEC_INNER_CORE,NGLOB_INNER_CORE, &
+                 NSPEC2DMAX_XMIN_XMAX(IREGION_INNER_CORE),NSPEC2DMAX_YMIN_YMAX(IREGION_INNER_CORE), &
+                 NSPEC2D_BOTTOM(IREGION_INNER_CORE), &
+                 addressing,ibool,idoubling, &
+                 xstore_inner_core,ystore_inner_core,zstore_inner_core, &
+                 nspec2D_xmin_inner_core,nspec2D_xmax_inner_core, &
+                 nspec2D_ymin_inner_core,nspec2D_ymax_inner_core, &
+                 ibelm_xmin_inner_core,ibelm_xmax_inner_core, &
+                 ibelm_ymin_inner_core,ibelm_ymax_inner_core,ibelm_bottom_inner_core, &
+                 nb_msgs_theor_in_cube,non_zero_nb_msgs_theor_in_cube,npoin2D_cube_from_slices, &
+                 receiver_cube_from_slices,sender_from_slices_to_cube,ibool_central_cube, &
+                 buffer_slices,buffer_slices2,buffer_all_cube_from_slices)
+
+      if(myrank == 0) write(IMAIN,*) ''
+
+      ! frees memory
+      deallocate(ibelm_xmin_inner_core,ibelm_xmax_inner_core)
+      deallocate(ibelm_ymin_inner_core,ibelm_ymax_inner_core)
+      deallocate(ibelm_top_inner_core)
+
+    else
+
+      ! allocate fictitious buffers for cube and slices with a dummy size
+      ! just to be able to use them as arguments in subroutine calls
+      allocate(sender_from_slices_to_cube(1), &
+              buffer_all_cube_from_slices(1,1,1), &
+              buffer_slices(1,1), &
+              buffer_slices2(1,1), &
+              ibool_central_cube(1,1),stat=ier)
+      if( ier /= 0 ) call exit_MPI(myrank,'error allocating dummy buffers')
+
+    endif
+
+    ! note: fix_... routines below update is_on_a_slice_edge_.. arrays:
+    !          assign flags for each element which is on a rim of the slice
+    !          thus, they include elements on top and bottom not shared with other MPI partitions
+    !
+    !          we will re-set these flags when setting up inner/outer elements, but will
+    !          use these arrays for now as initial guess for the search for elements which share a global point
+    !          between different MPI processes
+    call fix_non_blocking_slices(is_on_a_slice_edge, &
+            iboolright_xi_inner_core,iboolleft_xi_inner_core, &
+            iboolright_eta_inner_core,iboolleft_eta_inner_core, &
+            npoin2D_xi_inner_core,npoin2D_eta_inner_core, &
+            ibool, &
+            NSPEC_INNER_CORE,NGLOB_INNER_CORE,NGLOB2DMAX_XMIN_XMAX_IC,NGLOB2DMAX_YMIN_YMAX_IC)
+
+    if(INCLUDE_CENTRAL_CUBE) then
+      ! updates flags for elements on slice boundaries
+      call fix_non_blocking_central_cube(is_on_a_slice_edge, &
+           ibool,NSPEC_INNER_CORE,NGLOB_INNER_CORE,nb_msgs_theor_in_cube,ibelm_bottom_inner_core, &
+           idoubling,npoin2D_cube_from_slices, &
+           ibool_central_cube,NSPEC2D_BOTTOM(IREGION_INNER_CORE), &
+           ichunk,NPROC_XI)
+    endif
+
+    ! debug: saves element flags
+    if( DEBUG ) then
+      write(filename,'(a,i6.6)') trim(OUTPUT_FILES)//'/MPI_is_on_a_slice_edge_inner_core_proc',myrank
+      call write_VTK_data_elem_l(NSPEC_INNER_CORE,NGLOB_INNER_CORE, &
+                                xstore_inner_core,ystore_inner_core,zstore_inner_core, &
+                                ibool,is_on_a_slice_edge,filename)
+    endif
+
+    ! added this to reduce the size of the buffers
+    ! size of buffers is the sum of two sizes because we handle two regions in the same MPI call
+    npoin2D_max_all_CM_IC = max(maxval(npoin2D_xi_inner_core(:)), &
+                                maxval(npoin2D_eta_inner_core(:)))
+
+  end select
+
+
+  end subroutine cmi_get_buffers
+
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  subroutine cmi_read_solver_data(nspec,nglob,xstore_s,ystore_s,zstore_s)
+
+! converts the double precision mesh coordinates (xstore/ystore/zstore from
+! meshfem3D_par) into the CUSTOM_REAL arrays xstore_s/ystore_s/zstore_s
+  use meshfem3D_par,only: &
+    xstore,ystore,zstore,ibool
+
+  implicit none
+
+  include "constants.h"
+
+  integer :: nspec,nglob
+
+  ! output: global mesh point coordinates in solver precision
+  real(kind=CUSTOM_REAL), dimension(nglob) :: xstore_s,ystore_s,zstore_s
+
+  ! local parameters
+  integer :: i,j,k,ispec,iglob
+
+  ! loops over all GLL points of all elements; ibool maps (i,j,k,ispec) -> iglob
+  do ispec = 1,nspec
+    do k = 1,NGLLZ
+      do j = 1,NGLLY
+        do i = 1,NGLLX
+          iglob = ibool(i,j,k,ispec)
+          ! distinguish between single and double precision for reals
+          if(CUSTOM_REAL == SIZE_REAL) then
+            xstore_s(iglob) = sngl(xstore(i,j,k,ispec))
+            ystore_s(iglob) = sngl(ystore(i,j,k,ispec))
+            zstore_s(iglob) = sngl(zstore(i,j,k,ispec))
+          else
+            xstore_s(iglob) = xstore(i,j,k,ispec)
+            ystore_s(iglob) = ystore(i,j,k,ispec)
+            zstore_s(iglob) = zstore(i,j,k,ispec)
+          endif
+        enddo
+      enddo
+    enddo
+  enddo
+
+  end subroutine cmi_read_solver_data
+
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  subroutine cmi_read_buffer_data(iregion_code, &
+                                  NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX, &
+                                  NGLOB1D_RADIAL, &
+                                  iboolleft_xi_s,iboolright_xi_s, &
+                                  iboolleft_eta_s,iboolright_eta_s, &
+                                  npoin2D_xi_s,npoin2D_eta_s, &
+                                  iboolfaces_s,npoin2D_faces_s, &
+                                  iboolcorner_s)
+! copies the MPI buffer arrays from create_MPI_interfaces_par (side, face and corner buffers) into the region-specific output arrays and checks the number of chunk faces per slice
+  use meshfem3D_par,only: &
+    myrank,IMAIN,NDIM,NUMFACES_SHARED,NUMCORNERS_SHARED,NPROC_XI,NPROC_ETA
+
+  use create_MPI_interfaces_par
+
+  implicit none
+
+  integer :: iregion_code
+
+  integer :: NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX
+  integer :: NGLOB1D_RADIAL
+
+  integer, dimension(NGLOB2DMAX_XMIN_XMAX) :: iboolleft_xi_s,iboolright_xi_s
+  integer, dimension(NGLOB2DMAX_YMIN_YMAX) :: iboolleft_eta_s,iboolright_eta_s
+
+  integer, dimension(NB_SQUARE_EDGES_ONEDIR) :: npoin2D_xi_s,npoin2D_eta_s
+
+  integer, dimension(NGLOB2DMAX_XY,NUMFACES_SHARED) :: iboolfaces_s
+  integer, dimension(NUMFACES_SHARED) :: npoin2D_faces_s
+
+  integer, dimension(NGLOB1D_RADIAL,NUMCORNERS_SHARED) :: iboolcorner_s
+
+  ! local parameters
+  integer :: icount_faces,imsg
+
+  ! number of points on the 2-D cut planes in the xi and eta directions
+  npoin2D_xi_s(:) = npoin2D_xi_all(:)
+  npoin2D_eta_s(:) = npoin2D_eta_all(:)
+
+  ! gets MPI buffers on the slice sides (left/right in xi and eta)
+  iboolleft_xi_s(:) = iboolleft_xi(:)
+  iboolright_xi_s(:) = iboolright_xi(:)
+  iboolleft_eta_s(:) = iboolleft_eta(:)
+  iboolright_eta_s(:) = iboolright_eta(:)
+
+  ! gets the 1-D radial buffers at the shared chunk corners
+  iboolcorner_s(:,:) = iboolcorner(:,:)
+
+  ! gets the chunk face buffers and their point counts
+  npoin2D_faces_s(:) = npoin2D_faces(:)
+  iboolfaces_s(:,:) = iboolfaces(:,:)
+
+  ! checks indirect addressing for each message for faces of the chunks
+  ! a given slice can belong to at most two faces
+  icount_faces = 0
+  do imsg = 1,NUMMSGS_FACES
+    if(myrank == iprocfrom_faces(imsg) .or. myrank == iprocto_faces(imsg)) then
+      icount_faces = icount_faces + 1
+
+      if(icount_faces > NUMFACES_SHARED) then
+        print*,'error ',myrank,' icount_faces: ',icount_faces,'NUMFACES_SHARED:',NUMFACES_SHARED
+        print*,'iregion_code:',iregion_code
+        call exit_MPI(myrank,'more than NUMFACES_SHARED faces for this slice')
+      endif
+      if(icount_faces > 2 .and. (NPROC_XI > 1 .or. NPROC_ETA > 1)) then
+        print*,'error ',myrank,' icount_faces: ',icount_faces,'NPROC_XI:',NPROC_XI,'NPROC_ETA:',NPROC_ETA
+        print*,'iregion_code:',iregion_code
+        call exit_MPI(myrank,'more than two faces for this slice')
+      endif
+    endif
+  enddo
+
+  ! user output
+  if(myrank == 0) then
+    write(IMAIN,*) '  #max of points in MPI buffers along xi npoin2D_xi = ', &
+                                maxval(npoin2D_xi_s(:))
+    write(IMAIN,*) '  #max of array elements transferred npoin2D_xi*NDIM = ', &
+                                maxval(npoin2D_xi_s(:))*NDIM
+    write(IMAIN,*)
+    write(IMAIN,*) '  #max of points in MPI buffers along eta npoin2D_eta = ', &
+                                maxval(npoin2D_eta_s(:))
+    write(IMAIN,*) '  #max of array elements transferred npoin2D_eta*NDIM = ', &
+                                maxval(npoin2D_eta_s(:))*NDIM
+    write(IMAIN,*)
+  endif
+
+  end subroutine cmi_read_buffer_data
+

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/create_addressing.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/create_addressing.f90	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/create_addressing.f90	2013-07-01 01:33:15 UTC (rev 22469)
@@ -0,0 +1,129 @@
+!=====================================================================
+!
+!          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
+!          --------------------------------------------------
+!
+!          Main authors: Dimitri Komatitsch and Jeroen Tromp
+!                        Princeton University, USA
+!             and University of Pau / CNRS / INRIA, France
+! (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
+!                            April 2011
+!
+! This program is free software; you can redistribute it and/or modify
+! it under the terms of the GNU General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or
+! (at your option) any later version.
+!
+! This program is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+! GNU General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License along
+! with this program; if not, write to the Free Software Foundation, Inc.,
+! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+!
+!=====================================================================
+
+
+  subroutine create_addressing(myrank,NCHUNKS,NPROC,NPROC_ETA,NPROC_XI,NPROCTOT, &
+                        addressing,ichunk_slice,iproc_xi_slice,iproc_eta_slice, &
+                        OUTPUT_FILES)
+! builds the global slice addressing (chunk,iproc_xi,iproc_eta) <-> MPI rank tables; rank 0 also writes them to OUTPUT_FILES/addressing.txt and prints an ASCII map
+  implicit none
+
+  include "constants.h"
+
+  integer :: myrank,NCHUNKS,NPROC,NPROC_ETA,NPROC_XI,NPROCTOT
+
+  integer, dimension(NCHUNKS,0:NPROC_XI-1,0:NPROC_ETA-1) :: addressing
+  integer, dimension(0:NPROCTOT-1) :: ichunk_slice,iproc_xi_slice,iproc_eta_slice
+
+  character(len=150) :: OUTPUT_FILES
+
+  ! local parameters
+  integer :: ichunk,iproc_eta,iproc_xi,iprocnum,ier
+
+  ! initializes
+  addressing(:,:,:) = 0
+  ichunk_slice(:) = 0
+  iproc_xi_slice(:) = 0
+  iproc_eta_slice(:) = 0
+
+  ! loop on all the chunks to create global slice addressing for solver
+  if(myrank == 0) then
+    open(unit=IOUT,file=trim(OUTPUT_FILES)//'/addressing.txt',status='unknown',iostat=ier)
+    if( ier /= 0 ) call exit_mpi(myrank,'error opening addressing.txt')
+    write(IMAIN,*) 'creating global slice addressing'
+    write(IMAIN,*)
+  endif
+
+  ! global rank numbering: chunks are consecutive, NPROC slices per chunk (NPROC presumably = NPROC_XI*NPROC_ETA -- confirm in caller)
+  do ichunk = 1,NCHUNKS
+    do iproc_eta=0,NPROC_ETA-1
+      do iproc_xi=0,NPROC_XI-1
+        iprocnum = (ichunk-1)*NPROC + iproc_eta * NPROC_XI + iproc_xi
+        addressing(ichunk,iproc_xi,iproc_eta) = iprocnum
+        ichunk_slice(iprocnum) = ichunk
+        iproc_xi_slice(iprocnum) = iproc_xi
+        iproc_eta_slice(iprocnum) = iproc_eta
+        if(myrank == 0) write(IOUT,*) iprocnum,ichunk,iproc_xi,iproc_eta
+      enddo
+    enddo
+  enddo
+
+  if(myrank == 0) close(IOUT)
+
+  ! output an ASCII topology map of the slices (6-chunk meshes with < 1000 processes only); NOTE: the fixed '(20x)' indents below assume a small NPROC_ETA - TODO: scale indent by nproc
+  if (myrank == 0 ) then
+    if( NCHUNKS == 6 .and. NPROCTOT < 1000 ) then
+      write(IMAIN,*) 'Spatial distribution of the slices'
+      do iproc_xi = NPROC_XI-1, 0, -1
+        write(IMAIN,'(20x)',advance='no')
+        do iproc_eta = NPROC_ETA -1, 0, -1
+          ichunk = CHUNK_AB
+          write(IMAIN,'(i5)',advance='no') addressing(ichunk,iproc_xi,iproc_eta)
+        enddo
+        write(IMAIN,'(1x)',advance='yes')
+      enddo
+      write(IMAIN, *) ' '
+      ! middle band of the map: chunks BC, AC and BC_ANTIPODE side by side
+      do iproc_xi = NPROC_XI-1, 0, -1
+        write(IMAIN,'(1x)',advance='no')
+        do iproc_eta = NPROC_ETA -1, 0, -1
+          ichunk = CHUNK_BC
+          write(IMAIN,'(i5)',advance='no') addressing(ichunk,iproc_xi,iproc_eta)
+        enddo
+        write(IMAIN,'(3x)',advance='no')
+        do iproc_eta = NPROC_ETA -1, 0, -1
+          ichunk = CHUNK_AC
+          write(IMAIN,'(i5)',advance='no') addressing(ichunk,iproc_xi,iproc_eta)
+        enddo
+        write(IMAIN,'(3x)',advance='no')
+        do iproc_eta = NPROC_ETA -1, 0, -1
+          ichunk = CHUNK_BC_ANTIPODE
+          write(IMAIN,'(i5)',advance='no') addressing(ichunk,iproc_xi,iproc_eta)
+        enddo
+        write(IMAIN,'(1x)',advance='yes')
+      enddo
+      write(IMAIN, *) ' '
+      do iproc_xi = NPROC_XI-1, 0, -1
+        write(IMAIN,'(20x)',advance='no')
+        do iproc_eta = NPROC_ETA -1, 0, -1
+          ichunk = CHUNK_AB_ANTIPODE
+          write(IMAIN,'(i5)',advance='no') addressing(ichunk,iproc_xi,iproc_eta)
+        enddo
+        write(IMAIN,'(1x)',advance='yes')
+      enddo
+      write(IMAIN, *) ' '
+      do iproc_xi = NPROC_XI-1, 0, -1
+        write(IMAIN,'(20x)',advance='no')
+        do iproc_eta = NPROC_ETA -1, 0, -1
+          ichunk = CHUNK_AC_ANTIPODE
+          write(IMAIN,'(i5)',advance='no') addressing(ichunk,iproc_xi,iproc_eta)
+        enddo
+        write(IMAIN,'(1x)',advance='yes')
+      enddo
+      write(IMAIN, *) ' '
+    endif
+  endif
+
+  end subroutine create_addressing

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/create_central_cube_buffers.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/create_central_cube_buffers.f90	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/create_central_cube_buffers.f90	2013-07-01 01:33:15 UTC (rev 22469)
@@ -0,0 +1,633 @@
+!=====================================================================
+!
+!          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
+!          --------------------------------------------------
+!
+!          Main authors: Dimitri Komatitsch and Jeroen Tromp
+!                        Princeton University, USA
+!             and University of Pau / CNRS / INRIA, France
+! (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
+!                            April 2011
+!
+! This program is free software; you can redistribute it and/or modify
+! it under the terms of the GNU General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or
+! (at your option) any later version.
+!
+! This program is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+! GNU General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License along
+! with this program; if not, write to the Free Software Foundation, Inc.,
+! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+!
+!=====================================================================
+
+!
+!--- create buffers to assemble with central cube
+!
+
+  subroutine create_central_cube_buffers(myrank,iproc_xi,iproc_eta,ichunk, &
+                   NPROC_XI,NPROC_ETA,NCHUNKS, &
+                   NSPEC_INNER_CORE,NGLOB_INNER_CORE, &
+                   NSPEC2DMAX_XMIN_XMAX_INNER_CORE,NSPEC2DMAX_YMIN_YMAX_INNER_CORE, &
+                   NSPEC2D_BOTTOM_INNER_CORE, &
+                   addressing,ibool_inner_core,idoubling_inner_core, &
+                   xstore_inner_core,ystore_inner_core,zstore_inner_core, &
+                   nspec2D_xmin_inner_core,nspec2D_xmax_inner_core, &
+                   nspec2D_ymin_inner_core,nspec2D_ymax_inner_core, &
+                   ibelm_xmin_inner_core,ibelm_xmax_inner_core, &
+                   ibelm_ymin_inner_core,ibelm_ymax_inner_core,ibelm_bottom_inner_core, &
+                   nb_msgs_theor_in_cube,non_zero_nb_msgs_theor_in_cube,npoin2D_cube_from_slices, &
+                   receiver_cube_from_slices,sender_from_slices_to_cube,ibool_central_cube, &
+                   buffer_slices,buffer_slices2,buffer_all_cube_from_slices)
+
+  implicit none
+
+  ! standard include of the MPI library
+  include 'mpif.h'
+
+  include "constants.h"
+
+  integer, intent(in) :: myrank,iproc_xi,iproc_eta,ichunk, &
+       NPROC_XI,NPROC_ETA,NCHUNKS,NSPEC_INNER_CORE,NGLOB_INNER_CORE, &
+       NSPEC2DMAX_XMIN_XMAX_INNER_CORE,NSPEC2DMAX_YMIN_YMAX_INNER_CORE,NSPEC2D_BOTTOM_INNER_CORE
+
+  ! for addressing of the slices
+  integer, dimension(NCHUNKS,0:NPROC_XI-1,0:NPROC_ETA-1), intent(in) :: addressing
+
+  ! mesh parameters
+  integer, dimension(NGLLX,NGLLY,NGLLZ,NSPEC_INNER_CORE), intent(in) :: ibool_inner_core
+
+  ! local to global mapping
+  integer, dimension(NSPEC_INNER_CORE), intent(in) :: idoubling_inner_core
+
+  real(kind=CUSTOM_REAL), dimension(NGLOB_INNER_CORE), intent(in) :: xstore_inner_core,ystore_inner_core,zstore_inner_core
+
+  ! boundary parameters locator
+  integer, intent(in) :: nspec2D_xmin_inner_core,nspec2D_xmax_inner_core,nspec2D_ymin_inner_core,nspec2D_ymax_inner_core
+  integer, dimension(NSPEC2DMAX_XMIN_XMAX_INNER_CORE), intent(in) :: ibelm_xmin_inner_core,ibelm_xmax_inner_core
+  integer, dimension(NSPEC2DMAX_YMIN_YMAX_INNER_CORE), intent(in) :: ibelm_ymin_inner_core,ibelm_ymax_inner_core
+  integer, dimension(NSPEC2D_BOTTOM_INNER_CORE), intent(in) :: ibelm_bottom_inner_core
+
+  integer, intent(in) :: nb_msgs_theor_in_cube,non_zero_nb_msgs_theor_in_cube,npoin2D_cube_from_slices
+
+  ! for matching with central cube in inner core
+  integer, intent(out) :: receiver_cube_from_slices
+
+  integer, dimension(non_zero_nb_msgs_theor_in_cube), intent(out) :: sender_from_slices_to_cube
+  integer, dimension(non_zero_nb_msgs_theor_in_cube,npoin2D_cube_from_slices), intent(out) :: ibool_central_cube
+  double precision, dimension(npoin2D_cube_from_slices,NDIM), intent(out) :: buffer_slices,buffer_slices2
+  double precision, dimension(non_zero_nb_msgs_theor_in_cube,npoin2D_cube_from_slices,NDIM), intent(out) :: &
+        buffer_all_cube_from_slices
+
+  ! local variables below
+  integer i,j,k,ispec,ispec2D,iglob,ier
+  integer sender,receiver,imsg,ipoin,iproc_xi_loop
+
+  double precision x_target,y_target,z_target
+  double precision x_current,y_current,z_current
+
+  ! MPI status of messages to be received
+  integer msg_status(MPI_STATUS_SIZE)
+
+  integer :: nproc_xi_half_floor,nproc_xi_half_ceil
+
+  if( mod(NPROC_XI,2) /= 0 ) then
+    nproc_xi_half_floor = floor(NPROC_XI/2.d0)
+    nproc_xi_half_ceil = ceiling(NPROC_XI/2.d0)
+  else
+    nproc_xi_half_floor = NPROC_XI/2
+    nproc_xi_half_ceil = NPROC_XI/2
+  endif
+
+  ! check that the number of points in this slice is correct
+  if(minval(ibool_inner_core(:,:,:,:)) /= 1 .or. maxval(ibool_inner_core(:,:,:,:)) /= NGLOB_INNER_CORE) &
+    call exit_MPI(myrank,'incorrect global numbering: iboolmax does not equal nglob in inner core')
+
+
+!--- processor to send information to in cube from slices
+
+! four vertical sides first
+  if(ichunk == CHUNK_AC) then
+    if (iproc_xi < nproc_xi_half_floor) then
+      receiver_cube_from_slices = addressing(CHUNK_AB_ANTIPODE,NPROC_XI-1,iproc_eta)
+    else
+      receiver_cube_from_slices = addressing(CHUNK_AB,0,iproc_eta)
+    endif
+  else if(ichunk == CHUNK_BC) then
+    if (iproc_xi < nproc_xi_half_floor) then
+      receiver_cube_from_slices = addressing(CHUNK_AB_ANTIPODE,NPROC_XI-1-iproc_eta,NPROC_ETA-1)
+    else
+      receiver_cube_from_slices = addressing(CHUNK_AB,iproc_eta,NPROC_ETA-1)
+    endif
+  else if(ichunk == CHUNK_AC_ANTIPODE) then
+    if (iproc_xi <= ceiling((NPROC_XI/2.d0)-1)) then
+      receiver_cube_from_slices = addressing(CHUNK_AB,NPROC_XI-1,iproc_eta)
+    else
+      receiver_cube_from_slices = addressing(CHUNK_AB_ANTIPODE,0,iproc_eta)
+    endif
+  else if(ichunk == CHUNK_BC_ANTIPODE) then
+    if (iproc_xi < nproc_xi_half_floor) then
+      receiver_cube_from_slices = addressing(CHUNK_AB_ANTIPODE,iproc_eta,0)
+    else
+      receiver_cube_from_slices = addressing(CHUNK_AB,NPROC_XI-1-iproc_eta,0)
+    endif
+! bottom of cube, direct correspondance but with inverted xi axis
+  else if(ichunk == CHUNK_AB_ANTIPODE) then
+    receiver_cube_from_slices = addressing(CHUNK_AB,NPROC_XI-1-iproc_xi,iproc_eta)
+  else if(ichunk == CHUNK_AB) then
+    receiver_cube_from_slices = addressing(CHUNK_AB_ANTIPODE,NPROC_XI-1-iproc_xi,iproc_eta)
+  endif
+
+
+!--- list of processors to receive information from in cube
+
+! only for slices in central cube
+  if(ichunk == CHUNK_AB) then
+    ! initialize index of sender
+    imsg = 0
+
+    ! define sender for xi = xi_min edge
+    if(iproc_xi == 0) then
+      do iproc_xi_loop = nproc_xi_half_floor,NPROC_XI-1
+        imsg = imsg + 1
+        sender_from_slices_to_cube(imsg) = addressing(CHUNK_AC,iproc_xi_loop,iproc_eta)
+      enddo
+    endif
+
+    ! define sender for xi = xi_max edge
+    if(iproc_xi == NPROC_XI-1) then
+      do iproc_xi_loop = 0, floor((NPROC_XI-1)/2.d0)
+        imsg = imsg + 1
+        sender_from_slices_to_cube(imsg) = addressing(CHUNK_AC_ANTIPODE,iproc_xi_loop,iproc_eta)
+      enddo
+    endif
+
+    ! define sender for eta = eta_min edge
+    if(iproc_eta == 0) then
+      do iproc_xi_loop = nproc_xi_half_floor,NPROC_XI-1
+        imsg = imsg + 1
+        sender_from_slices_to_cube(imsg) = addressing(CHUNK_BC_ANTIPODE,iproc_xi_loop,NPROC_ETA-1-iproc_xi)
+      enddo
+    endif
+
+    ! define sender for eta = eta_max edge
+    if(iproc_eta == NPROC_ETA-1) then
+      do iproc_xi_loop = nproc_xi_half_floor,NPROC_XI-1
+        imsg = imsg + 1
+        sender_from_slices_to_cube(imsg) = addressing(CHUNK_BC,iproc_xi_loop,iproc_xi)
+      enddo
+    endif
+
+    ! define sender for bottom edge
+    ! bottom of cube, direct correspondence but with inverted xi axis
+    imsg = imsg + 1
+    sender_from_slices_to_cube(imsg) = addressing(CHUNK_AB_ANTIPODE,NPROC_XI-1-iproc_xi,iproc_eta)
+
+    ! check that total number of faces found is correct
+    if(imsg /= nb_msgs_theor_in_cube) then
+      print*,'error ',myrank,'nb_msgs_theor_in_cube:',nb_msgs_theor_in_cube,imsg
+      call exit_MPI(myrank,'wrong number of faces found for central cube')
+    endif
+
+  else if(ichunk == CHUNK_AB_ANTIPODE) then
+    ! initialize index of sender
+    imsg = 0
+
+    ! define sender for xi = xi_min edge
+    if(iproc_xi == 0) then
+      do iproc_xi_loop = nproc_xi_half_ceil,NPROC_XI-1
+        imsg = imsg + 1
+        sender_from_slices_to_cube(imsg) = addressing(CHUNK_AC_ANTIPODE,iproc_xi_loop,iproc_eta)
+      enddo
+    endif
+
+    ! define sender for xi = xi_max edge
+    if(iproc_xi == NPROC_XI-1) then
+      do iproc_xi_loop = 0, floor((NPROC_XI/2.d0)-1.d0)
+        imsg = imsg + 1
+        sender_from_slices_to_cube(imsg) = addressing(CHUNK_AC,iproc_xi_loop,iproc_eta)
+      enddo
+    endif
+
+    ! define sender for eta = eta_min edge
+    if(iproc_eta == 0) then
+      do iproc_xi_loop = 0, floor((NPROC_XI/2.d0)-1.d0)
+        imsg = imsg + 1
+        sender_from_slices_to_cube(imsg) = addressing(CHUNK_BC_ANTIPODE,iproc_xi_loop,iproc_xi)
+      enddo
+    endif
+
+    ! define sender for eta = eta_max edge
+    if(iproc_eta == NPROC_ETA-1) then
+      do iproc_xi_loop = 0, floor((NPROC_XI/2.d0)-1.d0)
+        imsg = imsg + 1
+        sender_from_slices_to_cube(imsg) = addressing(CHUNK_BC,iproc_xi_loop,NPROC_ETA-1-iproc_xi)
+      enddo
+    endif
+
+    ! in case NPROC_XI == 1, the other chunks exchange all bottom points with
+    ! CHUNK_AB **and** CHUNK_AB_ANTIPODE
+    if(NPROC_XI==1) then
+      ! define sender for xi = xi_min edge
+      if(iproc_xi == 0) then
+        imsg = imsg + 1
+        sender_from_slices_to_cube(imsg) = addressing(CHUNK_AC_ANTIPODE,0,iproc_eta)
+      endif
+
+      ! define sender for xi = xi_max edge
+      if(iproc_xi == NPROC_XI-1) then
+        imsg = imsg + 1
+        sender_from_slices_to_cube(imsg) = addressing(CHUNK_AC,0,iproc_eta)
+      endif
+
+      ! define sender for eta = eta_min edge
+      if(iproc_eta == 0) then
+        imsg = imsg + 1
+        sender_from_slices_to_cube(imsg) = addressing(CHUNK_BC_ANTIPODE,0,iproc_xi)
+      endif
+
+      ! define sender for eta = eta_max edge
+      if(iproc_eta == NPROC_ETA-1) then
+        imsg = imsg + 1
+        sender_from_slices_to_cube(imsg) = addressing(CHUNK_BC,0,NPROC_ETA-1-iproc_xi)
+      endif
+    endif
+
+    ! define sender for bottom edge
+    ! bottom of cube, direct correspondence but with inverted xi axis
+    imsg = imsg + 1
+    sender_from_slices_to_cube(imsg) = addressing(CHUNK_AB,NPROC_XI-1-iproc_xi,iproc_eta)
+
+    ! check that total number of faces found is correct
+    if(imsg /= nb_msgs_theor_in_cube) then
+      print*,'error ',myrank,'nb_msgs_theor_in_cube:',nb_msgs_theor_in_cube,imsg
+      call exit_MPI(myrank,'wrong number of faces found for central cube')
+    endif
+
+  else
+
+    ! dummy value in slices
+    sender_from_slices_to_cube(1) = -1
+
+  endif
+
+
+! on chunk AB & AB ANTIPODE, receive all (except bottom) the messages from slices
+  if(ichunk == CHUNK_AB .or. ichunk == CHUNK_AB_ANTIPODE) then
+    do imsg = 1,nb_msgs_theor_in_cube-1
+
+    ! receive buffers from slices
+    sender = sender_from_slices_to_cube(imsg)
+    call MPI_RECV(buffer_slices, &
+              NDIM*npoin2D_cube_from_slices,MPI_DOUBLE_PRECISION,sender, &
+              itag,MPI_COMM_WORLD,msg_status,ier)
+
+    ! copy buffer in 2D array for each slice
+    buffer_all_cube_from_slices(imsg,:,:) = buffer_slices(:,:)
+
+    enddo
+  endif
+
+  ! send info to central cube from all the slices except those in CHUNK_AB & CHUNK_AB_ANTIPODE
+  if(ichunk /= CHUNK_AB .and. ichunk /= CHUNK_AB_ANTIPODE ) then
+    ! for bottom elements in contact with central cube from the slices side
+    ipoin = 0
+    do ispec2D = 1,NSPEC2D_BOTTOM_INNER_CORE
+
+      ispec = ibelm_bottom_inner_core(ispec2D)
+
+      ! only for DOFs exactly on surface of central cube (bottom of these elements)
+      k = 1
+      do j = 1,NGLLY
+        do i = 1,NGLLX
+          ipoin = ipoin + 1
+          iglob = ibool_inner_core(i,j,k,ispec)
+          buffer_slices(ipoin,1) = dble(xstore_inner_core(iglob))
+          buffer_slices(ipoin,2) = dble(ystore_inner_core(iglob))
+          buffer_slices(ipoin,3) = dble(zstore_inner_core(iglob))
+        enddo
+      enddo
+    enddo
+
+    ! send buffer to central cube
+    receiver = receiver_cube_from_slices
+    call MPI_SEND(buffer_slices,NDIM*npoin2D_cube_from_slices, &
+              MPI_DOUBLE_PRECISION,receiver,itag,MPI_COMM_WORLD,ier)
+
+    ! in case NPROC_XI == 1, the other chunks exchange all bottom points with
+    ! CHUNK_AB **and** CHUNK_AB_ANTIPODE
+    if(NPROC_XI==1) then
+      call MPI_SEND(buffer_slices,NDIM*npoin2D_cube_from_slices, &
+                   MPI_DOUBLE_PRECISION, &
+                   addressing(CHUNK_AB_ANTIPODE,0,iproc_eta), &
+                   itag,MPI_COMM_WORLD,ier)
+    endif
+
+  endif  ! end sending info to central cube
+
+
+  ! exchange of their bottom faces between chunks AB and AB_ANTIPODE
+  if(ichunk == CHUNK_AB .or. ichunk == CHUNK_AB_ANTIPODE) then
+    ipoin = 0
+    do ispec = NSPEC_INNER_CORE, 1, -1
+      if (idoubling_inner_core(ispec) == IFLAG_BOTTOM_CENTRAL_CUBE) then
+        k = 1
+        do j = 1,NGLLY
+          do i = 1,NGLLX
+            ipoin = ipoin + 1
+            iglob = ibool_inner_core(i,j,k,ispec)
+            buffer_slices(ipoin,1) = dble(xstore_inner_core(iglob))
+            buffer_slices(ipoin,2) = dble(ystore_inner_core(iglob))
+            buffer_slices(ipoin,3) = dble(zstore_inner_core(iglob))
+          enddo
+        enddo
+      endif
+    enddo
+    if (ipoin /= npoin2D_cube_from_slices) then
+      print*,'error',myrank,'bottom points:',npoin2D_cube_from_slices,ipoin
+      call exit_MPI(myrank,'wrong number of points found for bottom CC AB or !AB')
+    endif
+
+    sender = sender_from_slices_to_cube(nb_msgs_theor_in_cube)
+
+    call MPI_SENDRECV(buffer_slices,NDIM*npoin2D_cube_from_slices,MPI_DOUBLE_PRECISION,receiver_cube_from_slices, &
+        itag,buffer_slices2,NDIM*npoin2D_cube_from_slices,MPI_DOUBLE_PRECISION,sender, &
+        itag,MPI_COMM_WORLD,msg_status,ier)
+
+    buffer_all_cube_from_slices(nb_msgs_theor_in_cube,:,:) = buffer_slices2(:,:)
+
+  endif
+
+  !--- now we need to find the points received and create indirect addressing
+  ibool_central_cube(:,:) = -1
+
+  if(ichunk == CHUNK_AB .or. ichunk == CHUNK_AB_ANTIPODE) then
+
+   do imsg = 1,nb_msgs_theor_in_cube
+
+    do ipoin = 1,npoin2D_cube_from_slices
+
+      x_target = buffer_all_cube_from_slices(imsg,ipoin,1)
+      y_target = buffer_all_cube_from_slices(imsg,ipoin,2)
+      z_target = buffer_all_cube_from_slices(imsg,ipoin,3)
+
+      ! x = x_min
+      do ispec2D = 1,nspec2D_xmin_inner_core
+        ispec = ibelm_xmin_inner_core(ispec2D)
+        ! do not loop on elements outside of the central cube
+        if(idoubling_inner_core(ispec) /= IFLAG_MIDDLE_CENTRAL_CUBE .and. &
+          idoubling_inner_core(ispec) /= IFLAG_BOTTOM_CENTRAL_CUBE .and. &
+          idoubling_inner_core(ispec) /= IFLAG_TOP_CENTRAL_CUBE) cycle
+        ! check
+        if(idoubling_inner_core(ispec) == IFLAG_IN_FICTITIOUS_CUBE ) stop 'error xmin ibelm'
+        i = 1
+        do k = 1,NGLLZ
+          do j = 1,NGLLY
+           iglob = ibool_inner_core(i,j,k,ispec)
+           x_current = dble(xstore_inner_core(iglob))
+           y_current = dble(ystore_inner_core(iglob))
+           z_current = dble(zstore_inner_core(iglob))
+           ! look for matching point
+           if(dsqrt((x_current-x_target)**2 + (y_current-y_target)**2 + (z_current-z_target)**2) < SMALLVALTOL) then
+             ibool_central_cube(imsg,ipoin) = ibool_inner_core(i,j,k,ispec)
+             goto 100
+           endif
+          enddo
+        enddo
+      enddo
+
+      ! x = x_max
+      do ispec2D = 1,nspec2D_xmax_inner_core
+        ispec = ibelm_xmax_inner_core(ispec2D)
+        ! do not loop on elements outside of the central cube
+        if(idoubling_inner_core(ispec) /= IFLAG_MIDDLE_CENTRAL_CUBE .and. &
+            idoubling_inner_core(ispec) /= IFLAG_BOTTOM_CENTRAL_CUBE .and. &
+            idoubling_inner_core(ispec) /= IFLAG_TOP_CENTRAL_CUBE) cycle
+        !check
+        if(idoubling_inner_core(ispec) == IFLAG_IN_FICTITIOUS_CUBE ) stop 'error xmax ibelm'
+        i = NGLLX
+        do k = 1,NGLLZ
+          do j = 1,NGLLY
+            iglob = ibool_inner_core(i,j,k,ispec)
+            x_current = dble(xstore_inner_core(iglob))
+            y_current = dble(ystore_inner_core(iglob))
+            z_current = dble(zstore_inner_core(iglob))
+            ! look for matching point
+            if(dsqrt((x_current-x_target)**2 + (y_current-y_target)**2 + (z_current-z_target)**2) < SMALLVALTOL) then
+              ibool_central_cube(imsg,ipoin) = ibool_inner_core(i,j,k,ispec)
+              goto 100
+            endif
+          enddo
+        enddo
+      enddo
+
+      ! y = y_min
+      do ispec2D = 1,nspec2D_ymin_inner_core
+        ispec = ibelm_ymin_inner_core(ispec2D)
+        ! do not loop on elements outside of the central cube
+        if(idoubling_inner_core(ispec) /= IFLAG_MIDDLE_CENTRAL_CUBE .and. &
+            idoubling_inner_core(ispec) /= IFLAG_BOTTOM_CENTRAL_CUBE .and. &
+            idoubling_inner_core(ispec) /= IFLAG_TOP_CENTRAL_CUBE) cycle
+        !check
+        if(idoubling_inner_core(ispec) == IFLAG_IN_FICTITIOUS_CUBE ) stop 'error ymin ibelm'
+        j = 1
+        do k = 1,NGLLZ
+          do i = 1,NGLLX
+            iglob = ibool_inner_core(i,j,k,ispec)
+            x_current = dble(xstore_inner_core(iglob))
+            y_current = dble(ystore_inner_core(iglob))
+            z_current = dble(zstore_inner_core(iglob))
+            ! look for matching point
+            if(dsqrt((x_current-x_target)**2 + (y_current-y_target)**2 + (z_current-z_target)**2) < SMALLVALTOL) then
+              ibool_central_cube(imsg,ipoin) = ibool_inner_core(i,j,k,ispec)
+              goto 100
+            endif
+          enddo
+        enddo
+      enddo
+
+      ! y = y_max
+      do ispec2D = 1,nspec2D_ymax_inner_core
+        ispec = ibelm_ymax_inner_core(ispec2D)
+        ! do not loop on elements outside of the central cube
+        if(idoubling_inner_core(ispec) /= IFLAG_MIDDLE_CENTRAL_CUBE .and. &
+            idoubling_inner_core(ispec) /= IFLAG_BOTTOM_CENTRAL_CUBE .and. &
+            idoubling_inner_core(ispec) /= IFLAG_TOP_CENTRAL_CUBE) cycle
+        !check
+        if(idoubling_inner_core(ispec) == IFLAG_IN_FICTITIOUS_CUBE ) stop 'error ymax ibelm'
+        j = NGLLY
+        do k = 1,NGLLZ
+          do i = 1,NGLLX
+            iglob = ibool_inner_core(i,j,k,ispec)
+            x_current = dble(xstore_inner_core(iglob))
+            y_current = dble(ystore_inner_core(iglob))
+            z_current = dble(zstore_inner_core(iglob))
+            ! look for matching point
+            if(dsqrt((x_current-x_target)**2 + (y_current-y_target)**2 + (z_current-z_target)**2) < SMALLVALTOL) then
+              ibool_central_cube(imsg,ipoin) = ibool_inner_core(i,j,k,ispec)
+              goto 100
+            endif
+          enddo
+        enddo
+      enddo
+
+      ! bottom of cube
+      do ispec = 1,NSPEC_INNER_CORE
+        ! loop on elements at the bottom of the cube only
+        if(idoubling_inner_core(ispec) /= IFLAG_BOTTOM_CENTRAL_CUBE) cycle
+        k = 1
+        do j = 1,NGLLY
+          do i = 1,NGLLX
+            iglob = ibool_inner_core(i,j,k,ispec)
+            x_current = dble(xstore_inner_core(iglob))
+            y_current = dble(ystore_inner_core(iglob))
+            z_current = dble(zstore_inner_core(iglob))
+            ! look for matching point
+            if(dsqrt((x_current-x_target)**2 + (y_current-y_target)**2 + (z_current-z_target)**2) < SMALLVALTOL) then
+              ibool_central_cube(imsg,ipoin) = ibool_inner_core(i,j,k,ispec)
+              goto 100
+            endif
+          enddo
+        enddo
+      enddo
+
+      ! point not found so far
+      if(NPROC_XI==1) then
+        ! ignores point
+        ibool_central_cube(imsg,ipoin) = 0
+      else
+        ! check that a matching point is found in all cases
+        call exit_MPI(myrank,'point never found in central cube')
+      endif
+
+ 100  continue
+
+    enddo ! ipoin
+
+    ! checks ibool array
+    if(NPROC_XI==1) then
+      if( minval(ibool_central_cube(imsg,:)) < 0 ) call exit_mpi(myrank,'error ibool_central_cube point not found')
+
+      ! removes points on bottom surface in antipode chunk for other chunks than its AB sharing chunk
+      ! (to avoid adding the same point twice from other chunks)
+      if( ichunk == CHUNK_AB_ANTIPODE .and. imsg < nb_msgs_theor_in_cube ) then
+        do ipoin = 1,npoin2D_cube_from_slices
+          x_target = buffer_all_cube_from_slices(imsg,ipoin,1)
+          y_target = buffer_all_cube_from_slices(imsg,ipoin,2)
+          z_target = buffer_all_cube_from_slices(imsg,ipoin,3)
+
+          ! bottom of cube
+          do ispec = 1,NSPEC_INNER_CORE
+            ! loop on elements at the bottom of the cube only
+            if(idoubling_inner_core(ispec) /= IFLAG_BOTTOM_CENTRAL_CUBE) cycle
+            k = 1
+            do j = 1,NGLLY
+              do i = 1,NGLLX
+                iglob = ibool_inner_core(i,j,k,ispec)
+                x_current = dble(xstore_inner_core(iglob))
+                y_current = dble(ystore_inner_core(iglob))
+                z_current = dble(zstore_inner_core(iglob))
+                ! look for matching point
+                if(dsqrt((x_current-x_target)**2 + (y_current-y_target)**2 + (z_current-z_target)**2) < SMALLVALTOL) then
+                  ibool_central_cube(imsg,ipoin) = 0
+                  goto 200
+                endif
+              enddo
+            enddo
+          enddo
+
+ 200      continue
+
+        enddo ! ipoin
+      endif
+
+    endif ! NPROC_XI==1
+
+   enddo ! imsg
+  endif
+
+  end subroutine create_central_cube_buffers
+
+!
+!----------------------------------
+!
+
+  subroutine comp_central_cube_buffer_size(iproc_xi,iproc_eta,ichunk,NPROC_XI,NPROC_ETA,NSPEC2D_BOTTOM_INNER_CORE, &
+                nb_msgs_theor_in_cube,npoin2D_cube_from_slices)
+
+!--- compute the theoretical number of MPI messages a slice will receive in the
+!--- central cube, taking into account the "vertical" sides and the bottom side
+
+  implicit none
+
+  include "constants.h"
+
+  integer, intent(in) :: iproc_xi,iproc_eta,ichunk,NPROC_XI,NPROC_ETA,NSPEC2D_BOTTOM_INNER_CORE
+
+  integer, intent(out) :: nb_msgs_theor_in_cube,npoin2D_cube_from_slices
+
+  ! floor and ceiling of NPROC_XI/2; they only differ when NPROC_XI is odd
+  integer :: nproc_xi_half_floor,nproc_xi_half_ceil
+
+  if( mod(NPROC_XI,2) /= 0 ) then
+    nproc_xi_half_floor = floor(NPROC_XI/2.d0)
+    nproc_xi_half_ceil = ceiling(NPROC_XI/2.d0)
+  else
+    nproc_xi_half_floor = NPROC_XI/2
+    nproc_xi_half_ceil = NPROC_XI/2
+  endif
+
+! messages are expected only by slices inside the central cube (chunk AB or its antipode)
+  if(ichunk == CHUNK_AB) then
+    if(NPROC_XI == 1) then
+      ! five sides if only one processor in cube
+      nb_msgs_theor_in_cube = 5
+    else
+      ! case of a corner
+      if((iproc_xi == 0 .or. iproc_xi == NPROC_XI-1).and. &
+         (iproc_eta == 0 .or. iproc_eta == NPROC_ETA-1)) then
+        ! slices on both "vertical" faces plus one slice at the bottom
+        nb_msgs_theor_in_cube = 2*(nproc_xi_half_ceil) + 1
+      ! case of an edge
+      else if(iproc_xi == 0 .or. iproc_xi == NPROC_XI-1 .or. &
+              iproc_eta == 0 .or. iproc_eta == NPROC_ETA-1) then
+        ! slices on the "vertical" face plus one slice at the bottom
+        nb_msgs_theor_in_cube = nproc_xi_half_ceil + 1
+      else
+        ! bottom element only
+        nb_msgs_theor_in_cube = 1
+      endif
+    endif
+  else if(ichunk == CHUNK_AB_ANTIPODE) then
+    if(NPROC_XI == 1) then
+      ! five sides if only one processor in cube
+      nb_msgs_theor_in_cube = 5
+    else
+      ! case of a corner
+      if((iproc_xi == 0 .or. iproc_xi == NPROC_XI-1).and. &
+         (iproc_eta == 0 .or. iproc_eta == NPROC_ETA-1)) then
+        ! both "vertical" faces plus bottom; the antipode chunk uses the floor half
+        nb_msgs_theor_in_cube = 2*(nproc_xi_half_floor) + 1
+      ! case of an edge
+      else if(iproc_xi == 0 .or. iproc_xi == NPROC_XI-1 .or. &
+              iproc_eta == 0 .or. iproc_eta == NPROC_ETA-1) then
+        ! one "vertical" face plus bottom; the antipode chunk uses the floor half
+        nb_msgs_theor_in_cube = nproc_xi_half_floor + 1
+      else
+        ! bottom element only
+        nb_msgs_theor_in_cube = 1
+      endif
+    endif
+  else
+    ! slice is not inside the central cube: it expects no messages
+    nb_msgs_theor_in_cube = 0
+  endif
+
+  ! number of points to send or receive (bottom of slices)
+  npoin2D_cube_from_slices = NSPEC2D_BOTTOM_INNER_CORE * NGLLX * NGLLY
+
+  end subroutine comp_central_cube_buffer_size
+

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/create_meshes.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/create_meshes.f90	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/create_meshes.f90	2013-07-01 01:33:15 UTC (rev 22469)
@@ -0,0 +1,120 @@
+!=====================================================================
+!
+!          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
+!          --------------------------------------------------
+!
+!          Main authors: Dimitri Komatitsch and Jeroen Tromp
+!                        Princeton University, USA
+!             and University of Pau / CNRS / INRIA, France
+! (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
+!                            April 2011
+!
+! This program is free software; you can redistribute it and/or modify
+! it under the terms of the GNU General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or
+! (at your option) any later version.
+!
+! This program is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+! GNU General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License along
+! with this program; if not, write to the Free Software Foundation, Inc.,
+! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+!
+!=====================================================================
+
+
+  subroutine create_meshes()
+! mesh all the regions of the Earth (crust/mantle, outer core, inner core) for this slice
+  use meshfem3D_par
+  implicit none
+
+  ! local parameters
+  integer :: ipass
+  integer :: ier
+
+  ! get chunk and processor-grid addressing for this process
+  ichunk = ichunk_slice(myrank)
+  iproc_xi = iproc_xi_slice(myrank)
+  iproc_eta = iproc_eta_slice(myrank)
+
+  ! initialize the volume of this slice (accumulated by the mesh creation routines)
+  volume_total = ZERO
+
+  ! make sure everybody is synchronized
+  call sync_all()
+
+  !----
+  !----  loop on all the regions of the mesh
+  !----
+
+  ! loop over the number of regions in the full Earth
+  do iregion_code = 1,MAX_NUM_REGIONS
+
+    if(myrank == 0) then
+      write(IMAIN,*)
+      write(IMAIN,*) '*******************************************'
+      write(IMAIN,*) 'creating mesh in region ',iregion_code
+      select case(iregion_code)
+        case(IREGION_CRUST_MANTLE)
+          write(IMAIN,*) 'this region is the crust and mantle'
+        case(IREGION_OUTER_CORE)
+          write(IMAIN,*) 'this region is the outer core'
+        case(IREGION_INNER_CORE)
+          write(IMAIN,*) 'this region is the inner core'
+        case default
+          call exit_MPI(myrank,'incorrect region code')
+      end select
+      write(IMAIN,*) '*******************************************'
+      write(IMAIN,*)
+    endif
+
+    ! maximum number of points: every GLL point of every element, counted with multiplicity
+    npointot = NSPEC(iregion_code) * NGLLX * NGLLY * NGLLZ
+
+    ! use dynamic allocation to allocate memory for arrays
+    allocate(idoubling(NSPEC(iregion_code)), &
+            ibool(NGLLX,NGLLY,NGLLZ,NSPEC(iregion_code)), &
+            xstore(NGLLX,NGLLY,NGLLZ,NSPEC(iregion_code)), &
+            ystore(NGLLX,NGLLY,NGLLZ,NSPEC(iregion_code)), &
+            zstore(NGLLX,NGLLY,NGLLZ,NSPEC(iregion_code)), &
+            stat=ier)
+    if( ier /= 0 ) call exit_mpi(myrank,'error allocating memory for arrays')
+
+    ! slice-edge flags used by the non-blocking MPI assembly
+    allocate(is_on_a_slice_edge(NSPEC(iregion_code)), &
+            stat=ier)
+    if( ier /= 0 ) call exit_mpi(myrank,'error allocating is_on_a_slice_edge array')
+
+
+    ! create all the regions of the mesh
+    ! perform two passes in this part to be able to save memory
+    do ipass = 1,2
+      call create_regions_mesh(iregion_code, &
+                          NSPEC(iregion_code),NGLOB(iregion_code),npointot, &
+                          NEX_PER_PROC_XI,NEX_PER_PROC_ETA, &
+                          NSPEC2DMAX_XMIN_XMAX(iregion_code),NSPEC2DMAX_YMIN_YMAX(iregion_code), &
+                          NSPEC2D_BOTTOM(iregion_code),NSPEC2D_TOP(iregion_code), &
+                          mod(iproc_xi_slice(myrank),2),mod(iproc_eta_slice(myrank),2), &
+                          ipass)
+    enddo
+
+    ! deallocate arrays used for that region
+    deallocate(idoubling)
+    deallocate(ibool)
+    deallocate(xstore)
+    deallocate(ystore)
+    deallocate(zstore)
+
+    ! slice-edge flags used by the non-blocking MPI assembly
+    deallocate(is_on_a_slice_edge)
+
+    ! make sure everybody is synchronized
+    call sync_all()
+
+  ! end of loop on all the regions
+  enddo
+
+  end subroutine create_meshes

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/create_regions_mesh_adios.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/create_regions_mesh_adios.f90	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/create_regions_mesh_adios.f90	2013-07-01 01:33:15 UTC (rev 22469)
@@ -0,0 +1,175 @@
+!=====================================================================
+!
+!          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
+!          --------------------------------------------------
+!
+!          Main authors: Dimitri Komatitsch and Jeroen Tromp
+!                        Princeton University, USA
+!             and University of Pau / CNRS / INRIA, France
+! (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
+!                            April 2011
+!
+! This program is free software; you can redistribute it and/or modify
+! it under the terms of the GNU General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or
+! (at your option) any later version.
+!
+! This program is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+! GNU General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License along
+! with this program; if not, write to the Free Software Foundation, Inc.,
+! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+!
+!=====================================================================
+
+
+subroutine crm_save_mesh_files_adios(nspec,npointot,iregion_code, &
+    num_ibool_AVS_DX, mask_ibool)
+  use mpi
+  use adios_write_mod
+
+  use meshfem3d_par,only: &
+    ibool,idoubling, &
+    xstore,ystore,zstore, &
+    myrank,NGLLX,NGLLY,NGLLZ, &
+    RICB,RCMB,RTOPDDOUBLEPRIME,R600,R670,R220,R771,R400,R120,R80,RMOHO, &
+    RMIDDLE_CRUST,ROCEAN, &
+    ADIOS_FOR_AVS_DX, LOCAL_PATH
+  ! writes the AVS/DX mesh output of one region into a single ADIOS .bp file
+
+  use meshfem3D_models_par,only: &
+    ELLIPTICITY,ISOTROPIC_3D_MANTLE, &
+    nspl,rspl,espl,espl2
+
+  use create_regions_mesh_par2
+
+  ! Modules for temporary AVS/DX data
+  use AVS_DX_global_mod
+  use AVS_DX_global_faces_mod
+  use AVS_DX_global_chunks_mod
+  use AVS_DX_surface_mod
+
+  implicit none
+
+  ! number of spectral elements, total number of GLL points, and region index
+  integer,intent(in) :: nspec,npointot,iregion_code
+
+  ! local parameters
+  ! arrays used for AVS or DX files
+  integer, dimension(npointot), intent(inout) :: num_ibool_AVS_DX
+  logical, dimension(npointot), intent(inout) :: mask_ibool
+  ! structures used for ADIOS AVS/DX files
+  type(avs_dx_global_t) :: avs_dx_global_vars
+  type(avs_dx_global_faces_t) :: avs_dx_global_faces_vars
+  type(avs_dx_global_chunks_t) :: avs_dx_global_chunks_vars
+  type(avs_dx_surface_t) :: avs_dx_surface_vars
+
+  character(len=150) :: reg_name, outputname, group_name
+  integer :: comm, sizeprocs, ier
+  integer(kind=8) :: adios_group, group_size_inc, adios_totalsize, adios_handle
+
+  ! create a prefix for the file name such as LOCAL_PATH/regX_
+  call create_name_database_adios(reg_name,iregion_code,LOCAL_PATH)
+  outputname = trim(reg_name) // "AVS_DX.bp" 
+  write(group_name,"('SPECFEM3D_GLOBE_AVS_DX_reg',i1)") iregion_code
+  call world_size(sizeprocs) ! TODO keep it in parameters
+  ! duplicate MPI_COMM_WORLD so that ADIOS works on its own communicator
+  call MPI_Comm_dup (MPI_COMM_WORLD, comm, ier)
+  group_size_inc = 0
+  call adios_declare_group(adios_group, group_name, &
+      "", 0, ier)
+  ! We set the transport method to 'MPI'. This seems to be the correct choice
+  ! for now. We might want to move this to the constants.h file later on.
+  call adios_select_method(adios_group, "MPI", "", "", ier)
+
+  !--- Define ADIOS variables -----------------------------
+  call define_AVS_DX_global_data_adios(adios_group, myrank, nspec, ibool, &
+      npointot, mask_ibool, group_size_inc, avs_dx_global_vars)
+
+  call define_AVS_DX_global_faces_data_adios (adios_group, &
+      myrank, prname, nspec, iMPIcut_xi,iMPIcut_eta, &
+      ibool,idoubling,xstore,ystore,zstore,num_ibool_AVS_DX,mask_ibool, &
+      npointot,rhostore,kappavstore,muvstore,nspl,rspl,espl,espl2, &
+      ELLIPTICITY,ISOTROPIC_3D_MANTLE, &
+      RICB,RCMB,RTOPDDOUBLEPRIME,R600,R670,R220,R771,R400,R120,R80,RMOHO, &
+      RMIDDLE_CRUST,ROCEAN,iregion_code, &
+      group_size_inc, avs_dx_global_faces_vars)
+
+  call define_AVS_DX_global_chunks_data(adios_group, &
+      myrank,prname,nspec,iboun,ibool, &
+      idoubling,xstore,ystore,zstore,num_ibool_AVS_DX,mask_ibool, &
+      npointot,rhostore,kappavstore,muvstore,nspl,rspl,espl,espl2, &
+      ELLIPTICITY,ISOTROPIC_3D_MANTLE, &
+      RICB,RCMB,RTOPDDOUBLEPRIME,R600,R670,R220,R771,R400,R120,R80,RMOHO, &
+      RMIDDLE_CRUST,ROCEAN,iregion_code, &
+      group_size_inc, avs_dx_global_chunks_vars)
+
+  call define_AVS_DX_surfaces_data_adios(adios_group, &
+      myrank,prname,nspec,iboun, &
+      ibool,idoubling,xstore,ystore,zstore,num_ibool_AVS_DX,mask_ibool,npointot,&
+      rhostore,kappavstore,muvstore,nspl,rspl,espl,espl2, &
+      ELLIPTICITY,ISOTROPIC_3D_MANTLE, &
+      RICB,RCMB,RTOPDDOUBLEPRIME,R600,R670,R220,R771,R400,R120,R80,RMOHO, &
+      RMIDDLE_CRUST,ROCEAN,iregion_code, &
+      group_size_inc, avs_dx_surface_vars)
+
+  !--- Open an ADIOS handler to the AVS_DX file. ---------
+  call adios_open (adios_handle, group_name, &
+      outputname, "w", comm, ier);
+  call adios_group_size (adios_handle, group_size_inc, &
+                         adios_totalsize, ier)
+
+  !--- Schedule writes for the previously defined ADIOS variables
+  call prepare_AVS_DX_global_data_adios(adios_handle, myrank, &
+      nspec, ibool, idoubling, xstore, ystore, zstore, num_ibool_AVS_DX, &
+      mask_ibool, npointot, avs_dx_global_vars)
+  call write_AVS_DX_global_data_adios(adios_handle, myrank, &
+      sizeprocs, avs_dx_global_vars)
+
+  call prepare_AVS_DX_global_faces_data_adios (myrank, prname, nspec, &
+      iMPIcut_xi,iMPIcut_eta, &
+      ibool,idoubling,xstore,ystore,zstore,num_ibool_AVS_DX,mask_ibool, &
+      npointot,rhostore,kappavstore,muvstore,nspl,rspl,espl,espl2, &
+      ELLIPTICITY,ISOTROPIC_3D_MANTLE, &
+      RICB,RCMB,RTOPDDOUBLEPRIME,R600,R670,R220,R771,R400,R120,R80,RMOHO, &
+      RMIDDLE_CRUST,ROCEAN,iregion_code, &
+      avs_dx_global_faces_vars)
+  call write_AVS_DX_global_faces_data_adios(adios_handle, myrank, &
+      sizeprocs, avs_dx_global_faces_vars, ISOTROPIC_3D_MANTLE)
+
+  call prepare_AVS_DX_global_chunks_data_adios(myrank,prname,nspec, &
+      iboun,ibool, idoubling,xstore,ystore,zstore,num_ibool_AVS_DX,mask_ibool,&
+      npointot,rhostore,kappavstore,muvstore,nspl,rspl,espl,espl2, &
+      ELLIPTICITY,ISOTROPIC_3D_MANTLE, &
+      RICB,RCMB,RTOPDDOUBLEPRIME,R600,R670,R220,R771,R400,R120,R80,RMOHO, &
+      RMIDDLE_CRUST,ROCEAN,iregion_code, &
+      avs_dx_global_chunks_vars)
+  call write_AVS_DX_global_chunks_data_adios(adios_handle, myrank, &
+      sizeprocs, avs_dx_global_chunks_vars, ISOTROPIC_3D_MANTLE)
+
+  call prepare_AVS_DX_surfaces_data_adios(myrank,prname,nspec,iboun, &
+      ibool,idoubling,xstore,ystore,zstore,num_ibool_AVS_DX,mask_ibool,npointot,&
+      rhostore,kappavstore,muvstore,nspl,rspl,espl,espl2, &
+      ELLIPTICITY,ISOTROPIC_3D_MANTLE, &
+      RICB,RCMB,RTOPDDOUBLEPRIME,R600,R670,R220,R771,R400,R120,R80,RMOHO, &
+      RMIDDLE_CRUST,ROCEAN,iregion_code, &
+      avs_dx_surface_vars)
+  call write_AVS_DX_surfaces_data_adios(adios_handle, myrank, &
+      sizeprocs, avs_dx_surface_vars, ISOTROPIC_3D_MANTLE)
+
+  !--- Reset the path to zero and perform the actual write to disk
+  call adios_set_path (adios_handle, "", ier)
+  call adios_close(adios_handle, ier)
+
+  !--- Clean up temporary arrays -------------------------
+  call free_AVS_DX_global_data_adios(myrank, avs_dx_global_vars)
+  call free_AVS_DX_global_faces_data_adios(myrank, avs_dx_global_faces_vars, &
+      ISOTROPIC_3D_MANTLE)
+  call free_AVS_DX_global_chunks_data_adios(myrank, avs_dx_global_chunks_vars, &
+      ISOTROPIC_3D_MANTLE)
+  call free_AVS_DX_surfaces_data_adios(myrank, avs_dx_surface_vars, &
+      ISOTROPIC_3D_MANTLE)
+end subroutine crm_save_mesh_files_adios

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/finalize_mesher.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/finalize_mesher.f90	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/finalize_mesher.f90	2013-07-01 01:33:15 UTC (rev 22469)
@@ -0,0 +1,178 @@
+!=====================================================================
+!
+!          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
+!          --------------------------------------------------
+!
+!          Main authors: Dimitri Komatitsch and Jeroen Tromp
+!                        Princeton University, USA
+!             and University of Pau / CNRS / INRIA, France
+! (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
+!                            April 2011
+!
+! This program is free software; you can redistribute it and/or modify
+! it under the terms of the GNU General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or
+! (at your option) any later version.
+!
+! This program is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+! GNU General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License along
+! with this program; if not, write to the Free Software Foundation, Inc.,
+! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+!
+!=====================================================================
+
+
+  subroutine finalize_mesher()
+! print final mesh statistics, create the solver header file and clean up
+  use meshfem3D_par
+  use meshfem3D_models_par
+
+  implicit none
+
+  ! local parameters
+  ! wall-clock timing function (returns seconds)
+  double precision, external :: wtime
+
+  !--- print number of points and elements in the mesh for each region
+  if(myrank == 0) then
+
+    ! check volume of chunk
+    write(IMAIN,*)
+    write(IMAIN,*) 'calculated volume: ',volume_total
+    if(.not. TOPOGRAPHY) then
+      ! take the central cube into account
+      ! it is counted 6 times because of the fictitious elements
+      if(INCLUDE_CENTRAL_CUBE) then
+        write(IMAIN,*) '     exact volume: ', &
+          dble(NCHUNKS)*((4.0d0/3.0d0)*PI*(R_UNIT_SPHERE**3)+5.*(2.*(R_CENTRAL_CUBE/R_EARTH)/sqrt(3.))**3)/6.d0
+      else
+        write(IMAIN,*) '     exact volume: ', &
+          dble(NCHUNKS)*((4.0d0/3.0d0)*PI*(R_UNIT_SPHERE**3)-(2.*(R_CENTRAL_CUBE/R_EARTH)/sqrt(3.))**3)/6.d0
+      endif
+    endif
+
+    ! mesh statistics output
+    numelem_crust_mantle = NSPEC(IREGION_CRUST_MANTLE)
+    numelem_outer_core = NSPEC(IREGION_OUTER_CORE)
+    numelem_inner_core = NSPEC(IREGION_INNER_CORE)
+
+    numelem_total = numelem_crust_mantle + numelem_outer_core + numelem_inner_core
+
+    write(IMAIN,*)
+    write(IMAIN,*) 'Repartition of elements in regions:'
+    write(IMAIN,*) '----------------------------------'
+    write(IMAIN,*)
+    write(IMAIN,*) 'total number of elements in each slice: ',numelem_total
+    write(IMAIN,*)
+    write(IMAIN,*) ' - crust and mantle: ',sngl(100.d0*dble(numelem_crust_mantle)/dble(numelem_total)),' %'
+    write(IMAIN,*) ' - outer core: ',sngl(100.d0*dble(numelem_outer_core)/dble(numelem_total)),' %'
+    write(IMAIN,*) ' - inner core: ',sngl(100.d0*dble(numelem_inner_core)/dble(numelem_total)),' %'
+    write(IMAIN,*)
+    write(IMAIN,*) 'for some mesh statistics, see comments in file OUTPUT_FILES/values_from_mesher.h'
+    write(IMAIN,*)
+
+    ! load balancing
+    write(IMAIN,*) 'Load balancing = 100 % by definition'
+    write(IMAIN,*)
+
+    write(IMAIN,*)
+    write(IMAIN,*) 'total number of time steps in the solver will be: ',NSTEP
+    write(IMAIN,*)
+
+    write(IMAIN,*)
+    write(IMAIN,*) 'time-stepping of the solver will be: ',DT
+    write(IMAIN,*)
+
+    ! write information about precision used for floating-point operations
+    if(CUSTOM_REAL == SIZE_REAL) then
+      write(IMAIN,*) 'using single precision for the calculations'
+    else
+      write(IMAIN,*) 'using double precision for the calculations'
+    endif
+    write(IMAIN,*)
+    write(IMAIN,*) 'smallest and largest possible floating-point numbers are: ',tiny(1._CUSTOM_REAL),huge(1._CUSTOM_REAL)
+    write(IMAIN,*)
+
+    ! evaluate the amount of static memory needed by the solver
+    call memory_eval(OCEANS,ABSORBING_CONDITIONS,ATTENUATION,ANISOTROPIC_3D_MANTLE, &
+                   TRANSVERSE_ISOTROPY,ANISOTROPIC_INNER_CORE,ROTATION,TOPOGRAPHY, &
+                   ONE_CRUST,doubling_index,this_region_has_a_doubling,NCHUNKS, &
+                   ner,NEX_PER_PROC_XI,NEX_PER_PROC_ETA, &
+                   ratio_sampling_array,NPROCTOT, &
+                   NSPEC,nglob,SIMULATION_TYPE,MOVIE_VOLUME,SAVE_FORWARD, &
+                   NSPECMAX_ANISO_IC,NSPECMAX_ISO_MANTLE,NSPECMAX_TISO_MANTLE, &
+                   NSPECMAX_ANISO_MANTLE,NSPEC_CRUST_MANTLE_ATTENUAT, &
+                   NSPEC_INNER_CORE_ATTENUATION, &
+                   NSPEC_CRUST_MANTLE_STR_OR_ATT,NSPEC_INNER_CORE_STR_OR_ATT, &
+                   NSPEC_CRUST_MANTLE_STR_AND_ATT,NSPEC_INNER_CORE_STR_AND_ATT, &
+                   NSPEC_CRUST_MANTLE_STRAIN_ONLY,NSPEC_INNER_CORE_STRAIN_ONLY, &
+                   NSPEC_CRUST_MANTLE_ADJOINT, &
+                   NSPEC_OUTER_CORE_ADJOINT,NSPEC_INNER_CORE_ADJOINT, &
+                   NGLOB_CRUST_MANTLE_ADJOINT,NGLOB_OUTER_CORE_ADJOINT, &
+                   NGLOB_INNER_CORE_ADJOINT,NSPEC_OUTER_CORE_ROT_ADJOINT, &
+                   NSPEC_CRUST_MANTLE_STACEY,NSPEC_OUTER_CORE_STACEY, &
+                   NGLOB_CRUST_MANTLE_OCEANS,NSPEC_OUTER_CORE_ROTATION, &
+                   NSPEC2D_BOTTOM,NSPEC2D_TOP, &
+                   static_memory_size)
+
+    ! create include file for the solver
+    call save_header_file(NSPEC,nglob,NEX_XI,NEX_ETA,NPROC,NPROCTOT, &
+                    TRANSVERSE_ISOTROPY,ANISOTROPIC_3D_MANTLE,ANISOTROPIC_INNER_CORE, &
+                    ELLIPTICITY,GRAVITY,ROTATION, &
+                    OCEANS,ATTENUATION,ATTENUATION_NEW,ATTENUATION_3D, &
+                    ANGULAR_WIDTH_XI_IN_DEGREES,ANGULAR_WIDTH_ETA_IN_DEGREES,NCHUNKS, &
+                    INCLUDE_CENTRAL_CUBE,CENTER_LONGITUDE_IN_DEGREES,&
+                    CENTER_LATITUDE_IN_DEGREES,GAMMA_ROTATION_AZIMUTH,NSOURCES,NSTEP, &
+                    static_memory_size, &
+                    NSPEC2D_TOP,NSPEC2D_BOTTOM, &
+                    NSPEC2DMAX_YMIN_YMAX,NSPEC2DMAX_XMIN_XMAX, &
+                    NPROC_XI,NPROC_ETA, &
+                    NSPECMAX_ANISO_IC,NSPECMAX_ISO_MANTLE,NSPECMAX_TISO_MANTLE, &
+                    NSPECMAX_ANISO_MANTLE,NSPEC_CRUST_MANTLE_ATTENUAT, &
+                    NSPEC_INNER_CORE_ATTENUATION, &
+                    NSPEC_CRUST_MANTLE_STR_OR_ATT,NSPEC_INNER_CORE_STR_OR_ATT, &
+                    NSPEC_CRUST_MANTLE_STR_AND_ATT,NSPEC_INNER_CORE_STR_AND_ATT, &
+                    NSPEC_CRUST_MANTLE_STRAIN_ONLY,NSPEC_INNER_CORE_STRAIN_ONLY, &
+                    NSPEC_CRUST_MANTLE_ADJOINT, &
+                    NSPEC_OUTER_CORE_ADJOINT,NSPEC_INNER_CORE_ADJOINT, &
+                    NGLOB_CRUST_MANTLE_ADJOINT,NGLOB_OUTER_CORE_ADJOINT, &
+                    NGLOB_INNER_CORE_ADJOINT,NSPEC_OUTER_CORE_ROT_ADJOINT, &
+                    NSPEC_CRUST_MANTLE_STACEY,NSPEC_OUTER_CORE_STACEY, &
+                    NGLOB_CRUST_MANTLE_OCEANS,NSPEC_OUTER_CORE_ROTATION, &
+                    SIMULATION_TYPE,SAVE_FORWARD,MOVIE_VOLUME)
+
+  endif   ! end of section executed by main process only
+
+  ! deallocate arrays used for mesh generation
+  deallocate(addressing)
+  deallocate(ichunk_slice)
+  deallocate(iproc_xi_slice)
+  deallocate(iproc_eta_slice)
+
+  ! elapsed time since beginning of mesh generation
+  if(myrank == 0) then
+    tCPU = wtime() - time_start
+    write(IMAIN,*)
+    write(IMAIN,*) 'Elapsed time for mesh generation and buffer creation in seconds = ',tCPU
+    write(IMAIN,"(' Elapsed time for mesh generation and buffer creation in hh:mm:ss = ',i4,' h ',i2.2,' m ',i2.2,' s')") &
+              int(tCPU/3600),int( (tCPU - int(tCPU/3600)*3600)/60 ),int(tCPU - int(tCPU/60) * 60)
+    write(IMAIN,*)
+    write(IMAIN,*) 'End of mesh generation'
+    write(IMAIN,*)
+    ! close main output file
+    close(IMAIN)
+  endif
+
+  ! synchronize all the processes to make sure everybody has finished
+  call sync_all()
+
+  if (ADIOS_ENABLED) then
+    call adios_cleanup()
+  endif
+
+  end subroutine finalize_mesher
+

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/fix_non_blocking_flags.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/fix_non_blocking_flags.f90	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/fix_non_blocking_flags.f90	2013-07-01 01:33:15 UTC (rev 22469)
@@ -0,0 +1,184 @@
+!=====================================================================
+!
+!          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
+!          --------------------------------------------------
+!
+!          Main authors: Dimitri Komatitsch and Jeroen Tromp
+!                        Princeton University, USA
+!             and University of Pau / CNRS / INRIA, France
+! (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
+!                            April 2011
+!
+! This program is free software; you can redistribute it and/or modify
+! it under the terms of the GNU General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or
+! (at your option) any later version.
+!
+! This program is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+! GNU General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License along
+! with this program; if not, write to the Free Software Foundation, Inc.,
+! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+!
+!=====================================================================
+
+! fix the non blocking arrays to assemble the slices inside each chunk: elements
+! in contact with the MPI faces by an edge or a corner only but not
+! a full face are missing, therefore let us add them
+  subroutine fix_non_blocking_slices(is_on_a_slice_edge,iboolright_xi, &
+         iboolleft_xi,iboolright_eta,iboolleft_eta, &
+         npoin2D_xi,npoin2D_eta,ibool, &
+         nspec,nglob,NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX)
+
+  implicit none
+
+  include "constants.h"
+
+  integer :: nspec,nglob,NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX
+
+  integer, dimension(NB_SQUARE_EDGES_ONEDIR) :: npoin2D_xi,npoin2D_eta
+
+  logical, dimension(nspec) :: is_on_a_slice_edge
+
+  integer, dimension(NGLOB2DMAX_XMIN_XMAX) :: iboolleft_xi,iboolright_xi
+  integer, dimension(NGLOB2DMAX_YMIN_YMAX) :: iboolleft_eta,iboolright_eta
+
+  integer, dimension(NGLLX,NGLLY,NGLLZ,nspec) :: ibool
+
+  ! local parameters
+  logical, dimension(nglob) :: point_is_in_buffer
+  integer :: ip,ispec,i,j,k
+
+! start from an empty mask
+  point_is_in_buffer(:) = .false.
+
+! mark every global point that appears in one of the four 2D MPI buffers
+! used to assemble the slices inside each chunk
+  do ip = 1,npoin2D_xi(1)
+    point_is_in_buffer(iboolleft_xi(ip)) = .true.
+  enddo
+  do ip = 1,npoin2D_xi(2)
+    point_is_in_buffer(iboolright_xi(ip)) = .true.
+  enddo
+
+  do ip = 1,npoin2D_eta(1)
+    point_is_in_buffer(iboolleft_eta(ip)) = .true.
+  enddo
+  do ip = 1,npoin2D_eta(2)
+    point_is_in_buffer(iboolright_eta(ip)) = .true.
+  enddo
+
+! an element must contribute to the first step of the calculations (performed
+! on the edges before starting the non blocking communications) as soon as at
+! least one of its eight corners lies in a buffer; there is no need to examine
+! the inside of the elements, checking their eight corners is sufficient
+  do ispec = 1,nspec
+    corners: do k = 1,NGLLZ,NGLLZ-1
+      do j = 1,NGLLY,NGLLY-1
+        do i = 1,NGLLX,NGLLX-1
+          if(point_is_in_buffer(ibool(i,j,k,ispec))) then
+            is_on_a_slice_edge(ispec) = .true.
+            exit corners
+          endif
+        enddo
+      enddo
+    enddo corners
+  enddo
+
+  end subroutine fix_non_blocking_slices
+
+
+!-------------------------------------------------------------------------------------------------
+!-------------------------------------------------------------------------------------------------
+!-------------------------------------------------------------------------------------------------
+
+! fix the non blocking arrays to assemble the central cube: elements
+! in contact with the MPI faces by an edge or a corner only but not
+! a full face are missing, therefore let us add them
+  subroutine fix_non_blocking_central_cube(is_on_a_slice_edge, &
+         ibool,nspec,nglob,nb_msgs_theor_in_cube,ibelm_bottom_inner_core, &
+         idoubling_inner_core,npoin2D_cube_from_slices, &
+         ibool_central_cube,NSPEC2D_BOTTOM_INNER_CORE, &
+         ichunk,NPROC_XI)
+
+  implicit none
+
+  include "constants.h"
+
+  integer :: nspec,nglob,nb_msgs_theor_in_cube,NSPEC2D_BOTTOM_INNER_CORE
+  integer :: ichunk,npoin2D_cube_from_slices,NPROC_XI
+
+  logical, dimension(nspec) :: is_on_a_slice_edge
+
+  integer, dimension(NGLLX,NGLLY,NGLLZ,nspec) :: ibool
+
+  integer, dimension(nb_msgs_theor_in_cube,npoin2D_cube_from_slices) :: ibool_central_cube
+
+  integer, dimension(NSPEC2D_BOTTOM_INNER_CORE) :: ibelm_bottom_inner_core
+
+  integer, dimension(nspec) :: idoubling_inner_core
+
+  ! local parameters
+  logical, dimension(nglob) :: point_is_in_buffer
+  integer :: ip,ispec,i,j,k,imsg,ispec2D
+
+  if(ichunk /= CHUNK_AB .and. ichunk /= CHUNK_AB_ANTIPODE) then
+
+    ! slices that do not carry the central cube: flag the elements at the
+    ! bottom of the inner core, which are the ones facing the central cube
+    do ispec2D = 1,NSPEC2D_BOTTOM_INNER_CORE
+      is_on_a_slice_edge(ibelm_bottom_inner_core(ispec2D)) = .true.
+    enddo
+
+  else
+
+    ! slices carrying the central cube (chunk AB or its antipode)
+
+    ! flag the elements at the bottom and at the top of the central cube
+    do ispec = 1,nspec
+      if(idoubling_inner_core(ispec) == IFLAG_BOTTOM_CENTRAL_CUBE .or. &
+         idoubling_inner_core(ispec) == IFLAG_TOP_CENTRAL_CUBE) &
+        is_on_a_slice_edge(ispec) = .true.
+    enddo
+
+    ! mark every global point received from the slices around the cube
+    point_is_in_buffer(:) = .false.
+
+    do imsg = 1,nb_msgs_theor_in_cube
+      do ip = 1,npoin2D_cube_from_slices
+        if(NPROC_XI==1) then
+          ! with a single process per chunk side some buffer entries are unused (non-positive)
+          if(ibool_central_cube(imsg,ip) > 0 ) then
+            point_is_in_buffer(ibool_central_cube(imsg,ip)) = .true.
+          endif
+        else
+          point_is_in_buffer(ibool_central_cube(imsg,ip)) = .true.
+        endif
+      enddo
+    enddo
+
+    ! an element must contribute to the first step of the calculations
+    ! (performed on the edges before starting the non blocking communications)
+    ! as soon as at least one of its eight corners lies in a buffer; there is
+    ! no need to examine the inside of the elements, checking their eight
+    ! corners is sufficient
+    do ispec = 1,nspec
+      corners: do k = 1,NGLLZ,NGLLZ-1
+        do j = 1,NGLLY,NGLLY-1
+          do i = 1,NGLLX,NGLLX-1
+            if(point_is_in_buffer(ibool(i,j,k,ispec))) then
+              is_on_a_slice_edge(ispec) = .true.
+              exit corners
+            endif
+          enddo
+        enddo
+      enddo corners
+    enddo
+
+  endif
+
+  end subroutine fix_non_blocking_central_cube
+
+

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/get_MPI_interfaces.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/get_MPI_interfaces.f90	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/get_MPI_interfaces.f90	2013-07-01 01:33:15 UTC (rev 22469)
@@ -0,0 +1,746 @@
+!=====================================================================
+!
+!          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
+!          --------------------------------------------------
+!
+!          Main authors: Dimitri Komatitsch and Jeroen Tromp
+!                        Princeton University, USA
+!             and University of Pau / CNRS / INRIA, France
+! (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
+!                            April 2011
+!
+! This program is free software; you can redistribute it and/or modify
+! it under the terms of the GNU General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or
+! (at your option) any later version.
+!
+! This program is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+! GNU General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License along
+! with this program; if not, write to the Free Software Foundation, Inc.,
+! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+!
+!=====================================================================
+
+
+!> builds the MPI interface arrays for one region: the list of neighbour
+!! ranks (my_neighbours) and, per neighbour, the sorted list of shared
+!! global points (ibool_neighbours). on entry, test_flag holds for each
+!! shared global point the value rank+1 of the neighbour that owns it;
+!! faces are processed first, then edges, then corners, subtracting each
+!! neighbour's contribution from a working copy of the flags, which must
+!! be exactly zero everywhere at the end.
+  subroutine get_MPI_interfaces(myrank,NGLOB,NSPEC, &
+                                    test_flag,my_neighbours,nibool_neighbours,ibool_neighbours, &
+                                    num_interfaces,max_nibool_interfaces, &
+                                    max_nibool,MAX_NEIGHBOURS, &
+                                    ibool,&
+                                    is_on_a_slice_edge, &
+                                    IREGION,add_central_cube,idoubling,INCLUDE_CENTRAL_CUBE, &
+                                    xstore,ystore,zstore,NPROCTOT)
+
+  use constants,only: CUSTOM_REAL,NGLLX,NGLLY,NGLLZ,IREGION_INNER_CORE,IFLAG_IN_FICTITIOUS_CUBE
+  implicit none
+
+  integer,intent(in) :: myrank,NGLOB,NSPEC
+
+  real(kind=CUSTOM_REAL),dimension(NGLOB),intent(in) :: test_flag
+
+  integer,intent(in) :: max_nibool
+  integer,intent(in) :: MAX_NEIGHBOURS
+  integer, dimension(MAX_NEIGHBOURS),intent(inout) :: my_neighbours,nibool_neighbours
+  integer, dimension(max_nibool,MAX_NEIGHBOURS),intent(inout) :: ibool_neighbours
+
+  integer,intent(inout) :: num_interfaces,max_nibool_interfaces
+
+  integer,dimension(NGLLX,NGLLY,NGLLZ,NSPEC),intent(in) :: ibool
+
+  logical,dimension(NSPEC),intent(inout) :: is_on_a_slice_edge
+
+  integer,intent(in) :: IREGION
+  logical,intent(in) :: add_central_cube
+  integer,dimension(NSPEC),intent(in) :: idoubling
+
+  logical,intent(in) :: INCLUDE_CENTRAL_CUBE
+
+  real(kind=CUSTOM_REAL),dimension(NGLOB),intent(in) :: xstore,ystore,zstore
+
+  integer :: NPROCTOT
+
+  ! local parameters
+  integer :: ispec,iglob,j,k
+  integer :: iface,iedge,icorner
+  integer :: ii,iinterface,icurrent,rank
+  integer :: npoin
+  logical :: is_done,ispec_is_outer
+  integer,dimension(NGLOB) :: work_test_flag
+  logical,dimension(NSPEC) :: work_ispec_is_outer
+
+  ! index of the mid-point of a GLL edge (used to probe the interior of a face/edge)
+  integer,parameter :: MID = (NGLLX+1)/2
+
+  ! initializes
+  if( add_central_cube) then
+    ! adds points to existing inner_core interfaces
+    iinterface = num_interfaces
+    work_ispec_is_outer(:) = is_on_a_slice_edge(:)
+  else
+    ! creates new interfaces
+    iinterface = 0
+    num_interfaces = 0
+    max_nibool_interfaces = 0
+    my_neighbours(:) = -1
+    nibool_neighbours(:) = 0
+    ibool_neighbours(:,:) = 0
+    work_ispec_is_outer(:) = .false.
+  endif
+
+  ! makes working copy (converted to nearest integers)
+  work_test_flag(:) = nint( test_flag(:) )
+
+  ! loops over all elements
+  do ispec = 1,NSPEC
+
+    ! exclude elements in inner part of slice
+    !if( .not. is_on_a_slice_edge(ispec) ) cycle
+
+    ! exclude elements in fictitious core
+    if( IREGION == IREGION_INNER_CORE) then
+      if( idoubling(ispec) == IFLAG_IN_FICTITIOUS_CUBE ) cycle
+    endif
+
+    ! sets flag if element has global points shared with other processes
+    ispec_is_outer = .false.
+
+    ! 1. finds neighbours which share a whole face with this process
+    ! (faces are shared only with 1 other neighbour process)
+
+    ! loops over all faces of element
+    do iface = 1, 6
+
+      ! chooses a point inside face
+      select case( iface )
+      case( 1 )
+        ! face I == 1
+        iglob = ibool(1,MID,MID,ispec)
+      case( 2 )
+        ! face I == NGLLX
+        iglob = ibool(NGLLX,MID,MID,ispec)
+      case( 3 )
+        ! face J == 1
+        iglob = ibool(MID,1,MID,ispec)
+      case( 4 )
+        ! face J == NGLLY
+        iglob = ibool(MID,NGLLY,MID,ispec)
+      case( 5 )
+        ! face K == 1
+        iglob = ibool(MID,MID,1,ispec)
+      case( 6 )
+        ! face K == NGLLZ
+        iglob = ibool(MID,MID,NGLLZ,ispec)
+      end select
+
+      ! checks assembled flag on global point
+      if( work_test_flag(iglob) > 0 ) then
+        ispec_is_outer = .true.
+
+        ! rank of neighbor process
+        rank = work_test_flag(iglob) - 1
+
+        ! checks ranks range
+        if( rank < 0 .or. rank >= NPROCTOT ) then
+          print*,'error face rank: ',myrank,'ispec=',ispec
+          print*,'  neighbor rank = ',rank,'exceeds total nproc:',NPROCTOT
+          print*,'  face ',iface
+          call exit_mpi(myrank,'error face neighbor mpi rank')
+        endif
+
+        ! checks if already stored
+        icurrent = 0
+        is_done = .false.
+        do ii = 1,iinterface
+          if( rank == my_neighbours(ii) ) then
+            icurrent = ii
+            is_done = .true.
+            exit
+          endif
+        enddo
+
+        ! updates interfaces array
+        if( .not. is_done ) then
+          iinterface = iinterface + 1
+          if( iinterface > MAX_NEIGHBOURS ) then
+            print*,'error interfaces rank:',myrank,'iinterface = ',iinterface,MAX_NEIGHBOURS
+            call exit_mpi(myrank,'interface face exceeds MAX_NEIGHBOURS range')
+          endif
+          ! adds as neighbor new interface
+          my_neighbours(iinterface) = rank
+          icurrent = iinterface
+        endif
+        if( icurrent == 0 ) &
+          call exit_mpi(myrank,'could not find current interface for this neighbor, please check my_neighbours')
+
+        ! adds interface points and removes neighbor flag from face
+        ! assumes NGLLX == NGLLY == NGLLZ
+        do k=1,NGLLX
+          do j=1,NGLLX
+            select case( iface )
+            case( 1 )
+              ! face I == 1
+              iglob = ibool(1,j,k,ispec)
+            case( 2 )
+              ! face I == NGLLX
+              iglob = ibool(NGLLX,j,k,ispec)
+            case( 3 )
+              ! face J == 1
+              iglob = ibool(j,1,k,ispec)
+            case( 4 )
+              ! face J == NGLLY
+              iglob = ibool(j,NGLLY,k,ispec)
+            case( 5 )
+              ! face K == 1
+              iglob = ibool(j,k,1,ispec)
+            case( 6 )
+              ! face K == NGLLZ
+              iglob = ibool(j,k,NGLLZ,ispec)
+            end select
+
+            ! checks that we take each global point (on edges and corners) only once
+            call add_interface_point(iglob,rank,icurrent, &
+                                        nibool_neighbours,MAX_NEIGHBOURS, &
+                                        ibool_neighbours,max_nibool, &
+                                        work_test_flag,NGLOB,myrank, &
+                                        .true.,add_central_cube)
+            ! debug
+            if( work_test_flag(iglob) < 0 ) then
+              if( IREGION == IREGION_INNER_CORE .and. INCLUDE_CENTRAL_CUBE ) then
+                ! we might have missed an interface point on an edge, just re-set to missing value
+                print*,'warning face flag:',myrank,'ispec=',ispec,'rank=',rank
+                print*,'  flag=',work_test_flag(iglob),'iface jk=',iface,j,k,'missed iglob=',iglob
+                !work_test_flag(iglob) = 0
+              else
+                print*,'error face flag:',myrank,'ispec=',ispec,'rank=',rank
+                print*,'  flag=',work_test_flag(iglob),'iface jk=',iface,j,k,'iglob=',iglob
+                call exit_mpi(myrank,'error face flag')
+              endif
+            endif
+
+          enddo
+        enddo
+      endif
+    enddo ! iface
+
+    ! 2. finds neighbours which share a single edge with this process
+    ! note: by now, faces have subtracted their neighbours, edges can hold only one more process info
+
+    ! loops over all edges of element
+    do iedge = 1, 12
+
+      ! chooses a point inside edge but not corner
+      select case( iedge )
+      case( 1 )
+        ! face I == 1, J == 1
+        iglob = ibool(1,1,MID,ispec)
+      case( 2 )
+        ! face I == 1, J == NGLLY
+        iglob = ibool(1,NGLLY,MID,ispec)
+      case( 3 )
+        ! face I == 1, K == 1
+        iglob = ibool(1,MID,1,ispec)
+      case( 4 )
+        ! face I == 1, K == NGLLZ
+        iglob = ibool(1,MID,NGLLZ,ispec)
+      case( 5 )
+        ! face I == NGLLX, J == 1
+        iglob = ibool(NGLLX,1,MID,ispec)
+      case( 6 )
+        ! face I == NGLLX, J == NGLLY
+        iglob = ibool(NGLLX,NGLLY,MID,ispec)
+      case( 7 )
+        ! face I == NGLLX, K == 1
+        iglob = ibool(NGLLX,MID,1,ispec)
+      case( 8 )
+        ! face I == NGLLX, K == NGLLZ
+        iglob = ibool(NGLLX,MID,NGLLZ,ispec)
+      case( 9 )
+        ! face J == 1, K == 1
+        iglob = ibool(MID,1,1,ispec)
+      case( 10 )
+        ! face J == 1, K == NGLLZ
+        iglob = ibool(MID,1,NGLLZ,ispec)
+      case( 11 )
+        ! face J == NGLLY, K == 1
+        iglob = ibool(MID,NGLLY,1,ispec)
+      case( 12 )
+        ! face J == NGLLY, K == NGLLZ
+        iglob = ibool(MID,NGLLY,NGLLZ,ispec)
+      end select
+
+      ! checks assembled flag on global point
+      if( work_test_flag(iglob) > 0 ) then
+        ispec_is_outer = .true.
+
+        ! rank of neighbor process
+        rank = work_test_flag(iglob) - 1
+
+        ! checks ranks range
+        if( rank < 0 .or. rank >= NPROCTOT ) then
+          print*,'error edge rank: ',myrank
+          print*,'  neighbor rank = ',rank,'exceeds total nproc:',NPROCTOT
+          print*,'  edge ',iedge
+          call exit_mpi(myrank,'error edge neighbor mpi rank')
+        endif
+
+        ! checks if already stored
+        icurrent = 0
+        is_done = .false.
+        do ii = 1,iinterface
+          if( rank == my_neighbours(ii) ) then
+            icurrent = ii
+            is_done = .true.
+            exit
+          endif
+        enddo
+
+        ! updates interfaces array
+        if( .not. is_done ) then
+          iinterface = iinterface + 1
+          if( iinterface > MAX_NEIGHBOURS ) then
+            print*,'error interfaces rank:',myrank,'iinterface = ',iinterface,MAX_NEIGHBOURS
+            call exit_mpi(myrank,'interface edge exceeds MAX_NEIGHBOURS range')
+          endif
+          ! adds as neighbor new interface
+          my_neighbours(iinterface) = rank
+          icurrent = iinterface
+        endif
+        if( icurrent == 0 ) &
+          call exit_mpi(myrank,'could not find current interface for this neighbor, please check my_neighbours')
+
+        ! adds interface points and removes neighbor flag from edge
+        ! assumes NGLLX == NGLLY == NGLLZ
+        do k = 1,NGLLX
+          select case( iedge )
+          case( 1 )
+            ! face I == 1, J == 1
+            iglob = ibool(1,1,k,ispec)
+          case( 2 )
+            ! face I == 1, J == NGLLY
+            iglob = ibool(1,NGLLY,k,ispec)
+          case( 3 )
+            ! face I == 1, K == 1
+            iglob = ibool(1,k,1,ispec)
+          case( 4 )
+            ! face I == 1, K == NGLLZ
+            iglob = ibool(1,k,NGLLZ,ispec)
+          case( 5 )
+            ! face I == NGLLX, J == 1
+            iglob = ibool(NGLLX,1,k,ispec)
+          case( 6 )
+            ! face I == NGLLX, J == NGLLY
+            iglob = ibool(NGLLX,NGLLY,k,ispec)
+          case( 7 )
+            ! face I == NGLLX, K == 1
+            iglob = ibool(NGLLX,k,1,ispec)
+          case( 8 )
+            ! face I == NGLLX, K == NGLLZ
+            iglob = ibool(NGLLX,k,NGLLZ,ispec)
+          case( 9 )
+            ! face J == 1, K == 1
+            iglob = ibool(k,1,1,ispec)
+          case( 10 )
+            ! face J == 1, K == NGLLZ
+            iglob = ibool(k,1,NGLLZ,ispec)
+          case( 11 )
+            ! face J == NGLLY, K == 1
+            iglob = ibool(k,NGLLY,1,ispec)
+          case( 12 )
+            ! face J == NGLLY, K == NGLLZ
+            iglob = ibool(k,NGLLY,NGLLZ,ispec)
+          end select
+
+          ! checks that we take each global point (on edges and corners) only once
+          call add_interface_point(iglob,rank,icurrent, &
+                                        nibool_neighbours,MAX_NEIGHBOURS, &
+                                        ibool_neighbours,max_nibool, &
+                                        work_test_flag,NGLOB,myrank, &
+                                        .true.,add_central_cube)
+
+          ! debug
+          if( work_test_flag(iglob) < 0 ) then
+            if( IREGION == IREGION_INNER_CORE .and. INCLUDE_CENTRAL_CUBE ) then
+              ! we might have missed an interface point on an edge, just re-set to missing value
+              print*,'warning edge flag:',myrank,'ispec=',ispec,'rank=',rank
+              print*,'  flag=',work_test_flag(iglob),'iedge jk=',iedge,k,'missed iglob=',iglob
+              !work_test_flag(iglob) = 0
+            else
+              print*,'error edge flag:',myrank,'ispec=',ispec,'rank=',rank
+              print*,'  flag=',work_test_flag(iglob),'iedge jk=',iedge,k,'iglob=',iglob
+              call exit_mpi(myrank,'error edge flag')
+            endif
+          endif
+
+        enddo
+      endif
+    enddo ! iedge
+
+
+    ! 3. finds neighbours which share a single corner with this process
+    ! note: faces and edges have subtracted their neighbors, only one more process left possible
+
+    ! loops over all corners of element
+    do icorner = 1, 8
+
+      ! chooses a corner point
+      select case( icorner )
+      case( 1 )
+        ! face I == 1
+        iglob = ibool(1,1,1,ispec)
+      case( 2 )
+        ! face I == 1
+        iglob = ibool(1,NGLLY,1,ispec)
+      case( 3 )
+        ! face I == 1
+        iglob = ibool(1,1,NGLLZ,ispec)
+      case( 4 )
+        ! face I == 1
+        iglob = ibool(1,NGLLY,NGLLZ,ispec)
+      case( 5 )
+        ! face I == NGLLX
+        iglob = ibool(NGLLX,1,1,ispec)
+      case( 6 )
+        ! face I == NGLLX
+        iglob = ibool(NGLLX,NGLLY,1,ispec)
+      case( 7 )
+        ! face I == NGLLX
+        iglob = ibool(NGLLX,1,NGLLZ,ispec)
+      case( 8 )
+        ! face I == NGLLX
+        iglob = ibool(NGLLX,NGLLY,NGLLZ,ispec)
+      end select
+
+      ! makes sure that all elements on mpi interfaces are included
+      ! uses original test_flag array, since the working copy reduces values
+      ! note: there can be elements which have an edge or corner shared with
+      !          other mpi partitions, but have the work_test_flag value already set to zero
+      !          since the iglob point was found before.
+      !          also, this check here would suffice to determine the outer flag, but we also include the
+      !          check everywhere we encounter it too
+      if( test_flag(iglob) > 0.5 ) then
+        ispec_is_outer = .true.
+      endif
+
+      ! checks assembled flag on global point
+      if( work_test_flag(iglob) > 0 ) then
+        ispec_is_outer = .true.
+
+        ! rank of neighbor process
+        rank = work_test_flag(iglob) - 1
+
+        ! checks ranks range
+        if( rank < 0 .or. rank >= NPROCTOT ) then
+          print*,'error corner: ',myrank
+          print*,'  neighbor rank = ',rank,'exceeds total nproc:',NPROCTOT
+          print*,'  corner ',icorner
+          call exit_mpi(myrank,'error corner neighbor mpi rank')
+        endif
+
+        ! checks if already stored
+        icurrent = 0
+        is_done = .false.
+        do ii = 1,iinterface
+          if( rank == my_neighbours(ii) ) then
+            icurrent = ii
+            is_done = .true.
+            exit
+          endif
+        enddo
+
+        ! updates interfaces array
+        if( .not. is_done ) then
+          iinterface = iinterface + 1
+          if( iinterface > MAX_NEIGHBOURS ) then
+            print*,'error interfaces rank:',myrank,'iinterface = ',iinterface,MAX_NEIGHBOURS
+            call exit_mpi(myrank,'interface corner exceeds MAX_NEIGHBOURS range')
+          endif
+          ! adds as neighbor new interface
+          my_neighbours(iinterface) = rank
+          icurrent = iinterface
+        endif
+        if( icurrent == 0 ) &
+          call exit_mpi(myrank,'could not find current interface for this neighbor, please check my_neighbours')
+
+        ! adds this corner as interface point and removes neighbor flag from face,
+        ! checks that we take each global point (on edges and corners) only once
+        call add_interface_point(iglob,rank,icurrent, &
+                                    nibool_neighbours,MAX_NEIGHBOURS, &
+                                    ibool_neighbours,max_nibool, &
+                                    work_test_flag,NGLOB,myrank, &
+                                    .false.,add_central_cube)
+
+        ! debug
+        if( work_test_flag(iglob) < 0 ) call exit_mpi(myrank,'error corner flag')
+
+      endif
+
+    enddo ! icorner
+
+    ! stores flags for outer elements when recognized as such
+    ! (inner/outer elements separated for non-blocking mpi communications)
+    if( ispec_is_outer ) then
+      work_ispec_is_outer(ispec) = .true.
+    endif
+
+  enddo
+
+  ! number of outer elements (on MPI interfaces)
+  npoin = count( work_ispec_is_outer )
+
+  ! debug: user output
+  if( add_central_cube ) then
+    print*, 'rank',myrank,'interfaces : ',iinterface
+    do j=1,iinterface
+      print*, '  my_neighbours: ',my_neighbours(j),nibool_neighbours(j)
+    enddo
+    print*, '  test flag min/max: ',minval(work_test_flag),maxval(work_test_flag)
+    print*, '  outer elements: ',npoin
+    print*
+  endif
+
+  ! checks if all points were recognized
+  ! (every neighbour contribution must have been subtracted, leaving exactly zero)
+  if( minval(work_test_flag) < 0 .or. maxval(work_test_flag) > 0 ) then
+    print*,'error mpi interface rank: ',myrank
+    print*,'  work_test_flag min/max :',minval(work_test_flag),maxval(work_test_flag)
+    call exit_mpi(myrank,'error: mpi points remain unrecognized, please check mesh interfaces')
+  endif
+
+  ! sets interfaces infos
+  num_interfaces = iinterface
+  max_nibool_interfaces = maxval( nibool_neighbours(1:num_interfaces) )
+
+  ! checks if unique set of neighbours
+  do ii = 1,num_interfaces-1
+    rank = my_neighbours(ii)
+    do j = ii+1,num_interfaces
+      if( rank == my_neighbours(j) ) then
+        print*,'test MPI: rank ',myrank,'my_neighbours:',rank,my_neighbours(j),'interfaces:',ii,j
+        call exit_mpi(myrank,'error test my_neighbours not unique')
+      endif
+    enddo
+  enddo
+
+  ! sorts buffers obtained to be conforming with neighbors in other slices
+  do iinterface = 1,num_interfaces
+    ! sorts ibool values in increasing order
+    ! used to check if we have duplicates in array
+    npoin = nibool_neighbours(iinterface)
+    call heap_sort( npoin, ibool_neighbours(1:npoin,iinterface) )
+
+    ! checks if unique set of iglob values
+    do j=1,npoin-1
+      if( ibool_neighbours(j,iinterface) == ibool_neighbours(j+1,iinterface) ) then
+        if( IREGION == IREGION_INNER_CORE .and. INCLUDE_CENTRAL_CUBE ) then
+          ! missing points might have been counted more than once
+          if( ibool_neighbours(j,iinterface) > 0 ) then
+            print*,'warning mpi interface rank:',myrank
+            print*,'  interface: ',my_neighbours(iinterface),'point: ',j,'of',npoin,'iglob=',ibool_neighbours(j,iinterface)
+            ! decrease number of points
+            nibool_neighbours(iinterface) = nibool_neighbours(iinterface) - 1
+            if( nibool_neighbours(iinterface) <= 0 ) then
+              print*,'error zero mpi interface rank:',myrank,'interface=',my_neighbours(iinterface)
+              call exit_mpi(myrank,'error: zero mpi points on interface')
+            endif
+            ! shift values
+            do k = j+1,npoin-1
+              ii = ibool_neighbours(k+1,iinterface)
+              ibool_neighbours(k,iinterface) = ii
+            enddo
+            ! re-sets values
+            ibool_neighbours(npoin,iinterface) = 0
+            npoin = nibool_neighbours(iinterface)
+            max_nibool_interfaces = maxval( nibool_neighbours(1:num_interfaces) )
+          endif
+        else
+          print*,'error mpi interface rank:',myrank
+          print*,'  interface: ',my_neighbours(iinterface),'point: ',j,'of',npoin,'iglob=',ibool_neighbours(j,iinterface)
+          call exit_mpi(myrank,'error: mpi points not unique on interface')
+        endif
+      endif
+    enddo
+
+    ! sort buffer obtained to be conforming with neighbor in other chunk
+    npoin = nibool_neighbours(iinterface)
+    call sort_MPI_interface(myrank,npoin,ibool_neighbours(1:npoin,iinterface), &
+                                NGLOB,xstore,ystore,zstore)
+
+  enddo
+
+  ! re-sets flags for outer elements
+  is_on_a_slice_edge(:) = work_ispec_is_outer(:)
+
+  end subroutine get_MPI_interfaces
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  subroutine sort_MPI_interface(myrank,npoin,ibool_n, &
+                                    NGLOB,xstore,ystore,zstore)
+
+  use constants,only: CUSTOM_REAL,SIZE_REAL
+
+  implicit none
+
+  integer,intent(in) :: myrank,npoin
+  integer,dimension(npoin),intent(inout) :: ibool_n
+
+  integer,intent(in) :: NGLOB
+  real(kind=CUSTOM_REAL), dimension(NGLOB) :: xstore,ystore,zstore
+
+  ! local parameters
+  ! scratch arrays handed to the coordinate sorting routine
+  double precision, dimension(:), allocatable :: dwork
+  double precision, dimension(:), allocatable :: xsel,ysel,zsel
+  integer, dimension(:), allocatable :: ibool_sel
+  integer, dimension(:), allocatable :: ind,ninseg,iglob,locval,iwork
+  logical, dimension(:), allocatable :: ifseg
+  integer :: nglob_sel,ip,iglob_cur,ier
+
+  ! all scratch arrays have the size of this interface buffer
+  allocate(ibool_sel(npoin), &
+          xsel(npoin), &
+          ysel(npoin), &
+          zsel(npoin), &
+          ind(npoin), &
+          ninseg(npoin), &
+          iglob(npoin), &
+          locval(npoin), &
+          ifseg(npoin), &
+          iwork(npoin), &
+          dwork(npoin),stat=ier)
+  if( ier /= 0 ) call exit_MPI(myrank,'error sort MPI interface: allocating temporary sorting arrays')
+
+  ! copy the interface points and their coordinates into the scratch arrays
+  do ip = 1,npoin
+    iglob_cur = ibool_n(ip)
+
+    ibool_sel(ip) = iglob_cur
+
+    if( CUSTOM_REAL == SIZE_REAL ) then
+      ! single precision mesh coordinates: promote to double for sorting
+      xsel(ip) = dble(xstore(iglob_cur))
+      ysel(ip) = dble(ystore(iglob_cur))
+      zsel(ip) = dble(zstore(iglob_cur))
+    else
+      xsel(ip) = xstore(iglob_cur)
+      ysel(ip) = ystore(iglob_cur)
+      zsel(ip) = zstore(iglob_cur)
+    endif
+  enddo
+
+  ! sort on x, y and z so that the buffer ordering matches the neighbor in the
+  ! other chunk; ibool_sel gets swapped along with the coordinate arrays
+  call sort_array_coordinates(npoin,xsel,ysel,zsel, &
+                             ibool_sel,iglob,locval,ifseg,nglob_sel, &
+                             ind,ninseg,iwork,dwork)
+
+  ! points on an interface are unique by construction, so a duplicate found
+  ! by the sorting routine means the buffer is broken
+  if(nglob_sel /= npoin) call exit_MPI(myrank,'error sort MPI interface: duplicates detected in buffer')
+
+  ! copies the new ibool ordering back into the interface buffer
+  ibool_n(1:npoin) = ibool_sel(1:npoin)
+
+  ! frees array memory
+  deallocate(ibool_sel,xsel,ysel,zsel, &
+            ind,ninseg,iglob,locval,ifseg,iwork,dwork)
+
+  end subroutine sort_MPI_interface
+
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  subroutine add_interface_point(iglob,rank,icurrent, &
+                                    nibool_neighbours,MAX_NEIGHBOURS, &
+                                    ibool_neighbours,max_nibool, &
+                                    work_test_flag,NGLOB,myrank, &
+                                    is_face_edge,add_central_cube)
+
+
+  implicit none
+
+  integer,intent(in) :: iglob,rank,icurrent
+  integer,intent(in) :: myrank
+
+  integer,intent(in) :: MAX_NEIGHBOURS,max_nibool
+  integer, dimension(MAX_NEIGHBOURS),intent(inout) :: nibool_neighbours
+  integer, dimension(max_nibool,MAX_NEIGHBOURS),intent(inout) :: ibool_neighbours
+
+  integer,intent(in) :: NGLOB
+  integer,dimension(NGLOB) :: work_test_flag
+
+  logical,intent(in) :: is_face_edge,add_central_cube
+
+  ! local parameters
+  logical :: already_stored
+
+  ! a global point on an edge or a corner is shared by several element faces,
+  ! so it may have been stored on this interface before; take it only once
+  already_stored = any( ibool_neighbours(1:nibool_neighbours(icurrent),icurrent) == iglob )
+
+  if( already_stored ) then
+    ! special handling for central cube: removes rank if already added in inner core
+    if( add_central_cube ) then
+      if( is_face_edge .and. work_test_flag(iglob) < (rank + 1) ) then
+        ! re-sets if we missed this rank number
+        work_test_flag(iglob) = work_test_flag(iglob) + (rank + 1)
+      endif
+      ! re-sets flag
+      work_test_flag(iglob) = work_test_flag(iglob) - ( rank + 1 )
+      if( is_face_edge .and. work_test_flag(iglob) < 0 ) then
+        ! re-sets to zero if we missed this rank number
+        if( work_test_flag(iglob) == - (rank + 1 ) ) work_test_flag(iglob) = 0
+      endif
+    endif
+    ! nothing to add, point already on this interface
+    return
+  endif
+
+  ! the flag should still carry this neighbour's rank contribution; warn if not
+  if( work_test_flag(iglob) <= 0 ) then
+    ! we might have missed an interface point on an edge, just re-set to missing value
+    print*,'warning ',myrank,' flag: missed rank=',rank
+    print*,'  flag=',work_test_flag(iglob),'missed iglob=',iglob,'interface=',icurrent
+    print*
+  endif
+  if( is_face_edge ) then
+    ! we might have missed an interface point on an edge, just re-set to missing value
+    if( work_test_flag(iglob) < (rank + 1) ) then
+      ! re-sets if we missed this rank number
+      work_test_flag(iglob) = work_test_flag(iglob) + (rank + 1)
+    endif
+  endif
+
+  ! adds point: increases number of total points on this interface
+  nibool_neighbours(icurrent) = nibool_neighbours(icurrent) + 1
+  if( nibool_neighbours(icurrent) > max_nibool) &
+      call exit_mpi(myrank,'interface face exceeds max_nibool range')
+
+  ! stores interface iglob index
+  ibool_neighbours( nibool_neighbours(icurrent),icurrent ) = iglob
+
+  ! subtracts this neighbour's contribution from the flag
+  work_test_flag(iglob) = work_test_flag(iglob) - ( rank + 1 )
+
+  ! checks
+  if( is_face_edge .and. work_test_flag(iglob) < 0 ) then
+    ! re-sets to zero if we missed this rank number
+    if( work_test_flag(iglob) == - (rank + 1 ) ) work_test_flag(iglob) = 0
+  endif
+
+  end subroutine add_interface_point
+
+

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/get_absorb_adios.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/get_absorb_adios.f90	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/get_absorb_adios.f90	2013-07-01 01:33:15 UTC (rev 22469)
@@ -0,0 +1,161 @@
+!=====================================================================
+!
+!          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
+!          --------------------------------------------------
+!
+!          Main authors: Dimitri Komatitsch and Jeroen Tromp
+!                        Princeton University, USA
+!             and University of Pau / CNRS / INRIA, France
+! (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
+!                            April 2011
+!
+! This program is free software; you can redistribute it and/or modify
+! it under the terms of the GNU General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or
+! (at your option) any later version.
+!
+! This program is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+! GNU General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License along
+! with this program; if not, write to the Free Software Foundation, Inc.,
+! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+!
+!=====================================================================
+
+!-------------------------------------------------------------------------------
+!> \file get_absorb_adios.f90
+!! \brief Function to write stacey boundary condition to disk with ADIOS.
+!! \author MPBL      
+!-------------------------------------------------------------------------------
+
+!===============================================================================
+!> \brief Write stacey boundary conditions to a single file using ADIOS 
+!!
+!! \param myrank The MPI rank of the current process
+!! \param iregion The region the absorbing condition is written for. Check
+!!                the constants.h file to see what these regions are.
+!! \param nimin An array to be written
+!! \param nimax An array to be written
+!! \param njmin An array to be written
+!! \param njmax An array to be written
+!! \param nkmin_xi An array to be written
+!! \param nkmin_eta An array to be written
+!! \param NSPEC2DMAX_XMIN_XMAX Integer to compute the size of the arrays
+!!                             in argument
+!! \param NSPEC2DMAX_YMIN_YMAX Integer to compute the size of the arrays
+!!                             in argument
+!! 
+!! \note This routine only calls ADIOS to write the file to disk. Note that the
+!!       necessary data preparation is done by the get_absorb() routine.
+subroutine get_absorb_adios(myrank, iregion, nimin, nimax, njmin, njmax, &
+  nkmin_xi, nkmin_eta, NSPEC2DMAX_XMIN_XMAX,NSPEC2DMAX_YMIN_YMAX)
+
+! writes the Stacey absorbing-boundary index arrays of one mesh region
+! into a single ADIOS file LOCAL_PATH/regX_stacey.bp
+
+  use mpi
+  use adios_write_mod
+  use meshfem3D_par, only: LOCAL_PATH
+
+  ! Stacey, define flags for absorbing boundaries
+  implicit none
+
+  include "constants.h"
+
+  integer :: myrank
+  integer :: NSPEC2DMAX_XMIN_XMAX,NSPEC2DMAX_YMIN_YMAX
+
+  ! absorbing-boundary index arrays prepared by get_absorb(); written as-is
+  integer,dimension(2,NSPEC2DMAX_YMIN_YMAX) :: nimin,nimax
+  integer,dimension(2,NSPEC2DMAX_XMIN_XMAX) :: njmin,njmax
+  integer,dimension(2,NSPEC2DMAX_XMIN_XMAX) :: nkmin_xi
+  integer,dimension(2,NSPEC2DMAX_YMIN_YMAX) :: nkmin_eta
+
+  character(len=150) :: reg_name, outputname, group_name
+  integer :: sizeprocs, comm, local_dim, ierr, iregion
+  integer(kind=8) :: group_size_inc
+  ! ADIOS variables
+  ! NOTE(review): varid and adios_groupsize appear unused in this routine -- confirm before removing
+  integer                 :: adios_err
+  integer(kind=8)         :: adios_group, adios_handle, varid
+  integer(kind=8)         :: adios_groupsize, adios_totalsize
+
+  ! create a prefix for the file name such as LOCAL_PATH/regX_
+  call create_name_database_adios(reg_name,iregion,LOCAL_PATH)
+
+  ! append the actual file name
+  outputname = trim(reg_name) // "stacey.bp" 
+
+  ! save these temporary arrays for the solver for Stacey conditions
+  write(group_name,"('SPECFEM3D_GLOBE_STACEY_reg',i1)") iregion
+  call world_size(sizeprocs) ! TODO keep it in parameters
+  ! Alias COMM_WORLD to use ADIOS
+  call MPI_Comm_dup (MPI_COMM_WORLD, comm, ierr)
+  ! set the adios group size to 0 before it is incremented by calls to
+  ! the helper functions below.
+  group_size_inc = 0
+  call adios_declare_group(adios_group, group_name, &
+      "", 0, adios_err)
+  ! We set the transport method to 'MPI'. This seems to be the correct choice
+  ! for now. We might want to move this to the constants.h file later on.
+  call adios_select_method(adios_group, "MPI", "", "", adios_err)
+
+  !--- Define ADIOS variables -----------------------------
+  ! variables must be declared (with their sizes) before the file is opened
+  local_dim = 2*NSPEC2DMAX_XMIN_XMAX 
+  call define_adios_global_integer_1d_array(adios_group, "njmin", &
+      local_dim, group_size_inc)
+  call define_adios_global_integer_1d_array(adios_group, "njmax", &
+      local_dim, group_size_inc)
+  call define_adios_global_integer_1d_array(adios_group, "nkmin_xi", &
+      local_dim, group_size_inc)
+  local_dim = 2*NSPEC2DMAX_YMIN_YMAX
+  call define_adios_global_integer_1d_array(adios_group, "nimin", &
+      local_dim, group_size_inc)
+  call define_adios_global_integer_1d_array(adios_group, "nimax", &
+      local_dim, group_size_inc)
+  call define_adios_global_integer_1d_array(adios_group, "nkmin_eta", &
+      local_dim, group_size_inc)
+
+  !--- Open an ADIOS handle on the output file. ---------
+  call adios_open (adios_handle, group_name, &
+      outputname, "w", comm, adios_err);
+  call adios_group_size (adios_handle, group_size_inc, &
+                         adios_totalsize, adios_err)
+
+  !--- Schedule writes for the previously defined ADIOS variables
+  ! each array is written under its own path with its per-process dimensions
+  local_dim = 2*NSPEC2DMAX_XMIN_XMAX 
+  call adios_set_path (adios_handle, "njmin", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", njmin, adios_err)
+
+  call adios_set_path (adios_handle, "njmax", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", njmax, adios_err)
+
+  call adios_set_path (adios_handle, "nkmin_xi", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", nkmin_xi, adios_err)
+
+  local_dim = 2*NSPEC2DMAX_YMIN_YMAX 
+  call adios_set_path (adios_handle, "nimin", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", nimin, adios_err)
+
+  call adios_set_path (adios_handle, "nimax", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", nimax, adios_err)
+
+  call adios_set_path (adios_handle, "nkmin_eta", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", nkmin_eta, adios_err)
+
+  !--- Reset the path to zero and perform the actual write to disk
+  call adios_set_path (adios_handle, "", adios_err)
+  call adios_close(adios_handle, adios_err)
+
+end subroutine get_absorb_adios
+

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/heap_sort.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/heap_sort.f90	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/heap_sort.f90	2013-07-01 01:33:15 UTC (rev 22469)
@@ -0,0 +1,98 @@
+!=====================================================================
+!
+!          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
+!          --------------------------------------------------
+!
+!          Main authors: Dimitri Komatitsch and Jeroen Tromp
+!                        Princeton University, USA
+!             and University of Pau / CNRS / INRIA, France
+! (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
+!                            April 2011
+!
+! This program is free software; you can redistribute it and/or modify
+! it under the terms of the GNU General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or
+! (at your option) any later version.
+!
+! This program is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+! GNU General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License along
+! with this program; if not, write to the Free Software Foundation, Inc.,
+! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+!
+!=====================================================================
+
+
+  subroutine heap_sort( N, array )
+
+! heap sort algorithm: in-place, O(N log N), not stable
+! sorts integer array (in increasing order, like 1 - 5 - 6 - 9 - 12 - 13 - 14 -...)
+
+  implicit none
+  integer,intent(in) :: N
+  integer,dimension(N),intent(inout) :: array
+
+  ! local parameters
+  integer :: tmp
+  integer :: i
+
+  ! checks if anything to do
+  if( N < 2 ) return
+
+  ! builds heap: sift down all internal nodes so array(1) holds the largest value
+  do i = N/2, 1, -1
+    call heap_sort_siftdown(N,array,i,N)
+  enddo
+
+  ! sorts array: repeatedly move the current maximum to the end and shrink the heap
+  do i = N, 2, -1
+    ! swaps last and first entry in this section
+    tmp = array(1)
+    array(1) = array(i)
+    array(i) = tmp
+    ! restore the heap property on the remaining i-1 elements
+    call heap_sort_siftdown(N,array,1,i-1)
+  enddo
+
+  end subroutine heap_sort
+
+!
+!----
+!
+
+  subroutine heap_sort_siftdown(N,array,start,bottom)
+
+! restores the max-heap property in array(start:bottom) by sifting the
+! value at position start down to its correct place (children of node i
+! are nodes 2*i and 2*i+1)
+
+  implicit none
+
+  integer,intent(in):: N
+  integer,dimension(N),intent(inout) :: array
+  ! NOTE(review): start and bottom carry no intent attribute -- presumably intent(in); confirm
+  integer :: start,bottom
+
+  ! local parameters
+  integer :: i,j
+  integer :: tmp
+
+  i = start
+  tmp = array(i)
+  j = 2*i
+  do while( j <= bottom )
+    ! chooses larger value first in this section
+    if( j < bottom ) then
+      if( array(j) <= array(j+1) ) j = j + 1
+    endif
+
+    ! checks if section already smaller than initial value
+    if( array(j) < tmp ) exit
+
+    ! promote the larger child and continue sifting down
+    array(i) = array(j)
+    i = j
+    j = 2*i
+  enddo
+
+  array(i) = tmp
+  return
+
+  end subroutine heap_sort_siftdown
+

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/initialize_layers.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/initialize_layers.f90	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/initialize_layers.f90	2013-07-01 01:33:15 UTC (rev 22469)
@@ -0,0 +1,176 @@
+!=====================================================================
+!
+!          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
+!          --------------------------------------------------
+!
+!          Main authors: Dimitri Komatitsch and Jeroen Tromp
+!                        Princeton University, USA
+!             and University of Pau / CNRS / INRIA, France
+! (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
+!                            April 2011
+!
+! This program is free software; you can redistribute it and/or modify
+! it under the terms of the GNU General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or
+! (at your option) any later version.
+!
+! This program is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+! GNU General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License along
+! with this program; if not, write to the Free Software Foundation, Inc.,
+! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+!
+!=====================================================================
+
+
+  subroutine initialize_layers(myrank,ipass,xigll,yigll,zigll,wxgll,wygll,wzgll, &
+                        shape3D,dershape3D,shape2D_x,shape2D_y,shape2D_bottom,shape2D_top, &
+                        dershape2D_x,dershape2D_y,dershape2D_bottom,dershape2D_top, &
+                        iaddx,iaddy,iaddz,nspec,xstore,ystore,zstore,ibool,idoubling, &
+                        iboun,iMPIcut_xi,iMPIcut_eta,ispec2D_moho_top,ispec2D_moho_bot, &
+                        ispec2D_400_top,ispec2D_400_bot,ispec2D_670_top,ispec2D_670_bot, &
+                        NEX_PER_PROC_ETA,nex_eta_moho,RMOHO,R400,R670,r_moho,r_400,r_670, &
+                        ONE_CRUST,NUMBER_OF_MESH_LAYERS,layer_shift, &
+                        iregion_code,ifirst_region,ilast_region, &
+                        first_layer_aniso,last_layer_aniso,nb_layer_above_aniso,is_on_a_slice_edge)
+
+! initializes GLL points, shape functions, layer bookkeeping and mesh arrays
+! before one region of the mesh is created
+
+  implicit none
+
+  include "constants.h"
+
+  integer :: myrank,ipass
+
+  ! Gauss-Lobatto-Legendre points and weights of integration (filled here)
+  double precision xigll(NGLLX),yigll(NGLLY),zigll(NGLLZ)
+  double precision wxgll(NGLLX),wygll(NGLLY),wzgll(NGLLZ)
+
+  ! 3D shape functions and their derivatives (filled here)
+  double precision shape3D(NGNOD,NGLLX,NGLLY,NGLLZ),dershape3D(NDIM,NGNOD,NGLLX,NGLLY,NGLLZ)
+
+  ! 2D shape functions and their derivatives for each element face (filled here)
+  double precision shape2D_x(NGNOD2D,NGLLY,NGLLZ),shape2D_y(NGNOD2D,NGLLX,NGLLZ)
+  double precision shape2D_bottom(NGNOD2D,NGLLX,NGLLY),shape2D_top(NGNOD2D,NGLLX,NGLLY)
+  double precision dershape2D_x(NDIM2D,NGNOD2D,NGLLY,NGLLZ),dershape2D_y(NDIM2D,NGNOD2D,NGLLX,NGLLZ)
+  double precision dershape2D_bottom(NDIM2D,NGNOD2D,NGLLX,NGLLY),dershape2D_top(NDIM2D,NGNOD2D,NGLLX,NGLLY)
+
+  ! topology of the corner nodes of a reference element (filled here, rescaled to size one)
+  integer, dimension(NGNOD) :: iaddx,iaddy,iaddz
+
+  integer nspec
+  ! mesh coordinate and numbering arrays, zeroed here
+  double precision xstore(NGLLX,NGLLY,NGLLZ,nspec)
+  double precision ystore(NGLLX,NGLLY,NGLLZ,nspec)
+  double precision zstore(NGLLX,NGLLY,NGLLZ,nspec)
+  integer ibool(NGLLX,NGLLY,NGLLZ,nspec)
+  integer idoubling(nspec)
+
+  logical iboun(6,nspec)
+  logical iMPIcut_xi(2,nspec),iMPIcut_eta(2,nspec)
+
+  ! boundary-mesh element counters (reset to zero here)
+  integer ispec2D_moho_top,ispec2D_moho_bot,ispec2D_400_top,ispec2D_400_bot, &
+    ispec2D_670_top,ispec2D_670_bot
+  integer NEX_PER_PROC_ETA,nex_eta_moho
+  double precision RMOHO,R400,R670
+  double precision r_moho,r_400,r_670
+
+  logical ONE_CRUST
+  integer NUMBER_OF_MESH_LAYERS,layer_shift
+
+  ! code for the four regions of the mesh
+  integer iregion_code,ifirst_region,ilast_region
+  integer first_layer_aniso,last_layer_aniso,nb_layer_above_aniso
+
+! this for non blocking MPI
+  logical, dimension(nspec) :: is_on_a_slice_edge
+
+! set up coordinates of the Gauss-Lobatto-Legendre points
+  call zwgljd(xigll,wxgll,NGLLX,GAUSSALPHA,GAUSSBETA)
+  call zwgljd(yigll,wygll,NGLLY,GAUSSALPHA,GAUSSBETA)
+  call zwgljd(zigll,wzgll,NGLLZ,GAUSSALPHA,GAUSSBETA)
+
+! if number of points is odd, the middle abscissa is exactly zero
+  if(mod(NGLLX,2) /= 0) xigll((NGLLX-1)/2+1) = ZERO
+  if(mod(NGLLY,2) /= 0) yigll((NGLLY-1)/2+1) = ZERO
+  if(mod(NGLLZ,2) /= 0) zigll((NGLLZ-1)/2+1) = ZERO
+
+! get the 3-D shape functions
+  call get_shape3D(myrank,shape3D,dershape3D,xigll,yigll,zigll)
+
+! get the 2-D shape functions
+  call get_shape2D(myrank,shape2D_x,dershape2D_x,yigll,zigll,NGLLY,NGLLZ)
+  call get_shape2D(myrank,shape2D_y,dershape2D_y,xigll,zigll,NGLLX,NGLLZ)
+  call get_shape2D(myrank,shape2D_bottom,dershape2D_bottom,xigll,yigll,NGLLX,NGLLY)
+  call get_shape2D(myrank,shape2D_top,dershape2D_top,xigll,yigll,NGLLX,NGLLY)
+
+! create the shape of the corner nodes of a regular mesh element
+  call hex_nodes(iaddx,iaddy,iaddz)
+
+! reference element has size one here, not two
+  iaddx(:) = iaddx(:) / 2
+  iaddy(:) = iaddy(:) / 2
+  iaddz(:) = iaddz(:) / 2
+
+! sets number of layers
+! with ONE_CRUST the crust is meshed as a single layer, hence one layer less overall
+  if (ONE_CRUST) then
+    NUMBER_OF_MESH_LAYERS = MAX_NUMBER_OF_MESH_LAYERS - 1
+    layer_shift = 0
+  else
+    NUMBER_OF_MESH_LAYERS = MAX_NUMBER_OF_MESH_LAYERS
+    layer_shift = 1
+  endif
+
+  if (.not. ADD_4TH_DOUBLING) NUMBER_OF_MESH_LAYERS = NUMBER_OF_MESH_LAYERS - 1
+
+! define the first and last layers that define this region
+  if(iregion_code == IREGION_CRUST_MANTLE) then
+    ifirst_region = 1
+    ilast_region = 10 + layer_shift
+
+  else if(iregion_code == IREGION_OUTER_CORE) then
+    ifirst_region = 11 + layer_shift
+    ilast_region = NUMBER_OF_MESH_LAYERS - 1
+
+  else if(iregion_code == IREGION_INNER_CORE) then
+    ifirst_region = NUMBER_OF_MESH_LAYERS
+    ilast_region = NUMBER_OF_MESH_LAYERS
+
+  else
+    call exit_MPI(myrank,'incorrect region code detected')
+  endif
+
+! to consider anisotropic elements first and to build the mesh from the bottom to the top of the region
+  if (ONE_CRUST) then
+    first_layer_aniso=2
+    last_layer_aniso=3
+    nb_layer_above_aniso = 1
+  else
+    first_layer_aniso=3
+    last_layer_aniso=4
+    nb_layer_above_aniso = 2
+  endif
+
+! initialize mesh arrays
+  idoubling(:) = 0
+
+  xstore(:,:,:,:) = 0.d0
+  ystore(:,:,:,:) = 0.d0
+  zstore(:,:,:,:) = 0.d0
+
+  ! ibool is only reset on the first pass; the second pass reuses the numbering
+  if(ipass == 1) ibool(:,:,:,:) = 0
+
+  ! initialize boundary arrays
+  iboun(:,:) = .false.
+  iMPIcut_xi(:,:) = .false.
+  iMPIcut_eta(:,:) = .false.
+  is_on_a_slice_edge(:) = .false.
+
+  ! boundary mesh
+  ispec2D_moho_top = 0; ispec2D_moho_bot = 0
+  ispec2D_400_top = 0; ispec2D_400_bot = 0
+  ispec2D_670_top = 0; ispec2D_670_bot = 0
+
+  nex_eta_moho = NEX_PER_PROC_ETA
+
+  ! non-dimensionalize the discontinuity radii by the Earth radius
+  r_moho = RMOHO/R_EARTH; r_400 = R400 / R_EARTH; r_670 = R670/R_EARTH
+
+  end subroutine initialize_layers

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/initialize_mesher.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/initialize_mesher.f90	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/initialize_mesher.f90	2013-07-01 01:33:15 UTC (rev 22469)
@@ -0,0 +1,163 @@
+!=====================================================================
+!
+!          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
+!          --------------------------------------------------
+!
+!          Main authors: Dimitri Komatitsch and Jeroen Tromp
+!                        Princeton University, USA
+!             and University of Pau / CNRS / INRIA, France
+! (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
+!                            April 2011
+!
+! This program is free software; you can redistribute it and/or modify
+! it under the terms of the GNU General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or
+! (at your option) any later version.
+!
+! This program is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+! GNU General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License along
+! with this program; if not, write to the Free Software Foundation, Inc.,
+! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+!
+!=====================================================================
+
+  subroutine initialize_mesher()
+
+! initializes the mesher: MPI ranks, output file, reading and broadcasting of
+! the Par_file parameters, rotation matrix and optional ADIOS setup
+
+  use meshfem3D_par
+  use meshfem3D_models_par
+
+  implicit none
+
+  ! local parameters
+  integer, external :: err_occurred
+  ! timing
+  double precision, external :: wtime
+
+! sizeprocs returns number of processes started (should be equal to NPROCTOT).
+! myrank is the rank of each process, between 0 and NPROCTOT-1.
+! as usual in MPI, process 0 is in charge of coordinating everything
+! and also takes care of the main output
+! do not create anything for the inner core here, will be done in solver
+  call world_size(sizeprocs)
+  call world_rank(myrank)
+
+! get the base pathname for output files
+  call get_value_string(OUTPUT_FILES, 'OUTPUT_FILES', 'OUTPUT_FILES')
+
+! open main output file, only written to by process 0
+  if(myrank == 0 .and. IMAIN /= ISTANDARD_OUTPUT) &
+    open(unit=IMAIN,file=trim(OUTPUT_FILES)//'/output_mesher.txt',status='unknown')
+
+! get MPI starting time
+  time_start = wtime()
+
+  if(myrank == 0) then
+    write(IMAIN,*)
+    write(IMAIN,*) '****************************'
+    write(IMAIN,*) '*** Specfem3D MPI Mesher ***'
+    write(IMAIN,*) '****************************'
+    write(IMAIN,*)
+  endif
+
+  ! only the master process reads the Par_file; values are broadcast below
+  if (myrank==0) then
+    ! reads the parameter file and computes additional parameters
+    call read_compute_parameters(MIN_ATTENUATION_PERIOD,MAX_ATTENUATION_PERIOD,NER_CRUST, &
+          NER_80_MOHO,NER_220_80,NER_400_220,NER_600_400,NER_670_600,NER_771_670, &
+          NER_TOPDDOUBLEPRIME_771,NER_CMB_TOPDDOUBLEPRIME,NER_OUTER_CORE, &
+          NER_TOP_CENTRAL_CUBE_ICB,NEX_XI,NEX_ETA,RMOHO_FICTITIOUS_IN_MESHER, &
+          NPROC_XI,NPROC_ETA,NTSTEP_BETWEEN_OUTPUT_SEISMOS, &
+          NTSTEP_BETWEEN_READ_ADJSRC,NSTEP,NTSTEP_BETWEEN_FRAMES, &
+          NTSTEP_BETWEEN_OUTPUT_INFO,NUMBER_OF_RUNS,NUMBER_OF_THIS_RUN,NCHUNKS,DT, &
+          ANGULAR_WIDTH_XI_IN_DEGREES,ANGULAR_WIDTH_ETA_IN_DEGREES,CENTER_LONGITUDE_IN_DEGREES, &
+          CENTER_LATITUDE_IN_DEGREES,GAMMA_ROTATION_AZIMUTH,ROCEAN,RMIDDLE_CRUST, &
+          RMOHO,R80,R120,R220,R400,R600,R670,R771,RTOPDDOUBLEPRIME,RCMB,RICB, &
+          R_CENTRAL_CUBE,RHO_TOP_OC,RHO_BOTTOM_OC,RHO_OCEANS,HDUR_MOVIE,MOVIE_VOLUME_TYPE, &
+          MOVIE_TOP,MOVIE_BOTTOM,MOVIE_WEST,MOVIE_EAST,MOVIE_NORTH,MOVIE_SOUTH,MOVIE_START,MOVIE_STOP, &
+          TRANSVERSE_ISOTROPY,ANISOTROPIC_3D_MANTLE, &
+          ANISOTROPIC_INNER_CORE,CRUSTAL,ELLIPTICITY,GRAVITY,ONE_CRUST, &
+          ROTATION,ISOTROPIC_3D_MANTLE,HETEROGEN_3D_MANTLE,TOPOGRAPHY,OCEANS,MOVIE_SURFACE, &
+          MOVIE_VOLUME,MOVIE_COARSE,ATTENUATION_3D,RECEIVERS_CAN_BE_BURIED, &
+          PRINT_SOURCE_TIME_FUNCTION,SAVE_MESH_FILES, &
+          ATTENUATION,ATTENUATION_NEW,REFERENCE_1D_MODEL,THREE_D_MODEL,ABSORBING_CONDITIONS, &
+          INCLUDE_CENTRAL_CUBE,INFLATE_CENTRAL_CUBE, &
+          LOCAL_PATH,LOCAL_TMP_PATH,MODEL, &
+          SIMULATION_TYPE,SAVE_FORWARD, &
+          NPROC,NPROCTOT,NEX_PER_PROC_XI,NEX_PER_PROC_ETA, &
+          NSPEC,NSPEC2D_XI,NSPEC2D_ETA,NSPEC2DMAX_XMIN_XMAX, &
+          NSPEC2DMAX_YMIN_YMAX,NSPEC2D_BOTTOM,NSPEC2D_TOP, &
+          NSPEC1D_RADIAL,NGLOB1D_RADIAL,NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX,NGLOB, &
+          ratio_sampling_array, ner, doubling_index,r_bottom,r_top,&
+          this_region_has_a_doubling,rmins,rmaxs,CASE_3D, &
+          OUTPUT_SEISMOS_ASCII_TEXT,OUTPUT_SEISMOS_SAC_ALPHANUM,OUTPUT_SEISMOS_SAC_BINARY, &
+          ROTATE_SEISMOGRAMS_RT,ratio_divide_central_cube, &
+          HONOR_1D_SPHERICAL_MOHO,CUT_SUPERBRICK_XI,CUT_SUPERBRICK_ETA,&
+          DIFF_NSPEC1D_RADIAL,DIFF_NSPEC2D_XI,DIFF_NSPEC2D_ETA,&
+          WRITE_SEISMOGRAMS_BY_MASTER,SAVE_ALL_SEISMOS_IN_ONE_FILE, &
+          USE_BINARY_FOR_LARGE_FILE,.false.,NOISE_TOMOGRAPHY)
+
+    if(err_occurred() /= 0) &
+      call exit_MPI(myrank,'an error occurred while reading the parameter file')
+
+    ! ADIOS_ENABLED: parameter is optional, may not be in the Par_file
+    call read_adios_parameters(ADIOS_ENABLED, ADIOS_FOR_FORWARD_ARRAYS, &
+        ADIOS_FOR_MPI_ARRAYS, ADIOS_FOR_ARRAYS_SOLVER, &
+        ADIOS_FOR_SOLVER_MESHFILES, ADIOS_FOR_AVS_DX)
+
+  endif
+
+  ! distributes parameters from master to all processes
+  call broadcast_compute_parameters(myrank,MIN_ATTENUATION_PERIOD,MAX_ATTENUATION_PERIOD,NER_CRUST, &
+                NER_80_MOHO,NER_220_80,NER_400_220,NER_600_400,NER_670_600,NER_771_670, &
+                NER_TOPDDOUBLEPRIME_771,NER_CMB_TOPDDOUBLEPRIME,NER_OUTER_CORE, &
+                NER_TOP_CENTRAL_CUBE_ICB,NEX_XI,NEX_ETA, &
+                NPROC_XI,NPROC_ETA,NTSTEP_BETWEEN_OUTPUT_SEISMOS, &
+                NTSTEP_BETWEEN_READ_ADJSRC,NSTEP,NSOURCES,NTSTEP_BETWEEN_FRAMES, &
+                NTSTEP_BETWEEN_OUTPUT_INFO,NUMBER_OF_RUNS,NUMBER_OF_THIS_RUN,NCHUNKS,SIMULATION_TYPE, &
+                MOVIE_VOLUME_TYPE,MOVIE_START,MOVIE_STOP, &
+                DT,ANGULAR_WIDTH_XI_IN_DEGREES,ANGULAR_WIDTH_ETA_IN_DEGREES,CENTER_LONGITUDE_IN_DEGREES, &
+                CENTER_LATITUDE_IN_DEGREES,GAMMA_ROTATION_AZIMUTH,ROCEAN,RMIDDLE_CRUST, &
+                RMOHO,R80,R120,R220,R400,R600,R670,R771,RTOPDDOUBLEPRIME,RCMB,RICB, &
+                R_CENTRAL_CUBE,RHO_TOP_OC,RHO_BOTTOM_OC,RHO_OCEANS,HDUR_MOVIE, &
+                MOVIE_TOP,MOVIE_BOTTOM,MOVIE_WEST,MOVIE_EAST,MOVIE_NORTH,MOVIE_SOUTH, &
+                RMOHO_FICTITIOUS_IN_MESHER, &
+                MOVIE_SURFACE,MOVIE_VOLUME,RECEIVERS_CAN_BE_BURIED,PRINT_SOURCE_TIME_FUNCTION, &
+                SAVE_MESH_FILES,ABSORBING_CONDITIONS,INCLUDE_CENTRAL_CUBE,INFLATE_CENTRAL_CUBE,SAVE_FORWARD, &
+                SAVE_ALL_SEISMOS_IN_ONE_FILE,MOVIE_COARSE,OUTPUT_SEISMOS_ASCII_TEXT, &
+                OUTPUT_SEISMOS_SAC_ALPHANUM,OUTPUT_SEISMOS_SAC_BINARY, &
+                ROTATE_SEISMOGRAMS_RT,WRITE_SEISMOGRAMS_BY_MASTER,USE_BINARY_FOR_LARGE_FILE, &
+                LOCAL_PATH,LOCAL_TMP_PATH,MODEL, &
+                NPROC,NPROCTOT,NEX_PER_PROC_XI,NEX_PER_PROC_ETA, &
+                NSPEC,NSPEC2D_XI,NSPEC2D_ETA, &
+                NSPEC2DMAX_XMIN_XMAX,NSPEC2DMAX_YMIN_YMAX,NSPEC2D_BOTTOM,NSPEC2D_TOP, &
+                NSPEC1D_RADIAL,NGLOB1D_RADIAL,NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX,NGLOB, &
+                ratio_sampling_array, ner, doubling_index,r_bottom,r_top, &
+                this_region_has_a_doubling,rmins,rmaxs, &
+                ratio_divide_central_cube,CUT_SUPERBRICK_XI,CUT_SUPERBRICK_ETA, &
+                DIFF_NSPEC1D_RADIAL,DIFF_NSPEC2D_XI,DIFF_NSPEC2D_ETA, &
+                REFERENCE_1D_MODEL,THREE_D_MODEL,ELLIPTICITY,GRAVITY,ROTATION,TOPOGRAPHY,OCEANS, &
+                HONOR_1D_SPHERICAL_MOHO,CRUSTAL,ONE_CRUST,CASE_3D,TRANSVERSE_ISOTROPY, &
+                ISOTROPIC_3D_MANTLE,ANISOTROPIC_3D_MANTLE,HETEROGEN_3D_MANTLE, &
+                ATTENUATION,ATTENUATION_NEW,ATTENUATION_3D,ANISOTROPIC_INNER_CORE,NOISE_TOMOGRAPHY)
+  ! broadcasts optional ADIOS_ENABLED 
+  call broadcast_adios_parameters(myrank,ADIOS_ENABLED, &
+      ADIOS_FOR_FORWARD_ARRAYS, ADIOS_FOR_MPI_ARRAYS, ADIOS_FOR_ARRAYS_SOLVER, &
+      ADIOS_FOR_SOLVER_MESHFILES, ADIOS_FOR_AVS_DX)
+
+  ! check that the code is running with the requested number of processes
+  if(sizeprocs /= NPROCTOT) call exit_MPI(myrank,'wrong number of MPI processes')
+
+  ! compute rotation matrix from Euler angles
+  ANGULAR_WIDTH_XI_RAD = ANGULAR_WIDTH_XI_IN_DEGREES * DEGREES_TO_RADIANS
+  ANGULAR_WIDTH_ETA_RAD = ANGULAR_WIDTH_ETA_IN_DEGREES * DEGREES_TO_RADIANS
+
+  ! one-chunk and two-chunk meshes can be rotated anywhere; the full sphere cannot
+  if(NCHUNKS /= 6) call euler_angles(rotation_matrix,CENTER_LONGITUDE_IN_DEGREES,CENTER_LATITUDE_IN_DEGREES,GAMMA_ROTATION_AZIMUTH)
+
+  if (ADIOS_ENABLED) then
+    call adios_setup()
+  endif
+
+  end subroutine initialize_mesher

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/meshfem3D_par.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/meshfem3D_par.f90	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/meshfem3D_par.f90	2013-07-01 01:33:15 UTC (rev 22469)
@@ -0,0 +1,667 @@
+!=====================================================================
+!
+!          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
+!          --------------------------------------------------
+!
+!          Main authors: Dimitri Komatitsch and Jeroen Tromp
+!                        Princeton University, USA
+!             and University of Pau / CNRS / INRIA, France
+! (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
+!                            April 2011
+!
+! This program is free software; you can redistribute it and/or modify
+! it under the terms of the GNU General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or
+! (at your option) any later version.
+!
+! This program is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+! GNU General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License along
+! with this program; if not, write to the Free Software Foundation, Inc.,
+! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+!
+!=====================================================================
+!
+! United States and French Government Sponsorship Acknowledged.
+
+  module constants
+
+  include "constants.h"
+
+  end module constants
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  module meshfem3D_models_par
+
+!---
+!
+! ADD YOUR MODEL HERE
+!
+!---
+
+  use constants
+
+  implicit none
+
+! model_attenuation_variables
+  type model_attenuation_variables
+    sequence
+    double precision min_period, max_period
+    double precision                          :: QT_c_source        ! Source Frequency
+    double precision, dimension(:), pointer   :: Qtau_s             ! tau_sigma
+    double precision, dimension(:), pointer   :: QrDisc             ! Discontinuities defined
+    double precision, dimension(:), pointer   :: Qr                 ! Radius
+    double precision, dimension(:), pointer   :: Qmu                ! Shear Attenuation
+    double precision, dimension(:,:), pointer :: Qtau_e             ! tau_epsilon
+    double precision, dimension(:), pointer   :: Qomsb, Qomsb2      ! one_minus_sum_beta
+    double precision, dimension(:,:), pointer :: Qfc, Qfc2          ! factor_common
+    double precision, dimension(:), pointer   :: Qsf, Qsf2          ! scale_factor
+    integer, dimension(:), pointer            :: Qrmin              ! Mins of idoubling
+    integer, dimension(:), pointer            :: Qrmax              ! Maxs of idoubling
+    integer, dimension(:), pointer            :: interval_Q                 ! Steps
+    integer                                   :: Qn                 ! Number of points
+    integer dummy_pad ! padding 4 bytes to align the structure
+  end type model_attenuation_variables
+  type (model_attenuation_variables) AM_V
+! model_attenuation_variables
+
+! model_attenuation_storage_var
+  type model_attenuation_storage_var
+    sequence
+    double precision, dimension(:,:), pointer :: tau_e_storage
+    double precision, dimension(:), pointer :: Qmu_storage
+    integer Q_resolution
+    integer Q_max
+  end type model_attenuation_storage_var
+  type (model_attenuation_storage_var) AM_S
+! model_attenuation_storage_var
+
+! attenuation_simplex_variables
+  type attenuation_simplex_variables
+    sequence
+    double precision Q  ! Q     = Desired Value of Attenuation or Q
+    double precision iQ ! iQ    = 1/Q
+    double precision, dimension(:), pointer ::  f
+    ! f = Frequencies at which to evaluate the solution
+    double precision, dimension(:), pointer :: tau_s
+    ! tau_s = Tau_sigma defined by the frequency range and
+    !             number of standard linear solids
+    integer nf          ! nf    = Number of Frequencies
+    integer nsls        ! nsls  = Number of Standard Linear Solids
+  end type attenuation_simplex_variables
+  type(attenuation_simplex_variables) AS_V
+! attenuation_simplex_variables
+
+! GLL model_variables
+  type model_gll_variables
+    sequence
+    ! tomographic iteration model on GLL points
+    double precision :: scale_velocity,scale_density
+    ! isotropic model
+    real(kind=CUSTOM_REAL),dimension(:,:,:,:),pointer :: vs_new,vp_new,rho_new
+    ! transverse isotropic model
+    real(kind=CUSTOM_REAL),dimension(:,:,:,:),pointer :: vsv_new,vpv_new, &
+      vsh_new,vph_new,eta_new
+    logical :: MODEL_GLL
+    logical,dimension(3) :: dummy_pad ! padding 3 bytes to align the structure
+  end type model_gll_variables
+  type (model_gll_variables) MGLL_V
+
+! bathymetry and topography: use integer array to store values
+  integer, dimension(NX_BATHY,NY_BATHY) :: ibathy_topo
+
+! for ellipticity
+  double precision,dimension(NR) :: rspl,espl,espl2
+  integer :: nspl
+
+! model parameter and flags
+  integer :: REFERENCE_1D_MODEL,THREE_D_MODEL
+
+  logical :: ELLIPTICITY,GRAVITY,ROTATION,TOPOGRAPHY,OCEANS
+
+  logical :: HONOR_1D_SPHERICAL_MOHO,CRUSTAL,ONE_CRUST,CASE_3D,TRANSVERSE_ISOTROPY
+
+  logical :: ISOTROPIC_3D_MANTLE,ANISOTROPIC_3D_MANTLE,HETEROGEN_3D_MANTLE
+
+  logical :: ATTENUATION,ATTENUATION_NEW,ATTENUATION_3D
+
+  logical :: ANISOTROPIC_INNER_CORE
+
+! to create a reference model based on 1D_REF but with 3D crust and 410/660 topography
+  logical,parameter :: USE_1D_REFERENCE = .false.
+
+  end module meshfem3D_models_par
+
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+
+  module meshfem3D_par
+
+! main parameter module for specfem simulations
+
+  use constants
+
+  implicit none
+
+  ! correct number of spectral elements in each block depending on chunk type
+  integer :: npointot
+
+  ! proc numbers for MPI
+  integer :: myrank,sizeprocs
+
+  ! check area and volume of the final mesh
+  double precision :: volume_total
+
+  ! for loop on all the slices
+  integer :: iregion_code
+  integer :: iproc_xi,iproc_eta,ichunk
+
+  ! rotation matrix from Euler angles
+  double precision, dimension(NDIM,NDIM) :: rotation_matrix
+  double precision :: ANGULAR_WIDTH_XI_RAD,ANGULAR_WIDTH_ETA_RAD
+
+  ! for some statistics for the mesh
+  integer :: numelem_crust_mantle,numelem_outer_core,numelem_inner_core
+  integer :: numelem_total
+
+  ! timer MPI
+  double precision :: time_start,tCPU
+
+  ! addressing for all the slices
+  integer, dimension(:), allocatable :: ichunk_slice,iproc_xi_slice,iproc_eta_slice
+  integer, dimension(:,:,:), allocatable :: addressing
+
+  ! parameters read from parameter file
+  integer :: MIN_ATTENUATION_PERIOD,MAX_ATTENUATION_PERIOD,NER_CRUST, &
+          NER_80_MOHO,NER_220_80,NER_400_220,NER_600_400,NER_670_600,NER_771_670, &
+          NER_TOPDDOUBLEPRIME_771,NER_CMB_TOPDDOUBLEPRIME,NER_OUTER_CORE, &
+          NER_TOP_CENTRAL_CUBE_ICB,NEX_XI,NEX_ETA, &
+          NPROC_XI,NPROC_ETA,NTSTEP_BETWEEN_OUTPUT_SEISMOS, &
+          NTSTEP_BETWEEN_READ_ADJSRC,NSTEP,NSOURCES,NTSTEP_BETWEEN_FRAMES, &
+          NTSTEP_BETWEEN_OUTPUT_INFO,NUMBER_OF_RUNS,NUMBER_OF_THIS_RUN, &
+          NCHUNKS,SIMULATION_TYPE, &
+          MOVIE_VOLUME_TYPE,MOVIE_START,MOVIE_STOP,NOISE_TOMOGRAPHY
+
+  double precision :: DT,ANGULAR_WIDTH_XI_IN_DEGREES,ANGULAR_WIDTH_ETA_IN_DEGREES,CENTER_LONGITUDE_IN_DEGREES, &
+          CENTER_LATITUDE_IN_DEGREES,GAMMA_ROTATION_AZIMUTH,ROCEAN,RMIDDLE_CRUST, &
+          RMOHO,R80,R120,R220,R400,R600,R670,R771,RTOPDDOUBLEPRIME,RCMB,RICB, &
+          R_CENTRAL_CUBE,RHO_TOP_OC,RHO_BOTTOM_OC,RHO_OCEANS,HDUR_MOVIE, &
+          MOVIE_TOP,MOVIE_BOTTOM,MOVIE_WEST,MOVIE_EAST,MOVIE_NORTH,MOVIE_SOUTH, &
+          RMOHO_FICTITIOUS_IN_MESHER
+
+  logical :: MOVIE_SURFACE,MOVIE_VOLUME,MOVIE_COARSE, &
+          RECEIVERS_CAN_BE_BURIED,PRINT_SOURCE_TIME_FUNCTION, &
+          SAVE_MESH_FILES,ABSORBING_CONDITIONS,INCLUDE_CENTRAL_CUBE,INFLATE_CENTRAL_CUBE,SAVE_FORWARD, &
+          OUTPUT_SEISMOS_ASCII_TEXT,OUTPUT_SEISMOS_SAC_ALPHANUM,OUTPUT_SEISMOS_SAC_BINARY, &
+          ROTATE_SEISMOGRAMS_RT,WRITE_SEISMOGRAMS_BY_MASTER,&
+          SAVE_ALL_SEISMOS_IN_ONE_FILE,USE_BINARY_FOR_LARGE_FILE
+
+  character(len=150) :: OUTPUT_FILES
+  character(len=150) :: LOCAL_PATH,LOCAL_TMP_PATH,MODEL
+
+  ! parameters deduced from parameters read from file
+  integer :: NPROC,NPROCTOT,NEX_PER_PROC_XI,NEX_PER_PROC_ETA,ratio_divide_central_cube
+
+  ! this for all the regions
+  integer, dimension(MAX_NUM_REGIONS) :: NSPEC,NSPEC2D_XI,NSPEC2D_ETA, &
+               NSPEC2DMAX_XMIN_XMAX,NSPEC2DMAX_YMIN_YMAX, &
+               NSPEC2D_BOTTOM,NSPEC2D_TOP,NSPEC1D_RADIAL,NGLOB1D_RADIAL, &
+               NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX, &
+               NGLOB
+
+  ! computed in read_compute_parameters
+  integer, dimension(MAX_NUMBER_OF_MESH_LAYERS) :: ner,ratio_sampling_array
+  integer, dimension(MAX_NUMBER_OF_MESH_LAYERS) :: doubling_index
+  double precision, dimension(MAX_NUMBER_OF_MESH_LAYERS) :: r_bottom,r_top
+  logical, dimension(MAX_NUMBER_OF_MESH_LAYERS) :: this_region_has_a_doubling
+  double precision, dimension(MAX_NUMBER_OF_MESH_LAYERS) :: rmins,rmaxs
+
+  ! memory size of all the static arrays
+  double precision :: static_memory_size
+
+  integer :: NSPECMAX_ANISO_IC,NSPECMAX_ISO_MANTLE,NSPECMAX_TISO_MANTLE, &
+         NSPECMAX_ANISO_MANTLE,NSPEC_CRUST_MANTLE_ATTENUAT, &
+         NSPEC_INNER_CORE_ATTENUATION, &
+         NSPEC_CRUST_MANTLE_STR_OR_ATT,NSPEC_INNER_CORE_STR_OR_ATT, &
+         NSPEC_CRUST_MANTLE_STR_AND_ATT,NSPEC_INNER_CORE_STR_AND_ATT, &
+         NSPEC_CRUST_MANTLE_STRAIN_ONLY,NSPEC_INNER_CORE_STRAIN_ONLY, &
+         NSPEC_CRUST_MANTLE_ADJOINT, &
+         NSPEC_OUTER_CORE_ADJOINT,NSPEC_INNER_CORE_ADJOINT, &
+         NGLOB_CRUST_MANTLE_ADJOINT,NGLOB_OUTER_CORE_ADJOINT, &
+         NGLOB_INNER_CORE_ADJOINT,NSPEC_OUTER_CORE_ROT_ADJOINT, &
+         NSPEC_CRUST_MANTLE_STACEY,NSPEC_OUTER_CORE_STACEY, &
+         NGLOB_CRUST_MANTLE_OCEANS,NSPEC_OUTER_CORE_ROTATION
+
+  ! this for the different corners of the slice (which are different if the superbrick is cut)
+  ! 1 : xi_min, eta_min
+  ! 2 : xi_max, eta_min
+  ! 3 : xi_max, eta_max
+  ! 4 : xi_min, eta_max
+  integer, dimension(MAX_NUM_REGIONS,NB_SQUARE_CORNERS) :: NSPEC1D_RADIAL_CORNER,NGLOB1D_RADIAL_CORNER
+
+  ! 1 -> min, 2 -> max
+  integer, dimension(MAX_NUM_REGIONS,NB_SQUARE_EDGES_ONEDIR) :: NSPEC2D_XI_FACE,NSPEC2D_ETA_FACE
+
+  integer, dimension(NB_SQUARE_CORNERS,NB_CUT_CASE) :: DIFF_NSPEC1D_RADIAL
+  integer, dimension(NB_SQUARE_EDGES_ONEDIR,NB_CUT_CASE) :: DIFF_NSPEC2D_XI,DIFF_NSPEC2D_ETA
+  logical :: CUT_SUPERBRICK_XI,CUT_SUPERBRICK_ETA
+
+  ! arrays with the mesh in double precision
+  double precision, dimension(:,:,:,:), allocatable :: xstore,ystore,zstore
+  ! parameters needed to store the radii of the grid points
+  ! in the spherically symmetric Earth
+  integer, dimension(:), allocatable :: idoubling
+  integer, dimension(:,:,:,:), allocatable :: ibool
+
+  ! this is for non-blocking MPI
+  logical, dimension(:), allocatable :: is_on_a_slice_edge
+  !-----------------------------------------------------------------
+  ! ADIOS
+  !-----------------------------------------------------------------
+
+  logical :: ADIOS_ENABLED, ADIOS_FOR_FORWARD_ARRAYS, ADIOS_FOR_MPI_ARRAYS, &
+      ADIOS_FOR_ARRAYS_SOLVER, ADIOS_FOR_SOLVER_MESHFILES, &
+      ADIOS_FOR_AVS_DX
+
+  end module meshfem3D_par
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  module create_regions_mesh_par
+
+  use constants,only: NGLLX,NGLLY,NGLLZ,NGNOD,NGNOD2D,NDIM,NDIM2D
+
+  implicit none
+
+  ! topology of the elements
+  integer, dimension(NGNOD) :: iaddx,iaddy,iaddz
+
+  ! Gauss-Lobatto-Legendre points and weights of integration
+  double precision, dimension(NGLLX) :: xigll,wxgll
+  double precision, dimension(NGLLY) :: yigll,wygll
+  double precision, dimension(NGLLZ) :: zigll,wzgll
+
+  ! 3D shape functions and their derivatives
+  double precision, dimension(NGNOD,NGLLX,NGLLY,NGLLZ) :: shape3D
+  double precision, dimension(NDIM,NGNOD,NGLLX,NGLLY,NGLLZ) :: dershape3D
+
+  ! 2D shape functions and their derivatives
+  double precision, dimension(NGNOD2D,NGLLY,NGLLZ) :: shape2D_x
+  double precision, dimension(NGNOD2D,NGLLX,NGLLZ) :: shape2D_y
+  double precision, dimension(NGNOD2D,NGLLX,NGLLY) :: shape2D_bottom,shape2D_top
+  double precision, dimension(NDIM2D,NGNOD2D,NGLLY,NGLLZ) :: dershape2D_x
+  double precision, dimension(NDIM2D,NGNOD2D,NGLLX,NGLLZ) :: dershape2D_y
+  double precision, dimension(NDIM2D,NGNOD2D,NGLLX,NGLLY) :: dershape2D_bottom,dershape2D_top
+
+  end module create_regions_mesh_par
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  module create_regions_mesh_par2
+
+  use constants,only: CUSTOM_REAL,N_SLS
+
+  implicit none
+
+  integer :: nspec_stacey,nspec_actually,nspec_att
+
+  integer :: ifirst_region,ilast_region
+  integer, dimension(:), allocatable :: perm_layer
+
+  ! for model density and anisotropy
+  integer :: nspec_ani
+  real(kind=CUSTOM_REAL), dimension(:,:,:,:), allocatable :: rhostore,dvpstore, &
+    kappavstore,kappahstore,muvstore,muhstore,eta_anisostore
+
+  ! the 21 coefficients for an anisotropic medium in reduced notation
+  real(kind=CUSTOM_REAL), dimension(:,:,:,:), allocatable :: &
+    c11store,c12store,c13store,c14store,c15store,c16store,c22store, &
+    c23store,c24store,c25store,c26store,c33store,c34store,c35store, &
+    c36store,c44store,c45store,c46store,c55store,c56store,c66store
+
+  ! boundary locator
+  logical, dimension(:,:), allocatable :: iboun
+
+  ! arrays with mesh parameters
+  real(kind=CUSTOM_REAL), dimension(:,:,:,:), allocatable :: xixstore,xiystore,xizstore, &
+    etaxstore,etaystore,etazstore,gammaxstore,gammaystore,gammazstore
+
+  ! mass matrices
+  real(kind=CUSTOM_REAL), dimension(:), allocatable :: rmassx,rmassy,rmassz
+  integer :: nglob_xy
+
+  ! mass matrix and bathymetry for ocean load
+  integer :: nglob_oceans
+  real(kind=CUSTOM_REAL), dimension(:), allocatable :: rmass_ocean_load
+
+  ! number of elements on the boundaries
+  integer :: nspec2D_xmin,nspec2D_xmax,nspec2D_ymin,nspec2D_ymax
+
+  ! boundary parameters locator
+  integer, dimension(:), allocatable :: ibelm_xmin,ibelm_xmax, &
+    ibelm_ymin,ibelm_ymax,ibelm_bottom,ibelm_top
+
+  ! 2-D jacobians and normals
+  real(kind=CUSTOM_REAL), dimension(:,:,:), allocatable :: &
+    jacobian2D_xmin,jacobian2D_xmax, &
+    jacobian2D_ymin,jacobian2D_ymax,jacobian2D_bottom,jacobian2D_top
+
+  real(kind=CUSTOM_REAL), dimension(:,:,:,:), allocatable :: &
+    normal_xmin,normal_xmax,normal_ymin,normal_ymax,normal_bottom,normal_top
+
+  ! MPI cut-planes parameters along xi and along eta
+  logical, dimension(:,:), allocatable :: iMPIcut_xi,iMPIcut_eta
+
+  ! Stacey, indices for Clayton-Engquist absorbing conditions
+  integer, dimension(:,:), allocatable :: nimin,nimax,njmin,njmax,nkmin_xi,nkmin_eta
+  real(kind=CUSTOM_REAL), dimension(:,:,:,:), allocatable :: rho_vp,rho_vs
+
+  ! attenuation
+  double precision, dimension(:,:,:,:),   allocatable :: Qmu_store
+  double precision, dimension(:,:,:,:,:), allocatable :: tau_e_store
+  double precision, dimension(N_SLS) :: tau_s
+  double precision :: T_c_source
+
+  ! element layers
+  integer :: NUMBER_OF_MESH_LAYERS,layer_shift,cpt, &
+    first_layer_aniso,last_layer_aniso,FIRST_ELT_NON_ANISO
+  logical :: USE_ONE_LAYER_SB
+
+  ! layer stretching
+  double precision, dimension(:,:), allocatable :: stretch_tab
+  integer :: nb_layer_above_aniso,FIRST_ELT_ABOVE_ANISO
+
+  ! Boundary Mesh
+  integer :: NSPEC2D_MOHO,NSPEC2D_400,NSPEC2D_670,nex_eta_moho
+  integer, dimension(:), allocatable :: ibelm_moho_top,ibelm_moho_bot,ibelm_400_top,ibelm_400_bot, &
+    ibelm_670_top,ibelm_670_bot
+  real(kind=CUSTOM_REAL), dimension(:,:,:,:), allocatable :: normal_moho,normal_400,normal_670
+  real(kind=CUSTOM_REAL), dimension(:,:,:), allocatable :: jacobian2D_moho,jacobian2D_400,jacobian2D_670
+
+  integer :: ispec2D_moho_top,ispec2D_moho_bot,ispec2D_400_top,ispec2D_400_bot, &
+    ispec2D_670_top,ispec2D_670_bot
+  double precision :: r_moho,r_400,r_670
+
+  ! flags for transverse isotropic elements
+  logical, dimension(:), allocatable :: ispec_is_tiso
+
+  ! name of the database file
+  character(len=150) :: prname, prname_adios
+
+  end module create_regions_mesh_par2
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  module create_MPI_interfaces_par
+
+  use constants,only: &
+    CUSTOM_REAL,NDIM,IMAIN, &
+    IREGION_CRUST_MANTLE,IREGION_OUTER_CORE,IREGION_INNER_CORE, &
+    NUMFACES_SHARED,NB_SQUARE_EDGES_ONEDIR
+
+  implicit none
+
+  ! indirect addressing for each message for faces and corners of the chunks
+  ! a given slice can belong to at most one corner and at most two faces
+
+  ! number of faces between chunks
+  integer :: NUMMSGS_FACES
+
+  ! number of corners between chunks
+  integer :: NCORNERSCHUNKS
+
+  ! number of message types
+  integer :: NUM_MSG_TYPES
+
+  !-----------------------------------------------------------------
+  ! assembly
+  !-----------------------------------------------------------------
+
+  ! ---- arrays to assemble between chunks
+  ! communication pattern for faces between chunks
+  integer, dimension(:),allocatable :: iprocfrom_faces,iprocto_faces,imsg_type
+  ! communication pattern for corners between chunks
+  integer, dimension(:),allocatable :: iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners
+
+  ! indirect addressing for each corner of the chunks
+  integer, dimension(:,:),allocatable :: iboolcorner
+
+  ! chunk faces
+  integer, dimension(:,:),allocatable :: iboolfaces
+  integer, dimension(NUMFACES_SHARED) :: npoin2D_faces
+  integer :: NGLOB2DMAX_XY
+
+  ! 2-D addressing and buffers for summation between slices
+  integer, dimension(:),allocatable :: iboolleft_xi,iboolright_xi
+  integer, dimension(:),allocatable :: iboolleft_eta,iboolright_eta
+  integer, dimension(NB_SQUARE_EDGES_ONEDIR) :: npoin2D_xi_all,npoin2D_eta_all
+  integer :: npoin2D_xi,npoin2D_eta
+
+  ! 1-D addressing
+  integer :: NGLOB1D_RADIAL_MAX
+  integer,dimension(:),allocatable :: ibool1D_leftxi_lefteta,ibool1D_rightxi_lefteta, &
+                                    ibool1D_leftxi_righteta,ibool1D_rightxi_righteta
+
+  double precision,dimension(:,:),allocatable :: xyz1D_leftxi_lefteta,xyz1D_rightxi_lefteta, &
+                                    xyz1D_leftxi_righteta,xyz1D_rightxi_righteta
+
+  ! this is for non-blocking MPI
+
+  ! buffers for send and receive between faces of the slices and the chunks
+  ! we use the same buffers to assemble scalars and vectors because vectors are
+  ! always three times bigger and therefore scalars can use the first part
+  ! of the vector buffer in memory even if it has an additional index here
+  integer :: npoin2D_max_all_CM_IC
+
+  ! buffers for send and receive between corners of the chunks
+  real(kind=CUSTOM_REAL), dimension(:),allocatable :: &
+    buffer_send_chunkcorn_scalar,buffer_recv_chunkcorn_scalar
+
+  ! size of buffers is the sum of two sizes because we handle two regions in the same MPI call
+  real(kind=CUSTOM_REAL), dimension(:,:),allocatable :: &
+     buffer_send_chunkcorn_vector,buffer_recv_chunkcorn_vector
+
+  end module create_MPI_interfaces_par
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  module MPI_crust_mantle_par
+
+  use constants,only: CUSTOM_REAL,NUMFACES_SHARED,NB_SQUARE_EDGES_ONEDIR
+
+  implicit none
+
+  ! collected MPI interfaces
+  !--------------------------------------
+  ! MPI crust/mantle mesh
+  !--------------------------------------
+  integer :: num_interfaces_crust_mantle
+  integer :: max_nibool_interfaces_cm
+  integer, dimension(:), allocatable :: my_neighbours_crust_mantle,nibool_interfaces_crust_mantle
+  integer, dimension(:,:), allocatable :: ibool_interfaces_crust_mantle
+
+  !--------------------------------------
+  ! crust mantle
+  !--------------------------------------
+  integer :: NSPEC_CRUST_MANTLE
+  integer :: NGLOB_CRUST_MANTLE
+
+  integer :: NGLOB1D_RADIAL_CM
+  integer :: NGLOB2DMAX_XMIN_XMAX_CM
+  integer :: NGLOB2DMAX_YMIN_YMAX_CM
+  integer :: NSPEC2DMAX_XMIN_XMAX_CM
+  integer :: NSPEC2DMAX_YMIN_YMAX_CM
+  integer :: NSPEC2D_BOTTOM_CM
+  integer :: NSPEC2D_TOP_CM
+
+  real(kind=CUSTOM_REAL), dimension(:),allocatable :: &
+    xstore_crust_mantle,ystore_crust_mantle,zstore_crust_mantle
+
+  ! assembly
+  integer, dimension(NUMFACES_SHARED) :: npoin2D_faces_crust_mantle
+  integer, dimension(NB_SQUARE_EDGES_ONEDIR) :: npoin2D_xi_crust_mantle,npoin2D_eta_crust_mantle
+
+  ! indirect addressing for each corner of the chunks
+  integer, dimension(:,:),allocatable :: iboolcorner_crust_mantle
+
+  ! 2-D addressing and buffers for summation between slices
+  integer, dimension(:),allocatable :: iboolleft_xi_crust_mantle,iboolright_xi_crust_mantle
+  integer, dimension(:),allocatable :: iboolleft_eta_crust_mantle,iboolright_eta_crust_mantle
+
+  integer, dimension(:,:),allocatable :: iboolfaces_crust_mantle
+
+  ! inner / outer elements crust/mantle region
+  integer :: num_phase_ispec_crust_mantle
+  integer :: nspec_inner_crust_mantle,nspec_outer_crust_mantle
+  integer, dimension(:,:), allocatable :: phase_ispec_inner_crust_mantle
+
+  ! mesh coloring
+  integer :: num_colors_outer_crust_mantle,num_colors_inner_crust_mantle
+  integer,dimension(:),allocatable :: num_elem_colors_crust_mantle
+
+  end module MPI_crust_mantle_par
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  module MPI_inner_core_par
+
+  use constants,only: CUSTOM_REAL,NUMFACES_SHARED,NB_SQUARE_EDGES_ONEDIR
+
+  implicit none
+
+  !--------------------------------------
+  ! MPI inner core mesh
+  !--------------------------------------
+  integer :: num_interfaces_inner_core
+  integer :: max_nibool_interfaces_ic
+  integer, dimension(:), allocatable :: my_neighbours_inner_core,nibool_interfaces_inner_core
+  integer, dimension(:,:), allocatable :: ibool_interfaces_inner_core
+
+  !--------------------------------------
+  ! inner core
+  !--------------------------------------
+  integer :: NSPEC_INNER_CORE
+  integer :: NGLOB_INNER_CORE
+
+  integer :: NGLOB1D_RADIAL_IC
+  integer :: NGLOB2DMAX_XMIN_XMAX_IC
+  integer :: NGLOB2DMAX_YMIN_YMAX_IC
+  integer :: NSPEC2DMAX_XMIN_XMAX_IC
+  integer :: NSPEC2DMAX_YMIN_YMAX_IC
+  integer :: NSPEC2D_BOTTOM_IC
+  integer :: NSPEC2D_TOP_IC
+
+  real(kind=CUSTOM_REAL), dimension(:),allocatable :: &
+    xstore_inner_core,ystore_inner_core,zstore_inner_core
+
+  ! for matching with central cube in inner core
+  integer, dimension(:), allocatable :: sender_from_slices_to_cube
+  integer, dimension(:,:), allocatable :: ibool_central_cube
+  double precision, dimension(:,:), allocatable :: buffer_slices,buffer_slices2
+  double precision, dimension(:,:,:), allocatable :: buffer_all_cube_from_slices
+  integer :: nb_msgs_theor_in_cube,non_zero_nb_msgs_theor_in_cube, &
+    npoin2D_cube_from_slices,receiver_cube_from_slices
+
+  ! bottom inner core / top central cube
+  integer, dimension(:),allocatable :: ibelm_bottom_inner_core
+
+  integer, dimension(NUMFACES_SHARED) :: npoin2D_faces_inner_core
+  integer, dimension(NB_SQUARE_EDGES_ONEDIR) :: npoin2D_xi_inner_core,npoin2D_eta_inner_core
+
+  ! indirect addressing for each corner of the chunks
+  integer, dimension(:,:),allocatable :: iboolcorner_inner_core
+
+  ! 2-D addressing and buffers for summation between slices
+  integer, dimension(:),allocatable :: iboolleft_xi_inner_core,iboolright_xi_inner_core
+  integer, dimension(:),allocatable :: iboolleft_eta_inner_core,iboolright_eta_inner_core
+
+  integer, dimension(:,:),allocatable :: iboolfaces_inner_core
+
+  ! inner / outer elements inner core region
+  integer :: num_phase_ispec_inner_core
+  integer :: nspec_inner_inner_core,nspec_outer_inner_core
+  integer, dimension(:,:), allocatable :: phase_ispec_inner_inner_core
+
+  ! mesh coloring
+  integer :: num_colors_outer_inner_core,num_colors_inner_inner_core
+  integer,dimension(:),allocatable :: num_elem_colors_inner_core
+
+  end module MPI_inner_core_par
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  module MPI_outer_core_par
+
+  use constants,only: CUSTOM_REAL,NUMFACES_SHARED,NB_SQUARE_EDGES_ONEDIR
+
+  implicit none
+
+  !--------------------------------------
+  ! MPI outer core mesh
+  !--------------------------------------
+  integer :: num_interfaces_outer_core
+  integer :: max_nibool_interfaces_oc
+  integer, dimension(:), allocatable :: my_neighbours_outer_core,nibool_interfaces_outer_core
+  integer, dimension(:,:), allocatable :: ibool_interfaces_outer_core
+
+  !--------------------------------------
+  ! outer core
+  !--------------------------------------
+  integer :: NSPEC_OUTER_CORE
+  integer :: NGLOB_OUTER_CORE
+
+  integer :: NGLOB1D_RADIAL_OC
+  integer :: NGLOB2DMAX_XMIN_XMAX_OC
+  integer :: NGLOB2DMAX_YMIN_YMAX_OC
+  integer :: NSPEC2DMAX_XMIN_XMAX_OC
+  integer :: NSPEC2DMAX_YMIN_YMAX_OC
+  integer :: NSPEC2D_BOTTOM_OC
+  integer :: NSPEC2D_TOP_OC
+
+  real(kind=CUSTOM_REAL), dimension(:),allocatable :: &
+    xstore_outer_core,ystore_outer_core,zstore_outer_core
+
+  ! assembly
+  integer, dimension(NUMFACES_SHARED) :: npoin2D_faces_outer_core
+  integer, dimension(NB_SQUARE_EDGES_ONEDIR) :: npoin2D_xi_outer_core,npoin2D_eta_outer_core
+
+  ! indirect addressing for each corner of the chunks
+  integer, dimension(:,:),allocatable :: iboolcorner_outer_core
+
+  ! 2-D addressing and buffers for summation between slices
+  integer, dimension(:),allocatable :: iboolleft_xi_outer_core,iboolright_xi_outer_core
+  integer, dimension(:),allocatable :: iboolleft_eta_outer_core,iboolright_eta_outer_core
+
+  integer, dimension(:,:),allocatable :: iboolfaces_outer_core
+
+  ! inner / outer elements outer core region
+  integer :: num_phase_ispec_outer_core
+  integer :: nspec_inner_outer_core,nspec_outer_outer_core
+  integer, dimension(:,:), allocatable :: phase_ispec_inner_outer_core
+
+  ! mesh coloring
+  integer :: num_colors_outer_outer_core,num_colors_inner_outer_core
+  integer,dimension(:),allocatable :: num_elem_colors_outer_core
+
+  end module MPI_outer_core_par

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/save_arrays_solver_adios.F90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/save_arrays_solver_adios.F90	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/save_arrays_solver_adios.F90	2013-07-01 01:33:15 UTC (rev 22469)
@@ -0,0 +1,1603 @@
+!=====================================================================
+!
+!          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
+!          --------------------------------------------------
+!
+!          Main authors: Dimitri Komatitsch and Jeroen Tromp
+!                        Princeton University, USA
+!             and University of Pau / CNRS / INRIA, France
+! (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
+!                            April 2011
+!
+! This program is free software; you can redistribute it and/or modify
+! it under the terms of the GNU General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or
+! (at your option) any later version.
+!
+! This program is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+! GNU General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License along
+! with this program; if not, write to the Free Software Foundation, Inc.,
+! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+!
+!=====================================================================
+
+!-------------------------------------------------------------------------------
+!> \file save_arrays_solver_adios.F90
+!! \brief Functions to save the arrays produced by the mesher for the solver,
+!!        written to disk with ADIOS.
+!! \author MPBL
+!-------------------------------------------------------------------------------
+
+!===============================================================================
+!> \brief Main routine to save the arrays from the mesher to the solver with the
+!!        help of ADIOS
+!! \param myrank The MPI rank of the current process
+!! \param nspec Number of spectral elements in this region
+!! \param nglob Number of unique mesh (GLL) points in this region
+!! \param idoubling Doubling-layer flag for every spectral element
+!! \param ibool Global addressing array mapping (i,j,k,ispec) to mesh points
+!! \param iregion_code The region the arrays are written for. Check
+!!                     constant.h files to see what these regions are.
+!! \param xstore Array with the x coordinates of the mesh points
+!! \param ystore Array with the y coordinates of the mesh points
+!! \param zstore Array with the z coordinates of the mesh points
+!! \param NSPEC2DMAX_XMIN_XMAX Integer to compute the size of the arrays
+!!                             in argument
+!! \param NSPEC2DMAX_YMIN_YMAX Integer to compute the size of the arrays
+!!                             in argument
+!! \param NSPEC2D_TOP Integer to compute the size of the arrays
+!!                    in argument
+!! \param NSPEC2D_BOTTOM Integer to compute the size of the arrays
+!!                       in argument
+subroutine save_arrays_solver_adios(myrank,nspec,nglob,idoubling,ibool, &
+                    iregion_code,xstore,ystore,zstore, &
+                    NSPEC2DMAX_XMIN_XMAX, NSPEC2DMAX_YMIN_YMAX, &
+                    NSPEC2D_TOP,NSPEC2D_BOTTOM)
+
+  use mpi
+  use adios_write_mod
+
+  use constants
+
+  use meshfem3D_models_par,only: &
+    OCEANS,TRANSVERSE_ISOTROPY,HETEROGEN_3D_MANTLE,ANISOTROPIC_3D_MANTLE, &
+    ANISOTROPIC_INNER_CORE,ATTENUATION
+
+  use meshfem3D_par,only: &
+    NCHUNKS,ABSORBING_CONDITIONS,SAVE_MESH_FILES, LOCAL_PATH, &
+    ADIOS_FOR_SOLVER_MESHFILES
+
+  use create_regions_mesh_par2,only: &
+    xixstore,xiystore,xizstore,etaxstore,etaystore,etazstore, &
+    gammaxstore,gammaystore,gammazstore, &
+    rhostore,dvpstore,kappavstore,kappahstore,muvstore,muhstore,eta_anisostore,&
+    c11store,c12store,c13store,c14store,c15store,c16store,c22store, &
+    c23store,c24store,c25store,c26store,c33store,c34store,c35store, &
+    c36store,c44store,c45store,c46store,c55store,c56store,c66store, &
+    rmassx,rmassy,rmassz,rmass_ocean_load, &
+    ibelm_xmin,ibelm_xmax,ibelm_ymin,ibelm_ymax,ibelm_bottom,ibelm_top, &
+    normal_xmin,normal_xmax,normal_ymin,normal_ymax,normal_bottom,normal_top, &
+    jacobian2D_xmin,jacobian2D_xmax,jacobian2D_ymin,jacobian2D_ymax, &
+    jacobian2D_bottom,jacobian2D_top, &
+    rho_vp,rho_vs, &
+    nspec2D_xmin,nspec2D_xmax,nspec2D_ymin,nspec2D_ymax, &
+    ispec_is_tiso,tau_s,T_c_source,tau_e_store,Qmu_store, &
+    prname, nspec_actually, nspec_ani, nspec_stacey, nglob_xy, nglob_oceans
+
+  implicit none
+
+  integer :: myrank
+  integer :: nspec,nglob
+
+  ! doubling mesh flag
+  integer, dimension(nspec) :: idoubling
+  integer,dimension(NGLLX,NGLLY,NGLLZ,nspec) :: ibool
+
+  integer :: iregion_code
+
+  ! arrays with the mesh in double precision
+  double precision,dimension(NGLLX,NGLLY,NGLLZ,nspec) :: xstore,ystore,zstore
+
+  ! boundary parameters locator
+  integer :: NSPEC2D_TOP,NSPEC2D_BOTTOM, &
+      NSPEC2DMAX_XMIN_XMAX, NSPEC2DMAX_YMIN_YMAX
+
+  ! local parameters
+  integer :: i,j,k,ispec,iglob,ier
+  real(kind=CUSTOM_REAL),dimension(:),allocatable :: tmp_array_x, &
+      tmp_array_y, tmp_array_z
+
+  ! local parameters
+  character(len=150) :: reg_name, outputname, group_name
+  integer :: ierr, sizeprocs, comm, local_dim
+  integer(kind=8) :: group_size_inc
+  ! ADIOS variables
+  integer                 :: adios_err
+  integer(kind=8)         :: adios_group, adios_handle, varid
+  integer(kind=8)         :: adios_groupsize, adios_totalsize
+
+  ! create a prefix for the file name such as LOCAL_PATH/regX_
+  call create_name_database_adios(reg_name,iregion_code,LOCAL_PATH)
+
+  !---------------------------------------------------------
+  !--- Solver data arrays ----------------------------------
+  !---------------------------------------------------------
+
+  ! create the name for the database of the current slide and region
+  outputname = trim(reg_name) // "solver_data.bp" 
+
+  ! save arrays for the solver to run.
+  write(group_name,"('SPECFEM3D_GLOBE_ARRAYS_SOLVER_reg',i1)") iregion_code
+  call world_size(sizeprocs) ! TODO keep it in parameters
+  ! Alias COMM_WORLD to use ADIOS
+  call MPI_Comm_dup (MPI_COMM_WORLD, comm, ierr)
+  ! set the adios group size to 0 before incremented by calls to 
+  ! helpers functions.
+  group_size_inc = 0
+  call adios_declare_group(adios_group, group_name, &
+      "", 0, adios_err)
+  ! We set the transport method to 'MPI'. This seems to be the correct choice
+  ! for now. We might want to move this to the constant.h file later on.
+  call adios_select_method(adios_group, "MPI", "", "", adios_err)
+
+  !--- Define ADIOS variables -----------------------------
+  ! save nspec and nglob, to be used in combine_paraview_data
+  call define_adios_integer_scalar (adios_group, "nspec", "", &
+      group_size_inc)
+  call define_adios_integer_scalar (adios_group, "nglob", "", &
+      group_size_inc)
+
+  local_dim = nglob 
+  call define_adios_global_real_1d_array(adios_group, "xstore", &
+      local_dim, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, "ystore", &
+      local_dim, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, "zstore", &
+      local_dim, group_size_inc)
+
+  local_dim = NGLLX * NGLLY * NGLLZ * nspec
+  call define_adios_global_real_1d_array(adios_group, "rhostore", &
+      local_dim, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, "kappavstore", &
+      local_dim, group_size_inc)
+  call define_adios_global_integer_1d_array(adios_group, "ibool", &
+      local_dim, group_size_inc)
+  if(iregion_code /= IREGION_OUTER_CORE) then
+    if(.not. (ANISOTROPIC_3D_MANTLE .and. &
+        iregion_code == IREGION_CRUST_MANTLE)) then
+      call define_adios_global_real_1d_array(adios_group, "muvstore", &
+          local_dim, group_size_inc)
+    endif
+    if(TRANSVERSE_ISOTROPY) then
+      if(iregion_code == IREGION_CRUST_MANTLE .and. &
+          .not. ANISOTROPIC_3D_MANTLE) then
+        call define_adios_global_real_1d_array(adios_group, "kappahstore", &
+            local_dim, group_size_inc)
+        call define_adios_global_real_1d_array(adios_group, "muhstore", &
+            local_dim, group_size_inc)
+        call define_adios_global_real_1d_array(adios_group, "eta_anisostore", &
+            local_dim, group_size_inc)
+      endif
+    endif
+  endif
+
+  local_dim = nspec
+  call define_adios_global_integer_1d_array(adios_group, "idoubling", &
+      local_dim, group_size_inc)
+  call define_adios_global_integer_1d_array(adios_group, "ispec_is_tiso", &
+      local_dim, group_size_inc)
+  local_dim = NGLLX * NGLLY * NGLLZ * nspec_actually
+  call define_adios_global_real_1d_array(adios_group, "xixstore", &
+      local_dim, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, "xiystore", &
+      local_dim, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, "xizstore", &
+      local_dim, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, "etaxstore", &
+      local_dim, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, "etaystore", &
+      local_dim, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, "etazstore", &
+      local_dim, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, "gammaxstore", &
+      local_dim, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, "gammaystore", &
+      local_dim, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, "gammazstore", &
+      local_dim, group_size_inc)
+
+  local_dim = NGLLX * NGLLY * NGLLZ * nspec_ani 
+  if(iregion_code /= IREGION_OUTER_CORE) then
+    !   save anisotropy in the inner core only
+    if(ANISOTROPIC_INNER_CORE .and. iregion_code == IREGION_INNER_CORE) then
+      call define_adios_global_real_1d_array(adios_group, "c11store", &
+          local_dim, group_size_inc)
+      call define_adios_global_real_1d_array(adios_group, "c33store", &
+          local_dim, group_size_inc)
+      call define_adios_global_real_1d_array(adios_group, "c12store", &
+          local_dim, group_size_inc)
+      call define_adios_global_real_1d_array(adios_group, "c13store", &
+          local_dim, group_size_inc)
+      call define_adios_global_real_1d_array(adios_group, "c44store", &
+          local_dim, group_size_inc)
+    endif
+    if(ANISOTROPIC_3D_MANTLE .and. iregion_code == IREGION_CRUST_MANTLE) then
+      call define_adios_global_real_1d_array(adios_group, "c11store", &
+          local_dim, group_size_inc)
+      call define_adios_global_real_1d_array(adios_group, "c12store", &
+          local_dim, group_size_inc)
+      call define_adios_global_real_1d_array(adios_group, "c13store", &
+          local_dim, group_size_inc)
+      call define_adios_global_real_1d_array(adios_group, "c14store", &
+          local_dim, group_size_inc)
+      call define_adios_global_real_1d_array(adios_group, "c15store", &
+          local_dim, group_size_inc)
+      call define_adios_global_real_1d_array(adios_group, "c16store", &
+          local_dim, group_size_inc)
+      call define_adios_global_real_1d_array(adios_group, "c22store", &
+          local_dim, group_size_inc)
+      call define_adios_global_real_1d_array(adios_group, "c23store", &
+          local_dim, group_size_inc)
+      call define_adios_global_real_1d_array(adios_group, "c24store", &
+          local_dim, group_size_inc)
+      call define_adios_global_real_1d_array(adios_group, "c25store", &
+          local_dim, group_size_inc)
+      call define_adios_global_real_1d_array(adios_group, "c26store", &
+          local_dim, group_size_inc)
+      call define_adios_global_real_1d_array(adios_group, "c33store", &
+          local_dim, group_size_inc)
+      call define_adios_global_real_1d_array(adios_group, "c34store", &
+          local_dim, group_size_inc)
+      call define_adios_global_real_1d_array(adios_group, "c35store", &
+          local_dim, group_size_inc)
+      call define_adios_global_real_1d_array(adios_group, "c36store", &
+          local_dim, group_size_inc)
+      call define_adios_global_real_1d_array(adios_group, "c44store", &
+          local_dim, group_size_inc)
+      call define_adios_global_real_1d_array(adios_group, "c45store", &
+          local_dim, group_size_inc)
+      call define_adios_global_real_1d_array(adios_group, "c46store", &
+          local_dim, group_size_inc)
+      call define_adios_global_real_1d_array(adios_group, "c55store", &
+          local_dim, group_size_inc)
+      call define_adios_global_real_1d_array(adios_group, "c56store", &
+          local_dim, group_size_inc)
+      call define_adios_global_real_1d_array(adios_group, "c66store", &
+          local_dim, group_size_inc)
+    endif
+  endif
+
+  local_dim = NGLLX * NGLLY * NGLLZ * nspec_stacey
+  if(ABSORBING_CONDITIONS) then
+    if(iregion_code == IREGION_CRUST_MANTLE) then
+      call define_adios_global_real_1d_array(adios_group, "rho_vp", &
+          local_dim, group_size_inc)
+      call define_adios_global_real_1d_array(adios_group, "rho_vs", &
+          local_dim, group_size_inc)
+    else if(iregion_code == IREGION_OUTER_CORE) then
+      call define_adios_global_real_1d_array(adios_group, "rho_vp", &
+          local_dim, group_size_inc)
+    endif
+  endif
+
+  local_dim = nglob_xy
+  if(NCHUNKS /= 6 .and. ABSORBING_CONDITIONS .and. &
+      iregion_code == IREGION_CRUST_MANTLE) then
+    call define_adios_global_real_1d_array(adios_group, "rmassx", &
+        local_dim, group_size_inc)
+    call define_adios_global_real_1d_array(adios_group, "rmassy", &
+        local_dim, group_size_inc)
+  endif
+  local_dim = nglob
+  call define_adios_global_real_1d_array(adios_group, "rmassz", &
+      local_dim, group_size_inc)
+
+  local_dim = nglob_oceans
+  if(OCEANS .and. iregion_code == IREGION_CRUST_MANTLE) then
+    call define_adios_global_real_1d_array(adios_group, "rmass_ocean_load", &
+        local_dim, group_size_inc)
+  endif
+
+  !--- Open an ADIOS handler to the restart file. ---------
+  call adios_open (adios_handle, group_name, &
+      outputname, "w", comm, adios_err);
+  call adios_group_size (adios_handle, group_size_inc, &
+                         adios_totalsize, adios_err)
+
+  ! mesh topology
+
+  ! mesh arrays used in the solver to locate source and receivers
+  ! and for anisotropy and gravity, save in single precision
+  ! use tmp_array for temporary storage to perform conversion
+  allocate(tmp_array_x(nglob),stat=ier)
+  if( ier /=0 ) call exit_MPI(myrank,&
+      'error allocating temporary array for mesh topology')
+  allocate(tmp_array_y(nglob),stat=ier)
+  if( ier /=0 ) call exit_MPI(myrank,&
+      'error allocating temporary array for mesh topology')
+  allocate(tmp_array_z(nglob),stat=ier)
+  if( ier /=0 ) call exit_MPI(myrank,&
+      'error allocating temporary array for mesh topology')
+
+  !--- x coordinate
+  tmp_array_x(:) = 0._CUSTOM_REAL
+  do ispec = 1,nspec
+    do k = 1,NGLLZ
+      do j = 1,NGLLY
+        do i = 1,NGLLX
+          iglob = ibool(i,j,k,ispec)
+          ! distinguish between single and double precision for reals
+          if(CUSTOM_REAL == SIZE_REAL) then
+            tmp_array_x(iglob) = sngl(xstore(i,j,k,ispec))
+          else
+            tmp_array_x(iglob) = xstore(i,j,k,ispec)
+          endif
+        enddo
+      enddo
+    enddo
+  enddo
+  !--- y coordinate
+  tmp_array_y(:) = 0._CUSTOM_REAL
+  do ispec = 1,nspec
+    do k = 1,NGLLZ
+      do j = 1,NGLLY
+        do i = 1,NGLLX
+          iglob = ibool(i,j,k,ispec)
+          ! distinguish between single and double precision for reals
+          if(CUSTOM_REAL == SIZE_REAL) then
+            tmp_array_y(iglob) = sngl(ystore(i,j,k,ispec))
+          else
+            tmp_array_y(iglob) = ystore(i,j,k,ispec)
+          endif
+        enddo
+      enddo
+    enddo
+  enddo
+  !--- z coordinate
+  tmp_array_z(:) = 0._CUSTOM_REAL
+  do ispec = 1,nspec
+    do k = 1,NGLLZ
+      do j = 1,NGLLY
+        do i = 1,NGLLX
+          iglob = ibool(i,j,k,ispec)
+          ! distinguish between single and double precision for reals
+          if(CUSTOM_REAL == SIZE_REAL) then
+            tmp_array_z(iglob) = sngl(zstore(i,j,k,ispec))
+          else
+            tmp_array_z(iglob) = zstore(i,j,k,ispec)
+          endif
+        enddo
+      enddo
+    enddo
+  enddo
+
+  !--- Schedule writes for the previously defined ADIOS variables
+  ! save nspec and nglob, to be used in combine_paraview_data
+  call adios_write(adios_handle, "nspec", nspec, adios_err)
+  call adios_write(adios_handle, "nglob", nglob, adios_err)
+
+  local_dim = nglob 
+  call adios_set_path (adios_handle, "xstore", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", tmp_array_x, adios_err)
+
+  call adios_set_path (adios_handle, "ystore", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", tmp_array_y, adios_err)
+  
+  call adios_set_path (adios_handle, "zstore", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", tmp_array_z, adios_err)
+
+  local_dim = NGLLX * NGLLY * NGLLZ * nspec
+  call adios_set_path (adios_handle, "rhostore", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", rhostore, adios_err)
+
+  call adios_set_path (adios_handle, "kappavstore", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", kappavstore, adios_err)
+
+  call adios_set_path (adios_handle, "ibool", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", ibool, adios_err)
+
+  if(iregion_code /= IREGION_OUTER_CORE) then
+    if(.not. (ANISOTROPIC_3D_MANTLE .and. &
+        iregion_code == IREGION_CRUST_MANTLE)) then
+      call adios_set_path (adios_handle, "muvstore", adios_err)
+      call write_1D_global_array_adios_dims(adios_handle, myrank, &
+          local_dim, sizeprocs)
+      call adios_write(adios_handle, "array", muvstore, adios_err)
+    endif
+    if(TRANSVERSE_ISOTROPY) then
+      if(iregion_code == IREGION_CRUST_MANTLE .and. &
+          .not. ANISOTROPIC_3D_MANTLE) then
+        call adios_set_path (adios_handle, "kappahstore", adios_err)
+        call write_1D_global_array_adios_dims(adios_handle, myrank, &
+            local_dim, sizeprocs)
+        call adios_write(adios_handle, "array", kappahstore, adios_err)
+
+        call adios_set_path (adios_handle, "muhstore", adios_err)
+        call write_1D_global_array_adios_dims(adios_handle, myrank, &
+            local_dim, sizeprocs)
+        call adios_write(adios_handle, "array", muhstore, adios_err)
+
+        call adios_set_path (adios_handle, "eta_anisostore", adios_err)
+        call write_1D_global_array_adios_dims(adios_handle, myrank, &
+            local_dim, sizeprocs)
+        call adios_write(adios_handle, "array", eta_anisostore, adios_err)
+      endif
+    endif
+  endif
+
+  local_dim = nspec
+  call adios_set_path (adios_handle, "idoubling", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", idoubling, adios_err)
+
+  call adios_set_path (adios_handle, "ispec_is_tiso", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", ispec_is_tiso, adios_err)
+
+  local_dim = NGLLX * NGLLY * NGLLZ * nspec_actually
+  call adios_set_path (adios_handle, "xixstore", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", xixstore, adios_err)
+
+  call adios_set_path (adios_handle, "xiystore", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", xiystore, adios_err)
+
+  call adios_set_path (adios_handle, "xizstore", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", xizstore, adios_err)
+
+  call adios_set_path (adios_handle, "etaxstore", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", etaxstore, adios_err)
+
+  call adios_set_path (adios_handle, "etaystore", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", etaystore, adios_err)
+
+  call adios_set_path (adios_handle, "etazstore", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", etazstore, adios_err)
+
+  call adios_set_path (adios_handle, "gammaxstore", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", gammaxstore, adios_err)
+
+  call adios_set_path (adios_handle, "gammaystore", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", gammaystore, adios_err)
+
+  call adios_set_path (adios_handle, "gammazstore", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", gammazstore, adios_err)
+
+  local_dim = NGLLX * NGLLY * NGLLZ * nspec_ani 
+  if(iregion_code /= IREGION_OUTER_CORE) then
+    !   save anisotropy in the inner core only
+    if(ANISOTROPIC_INNER_CORE .and. iregion_code == IREGION_INNER_CORE) then
+      call adios_set_path (adios_handle, "c11store", adios_err)
+      call write_1D_global_array_adios_dims(adios_handle, myrank, &
+          local_dim, sizeprocs)
+      call adios_write(adios_handle, "array", c11store, adios_err)
+
+      call adios_set_path (adios_handle, "c33store", adios_err)
+      call write_1D_global_array_adios_dims(adios_handle, myrank, &
+          local_dim, sizeprocs)
+      call adios_write(adios_handle, "array", c33store, adios_err)
+
+      call adios_set_path (adios_handle, "c12store", adios_err)
+      call write_1D_global_array_adios_dims(adios_handle, myrank, &
+          local_dim, sizeprocs)
+      call adios_write(adios_handle, "array", c12store, adios_err)
+
+      call adios_set_path (adios_handle, "c13store", adios_err)
+      call write_1D_global_array_adios_dims(adios_handle, myrank, &
+          local_dim, sizeprocs)
+      call adios_write(adios_handle, "array", c13store, adios_err)
+
+      call adios_set_path (adios_handle, "c44store", adios_err)
+      call write_1D_global_array_adios_dims(adios_handle, myrank, &
+          local_dim, sizeprocs)
+      call adios_write(adios_handle, "array", c44store, adios_err)
+    endif
+    if(ANISOTROPIC_3D_MANTLE .and. iregion_code == IREGION_CRUST_MANTLE) then
+      call adios_set_path (adios_handle, "c11store", adios_err)
+      call write_1D_global_array_adios_dims(adios_handle, myrank, &
+          local_dim, sizeprocs)
+      call adios_write(adios_handle, "array", c11store, adios_err)
+
+      call adios_set_path (adios_handle, "c12store", adios_err)
+      call write_1D_global_array_adios_dims(adios_handle, myrank, &
+          local_dim, sizeprocs)
+      call adios_write(adios_handle, "array", c12store, adios_err)
+
+      call adios_set_path (adios_handle, "c13store", adios_err)
+      call write_1D_global_array_adios_dims(adios_handle, myrank, &
+          local_dim, sizeprocs)
+      call adios_write(adios_handle, "array", c13store, adios_err)
+
+      call adios_set_path (adios_handle, "c14store", adios_err)
+      call write_1D_global_array_adios_dims(adios_handle, myrank, &
+          local_dim, sizeprocs)
+      call adios_write(adios_handle, "array", c14store, adios_err)
+
+      call adios_set_path (adios_handle, "c15store", adios_err)
+      call write_1D_global_array_adios_dims(adios_handle, myrank, &
+          local_dim, sizeprocs)
+      call adios_write(adios_handle, "array", c15store, adios_err)
+
+      call adios_set_path (adios_handle, "c16store", adios_err)
+      call write_1D_global_array_adios_dims(adios_handle, myrank, &
+          local_dim, sizeprocs)
+      call adios_write(adios_handle, "array", c16store, adios_err)
+
+      call adios_set_path (adios_handle, "c22store", adios_err)
+      call write_1D_global_array_adios_dims(adios_handle, myrank, &
+          local_dim, sizeprocs)
+      call adios_write(adios_handle, "array", c22store, adios_err)
+
+      call adios_set_path (adios_handle, "c23store", adios_err)
+      call write_1D_global_array_adios_dims(adios_handle, myrank, &
+          local_dim, sizeprocs)
+      call adios_write(adios_handle, "array", c23store, adios_err)
+
+      call adios_set_path (adios_handle, "c24store", adios_err)
+      call write_1D_global_array_adios_dims(adios_handle, myrank, &
+          local_dim, sizeprocs)
+      call adios_write(adios_handle, "array", c24store, adios_err)
+
+      call adios_set_path (adios_handle, "c25store", adios_err)
+      call write_1D_global_array_adios_dims(adios_handle, myrank, &
+          local_dim, sizeprocs)
+      call adios_write(adios_handle, "array", c25store, adios_err)
+
+      call adios_set_path (adios_handle, "c26store", adios_err)
+      call write_1D_global_array_adios_dims(adios_handle, myrank, &
+          local_dim, sizeprocs)
+      call adios_write(adios_handle, "array", c26store, adios_err)
+
+      call adios_set_path (adios_handle, "c33store", adios_err)
+      call write_1D_global_array_adios_dims(adios_handle, myrank, &
+          local_dim, sizeprocs)
+      call adios_write(adios_handle, "array", c33store, adios_err)
+
+      call adios_set_path (adios_handle, "c34store", adios_err)
+      call write_1D_global_array_adios_dims(adios_handle, myrank, &
+          local_dim, sizeprocs)
+      call adios_write(adios_handle, "array", c34store, adios_err)
+
+      call adios_set_path (adios_handle, "c35store", adios_err)
+      call write_1D_global_array_adios_dims(adios_handle, myrank, &
+          local_dim, sizeprocs)
+      call adios_write(adios_handle, "array", c35store, adios_err)
+
+      call adios_set_path (adios_handle, "c36store", adios_err)
+      call write_1D_global_array_adios_dims(adios_handle, myrank, &
+          local_dim, sizeprocs)
+      call adios_write(adios_handle, "array", c36store, adios_err)
+
+      call adios_set_path (adios_handle, "c44store", adios_err)
+      call write_1D_global_array_adios_dims(adios_handle, myrank, &
+          local_dim, sizeprocs)
+      call adios_write(adios_handle, "array", c44store, adios_err)
+
+      call adios_set_path (adios_handle, "c45store", adios_err)
+      call write_1D_global_array_adios_dims(adios_handle, myrank, &
+          local_dim, sizeprocs)
+      call adios_write(adios_handle, "array", c45store, adios_err)
+
+      call adios_set_path (adios_handle, "c46store", adios_err)
+      call write_1D_global_array_adios_dims(adios_handle, myrank, &
+          local_dim, sizeprocs)
+      call adios_write(adios_handle, "array", c46store, adios_err)
+
+      call adios_set_path (adios_handle, "c55store", adios_err)
+      call write_1D_global_array_adios_dims(adios_handle, myrank, &
+          local_dim, sizeprocs)
+      call adios_write(adios_handle, "array", c55store, adios_err)
+
+      call adios_set_path (adios_handle, "c56store", adios_err)
+      call write_1D_global_array_adios_dims(adios_handle, myrank, &
+          local_dim, sizeprocs)
+      call adios_write(adios_handle, "array", c56store, adios_err)
+
+      call adios_set_path (adios_handle, "c66store", adios_err)
+      call write_1D_global_array_adios_dims(adios_handle, myrank, &
+          local_dim, sizeprocs)
+      call adios_write(adios_handle, "array", c66store, adios_err)
+    endif
+  endif
+
+  local_dim = NGLLX * NGLLY * NGLLZ * nspec_stacey
+  if(ABSORBING_CONDITIONS) then
+    if(iregion_code == IREGION_CRUST_MANTLE) then
+      call adios_set_path (adios_handle, "rho_vp", adios_err)
+      call write_1D_global_array_adios_dims(adios_handle, myrank, &
+          local_dim, sizeprocs)
+      call adios_write(adios_handle, "array", rho_vp, adios_err)
+
+      call adios_set_path (adios_handle, "rho_vs", adios_err)
+      call write_1D_global_array_adios_dims(adios_handle, myrank, &
+          local_dim, sizeprocs)
+      call adios_write(adios_handle, "array", rho_vs, adios_err)
+
+    else if(iregion_code == IREGION_OUTER_CORE) then
+      call adios_set_path (adios_handle, "rho_vp", adios_err)
+      call write_1D_global_array_adios_dims(adios_handle, myrank, &
+          local_dim, sizeprocs)
+      call adios_write(adios_handle, "array", rho_vp, adios_err)
+    endif
+  endif
+
+  local_dim = nglob_xy
+  if(NCHUNKS /= 6 .and. ABSORBING_CONDITIONS .and. &
+      iregion_code == IREGION_CRUST_MANTLE) then
+    call adios_set_path (adios_handle, "rmassx", adios_err)
+    call write_1D_global_array_adios_dims(adios_handle, myrank, &
+        local_dim, sizeprocs)
+    call adios_write(adios_handle, "array", rmassx, adios_err)
+
+    call adios_set_path (adios_handle, "rmassy", adios_err)
+    call write_1D_global_array_adios_dims(adios_handle, myrank, &
+        local_dim, sizeprocs)
+    call adios_write(adios_handle, "array", rmassy, adios_err)
+  endif
+
+  local_dim = nglob
+  call adios_set_path (adios_handle, "rmassz", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", rmassz, adios_err)
+
+  local_dim = nglob_oceans
+  if(OCEANS .and. iregion_code == IREGION_CRUST_MANTLE) then
+    call adios_set_path (adios_handle, "rmass_ocean_load", adios_err)
+    call write_1D_global_array_adios_dims(adios_handle, myrank, &
+        local_dim, sizeprocs)
+    call adios_write(adios_handle, "array", rmass_ocean_load, adios_err)
+    if(minval(rmass_ocean_load) <= 0._CUSTOM_REAL) &
+        call exit_MPI(myrank,'negative mass matrix term for the oceans')
+  endif
+
+  !--- Reset the path to zero and perform the actual write to disk
+  call adios_set_path (adios_handle, "", adios_err)
+  call adios_close(adios_handle, adios_err)
+
+  ! Clean the temporary arrays containing the node information
+  deallocate(tmp_array_x)
+  deallocate(tmp_array_y)
+  deallocate(tmp_array_z)
+
+  !---------------------------------------------------------
+  !--- Boundary arrays -------------------------------------
+  !---------------------------------------------------------
+
+  ! Postpend the actual file name.
+  outputname = trim(reg_name) // "boundary.bp" 
+
+  ! save boundary arrays in ADIOS files 
+  write(group_name,"('SPECFEM3D_GLOBE_BOUNDARY_reg',i1)") iregion_code
+  ! set the adios group size to 0 before incremented by calls to 
+  ! helpers functions.
+  group_size_inc = 0
+  call adios_declare_group(adios_group, group_name, &
+      "", 0, adios_err)
+  call adios_select_method(adios_group, "MPI", "", "", adios_err)
+
+  !--- Define ADIOS variables -----------------------------
+  call define_adios_integer_scalar (adios_group, "nspec2D_xmin", "", &
+      group_size_inc)
+  call define_adios_integer_scalar (adios_group, "nspec2D_xmax", "", &
+      group_size_inc)
+  call define_adios_integer_scalar (adios_group, "nspec2D_ymin", "", &
+      group_size_inc)
+  call define_adios_integer_scalar (adios_group, "nspec2D_ymax", "", &
+      group_size_inc)
+  call define_adios_integer_scalar (adios_group, "NSPEC2D_BOTTOM", "", &
+      group_size_inc)
+  call define_adios_integer_scalar (adios_group, "NSPEC2D_TOP", "", &
+      group_size_inc)
+  
+  !local_dim = NSPEC2DMAX_XMIN_YMAX
+  local_dim = size (ibelm_xmin) 
+  call define_adios_global_integer_1d_array(adios_group, "ibelm_xmin", &
+      local_dim, group_size_inc)
+  call define_adios_global_integer_1d_array(adios_group, "ibelm_xmax", &
+      local_dim, group_size_inc)
+
+  !local_dim = NSPEC2DMAX_YMIN_YMAX
+  local_dim = size (ibelm_ymin) 
+  call define_adios_global_integer_1d_array(adios_group, "ibelm_ymin", &
+      local_dim, group_size_inc)
+  call define_adios_global_integer_1d_array(adios_group, "ibelm_ymax", &
+      local_dim, group_size_inc)
+
+  !local_dim = NSPEC2D_BOTTOM
+  local_dim = size (ibelm_bottom) 
+  call define_adios_global_integer_1d_array(adios_group, "ibelm_bottom", &
+      local_dim, group_size_inc)
+
+  !local_dim = NSPEC2D_TOP
+  local_dim = size (ibelm_top) 
+  call define_adios_global_integer_1d_array(adios_group, "ibelm_top", &
+      local_dim, group_size_inc)
+
+  !local_dim = NDIM*NGLLY*NGLLZ*NSPEC2DMAX_XMIN_XMAX
+  local_dim = size (normal_xmin) 
+  call define_adios_global_real_1d_array(adios_group, "normal_xmin", &
+      local_dim, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, "normal_xmax", &
+      local_dim, group_size_inc)
+
+  !local_dim = NDIM*NGLLX*NGLLZ*NSPEC2DMAX_YMIN_YMAX
+  local_dim = size (normal_ymin) 
+  call define_adios_global_real_1d_array(adios_group, "normal_ymin", &
+      local_dim, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, "normal_ymax", &
+      local_dim, group_size_inc)
+
+  !local_dim = NDIM*NGLLX*NGLLY*NSPEC2D_BOTTOM
+  local_dim = size (normal_bottom) 
+  call define_adios_global_real_1d_array(adios_group, "normal_bottom", &
+      local_dim, group_size_inc)
+
+  !local_dim = NDIM*NGLLX*NGLLY*NSPEC2D_TOP
+  local_dim = size (normal_top) 
+  call define_adios_global_real_1d_array(adios_group, "normal_top", &
+      local_dim, group_size_inc)
+
+  !local_dim = NGLLY*NGLLZ*NSPEC2DMAX_XMIN_XMAX
+  local_dim = size (jacobian2D_xmin) 
+  call define_adios_global_real_1d_array(adios_group, "jacobian2D_xmin", &
+      local_dim, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, "jacobian2D_xmax", &
+      local_dim, group_size_inc)
+  !local_dim = NDIM*NGLLX*NGLLZ*NSPEC2DMAX_YMIN_YMAX
+  local_dim = size (jacobian2D_ymin) 
+  call define_adios_global_real_1d_array(adios_group, "jacobian2D_ymin", &
+      local_dim, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, "jacobian2D_ymax", &
+      local_dim, group_size_inc)
+  !local_dim = NDIM*NGLLX*NGLLY*NSPEC2D_BOTTOM
+  local_dim = size (jacobian2D_bottom) 
+  call define_adios_global_real_1d_array(adios_group, "jacobian2D_bottom", &
+      local_dim, group_size_inc)
+  !local_dim = NDIM*NGLLX*NGLLY*NSPEC2D_TOP
+  local_dim = size (jacobian2D_top) 
+  call define_adios_global_real_1d_array(adios_group, "jacobian2D_top", &
+      local_dim, group_size_inc)
+
+  !--- Open an ADIOS handler to the restart file. ---------
+  call adios_open (adios_handle, group_name, &
+      outputname, "w", comm, adios_err);
+  call adios_group_size (adios_handle, group_size_inc, &
+                         adios_totalsize, adios_err)
+
+  !--- Schedule writes for the previously defined ADIOS variables
+  call adios_write(adios_handle, "nspec2D_xmin", nspec2D_xmin, adios_err)
+  call adios_write(adios_handle, "nspec2D_xmax", nspec2D_xmax, adios_err)
+  call adios_write(adios_handle, "nspec2D_ymin", nspec2D_ymin, adios_err)
+  call adios_write(adios_handle, "nspec2D_ymax", nspec2D_ymax, adios_err)
+  call adios_write(adios_handle, "NSPEC2D_BOTTOM", NSPEC2D_BOTTOM, adios_err)
+  call adios_write(adios_handle, "NSPEC2D_TOP", NSPEC2D_TOP, adios_err)
+  
+  !local_dim = NSPEC2DMAX_XMIN_XMAX
+  local_dim = size (ibelm_xmin) 
+  call adios_set_path (adios_handle, "ibelm_xmin", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", ibelm_xmin, adios_err)
+  call adios_set_path (adios_handle, "ibelm_xmax", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", ibelm_xmax, adios_err)
+
+  !local_dim = NSPEC2DMAX_YMIN_YMAX
+  local_dim = size (ibelm_ymin) 
+  call adios_set_path (adios_handle, "ibelm_ymin", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", ibelm_ymin, adios_err)
+  call adios_set_path (adios_handle, "ibelm_ymax", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", ibelm_ymax, adios_err)
+
+  !local_dim = NSPEC2D_BOTTOM
+  local_dim = size (ibelm_bottom) 
+  call adios_set_path (adios_handle, "ibelm_bottom", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", ibelm_bottom, adios_err)
+
+  !local_dim = NSPEC2D_TOP
+  local_dim = size (ibelm_top) 
+  call adios_set_path (adios_handle, "ibelm_top", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", ibelm_top, adios_err)
+
+  !local_dim = NDIM*NGLLY*NGLLZ*NSPEC2DMAX_XMIN_XMAX
+  local_dim = size (normal_xmin) 
+  call adios_set_path (adios_handle, "normal_xmin", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", normal_xmin, adios_err)
+  call adios_set_path (adios_handle, "normal_xmax", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", normal_xmax, adios_err)
+
+  !local_dim = NDIM*NGLLX*NGLLZ*NSPEC2DMAX_YMIN_YMAX
+  local_dim = size (normal_ymin) 
+  call adios_set_path (adios_handle, "normal_ymin", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", normal_ymin, adios_err)
+  call adios_set_path (adios_handle, "normal_ymax", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", normal_ymax, adios_err)
+
+  !local_dim = NDIM*NGLLX*NGLLY*NSPEC2D_BOTTOM
+  local_dim = size (normal_bottom) 
+  call adios_set_path (adios_handle, "normal_bottom", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", normal_bottom, adios_err)
+
+  !local_dim = NDIM*NGLLX*NGLLY*NSPEC2D_TOP
+  local_dim = size (normal_top) 
+  call adios_set_path (adios_handle, "normal_top", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", normal_top, adios_err)
+
+  !local_dim = NGLLY*NGLLZ*NSPEC2DMAX_XMIN_XMAX
+  local_dim = size (jacobian2D_xmin) 
+  call adios_set_path (adios_handle, "jacobian2D_xmin", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", jacobian2D_xmin, adios_err)
+  call adios_set_path (adios_handle, "jacobian2D_xmax", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", jacobian2D_xmax, adios_err)
+
+  !local_dim = NDIM*NGLLX*NGLLZ*NSPEC2DMAX_YMIN_YMAX
+  local_dim = size (jacobian2D_ymin) 
+  call adios_set_path (adios_handle, "jacobian2D_ymin", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", jacobian2D_ymin, adios_err)
+  call adios_set_path (adios_handle, "jacobian2D_ymax", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", jacobian2D_ymax, adios_err)
+
+  !local_dim = NDIM*NGLLX*NGLLY*NSPEC2D_BOTTOM
+  local_dim = size (jacobian2D_bottom) 
+  call adios_set_path (adios_handle, "jacobian2D_bottom", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", jacobian2D_bottom, adios_err)
+
+  !local_dim = NDIM*NGLLX*NGLLY*NSPEC2D_TOP
+  local_dim = size (jacobian2D_top) 
+  call adios_set_path (adios_handle, "jacobian2D_top", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", jacobian2D_top, adios_err)
+
+  !--- Reset the path to zero and perform the actual write to disk
+  call adios_set_path (adios_handle, "", adios_err)
+  call adios_close(adios_handle, adios_err)
+
+  !---------------------------------------------------------
+  !--- Attenuation arrays ----------------------------------
+  !---------------------------------------------------------
+  if(ATTENUATION) then
+    outputname = trim(reg_name) // "attenuation.bp" 
+    write(group_name,"('SPECFEM3D_GLOBE_ATTENUATION_reg',i1)") iregion_code
+    group_size_inc = 0
+    call adios_declare_group(adios_group, group_name, &
+        "", 0, adios_err)
+    call adios_select_method(adios_group, "MPI", "", "", adios_err)
+
+    !--- Define ADIOS variables -----------------------------
+    call define_adios_double_scalar(adios_group, "T_c_source", "", &
+        group_size_inc)
+
+    local_dim = size(tau_s)
+    call define_adios_global_double_1d_array(adios_group, "tau_s", &
+        local_dim, group_size_inc)
+    local_dim = size(tau_e_store)
+    call define_adios_global_double_1d_array(adios_group, "tau_e_store", &
+        local_dim, group_size_inc)
+    local_dim = size(Qmu_store)
+    call define_adios_global_double_1d_array(adios_group, "Qmu_store", &
+        local_dim, group_size_inc)
+
+    !--- Open an ADIOS handler to the restart file. ---------
+    call adios_open (adios_handle, group_name, &
+        outputname, "w", comm, adios_err);
+    call adios_group_size (adios_handle, group_size_inc, &
+                           adios_totalsize, adios_err)
+
+    !--- Schedule writes for the previously defined ADIOS variables
+    call adios_write(adios_handle, "T_c_source", T_c_source, adios_err)
+
+    local_dim = size (tau_s) 
+    call adios_set_path (adios_handle, "tau_s", adios_err)
+    call write_1D_global_array_adios_dims(adios_handle, myrank, &
+        local_dim, sizeprocs)
+    call adios_write(adios_handle, "array", tau_s, adios_err)
+    local_dim = size (tau_e_store) 
+    call adios_set_path (adios_handle, "tau_e_store", adios_err)
+    call write_1D_global_array_adios_dims(adios_handle, myrank, &
+        local_dim, sizeprocs)
+    call adios_write(adios_handle, "array", tau_e_store, adios_err)
+    local_dim = size (Qmu_store) 
+    call adios_set_path (adios_handle, "Qmu_store", adios_err)
+    call write_1D_global_array_adios_dims(adios_handle, myrank, &
+        local_dim, sizeprocs)
+    call adios_write(adios_handle, "array", Qmu_store, adios_err)
+
+    !--- Reset the path to zero and perform the actual write to disk
+    call adios_set_path (adios_handle, "", adios_err)
+    call adios_close(adios_handle, adios_err)
+  endif
+
+  !---------------------------------------------------------
+  !--- dvp arrays ------------------------------------------
+  !---------------------------------------------------------
+  if(HETEROGEN_3D_MANTLE .and. iregion_code == IREGION_CRUST_MANTLE) then
+    outputname = trim(reg_name) // "dvp.bp" 
+    write(group_name,"('SPECFEM3D_GLOBE_DVP_reg',i1)") iregion_code
+    group_size_inc = 0
+    call adios_declare_group(adios_group, group_name, &
+        "", 0, adios_err)
+    call adios_select_method(adios_group, "MPI", "", "", adios_err)
+
+    !--- Define ADIOS variables -----------------------------
+    local_dim = size (dvpstore) 
+    call define_adios_global_real_1d_array(adios_group, "dvp", &
+        local_dim, group_size_inc)
+    !--- Open an ADIOS handler to the restart file. ---------
+    call adios_open (adios_handle, group_name, &
+        outputname, "w", comm, adios_err);
+    call adios_group_size (adios_handle, group_size_inc, &
+                           adios_totalsize, adios_err)
+    call adios_set_path (adios_handle, "dvp", adios_err)
+    !--- Schedule writes for the previously defined ADIOS variables
+    call write_1D_global_array_adios_dims(adios_handle, myrank, &
+        local_dim, sizeprocs)
+    call adios_write(adios_handle, "array", dvpstore, adios_err)
+
+    !--- Reset the path to zero and perform the actual write to disk
+    call adios_set_path (adios_handle, "", adios_err)
+    call adios_close(adios_handle, adios_err)
+  endif
+
+  !---------------------------------------------------------
+  !--- meshfiles arrays ------------------------------------
+  !---------------------------------------------------------
+  ! uncomment for vp & vs model storage
+  if( SAVE_MESH_FILES ) then
+    ! outputs model files in binary format
+    if (ADIOS_FOR_SOLVER_MESHFILES) then
+      call save_arrays_solver_meshfiles_adios(myrank,iregion_code, &
+          reg_name, nspec)
+    else
+      call save_arrays_solver_meshfiles(myrank,nspec)
+    endif
+  endif
+
+end subroutine save_arrays_solver_adios
+
+
+!===============================================================================
+!> \brief Save the meshfiles that will be used by the solver in an ADIOS format.
+!! 
+!! \param myrank The MPI rank of the current process.
+!! \param iregion_code Code of the region considered. See constant.h for details
+!! \param reg_name Output file prefix with the name of the region included
+!! \param nspec Number of spectral elements in the region
+subroutine save_arrays_solver_meshfiles_adios(myrank, iregion_code, &
+    reg_name, nspec)
+
+  ! outputs model files (vp, vs, rho and, when enabled, transverse-isotropy
+  ! and attenuation parameters) in ADIOS binary format
+  use mpi
+  use adios_write_mod
+  use constants
+
+  use meshfem3D_models_par,only: &
+    TRANSVERSE_ISOTROPY,ATTENUATION
+
+  use create_regions_mesh_par2,only: &
+    rhostore,kappavstore,kappahstore,muvstore,muhstore,eta_anisostore, &
+    Qmu_store, &
+    prname
+
+  implicit none
+
+  integer :: myrank, nspec, iregion_code
+  character(len=150) :: reg_name
+
+  ! local parameters
+  integer :: i,j,k,ispec,ier
+  real(kind=CUSTOM_REAL) :: scaleval1,scaleval2
+  ! temporary full 3D array used to expand attenuation values before writing
+  real(kind=CUSTOM_REAL),dimension(:,:,:,:),allocatable :: temp_store
+  character(len=150) :: outputname, group_name
+  integer :: ierr, sizeprocs, comm, local_dim
+  ! running total of the ADIOS group size, incremented by the define helpers
+  integer(kind=8) :: group_size_inc
+  ! ADIOS variables
+  integer                 :: adios_err
+  integer(kind=8)         :: adios_group, adios_handle
+  integer(kind=8)         :: adios_totalsize
+
+  ! scaling factors to re-dimensionalize units
+  scaleval1 = sngl( sqrt(PI*GRAV*RHOAV)*(R_EARTH/1000.0d0) )
+  scaleval2 = sngl( RHOAV/1000.0d0 )
+
+  call world_size(sizeprocs) ! TODO keep it in parameters
+  call MPI_Comm_dup (MPI_COMM_WORLD, comm, ierr)
+
+  ! isotropic model
+  outputname = trim(reg_name) // "solver_meshfiles.bp"
+  write(group_name,"('SPECFEM3D_GLOBE_solver_meshfiles_reg',i1)") iregion_code
+
+  ! set the adios group size to 0 before it is incremented by calls to the
+  ! helper functions below
+  group_size_inc = 0
+
+  call adios_declare_group(adios_group, group_name, &
+      "", 0, adios_err)
+  call adios_select_method(adios_group, "MPI", "", "", adios_err)
+
+  !--- Define ADIOS variables -----------------------------
+  ! NOTE(review): the model arrays are assumed to share the shape
+  ! (NGLLX,NGLLY,NGLLZ,nspec), hence size(kappavstore) and size(rhostore)
+  ! are used interchangeably as local_dim -- confirm in create_regions_mesh
+  !--- vp arrays -------------------------------------------
+  local_dim = size (kappavstore)
+  call define_adios_global_real_1d_array(adios_group, "vp", &
+      local_dim, group_size_inc)
+  !--- vs arrays -------------------------------------------
+  local_dim = size (rhostore)
+  call define_adios_global_real_1d_array(adios_group, "vs", &
+      local_dim, group_size_inc)
+  !--- rho arrays ------------------------------------------
+  local_dim = size (rhostore)
+  call define_adios_global_real_1d_array(adios_group, "rho", &
+      local_dim, group_size_inc)
+  ! transverse isotropic model
+  if( TRANSVERSE_ISOTROPY ) then
+    !--- vpv arrays ----------------------------------------
+    local_dim = size (kappavstore)
+    call define_adios_global_real_1d_array(adios_group, "vpv", &
+        local_dim, group_size_inc)
+    !--- vph arrays ----------------------------------------
+    local_dim = size (kappavstore)
+    call define_adios_global_real_1d_array(adios_group, "vph", &
+        local_dim, group_size_inc)
+    !--- vsv arrays ----------------------------------------
+    local_dim = size (rhostore)
+    call define_adios_global_real_1d_array(adios_group, "vsv", &
+        local_dim, group_size_inc)
+    !--- vsh arrays ----------------------------------------
+    local_dim = size (rhostore)
+    call define_adios_global_real_1d_array(adios_group, "vsh", &
+        local_dim, group_size_inc)
+    !--- eta arrays ----------------------------------------
+    local_dim = size (eta_anisostore)
+    call define_adios_global_real_1d_array(adios_group, "eta", &
+        local_dim, group_size_inc)
+  endif
+  if( ATTENUATION ) then
+    !--- Qmu arrays ----------------------------------------
+    ! qmu is always written as a full 3D array (see temp_store below)
+    local_dim = NGLLX * NGLLY * NGLLZ * nspec
+    call define_adios_global_real_1d_array(adios_group, "qmu", &
+        local_dim, group_size_inc)
+  endif
+
+  !--- Open an ADIOS handler to the mesh file. ------------
+  call adios_open (adios_handle, group_name, &
+      outputname, "w", comm, adios_err)
+  call adios_group_size (adios_handle, group_size_inc, &
+                         adios_totalsize, adios_err)
+
+  !--- Schedule writes for the previously defined ADIOS variables
+  !--- vp arrays -------------------------------------------
+  local_dim = size (kappavstore)
+  call adios_set_path (adios_handle, "vp", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  ! vp = sqrt((kappa + 4/3 mu)/rho), re-dimensionalized to km/s
+  call adios_write(adios_handle, "array", &
+      sqrt( (kappavstore+4.*muvstore/3.)/rhostore )*scaleval1, &
+      adios_err)
+  !--- vs arrays -------------------------------------------
+  local_dim = size (rhostore)
+  call adios_set_path (adios_handle, "vs", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  ! vs = sqrt(mu/rho), re-dimensionalized to km/s
+  call adios_write(adios_handle, "array", &
+      sqrt( muvstore/rhostore )*scaleval1, &
+      adios_err)
+  !--- rho arrays ------------------------------------------
+  local_dim = size (rhostore)
+  call adios_set_path (adios_handle, "rho", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", &
+      rhostore *scaleval2, &
+      adios_err)
+
+  ! transverse isotropic model
+  if( TRANSVERSE_ISOTROPY ) then
+    !--- vpv arrays ----------------------------------------
+    local_dim = size (kappavstore)
+    call adios_set_path (adios_handle, "vpv", adios_err)
+    call write_1D_global_array_adios_dims(adios_handle, myrank, &
+        local_dim, sizeprocs)
+    call adios_write(adios_handle, "array", &
+        sqrt( (kappavstore+4.*muvstore/3.)/rhostore )*scaleval1, &
+        adios_err)
+    !--- vph arrays ----------------------------------------
+    local_dim = size (kappavstore)
+    call adios_set_path (adios_handle, "vph", adios_err)
+    call write_1D_global_array_adios_dims(adios_handle, myrank, &
+        local_dim, sizeprocs)
+    call adios_write(adios_handle, "array", &
+        sqrt( (kappahstore+4.*muhstore/3.)/rhostore )*scaleval1, &
+        adios_err)
+    !--- vsv arrays ----------------------------------------
+    local_dim = size (rhostore)
+    call adios_set_path (adios_handle, "vsv", adios_err)
+    call write_1D_global_array_adios_dims(adios_handle, myrank, &
+        local_dim, sizeprocs)
+    call adios_write(adios_handle, "array", &
+        sqrt( muvstore/rhostore )*scaleval1, &
+        adios_err)
+    !--- vsh arrays ----------------------------------------
+    local_dim = size (rhostore)
+    call adios_set_path (adios_handle, "vsh", adios_err)
+    call write_1D_global_array_adios_dims(adios_handle, myrank, &
+        local_dim, sizeprocs)
+    call adios_write(adios_handle, "array", &
+        sqrt( muhstore/rhostore )*scaleval1, &
+        adios_err)
+    !--- eta arrays ----------------------------------------
+    ! eta is dimensionless, no scaling needed
+    local_dim = size (eta_anisostore)
+    call adios_set_path (adios_handle, "eta", adios_err)
+    call write_1D_global_array_adios_dims(adios_handle, myrank, &
+        local_dim, sizeprocs)
+    call adios_write(adios_handle, "array", &
+        eta_anisostore, &
+        adios_err)
+  endif ! TRANSVERSE_ISOTROPY
+
+  ! shear attenuation
+  if( ATTENUATION ) then
+    !-------------------------------------------------------
+    !--- Qmu arrays ----------------------------------------
+    !-------------------------------------------------------
+    ! saves Qmu_store to full custom_real array
+    ! uses temporary array
+    allocate(temp_store(NGLLX,NGLLY,NGLLZ,nspec),stat=ier)
+    if( ier /= 0 ) call exit_MPI(myrank,'error allocating temp_store')
+    if (USE_3D_ATTENUATION_ARRAYS) then
+      ! attenuation arrays are fully 3D
+      if(CUSTOM_REAL == SIZE_REAL) then
+        temp_store(:,:,:,:) = sngl(Qmu_store(:,:,:,:))
+      else
+        temp_store(:,:,:,:) = Qmu_store(:,:,:,:)
+      endif
+    else
+      ! attenuation array dimensions: Q_mustore(1,1,1,nspec)
+      ! broadcast the single per-element value to every GLL point
+      do ispec = 1,nspec
+        do k = 1,NGLLZ
+          do j = 1,NGLLY
+            do i = 1,NGLLX
+              ! distinguish between single and double precision for reals
+              if(CUSTOM_REAL == SIZE_REAL) then
+                temp_store(i,j,k,ispec) = sngl(Qmu_store(1,1,1,ispec))
+              else
+                temp_store(i,j,k,ispec) = Qmu_store(1,1,1,ispec)
+              endif
+            enddo
+          enddo
+        enddo
+      enddo
+    endif
+
+    local_dim = NGLLX * NGLLY * NGLLZ * nspec
+    call adios_set_path (adios_handle, "qmu", adios_err)
+    call write_1D_global_array_adios_dims(adios_handle, myrank, &
+        local_dim, sizeprocs)
+    call adios_write(adios_handle, "array", &
+        temp_store, &
+        adios_err)
+
+    ! frees temporary memory
+    deallocate(temp_store)
+  endif ! ATTENUATION
+
+  !--- Reset the path to zero and perform the actual write to disk
+  call adios_set_path (adios_handle, "", adios_err)
+  call adios_close(adios_handle, adios_err)
+
+end subroutine save_arrays_solver_meshfiles_adios
+
+
+!===============================================================================
+!> \brief Save the arrays used by the solver for MPI communications.
+!! 
+!! \param myrank The MPI rank of the current process.
+!! \param iregion_code Code of the region considered. See constant.h for details
+!! \param LOCAL_PATH The full path to the output directory
+!! \param num_interfaces The number of interfaces between processors
+!! \param max_nibool_interfaces
+!! \param my_neighbours
+!! \param nibool_interfaces
+!! \param ibool_interfaces
+!! \param nspec_inner Number of spectral elements in the inner core
+!! \param nspec_outer Number of spectral elements in the outer core
+!! \param num_phase_ispec
+!! \param phase_ispec_inner
+!! \param num_colors_inner Number of colors for GPU computing in the inner core.
+!! \param num_colors_outer Number of colors for GPU computing in the outer core.
+subroutine save_MPI_arrays_adios(myrank,iregion_code,LOCAL_PATH, &
+   num_interfaces,max_nibool_interfaces, my_neighbours,nibool_interfaces, &
+   ibool_interfaces, nspec_inner,nspec_outer, num_phase_ispec, &
+   phase_ispec_inner, num_colors_outer,num_colors_inner, num_elem_colors)
+
+  use mpi
+  use adios_write_mod
+  implicit none
+
+  include "constants.h"
+
+  integer :: iregion_code,myrank
+  character(len=150) :: LOCAL_PATH
+  ! MPI interfaces
+  integer :: num_interfaces,max_nibool_interfaces
+  integer, dimension(num_interfaces) :: my_neighbours
+  integer, dimension(num_interfaces) :: nibool_interfaces
+  integer, dimension(max_nibool_interfaces,num_interfaces) :: &
+      ibool_interfaces
+  ! inner/outer elements
+  integer :: nspec_inner,nspec_outer
+  integer :: num_phase_ispec
+  integer,dimension(num_phase_ispec,2) :: phase_ispec_inner
+  ! mesh coloring
+  integer :: num_colors_outer,num_colors_inner
+  integer, dimension(num_colors_outer + num_colors_inner) :: &
+    num_elem_colors
+
+  ! local parameters
+  character(len=150) :: prname, outputname, group_name
+  integer :: ierr, sizeprocs, comm, local_dim
+  ! running total of the ADIOS group size, incremented by the define helpers
+  integer(kind=8) :: group_size_inc
+  ! ADIOS variables
+  integer                 :: adios_err
+  integer(kind=8)         :: adios_group, adios_handle
+  integer(kind=8)         :: adios_totalsize
+
+  ! create the name for the database of the current slide and region
+  call create_name_database_adios(prname,iregion_code,LOCAL_PATH)
+
+  outputname = trim(prname) // "solver_data_mpi.bp"
+  write(group_name,"('SPECFEM3D_GLOBE_MPI_ARRAYS_reg',i1)") iregion_code
+  call world_size(sizeprocs) ! TODO keep it in parameters
+  call MPI_Comm_dup (MPI_COMM_WORLD, comm, ierr)
+  ! set the adios group size to 0 before it is incremented by the
+  ! helper functions below
+  group_size_inc = 0
+  call adios_declare_group(adios_group, group_name, &
+      "", 0, adios_err)
+  call adios_select_method(adios_group, "MPI", "", "", adios_err)
+
+  !--- Define ADIOS variables -----------------------------
+  !! MPI interfaces
+  call define_adios_integer_scalar (adios_group, "num_interfaces", "", &
+      group_size_inc)
+  if( num_interfaces > 0 ) then
+    call define_adios_integer_scalar(adios_group, "max_nibool_interfaces", &
+        "", group_size_inc)
+    call define_adios_global_integer_1d_array(adios_group, "my_neighbours", &
+        num_interfaces, group_size_inc)
+    call define_adios_global_integer_1d_array(adios_group, "nibool_interfaces",&
+        num_interfaces, group_size_inc)
+    local_dim = max_nibool_interfaces*num_interfaces
+    call define_adios_global_integer_1d_array(adios_group, "ibool_interfaces", &
+        local_dim, group_size_inc)
+  endif
+
+  ! inner/outer elements
+  call define_adios_integer_scalar (adios_group, "nspec_inner", "", &
+      group_size_inc)
+  call define_adios_integer_scalar (adios_group, "nspec_outer", "", &
+      group_size_inc)
+  call define_adios_integer_scalar (adios_group, "num_phase_ispec", "", &
+      group_size_inc)
+  if(num_phase_ispec > 0 ) then
+    local_dim = num_phase_ispec * 2
+    call define_adios_global_integer_1d_array(adios_group, "phase_ispec_inner", &
+        local_dim, group_size_inc)
+  endif
+
+  ! mesh coloring
+  if( USE_MESH_COLORING_GPU ) then
+    call define_adios_integer_scalar (adios_group, "num_colors_outer", "", &
+        group_size_inc)
+    call define_adios_integer_scalar (adios_group, "num_colors_inner", "", &
+        group_size_inc)
+    call define_adios_global_integer_1d_array(adios_group, "num_elem_colors", &
+        num_colors_outer + num_colors_inner, group_size_inc)
+  endif
+
+  !--- Open an ADIOS handler to the output file. ----------
+  call adios_open (adios_handle, group_name, &
+      outputname, "w", comm, adios_err)
+  call adios_group_size (adios_handle, group_size_inc, &
+                         adios_totalsize, adios_err)
+
+  !--- Schedule writes for the previously defined ADIOS variables
+  ! MPI interfaces
+  call adios_write(adios_handle, "num_interfaces", num_interfaces, adios_err)
+  if( num_interfaces > 0 ) then
+    call adios_write(adios_handle, "max_nibool_interfaces", &
+        max_nibool_interfaces, adios_err)
+
+    local_dim = num_interfaces
+
+    call adios_set_path (adios_handle, "my_neighbours", adios_err)
+    call write_1D_global_array_adios_dims(adios_handle, myrank, &
+        local_dim, sizeprocs)
+    call adios_write(adios_handle, "array", my_neighbours, adios_err)
+
+    call adios_set_path (adios_handle, "nibool_interfaces", adios_err)
+    call write_1D_global_array_adios_dims(adios_handle, myrank, &
+        local_dim, sizeprocs)
+    call adios_write(adios_handle, "array", nibool_interfaces, adios_err)
+
+    local_dim = max_nibool_interfaces * num_interfaces
+
+    call adios_set_path (adios_handle, "ibool_interfaces", adios_err)
+    call write_1D_global_array_adios_dims(adios_handle, myrank, &
+        local_dim, sizeprocs)
+    call adios_write(adios_handle, "array", &
+        ibool_interfaces, adios_err)
+    call adios_set_path (adios_handle, "", adios_err)
+  endif
+
+  ! inner/outer elements
+  call adios_write(adios_handle, "nspec_inner", nspec_inner, adios_err)
+  call adios_write(adios_handle, "nspec_outer", nspec_outer, adios_err)
+  call adios_write(adios_handle, "num_phase_ispec", num_phase_ispec, adios_err)
+
+  if(num_phase_ispec > 0 ) then
+    local_dim = num_phase_ispec * 2
+    call adios_set_path (adios_handle, "phase_ispec_inner", adios_err)
+    call write_1D_global_array_adios_dims(adios_handle, myrank, &
+        local_dim, sizeprocs)
+    call adios_write(adios_handle, "array", &
+        phase_ispec_inner, adios_err)
+    call adios_set_path (adios_handle, "", adios_err)
+  endif
+
+  ! mesh coloring
+  if( USE_MESH_COLORING_GPU ) then
+    ! bug fix: write the actual color counts; the previous code wrote
+    ! nspec_inner into both scalars by copy-paste mistake
+    call adios_write(adios_handle, "num_colors_outer", num_colors_outer, &
+        adios_err)
+    call adios_write(adios_handle, "num_colors_inner", num_colors_inner, &
+        adios_err)
+    local_dim = num_colors_outer + num_colors_inner
+    call adios_set_path (adios_handle, "num_elem_colors", adios_err)
+    call write_1D_global_array_adios_dims(adios_handle, myrank, &
+        local_dim, sizeprocs)
+    call adios_write(adios_handle, "array", &
+        num_elem_colors, adios_err)
+    call adios_set_path (adios_handle, "", adios_err)
+  endif
+
+  !--- Reset the path to zero and perform the actual write to disk
+  call adios_set_path (adios_handle, "", adios_err)
+  call adios_close(adios_handle, adios_err)
+
+end subroutine save_MPI_arrays_adios
+
+
+!===============================================================================
+!> \brief Write boundary conditions (MOHO, 400, 600) to a single ADIOS file.
+subroutine save_arrays_solver_boundary_adios()
+
+! saves arrays for boundaries such as MOHO, 400 and 670 discontinuities
+  use mpi
+  
+  use meshfem3d_par,only: &
+    myrank, LOCAL_PATH
+
+  use meshfem3D_models_par,only: &
+    HONOR_1D_SPHERICAL_MOHO
+    !SAVE_BOUNDARY_MESH,HONOR_1D_SPHERICAL_MOHO,SUPPRESS_CRUSTAL_MESH
+
+  use create_regions_mesh_par2, only: &
+    NSPEC2D_MOHO, NSPEC2D_400, NSPEC2D_670, &
+    ibelm_moho_top,ibelm_moho_bot,ibelm_400_top,ibelm_400_bot, &
+    ibelm_670_top,ibelm_670_bot,normal_moho,normal_400,normal_670, &
+    ispec2D_moho_top,ispec2D_moho_bot,ispec2D_400_top,ispec2D_400_bot, &
+    ispec2D_670_top,ispec2D_670_bot, &
+    prname
+
+  implicit none
+  include "constants.h"
+
+  ! local parameters
+  character(len=150) :: outputname, group_name
+  integer :: ierr, sizeprocs, comm, local_dim
+  integer(kind=8) :: group_size_inc
+  ! ADIOS variables
+  integer                 :: adios_err
+  integer(kind=8)         :: adios_group, adios_handle, varid
+  integer(kind=8)         :: adios_groupsize, adios_totalsize
+
+  ! first check the number of surface elements are the same for Moho, 400, 670
+  if (.not. SUPPRESS_CRUSTAL_MESH .and. HONOR_1D_SPHERICAL_MOHO) then
+    if (ispec2D_moho_top /= NSPEC2D_MOHO .or. ispec2D_moho_bot /= NSPEC2D_MOHO) &
+           call exit_mpi(myrank, 'Not the same number of Moho surface elements')
+  endif
+  if (ispec2D_400_top /= NSPEC2D_400 .or. ispec2D_400_bot /= NSPEC2D_400) &
+           call exit_mpi(myrank,'Not the same number of 400 surface elements')
+  if (ispec2D_670_top /= NSPEC2D_670 .or. ispec2D_670_bot /= NSPEC2D_670) &
+           call exit_mpi(myrank,'Not the same number of 670 surface elements')
+
+  outputname = trim(LOCAL_PATH) // "/boundary_disc.bp" 
+  group_name = "SPECFEM3D_GLOBE_BOUNDARY_DISC"
+  call world_size(sizeprocs) ! TODO keep it in parameters
+  call MPI_Comm_dup (MPI_COMM_WORLD, comm, ierr)
+  group_size_inc = 0
+  call adios_declare_group(adios_group, group_name, &
+      "", 0, adios_err)
+  call adios_select_method(adios_group, "MPI", "", "", adios_err)
+
+  !--- Define ADIOS variables -----------------------------
+  call define_adios_integer_scalar (adios_group, "NSPEC2D_MOHO", "", &
+      group_size_inc)
+  call define_adios_integer_scalar (adios_group, "NSPEC2D_400", "", &
+      group_size_inc)
+  call define_adios_integer_scalar (adios_group, "NSPEC2D_670", "", &
+      group_size_inc)
+
+  local_dim = NSPEC2D_MOHO
+  call define_adios_global_integer_1d_array(adios_group, "ibelm_moho_top", &
+        local_dim, group_size_inc)
+  call define_adios_global_integer_1d_array(adios_group, "ibelm_moho_bot", &
+        local_dim, group_size_inc)
+  local_dim = NSPEC2D_400
+  call define_adios_global_integer_1d_array(adios_group, "ibelm_400_top", &
+        local_dim, group_size_inc)
+  call define_adios_global_integer_1d_array(adios_group, "ibelm_400_bot", &
+        local_dim, group_size_inc)
+  local_dim = NSPEC2D_670 
+  call define_adios_global_integer_1d_array(adios_group, "ibelm_670_top", &
+        local_dim, group_size_inc)
+  call define_adios_global_integer_1d_array(adios_group, "ibelm_670_bot", &
+        local_dim, group_size_inc)
+  local_dim = NDIM*NGLLX*NGLLY*NSPEC2D_MOHO
+  call define_adios_global_real_1d_array(adios_group, "normal_moho", &
+        local_dim, group_size_inc)
+  local_dim = NDIM*NGLLX*NGLLY*NSPEC2D_400
+  call define_adios_global_real_1d_array(adios_group, "normal_400", &
+        local_dim, group_size_inc)
+  local_dim = NDIM*NGLLX*NGLLY*NSPEC2D_670
+  call define_adios_global_real_1d_array(adios_group, "normal_670", &
+        local_dim, group_size_inc)
+
+  !--- Open an ADIOS handler to the restart file. ---------
+  call adios_open (adios_handle, group_name, &
+      outputname, "w", comm, adios_err);
+  call adios_group_size (adios_handle, group_size_inc, &
+                         adios_totalsize, adios_err)
+
+  !--- Schedule writes for the previously defined ADIOS variables
+  call adios_write(adios_handle, "NSPEC2D_MOHO", NSPEC2D_MOHO, adios_err)
+  call adios_write(adios_handle, "NSPEC2D_400", NSPEC2D_400, adios_err)
+  call adios_write(adios_handle, "NSPEC2D_670", NSPEC2D_670, adios_err)
+
+  local_dim = NSPEC2D_MOHO
+  call adios_set_path (adios_handle, "ibelm_moho_top", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", ibelm_moho_top, adios_err)
+  call adios_set_path (adios_handle, "ibelm_moho_bot", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", ibelm_moho_bot, adios_err)
+
+  local_dim = NSPEC2D_400 
+  call adios_set_path (adios_handle, "ibelm_400_top", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", ibelm_400_top, adios_err)
+  call adios_set_path (adios_handle, "ibelm_400_bot", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", ibelm_400_bot, adios_err)
+
+  local_dim = NSPEC2D_670
+  call adios_set_path (adios_handle, "ibelm_670_top", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", ibelm_670_top, adios_err)
+  call adios_set_path (adios_handle, "ibelm_670_bot", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", ibelm_670_bot, adios_err)
+
+  local_dim = NDIM*NGLLX*NGLLY*NSPEC2D_MOHO
+  call adios_set_path (adios_handle, "normal_moho", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", normal_moho, adios_err)
+
+  local_dim = NDIM*NGLLX*NGLLY*NSPEC2D_400
+  call adios_set_path (adios_handle, "normal_400", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", normal_400, adios_err)
+
+  local_dim = NDIM*NGLLX*NGLLY*NSPEC2D_670
+  call adios_set_path (adios_handle, "normal_670", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      local_dim, sizeprocs)
+  call adios_write(adios_handle, "array", normal_670, adios_err)
+
+  !--- Reset the path to zero and perform the actual write to disk
+  call adios_set_path (adios_handle, "", adios_err)
+  call adios_close(adios_handle, adios_err)
+end subroutine save_arrays_solver_boundary_adios
+
+!-------------------------------------------------------------------------------
+!> Write the local, global and offset dimensions of a 1D global array to ADIOS.
+!! Every process contributes the same number of elements, so the global size
+!! is sizeprocs*local_dim and the offset of a rank is myrank*local_dim.
+!! \param adios_handle Handle to the adios file
+!! \param myrank Rank of the calling MPI process
+!! \param local_dim Number of elements to be written by one process
+!! \param sizeprocs Number of MPI processes
+subroutine write_1D_global_array_adios_dims(adios_handle, myrank, &
+    local_dim, sizeprocs)
+  use adios_write_mod
+
+  implicit none
+
+  integer(kind=8), intent(in) :: adios_handle
+  integer, intent(in) :: sizeprocs, local_dim, myrank
+
+  ! local parameters
+  integer :: ier
+
+  ! dimensions are written under the path previously set by the caller
+  call adios_write(adios_handle, "local_dim", local_dim, ier)
+  call check_adios_err(myrank,ier)
+  call adios_write(adios_handle, "global_dim", sizeprocs*local_dim, ier)
+  call check_adios_err(myrank,ier)
+  call adios_write(adios_handle, "offset", myrank*local_dim, ier)
+  call check_adios_err(myrank,ier)
+end subroutine write_1D_global_array_adios_dims
+

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/setup_MPI_interfaces.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/setup_MPI_interfaces.f90	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/setup_MPI_interfaces.f90	2013-07-01 01:33:15 UTC (rev 22469)
@@ -0,0 +1,572 @@
+!=====================================================================
+!
+!          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
+!          --------------------------------------------------
+!
+!          Main authors: Dimitri Komatitsch and Jeroen Tromp
+!                        Princeton University, USA
+!             and University of Pau / CNRS / INRIA, France
+! (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
+!                            April 2011
+!
+! This program is free software; you can redistribute it and/or modify
+! it under the terms of the GNU General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or
+! (at your option) any later version.
+!
+! This program is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+! GNU General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License along
+! with this program; if not, write to the Free Software Foundation, Inc.,
+! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+!
+!=====================================================================
+
+
+!> \brief Sets up the MPI interfaces (neighbour ranks and shared global points)
+!! for one mesh region, then frees the temporary chunk/corner assembly arrays.
+!! \param iregion_code region id (crust/mantle, outer core or inner core)
+  subroutine setup_MPI_interfaces(iregion_code)
+
+  use meshfem3D_par,only: &
+    INCLUDE_CENTRAL_CUBE,myrank,NUMFACES_SHARED
+
+  use create_MPI_interfaces_par
+
+  use MPI_crust_mantle_par
+  use MPI_outer_core_par
+  use MPI_inner_core_par
+
+  implicit none
+
+  integer,intent(in):: iregion_code
+
+  ! local parameters
+  ! assigns initial maximum arrays
+  ! for global slices, maximum number of neighbor is around 17 ( 8 horizontal, max of 8 on bottom )
+  integer :: MAX_NEIGHBOURS,max_nibool
+  integer, dimension(:),allocatable :: my_neighbours,nibool_neighbours
+  integer, dimension(:,:),allocatable :: ibool_neighbours
+  integer :: ier
+
+  ! allocates temporary arrays for setup routines
+  ! estimates a maximum size of needed arrays
+  MAX_NEIGHBOURS = 8 + NCORNERSCHUNKS
+  if( INCLUDE_CENTRAL_CUBE ) MAX_NEIGHBOURS = MAX_NEIGHBOURS + NUMMSGS_FACES
+
+  allocate(my_neighbours(MAX_NEIGHBOURS), &
+          nibool_neighbours(MAX_NEIGHBOURS),stat=ier)
+  if( ier /= 0 ) call exit_mpi(myrank,'error allocating my_neighbours array')
+
+  ! estimates initial maximum ibool array
+  max_nibool = npoin2D_max_all_CM_IC * NUMFACES_SHARED &
+               + non_zero_nb_msgs_theor_in_cube*npoin2D_cube_from_slices
+
+  allocate(ibool_neighbours(max_nibool,MAX_NEIGHBOURS), stat=ier)
+  if( ier /= 0 ) call exit_mpi(myrank,'error allocating ibool_neighbours')
+
+  ! sets up MPI interfaces between different processes
+  select case( iregion_code )
+  case( IREGION_CRUST_MANTLE )
+    ! crust/mantle
+    call setup_MPI_interfaces_cm(MAX_NEIGHBOURS,my_neighbours,nibool_neighbours, &
+                                max_nibool,ibool_neighbours)
+
+  case( IREGION_OUTER_CORE )
+    ! outer core
+    call setup_MPI_interfaces_oc(MAX_NEIGHBOURS,my_neighbours,nibool_neighbours, &
+                                max_nibool,ibool_neighbours)
+
+  case( IREGION_INNER_CORE )
+    ! inner core
+    call setup_MPI_interfaces_ic(MAX_NEIGHBOURS,my_neighbours,nibool_neighbours, &
+                                max_nibool,ibool_neighbours)
+  end select
+
+  ! frees temporary array
+  deallocate(ibool_neighbours)
+  deallocate(my_neighbours,nibool_neighbours)
+
+  ! frees arrays not needed any further
+  ! (the old two-sided chunk/corner buffers are replaced by the new interfaces)
+  deallocate(iprocfrom_faces,iprocto_faces,imsg_type)
+  deallocate(iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners)
+  deallocate(buffer_send_chunkcorn_scalar,buffer_recv_chunkcorn_scalar)
+  deallocate(buffer_send_chunkcorn_vector,buffer_recv_chunkcorn_vector)
+  select case( iregion_code )
+  case( IREGION_CRUST_MANTLE )
+    ! crust mantle
+    deallocate(iboolcorner_crust_mantle)
+    deallocate(iboolleft_xi_crust_mantle,iboolright_xi_crust_mantle)
+    deallocate(iboolleft_eta_crust_mantle,iboolright_eta_crust_mantle)
+    deallocate(iboolfaces_crust_mantle)
+  case( IREGION_OUTER_CORE )
+    ! outer core
+    deallocate(iboolcorner_outer_core)
+    deallocate(iboolleft_xi_outer_core,iboolright_xi_outer_core)
+    deallocate(iboolleft_eta_outer_core,iboolright_eta_outer_core)
+    deallocate(iboolfaces_outer_core)
+  case( IREGION_INNER_CORE )
+    ! inner core
+    deallocate(iboolcorner_inner_core)
+    deallocate(iboolleft_xi_inner_core,iboolright_xi_inner_core)
+    deallocate(iboolleft_eta_inner_core,iboolright_eta_inner_core)
+    deallocate(iboolfaces_inner_core)
+  end select
+
+  ! synchronizes MPI processes
+  call sync_all()
+
+  end subroutine setup_MPI_interfaces
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+!> \brief Builds the crust/mantle MPI interface arrays (neighbour ranks,
+!! shared-point counts and shared ibool ids) for this slice and checks them.
+!! The scratch arrays are allocated by the caller and reused for all regions.
+  subroutine setup_MPI_interfaces_cm(MAX_NEIGHBOURS,my_neighbours,nibool_neighbours, &
+                                    max_nibool,ibool_neighbours)
+
+  use meshfem3D_par,only: &
+    myrank,iproc_xi,iproc_eta,ichunk,addressing,INCLUDE_CENTRAL_CUBE, &
+    NPROC_XI,NPROC_ETA,NPROCTOT, &
+    NGLOB1D_RADIAL,NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX,NCHUNKS, &
+    OUTPUT_FILES
+
+  use meshfem3D_par,only: ibool,is_on_a_slice_edge
+
+  use create_MPI_interfaces_par
+  use MPI_crust_mantle_par
+  implicit none
+
+  ! caller-provided scratch arrays
+  integer :: MAX_NEIGHBOURS,max_nibool
+  integer, dimension(MAX_NEIGHBOURS) :: my_neighbours,nibool_neighbours
+  integer, dimension(max_nibool,MAX_NEIGHBOURS) :: ibool_neighbours
+
+  ! local parameters
+  ! temporary buffers for send and receive between faces of the slices and the chunks
+  real(kind=CUSTOM_REAL), dimension(npoin2D_max_all_CM_IC) ::  &
+    buffer_send_faces_scalar,buffer_received_faces_scalar
+  real(kind=CUSTOM_REAL),dimension(:),allocatable :: test_flag
+  integer,dimension(:),allocatable :: dummy_i
+  integer :: i,ier
+  ! debug file output
+  character(len=150) :: filename
+  logical,parameter :: DEBUG = .false.
+
+  ! sets up MPI interfaces
+  ! crust mantle region
+  if( myrank == 0 ) write(IMAIN,*) 'crust mantle mpi:'
+  allocate(test_flag(NGLOB_CRUST_MANTLE), &
+          stat=ier)
+  if( ier /= 0 ) call exit_mpi(myrank,'error allocating test_flag')
+
+  ! sets flag to rank id (+1 to avoid problems with zero rank)
+  test_flag(:) = myrank + 1.0
+
+  ! assembles values
+  ! after assembly, points shared with other slices carry the sum of the
+  ! contributing (rank+1) values, which identifies the neighbours below
+  call assemble_MPI_scalar_block(myrank,test_flag, &
+            NGLOB_CRUST_MANTLE, &
+            iproc_xi,iproc_eta,ichunk,addressing, &
+            iboolleft_xi_crust_mantle,iboolright_xi_crust_mantle,iboolleft_eta_crust_mantle,iboolright_eta_crust_mantle, &
+            npoin2D_faces_crust_mantle,npoin2D_xi_crust_mantle,npoin2D_eta_crust_mantle, &
+            iboolfaces_crust_mantle,iboolcorner_crust_mantle, &
+            iprocfrom_faces,iprocto_faces,imsg_type, &
+            iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
+            buffer_send_faces_scalar,buffer_received_faces_scalar,npoin2D_max_all_CM_IC, &
+            buffer_send_chunkcorn_scalar,buffer_recv_chunkcorn_scalar, &
+            NUMMSGS_FACES,NUM_MSG_TYPES,NCORNERSCHUNKS, &
+            NPROC_XI,NPROC_ETA,NGLOB1D_RADIAL(IREGION_CRUST_MANTLE), &
+            NGLOB2DMAX_XMIN_XMAX(IREGION_CRUST_MANTLE),NGLOB2DMAX_YMIN_YMAX(IREGION_CRUST_MANTLE), &
+            NGLOB2DMAX_XY,NCHUNKS)
+
+  ! removes own myrank id (+1)
+  test_flag(:) = test_flag(:) - ( myrank + 1.0)
+
+  allocate(dummy_i(NSPEC_CRUST_MANTLE),stat=ier)
+  if( ier /= 0 ) call exit_mpi(myrank,'error allocating dummy_i')
+
+  ! determines neighbor rank for shared faces
+  call get_MPI_interfaces(myrank,NGLOB_CRUST_MANTLE,NSPEC_CRUST_MANTLE, &
+                            test_flag,my_neighbours,nibool_neighbours,ibool_neighbours, &
+                            num_interfaces_crust_mantle,max_nibool_interfaces_cm, &
+                            max_nibool,MAX_NEIGHBOURS, &
+                            ibool,is_on_a_slice_edge, &
+                            IREGION_CRUST_MANTLE,.false.,dummy_i,INCLUDE_CENTRAL_CUBE, &
+                            xstore_crust_mantle,ystore_crust_mantle,zstore_crust_mantle,NPROCTOT)
+
+  deallocate(test_flag)
+  deallocate(dummy_i)
+
+  ! stores MPI interfaces informations
+  allocate(my_neighbours_crust_mantle(num_interfaces_crust_mantle), &
+          nibool_interfaces_crust_mantle(num_interfaces_crust_mantle), &
+          stat=ier)
+  if( ier /= 0 ) call exit_mpi(myrank,'error allocating array my_neighbours_crust_mantle etc.')
+  my_neighbours_crust_mantle = -1
+  nibool_interfaces_crust_mantle = 0
+
+  ! copies interfaces arrays
+  if( num_interfaces_crust_mantle > 0 ) then
+    allocate(ibool_interfaces_crust_mantle(max_nibool_interfaces_cm,num_interfaces_crust_mantle), &
+           stat=ier)
+    if( ier /= 0 ) call exit_mpi(myrank,'error allocating array ibool_interfaces_crust_mantle')
+    ibool_interfaces_crust_mantle = 0
+
+    ! ranks of neighbour processes
+    my_neighbours_crust_mantle(:) = my_neighbours(1:num_interfaces_crust_mantle)
+    ! number of global ibool entries on each interface
+    nibool_interfaces_crust_mantle(:) = nibool_neighbours(1:num_interfaces_crust_mantle)
+    ! global iglob point ids on each interface
+    ibool_interfaces_crust_mantle(:,:) = ibool_neighbours(1:max_nibool_interfaces_cm,1:num_interfaces_crust_mantle)
+  else
+    ! dummy allocation (fortran90 should allow allocate statement with zero array size)
+    max_nibool_interfaces_cm = 0
+    allocate(ibool_interfaces_crust_mantle(0,0),stat=ier)
+  endif
+
+  ! debug: outputs MPI interface
+  if( DEBUG ) then
+    do i=1,num_interfaces_crust_mantle
+      write(filename,'(a,i6.6,a,i2.2)') trim(OUTPUT_FILES)//'/MPI_points_crust_mantle_proc',myrank, &
+                      '_',my_neighbours_crust_mantle(i)
+      call write_VTK_data_points(NGLOB_crust_mantle, &
+                        xstore_crust_mantle,ystore_crust_mantle,zstore_crust_mantle, &
+                        ibool_interfaces_crust_mantle(1:nibool_interfaces_crust_mantle(i),i), &
+                        nibool_interfaces_crust_mantle(i),filename)
+    enddo
+    call sync_all()
+  endif
+
+  ! checks addressing
+  call test_MPI_neighbours(IREGION_CRUST_MANTLE, &
+                              num_interfaces_crust_mantle,max_nibool_interfaces_cm, &
+                              my_neighbours_crust_mantle,nibool_interfaces_crust_mantle, &
+                              ibool_interfaces_crust_mantle)
+
+  ! checks with assembly of test fields
+  call test_MPI_cm()
+
+  end subroutine setup_MPI_interfaces_cm
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+!> \brief Builds the outer-core MPI interface arrays (neighbour ranks,
+!! shared-point counts and shared ibool ids) for this slice and checks them.
+!! Same scheme as the crust/mantle version, on the outer-core mesh.
+  subroutine setup_MPI_interfaces_oc(MAX_NEIGHBOURS,my_neighbours,nibool_neighbours, &
+                                    max_nibool,ibool_neighbours)
+
+  use meshfem3D_par,only: &
+    myrank,iproc_xi,iproc_eta,ichunk,addressing,INCLUDE_CENTRAL_CUBE, &
+    NPROC_XI,NPROC_ETA,NPROCTOT, &
+    NGLOB1D_RADIAL,NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX,NCHUNKS, &
+    OUTPUT_FILES
+
+  use meshfem3D_par,only: ibool,is_on_a_slice_edge
+
+  use create_MPI_interfaces_par
+  use MPI_outer_core_par
+  implicit none
+
+  ! caller-provided scratch arrays
+  integer :: MAX_NEIGHBOURS,max_nibool
+  integer, dimension(MAX_NEIGHBOURS) :: my_neighbours,nibool_neighbours
+  integer, dimension(max_nibool,MAX_NEIGHBOURS) :: ibool_neighbours
+
+  ! local parameters
+  ! temporary buffers for send and receive between faces of the slices and the chunks
+  real(kind=CUSTOM_REAL), dimension(npoin2D_max_all_CM_IC) ::  &
+    buffer_send_faces_scalar,buffer_received_faces_scalar
+  real(kind=CUSTOM_REAL),dimension(:),allocatable :: test_flag
+  integer,dimension(:),allocatable :: dummy_i
+  integer :: i,ier
+  ! debug file output
+  character(len=150) :: filename
+  logical,parameter :: DEBUG = .false.
+
+  ! sets up MPI interfaces
+  ! outer core region
+  if( myrank == 0 ) write(IMAIN,*) 'outer core mpi:'
+
+  allocate(test_flag(NGLOB_OUTER_CORE), &
+          stat=ier)
+  if( ier /= 0 ) call exit_mpi(myrank,'error allocating test_flag outer core')
+
+  ! sets flag to rank id (+1 to avoid problems with zero rank)
+  test_flag(:) = myrank + 1.0
+
+  ! assembles values
+  call assemble_MPI_scalar_block(myrank,test_flag, &
+            NGLOB_OUTER_CORE, &
+            iproc_xi,iproc_eta,ichunk,addressing, &
+            iboolleft_xi_outer_core,iboolright_xi_outer_core,iboolleft_eta_outer_core,iboolright_eta_outer_core, &
+            npoin2D_faces_outer_core,npoin2D_xi_outer_core,npoin2D_eta_outer_core, &
+            iboolfaces_outer_core,iboolcorner_outer_core, &
+            iprocfrom_faces,iprocto_faces,imsg_type, &
+            iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
+            buffer_send_faces_scalar,buffer_received_faces_scalar,npoin2D_max_all_CM_IC, &
+            buffer_send_chunkcorn_scalar,buffer_recv_chunkcorn_scalar, &
+            NUMMSGS_FACES,NUM_MSG_TYPES,NCORNERSCHUNKS, &
+            NPROC_XI,NPROC_ETA,NGLOB1D_RADIAL(IREGION_OUTER_CORE), &
+            NGLOB2DMAX_XMIN_XMAX(IREGION_OUTER_CORE),NGLOB2DMAX_YMIN_YMAX(IREGION_OUTER_CORE), &
+            NGLOB2DMAX_XY,NCHUNKS)
+
+
+  ! removes own myrank id (+1)
+  test_flag(:) = test_flag(:) - ( myrank + 1.0)
+
+  allocate(dummy_i(NSPEC_OUTER_CORE),stat=ier)
+  if( ier /= 0 ) call exit_mpi(myrank,'error allocating dummy_i')
+
+  ! determines neighbor rank for shared faces
+  call get_MPI_interfaces(myrank,NGLOB_OUTER_CORE,NSPEC_OUTER_CORE, &
+                            test_flag,my_neighbours,nibool_neighbours,ibool_neighbours, &
+                            num_interfaces_outer_core,max_nibool_interfaces_oc, &
+                            max_nibool,MAX_NEIGHBOURS, &
+                            ibool,is_on_a_slice_edge, &
+                            IREGION_OUTER_CORE,.false.,dummy_i,INCLUDE_CENTRAL_CUBE, &
+                            xstore_outer_core,ystore_outer_core,zstore_outer_core,NPROCTOT)
+
+  deallocate(test_flag)
+  deallocate(dummy_i)
+
+  ! stores MPI interfaces informations
+  allocate(my_neighbours_outer_core(num_interfaces_outer_core), &
+          nibool_interfaces_outer_core(num_interfaces_outer_core), &
+          stat=ier)
+  if( ier /= 0 ) call exit_mpi(myrank,'error allocating array my_neighbours_outer_core etc.')
+  my_neighbours_outer_core = -1
+  nibool_interfaces_outer_core = 0
+
+  ! copies interfaces arrays
+  if( num_interfaces_outer_core > 0 ) then
+    allocate(ibool_interfaces_outer_core(max_nibool_interfaces_oc,num_interfaces_outer_core), &
+           stat=ier)
+    if( ier /= 0 ) call exit_mpi(myrank,'error allocating array ibool_interfaces_outer_core')
+    ibool_interfaces_outer_core = 0
+
+    ! ranks of neighbour processes
+    my_neighbours_outer_core(:) = my_neighbours(1:num_interfaces_outer_core)
+    ! number of global ibool entries on each interface
+    nibool_interfaces_outer_core(:) = nibool_neighbours(1:num_interfaces_outer_core)
+    ! global iglob point ids on each interface
+    ibool_interfaces_outer_core(:,:) = ibool_neighbours(1:max_nibool_interfaces_oc,1:num_interfaces_outer_core)
+  else
+    ! dummy allocation (fortran90 should allow allocate statement with zero array size)
+    max_nibool_interfaces_oc = 0
+    allocate(ibool_interfaces_outer_core(0,0),stat=ier)
+  endif
+
+  ! debug: outputs MPI interface
+  if( DEBUG ) then
+    do i=1,num_interfaces_outer_core
+      write(filename,'(a,i6.6,a,i2.2)') trim(OUTPUT_FILES)//'/MPI_points_outer_core_proc',myrank, &
+                      '_',my_neighbours_outer_core(i)
+      call write_VTK_data_points(NGLOB_OUTER_CORE, &
+                        xstore_outer_core,ystore_outer_core,zstore_outer_core, &
+                        ibool_interfaces_outer_core(1:nibool_interfaces_outer_core(i),i), &
+                        nibool_interfaces_outer_core(i),filename)
+    enddo
+    call sync_all()
+  endif
+
+  ! checks addressing
+  call test_MPI_neighbours(IREGION_OUTER_CORE, &
+                              num_interfaces_outer_core,max_nibool_interfaces_oc, &
+                              my_neighbours_outer_core,nibool_interfaces_outer_core, &
+                              ibool_interfaces_outer_core)
+
+  ! checks with assembly of test fields
+  call test_MPI_oc()
+
+  end subroutine setup_MPI_interfaces_oc
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+!> \brief Builds the inner-core MPI interface arrays for this slice, including
+!! the central-cube coupling when INCLUDE_CENTRAL_CUBE is set, and checks them.
+!! Fictitious central-cube elements are excluded from the flag assembly.
+  subroutine setup_MPI_interfaces_ic(MAX_NEIGHBOURS,my_neighbours,nibool_neighbours, &
+                                    max_nibool,ibool_neighbours)
+
+  use meshfem3D_par,only: &
+    myrank,iproc_xi,iproc_eta,ichunk,addressing,INCLUDE_CENTRAL_CUBE, &
+    NPROC_XI,NPROC_ETA,NPROCTOT, &
+    NGLOB1D_RADIAL,NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX,NCHUNKS, &
+    OUTPUT_FILES,IFLAG_IN_FICTITIOUS_CUBE,NGLLX,NGLLY,NGLLZ,NSPEC2D_BOTTOM
+
+  use meshfem3D_par,only: ibool,idoubling,is_on_a_slice_edge
+
+  use create_MPI_interfaces_par
+  use MPI_inner_core_par
+
+  implicit none
+
+  ! caller-provided scratch arrays
+  integer :: MAX_NEIGHBOURS,max_nibool
+  integer, dimension(MAX_NEIGHBOURS) :: my_neighbours,nibool_neighbours
+  integer, dimension(max_nibool,MAX_NEIGHBOURS) :: ibool_neighbours
+
+  ! local parameters
+  ! temporary buffers for send and receive between faces of the slices and the chunks
+  real(kind=CUSTOM_REAL), dimension(npoin2D_max_all_CM_IC) ::  &
+    buffer_send_faces_scalar,buffer_received_faces_scalar
+  real(kind=CUSTOM_REAL),dimension(:),allocatable :: test_flag
+  integer :: i,j,k,ispec,iglob,ier
+  integer :: ndim_assemble
+  ! debug file output
+  character(len=150) :: filename
+  logical,parameter :: DEBUG = .false.
+
+  ! sets up MPI interfaces
+  ! inner core
+  if( myrank == 0 ) write(IMAIN,*) 'inner core mpi:'
+
+  allocate(test_flag(NGLOB_INNER_CORE), &
+          stat=ier)
+  if( ier /= 0 ) call exit_mpi(myrank,'error allocating test_flag inner core')
+
+  ! sets flag to rank id (+1 to avoid problems with zero rank)
+  test_flag(:) = 0.0
+  do ispec=1,NSPEC_INNER_CORE
+    ! suppress fictitious elements in central cube
+    if(idoubling(ispec) == IFLAG_IN_FICTITIOUS_CUBE) cycle
+    ! sets flags
+    do k = 1,NGLLZ
+      do j = 1,NGLLY
+        do i = 1,NGLLX
+          iglob = ibool(i,j,k,ispec)
+          test_flag(iglob) = myrank + 1.0
+        enddo
+      enddo
+    enddo
+  enddo
+
+  ! assembles values
+  call assemble_MPI_scalar_block(myrank,test_flag, &
+            NGLOB_INNER_CORE, &
+            iproc_xi,iproc_eta,ichunk,addressing, &
+            iboolleft_xi_inner_core,iboolright_xi_inner_core,iboolleft_eta_inner_core,iboolright_eta_inner_core, &
+            npoin2D_faces_inner_core,npoin2D_xi_inner_core,npoin2D_eta_inner_core, &
+            iboolfaces_inner_core,iboolcorner_inner_core, &
+            iprocfrom_faces,iprocto_faces,imsg_type, &
+            iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
+            buffer_send_faces_scalar,buffer_received_faces_scalar,npoin2D_max_all_CM_IC, &
+            buffer_send_chunkcorn_scalar,buffer_recv_chunkcorn_scalar, &
+            NUMMSGS_FACES,NUM_MSG_TYPES,NCORNERSCHUNKS, &
+            NPROC_XI,NPROC_ETA,NGLOB1D_RADIAL(IREGION_INNER_CORE), &
+            NGLOB2DMAX_XMIN_XMAX(IREGION_INNER_CORE),NGLOB2DMAX_YMIN_YMAX(IREGION_INNER_CORE), &
+            NGLOB2DMAX_XY,NCHUNKS)
+
+  ! debug: idoubling inner core
+  if( DEBUG ) then
+    write(filename,'(a,i6.6)') trim(OUTPUT_FILES)//'/MPI_idoubling_inner_core_proc',myrank
+    call write_VTK_data_elem_i(NSPEC_INNER_CORE,NGLOB_INNER_CORE, &
+                            xstore_inner_core,ystore_inner_core,zstore_inner_core, &
+                            ibool, &
+                            idoubling,filename)
+    call sync_all()
+  endif
+
+  ! including central cube
+  if(INCLUDE_CENTRAL_CUBE) then
+    ! user output
+    if( myrank == 0 ) write(IMAIN,*) 'inner core with central cube mpi:'
+
+    ! test_flag is a scalar, not a vector
+    ndim_assemble = 1
+
+    ! use central cube buffers to assemble the inner core mass matrix with the central cube
+    call assemble_MPI_central_cube_block(ichunk,nb_msgs_theor_in_cube, sender_from_slices_to_cube, &
+                 npoin2D_cube_from_slices, buffer_all_cube_from_slices, &
+                 buffer_slices, buffer_slices2, ibool_central_cube, &
+                 receiver_cube_from_slices, ibool, &
+                 idoubling, NSPEC_INNER_CORE, &
+                 ibelm_bottom_inner_core, NSPEC2D_BOTTOM(IREGION_INNER_CORE), &
+                 NGLOB_INNER_CORE, &
+                 test_flag,ndim_assemble, &
+                 iproc_eta,addressing,NCHUNKS,NPROC_XI,NPROC_ETA)
+
+    ! frees array not needed anymore
+    deallocate(ibelm_bottom_inner_core)
+
+  endif
+
+  ! removes own myrank id (+1)
+  test_flag = test_flag - ( myrank + 1.0)
+  ! fictitious-cube points never got a flag, so clip negatives back to zero
+  where( test_flag < 0.0 ) test_flag = 0.0
+
+  ! debug: in sequential order, for testing purpose
+  !do i=0,NPROCTOT - 1
+  !  if( myrank == i ) then
+  !    ! gets new interfaces for inner_core without central cube yet
+  !    ! determines neighbor rank for shared faces
+  !    call get_MPI_interfaces(myrank,NGLOB_INNER_CORE,NSPEC_INNER_CORE, &
+  !                          test_flag,my_neighbours,nibool_neighbours,ibool_neighbours, &
+  !                          num_interfaces_inner_core,max_nibool_interfaces_ic, &
+  !                          max_nibool,MAX_NEIGHBOURS, &
+  !                          ibool,is_on_a_slice_edge, &
+  !                          IREGION_INNER_CORE,.false.,idoubling,INCLUDE_CENTRAL_CUBE, &
+  !                          xstore_inner_core,ystore_inner_core,zstore_inner_core,NPROCTOT)
+  !  endif
+  !  call sync_all()
+  !enddo
+
+  ! gets new interfaces for inner_core without central cube yet
+  ! determines neighbor rank for shared faces
+  call get_MPI_interfaces(myrank,NGLOB_INNER_CORE,NSPEC_INNER_CORE, &
+                        test_flag,my_neighbours,nibool_neighbours,ibool_neighbours, &
+                        num_interfaces_inner_core,max_nibool_interfaces_ic, &
+                        max_nibool,MAX_NEIGHBOURS, &
+                        ibool,is_on_a_slice_edge, &
+                        IREGION_INNER_CORE,.false.,idoubling,INCLUDE_CENTRAL_CUBE, &
+                        xstore_inner_core,ystore_inner_core,zstore_inner_core,NPROCTOT)
+
+  deallocate(test_flag)
+
+  ! stores MPI interfaces informations
+  allocate(my_neighbours_inner_core(num_interfaces_inner_core), &
+          nibool_interfaces_inner_core(num_interfaces_inner_core), &
+          stat=ier)
+  if( ier /= 0 ) call exit_mpi(myrank,'error allocating array my_neighbours_inner_core etc.')
+  my_neighbours_inner_core = -1
+  nibool_interfaces_inner_core = 0
+
+  ! copies interfaces arrays
+  if( num_interfaces_inner_core > 0 ) then
+    allocate(ibool_interfaces_inner_core(max_nibool_interfaces_ic,num_interfaces_inner_core), &
+           stat=ier)
+    if( ier /= 0 ) call exit_mpi(myrank,'error allocating array ibool_interfaces_inner_core')
+    ibool_interfaces_inner_core = 0
+
+    ! ranks of neighbour processes
+    my_neighbours_inner_core(:) = my_neighbours(1:num_interfaces_inner_core)
+    ! number of global ibool entries on each interface
+    nibool_interfaces_inner_core(:) = nibool_neighbours(1:num_interfaces_inner_core)
+    ! global iglob point ids on each interface
+    ibool_interfaces_inner_core(:,:) = ibool_neighbours(1:max_nibool_interfaces_ic,1:num_interfaces_inner_core)
+  else
+    ! dummy allocation (fortran90 should allow allocate statement with zero array size)
+    max_nibool_interfaces_ic = 0
+    allocate(ibool_interfaces_inner_core(0,0),stat=ier)
+  endif
+
+  ! debug: saves MPI interfaces
+  if( DEBUG ) then
+    do i=1,num_interfaces_inner_core
+      write(filename,'(a,i6.6,a,i2.2)') trim(OUTPUT_FILES)//'/MPI_points_inner_core_proc',myrank, &
+                      '_',my_neighbours_inner_core(i)
+      call write_VTK_data_points(NGLOB_INNER_CORE, &
+                        xstore_inner_core,ystore_inner_core,zstore_inner_core, &
+                        ibool_interfaces_inner_core(1:nibool_interfaces_inner_core(i),i), &
+                        nibool_interfaces_inner_core(i),filename)
+    enddo
+    call sync_all()
+  endif
+
+  ! checks addressing
+  call test_MPI_neighbours(IREGION_INNER_CORE, &
+                              num_interfaces_inner_core,max_nibool_interfaces_ic, &
+                              my_neighbours_inner_core,nibool_interfaces_inner_core, &
+                              ibool_interfaces_inner_core)
+
+  ! checks with assembly of test fields
+  call test_MPI_ic()
+
+  end subroutine setup_MPI_interfaces_ic
+

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/setup_color_perm.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/setup_color_perm.f90	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/setup_color_perm.f90	2013-07-01 01:33:15 UTC (rev 22469)
@@ -0,0 +1,1234 @@
+!=====================================================================
+!
+!          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
+!          --------------------------------------------------
+!
+!          Main authors: Dimitri Komatitsch and Jeroen Tromp
+!                        Princeton University, USA
+!             and University of Pau / CNRS / INRIA, France
+! (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
+!                            April 2011
+!
+! This program is free software; you can redistribute it and/or modify
+! it under the terms of the GNU General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or
+! (at your option) any later version.
+!
+! This program is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+! GNU General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License along
+! with this program; if not, write to the Free Software Foundation, Inc.,
+! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+!
+!=====================================================================
+
+
+
+  subroutine setup_color_perm(iregion_code)
+
+! Driver routine: sets up mesh coloring and the corresponding element
+! permutation for one earth region (crust/mantle, outer core or inner core).
+! Coloring partitions the elements of a region into subsets ("colors") of
+! disconnected elements so that a GPU solver can accumulate elemental
+! contributions within one color without atomic operations.
+! When USE_MESH_COLORING_GPU is disabled, only dummy (zero-sized) color
+! arrays are allocated so that later code can reference them safely.
+
+  use meshfem3D_par,only: &
+    myrank,IMAIN,USE_MESH_COLORING_GPU,SAVE_MESH_FILES, &
+    IREGION_CRUST_MANTLE,IREGION_OUTER_CORE,IREGION_INNER_CORE
+
+  use meshfem3D_par,only: ibool,is_on_a_slice_edge
+
+  use MPI_crust_mantle_par
+  use MPI_outer_core_par
+  use MPI_inner_core_par
+
+  implicit none
+
+  ! region selector: one of IREGION_CRUST_MANTLE / IREGION_OUTER_CORE / IREGION_INNER_CORE
+  integer,intent(in) :: iregion_code
+
+  ! local parameters
+  integer, dimension(:), allocatable :: perm   ! temporary element permutation (old ispec -> new ispec)
+  integer :: ier
+  integer :: nspec,nglob                       ! element/point counts of the selected region
+  integer :: idomain                           ! region id passed down to the worker routines
+
+  ! user output
+  if(myrank == 0) then
+    write(IMAIN,*) '  mesh coloring: ',USE_MESH_COLORING_GPU
+  endif
+
+  ! the three cases below are structurally identical; they differ only in the
+  ! region-specific counts and color arrays they operate on
+  select case( iregion_code )
+  case( IREGION_CRUST_MANTLE )
+    ! crust mantle
+    ! initializes
+    num_colors_outer_crust_mantle = 0
+    num_colors_inner_crust_mantle = 0
+
+    ! mesh coloring
+    if( USE_MESH_COLORING_GPU ) then
+
+      ! user output
+      if(myrank == 0) write(IMAIN,*) '  coloring crust mantle... '
+
+      ! crust/mantle region
+      nspec = NSPEC_CRUST_MANTLE
+      nglob = NGLOB_CRUST_MANTLE
+      idomain = IREGION_CRUST_MANTLE
+
+      ! creates coloring of elements
+      allocate(perm(nspec),stat=ier)
+      if( ier /= 0 ) call exit_mpi(myrank,'error allocating temporary perm crust mantle array')
+      perm(:) = 0
+
+      call setup_color(myrank,nspec,nglob,ibool,perm, &
+                      idomain,is_on_a_slice_edge, &
+                      num_phase_ispec_crust_mantle,phase_ispec_inner_crust_mantle, &
+                      SAVE_MESH_FILES)
+
+      ! checks
+      ! note: perm numbers outer and inner elements independently, each starting at 1,
+      !       so its maximum is the larger phase count, i.e. num_phase_ispec_crust_mantle
+      if(minval(perm) /= 1) &
+        call exit_MPI(myrank, 'minval(perm) should be 1')
+      if(maxval(perm) /= num_phase_ispec_crust_mantle) &
+        call exit_MPI(myrank, 'maxval(perm) should be num_phase_ispec_crust_mantle')
+
+      ! sorts array according to permutation
+      call sync_all()
+      if(myrank == 0) then
+        write(IMAIN,*) '     mesh permutation:'
+      endif
+      call setup_permutation(myrank,nspec,nglob,ibool, &
+                            idomain,perm, &
+                            num_colors_outer_crust_mantle,num_colors_inner_crust_mantle, &
+                            num_elem_colors_crust_mantle, &
+                            num_phase_ispec_crust_mantle,phase_ispec_inner_crust_mantle, &
+                            SAVE_MESH_FILES)
+
+      deallocate(perm)
+    else
+      ! dummy array (zero-sized since both color counts were initialized to 0 above)
+      allocate(num_elem_colors_crust_mantle(num_colors_outer_crust_mantle+num_colors_inner_crust_mantle),stat=ier)
+      if( ier /= 0 ) call exit_mpi(myrank,'error allocating num_elem_colors_crust_mantle array')
+    endif
+
+  case( IREGION_OUTER_CORE )
+    ! outer core
+    ! initializes
+    num_colors_outer_outer_core = 0
+    num_colors_inner_outer_core = 0
+
+    ! mesh coloring
+    if( USE_MESH_COLORING_GPU ) then
+
+      ! user output
+      if(myrank == 0) write(IMAIN,*) '  coloring outer core... '
+
+      ! outer core region
+      nspec = NSPEC_OUTER_CORE
+      nglob = NGLOB_OUTER_CORE
+      idomain = IREGION_OUTER_CORE
+
+      ! creates coloring of elements
+      allocate(perm(nspec),stat=ier)
+      if( ier /= 0 ) call exit_mpi(myrank,'error allocating temporary perm outer_core array')
+      perm(:) = 0
+
+      call setup_color(myrank,nspec,nglob,ibool,perm, &
+                      idomain,is_on_a_slice_edge, &
+                      num_phase_ispec_outer_core,phase_ispec_inner_outer_core, &
+                      SAVE_MESH_FILES)
+
+      ! checks
+      if(minval(perm) /= 1) &
+        call exit_MPI(myrank, 'minval(perm) should be 1')
+      if(maxval(perm) /= num_phase_ispec_outer_core) &
+        call exit_MPI(myrank, 'maxval(perm) should be num_phase_ispec_outer_core')
+
+      ! sorts array according to permutation
+      call sync_all()
+      if(myrank == 0) then
+        write(IMAIN,*) '     mesh permutation:'
+      endif
+      call setup_permutation(myrank,nspec,nglob,ibool, &
+                            idomain,perm, &
+                            num_colors_outer_outer_core,num_colors_inner_outer_core, &
+                            num_elem_colors_outer_core, &
+                            num_phase_ispec_outer_core,phase_ispec_inner_outer_core, &
+                            SAVE_MESH_FILES)
+
+      deallocate(perm)
+    else
+      ! dummy array (zero-sized since both color counts were initialized to 0 above)
+      allocate(num_elem_colors_outer_core(num_colors_outer_outer_core+num_colors_inner_outer_core),stat=ier)
+      if( ier /= 0 ) call exit_mpi(myrank,'error allocating num_elem_colors_outer_core array')
+    endif
+
+  case( IREGION_INNER_CORE )
+    ! inner core
+    ! initializes
+    num_colors_outer_inner_core = 0
+    num_colors_inner_inner_core = 0
+
+    ! mesh coloring
+    if( USE_MESH_COLORING_GPU ) then
+
+      ! user output
+      if(myrank == 0) write(IMAIN,*) '  coloring inner core... '
+
+      ! inner core region
+      nspec = NSPEC_INNER_CORE
+      nglob = NGLOB_INNER_CORE
+      idomain = IREGION_INNER_CORE
+
+      ! creates coloring of elements
+      allocate(perm(nspec),stat=ier)
+      if( ier /= 0 ) call exit_mpi(myrank,'error allocating temporary perm inner_core array')
+      perm(:) = 0
+
+      call setup_color(myrank,nspec,nglob,ibool,perm, &
+                      idomain,is_on_a_slice_edge, &
+                      num_phase_ispec_inner_core,phase_ispec_inner_inner_core, &
+                      SAVE_MESH_FILES)
+
+      ! checks
+      ! inner core contains fictitious cube elements that are excluded from the
+      ! coloring (their perm entries stay 0), hence the relaxed bounds checks here
+      if(minval(perm) < 0) &
+        call exit_MPI(myrank, 'minval(perm) should be at least 0')
+      if(maxval(perm) > num_phase_ispec_inner_core) then
+        print*,'error perm inner core:',minval(perm),maxval(perm),num_phase_ispec_inner_core
+        call exit_MPI(myrank, 'maxval(perm) should be num_phase_ispec_inner_core')
+      endif
+
+      ! sorts array according to permutation
+      call sync_all()
+      if(myrank == 0) then
+        write(IMAIN,*) '     mesh permutation:'
+      endif
+      call setup_permutation(myrank,nspec,nglob,ibool, &
+                            idomain,perm, &
+                            num_colors_outer_inner_core,num_colors_inner_inner_core, &
+                            num_elem_colors_inner_core, &
+                            num_phase_ispec_inner_core,phase_ispec_inner_inner_core, &
+                            SAVE_MESH_FILES)
+
+      deallocate(perm)
+    else
+      ! dummy array (zero-sized since both color counts were initialized to 0 above)
+      allocate(num_elem_colors_inner_core(num_colors_outer_inner_core+num_colors_inner_inner_core),stat=ier)
+      if( ier /= 0 ) call exit_mpi(myrank,'error allocating num_elem_colors_inner_core array')
+    endif
+
+  end select
+
+  end subroutine setup_color_perm
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  subroutine setup_color(myrank,nspec,nglob,ibool,perm, &
+                            idomain,is_on_a_slice_edge, &
+                            num_phase_ispec_d,phase_ispec_inner_d, &
+                            SAVE_MESH_FILES)
+
+! sets up mesh coloring
+!
+! Computes a coloring of the region's elements (via get_perm_color_faster),
+! stores the per-color element counts in the region's num_elem_colors_* array,
+! fills perm with the color-ordered element numbering (outer and inner elements
+! numbered independently, each starting at 1), and rewrites phase_ispec_inner_d
+! so the solver's outer/inner element loops follow the new ordering.
+!
+! Arguments:
+!   myrank             - MPI rank (error reporting / rank-0 output)
+!   nspec, nglob       - number of spectral elements / global points in this region
+!   ibool              - local-to-global index mapping (NGLLX,NGLLY,NGLLZ,nspec)
+!   perm               - output: per-element permutation produced by the coloring
+!   idomain            - region id (see below)
+!   is_on_a_slice_edge - .true. for elements touching an MPI slice boundary ("outer")
+!   num_phase_ispec_d  - size of the phase element lists
+!   phase_ispec_inner_d- (:,1) outer / (:,2) inner element lists, updated in place
+!   SAVE_MESH_FILES    - enables optional debug VTK output
+
+  use meshfem3D_par,only: &
+    LOCAL_PATH,MAX_NUMBER_OF_COLORS,IMAIN,NGLLX,NGLLY,NGLLZ,IFLAG_IN_FICTITIOUS_CUBE, &
+    IREGION_CRUST_MANTLE,IREGION_OUTER_CORE,IREGION_INNER_CORE
+
+  use meshfem3D_par,only: &
+    idoubling
+
+  use MPI_crust_mantle_par,only: &
+    num_colors_outer_crust_mantle,num_colors_inner_crust_mantle,num_elem_colors_crust_mantle, &
+    xstore_crust_mantle,ystore_crust_mantle,zstore_crust_mantle
+
+  use MPI_outer_core_par,only: &
+    num_colors_outer_outer_core,num_colors_inner_outer_core,num_elem_colors_outer_core
+
+  use MPI_inner_core_par,only: &
+    num_colors_outer_inner_core,num_colors_inner_inner_core,num_elem_colors_inner_core
+
+  implicit none
+
+  integer :: myrank,nspec,nglob
+  integer, dimension(NGLLX,NGLLY,NGLLZ,nspec) :: ibool
+
+  integer, dimension(nspec) :: perm
+
+  ! wrapper array for ispec is in domain:
+  ! idomain: 1 == crust/mantle, 2 == outer core, 3 == inner core
+  integer :: idomain
+  logical, dimension(nspec) :: is_on_a_slice_edge
+  integer :: num_phase_ispec_d
+  integer, dimension(num_phase_ispec_d,2) :: phase_ispec_inner_d
+
+  logical :: SAVE_MESH_FILES
+
+  ! local parameters
+  ! added for color permutation
+  integer :: nb_colors_outer_elements,nb_colors_inner_elements
+  integer, dimension(:), allocatable :: num_of_elems_in_this_color
+  integer, dimension(:), allocatable :: color
+  integer, dimension(:), allocatable :: first_elem_number_in_this_color
+  logical, dimension(:), allocatable :: ispec_is_d
+
+  integer :: nspec_outer,nspec_inner,nspec_domain
+  integer :: nspec_outer_min_global,nspec_outer_max_global
+  integer :: nspec_inner_min_global,nspec_inner_max_global
+  integer :: min_elem_global,max_elem_global
+
+  integer :: nb_colors
+  integer :: nb_colors_min,nb_colors_max
+
+  integer :: icolor,ispec,ispec_counter
+  integer :: ispec_inner,ispec_outer
+  integer :: ier
+
+  ! note: initialization at declaration implies the SAVE attribute;
+  !       harmless here since str_domain is never modified
+  character(len=2),dimension(3) :: str_domain = (/ "cm", "oc", "ic" /)
+  character(len=256) :: filename
+  character(len=150) :: prname
+
+  ! debug file output
+  logical, parameter :: DEBUG = .false.
+  ! debug coloring : creates dummy mesh coloring, separating only inner/outer elements into colors
+  logical, parameter :: DEBUG_COLOR = .false.
+
+  !!!! David Michea: detection of the edges, coloring and permutation separately
+
+  ! implement mesh coloring for GPUs if needed, to create subsets of disconnected elements
+  ! to remove dependencies and the need for atomic operations in the sum of
+  ! elemental contributions in the solver
+
+  ! allocates temporary array with colors
+  allocate(color(nspec),stat=ier)
+  if( ier /= 0 ) stop 'error allocating temporary color array'
+  allocate(first_elem_number_in_this_color(MAX_NUMBER_OF_COLORS + 1),stat=ier)
+  if( ier /= 0 ) stop 'error allocating first_elem_number_in_this_color array'
+
+  ! flags for elements in this domain
+  ! for compatibility with SPECFEM3D mesh coloring routine
+  allocate(ispec_is_d(nspec),stat=ier)
+  if( ier /= 0 ) stop 'error allocating ispec_is_d array'
+
+  ! sets up domain coloring arrays
+  select case(idomain)
+  case( IREGION_CRUST_MANTLE,IREGION_OUTER_CORE )
+    ! crust/mantle and outer core region meshes use all elements
+    ispec_is_d(:) = .true.
+  case( IREGION_INNER_CORE )
+    ! initializes
+    ispec_is_d(:) = .true.
+    ! excludes fictitious cube elements from coloring
+    where(idoubling == IFLAG_IN_FICTITIOUS_CUBE) ispec_is_d = .false.
+    ! checks
+    if( count(ispec_is_d) == 0 ) then
+      stop 'error no inner core elements'
+    endif
+  case default
+    stop 'error idomain in setup_color'
+  end select
+
+  ! fast element coloring scheme
+  call get_perm_color_faster(is_on_a_slice_edge,ispec_is_d, &
+                            ibool,perm,color, &
+                            nspec,nglob, &
+                            nb_colors_outer_elements,nb_colors_inner_elements, &
+                            nspec_outer,nspec_inner,nspec_domain, &
+                            first_elem_number_in_this_color, &
+                            myrank)
+
+  ! debug: file output
+  if( SAVE_MESH_FILES .and. DEBUG .and. idomain == IREGION_CRUST_MANTLE ) then
+    call create_name_database(prname,myrank,idomain,LOCAL_PATH)
+    filename = prname(1:len_trim(prname))//'color_'//str_domain(idomain)
+    call write_VTK_data_elem_i(nspec,nglob, &
+                              xstore_crust_mantle,ystore_crust_mantle,zstore_crust_mantle,ibool, &
+                              color,filename)
+  endif
+  deallocate(color)
+
+  ! for the last color, the next color is fictitious and its first (fictitious) element number is nspec + 1
+  first_elem_number_in_this_color(nb_colors_outer_elements + nb_colors_inner_elements + 1) &
+    = nspec_domain + 1
+
+  allocate(num_of_elems_in_this_color(nb_colors_outer_elements + nb_colors_inner_elements),stat=ier)
+  if( ier /= 0 ) then
+    print*,'error',myrank,' allocating num_of_elems_in_this_color:',nb_colors_outer_elements,nb_colors_inner_elements, &
+          nb_colors_outer_elements + nb_colors_inner_elements
+    call exit_MPI(myrank,'error allocating num_of_elems_in_this_color array')
+  endif
+
+  ! per-color element counts = differences of consecutive color start indices
+  num_of_elems_in_this_color(:) = 0
+  do icolor = 1, nb_colors_outer_elements + nb_colors_inner_elements
+    num_of_elems_in_this_color(icolor) = first_elem_number_in_this_color(icolor+1) - first_elem_number_in_this_color(icolor)
+  enddo
+  deallocate(first_elem_number_in_this_color)
+
+  ! check that the sum of all the numbers of elements found in each color is equal
+  ! to the total number of elements in the mesh
+  if(sum(num_of_elems_in_this_color) /= nspec_domain) then
+    print *,'error number of elements in this color:',idomain
+    print *,'rank: ',myrank,' nspec = ',nspec_domain
+    print *,'  total number of elements in all the colors of the mesh = ', &
+      sum(num_of_elems_in_this_color)
+    call exit_MPI(myrank, 'incorrect total number of elements in all the colors of the mesh')
+  endif
+
+  ! check that the sum of all the numbers of elements found in each color for the outer elements is equal
+  ! to the total number of outer elements found in the mesh
+  if(sum(num_of_elems_in_this_color(1:nb_colors_outer_elements)) /= nspec_outer) then
+    print *,'error number of outer elements in this color:',idomain
+    print *,'rank: ',myrank,' nspec_outer = ',nspec_outer
+    print*,'nb_colors_outer_elements = ',nb_colors_outer_elements
+    print *,'total number of elements in all the colors of the mesh for outer elements = ', &
+      sum(num_of_elems_in_this_color(1:nb_colors_outer_elements))
+    call exit_MPI(myrank, 'incorrect total number of elements in all the colors of the mesh for outer elements')
+  endif
+
+  ! debug: no mesh coloring, only creates dummy coloring arrays
+  ! (overwrites the real coloring with at most one outer and one inner color)
+  if( DEBUG_COLOR ) then
+    nb_colors_outer_elements = 0
+    nb_colors_inner_elements = 0
+    ispec_counter = 0
+
+    ! first generate all the outer elements
+    do ispec = 1,nspec
+      if( ispec_is_d(ispec) ) then
+        if( is_on_a_slice_edge(ispec) .eqv. .true. ) then
+          ispec_counter = ispec_counter + 1
+          perm(ispec) = ispec_counter
+        endif
+      endif
+    enddo
+
+    ! store total number of outer elements
+    nspec_outer = ispec_counter
+
+    ! only single color
+    if(nspec_outer > 0 ) nb_colors_outer_elements = 1
+
+    ! then generate all the inner elements
+    do ispec = 1,nspec
+      if( ispec_is_d(ispec) ) then
+        if( is_on_a_slice_edge(ispec) .eqv. .false. ) then
+          ispec_counter = ispec_counter + 1
+          perm(ispec) = ispec_counter - nspec_outer ! starts again at 1
+        endif
+      endif
+    enddo
+    nspec_inner = ispec_counter - nspec_outer
+
+    ! only single color
+    if(nspec_inner > 0 ) nb_colors_inner_elements = 1
+
+    ! user output
+    if(myrank == 0 ) then
+      write(IMAIN,*) 'debugging mesh coloring:'
+      write(IMAIN,*) 'nb_colors inner / outer: ',nb_colors_inner_elements,nb_colors_outer_elements
+    endif
+
+    ! re-allocate
+    if(allocated(num_of_elems_in_this_color) ) deallocate(num_of_elems_in_this_color)
+    allocate(num_of_elems_in_this_color(nb_colors_outer_elements + nb_colors_inner_elements),stat=ier)
+    if( ier /= 0 ) then
+      print*,'error',myrank,' allocating num_of_elems_in_this_color:',nb_colors_outer_elements,nb_colors_inner_elements, &
+          nb_colors_outer_elements + nb_colors_inner_elements
+      call exit_MPI(myrank,'error allocating num_of_elems_in_this_color array')
+    endif
+
+    if( nspec_outer > 0 ) num_of_elems_in_this_color(1) = nspec_outer
+    if( nspec_inner > 0 ) num_of_elems_in_this_color(2) = nspec_inner
+  endif ! debug_color
+
+  ! debug: saves mesh coloring numbers into files
+  if( DEBUG ) then
+    ! debug file output
+    ! note: hard-coded unit 99 follows the existing convention in this code base
+    call create_name_database(prname,myrank,idomain,LOCAL_PATH)
+    filename = prname(1:len_trim(prname))//'num_of_elems_in_this_color_'//str_domain(idomain)//'.dat'
+    open(unit=99,file=trim(filename),status='unknown',iostat=ier)
+    if( ier /= 0 ) stop 'error opening num_of_elems_in_this_color file'
+    ! number of colors for outer elements
+    write(99,*) nb_colors_outer_elements
+    ! number of colors for inner elements
+    write(99,*) nb_colors_inner_elements
+    ! number of elements in each color
+    ! outer elements
+    do icolor = 1, nb_colors_outer_elements + nb_colors_inner_elements
+      write(99,*) num_of_elems_in_this_color(icolor)
+    enddo
+    close(99)
+  endif
+
+  ! checks non-zero elements in colors
+  do icolor = 1,nb_colors_outer_elements + nb_colors_inner_elements
+    ! checks
+    if( num_of_elems_in_this_color(icolor) == 0 ) then
+      print *,'rank: ',myrank,'domain:',idomain,' nspec = ',nspec_domain
+      print *,'error zero elements in this color:',icolor
+      print *,'total number of elements in all the colors of the mesh = ', &
+        sum(num_of_elems_in_this_color)
+      call exit_MPI(myrank, 'zero elements in a color of the mesh')
+    endif
+  enddo
+
+
+
+  ! sets up domain coloring arrays
+  ! (copies the color counts into the region-specific module arrays)
+  select case(idomain)
+  case( IREGION_CRUST_MANTLE )
+    ! crust/mantle domains
+    num_colors_outer_crust_mantle = nb_colors_outer_elements
+    num_colors_inner_crust_mantle = nb_colors_inner_elements
+
+    allocate(num_elem_colors_crust_mantle(num_colors_outer_crust_mantle + num_colors_inner_crust_mantle),stat=ier)
+    if( ier /= 0 ) stop 'error allocating num_elem_colors_crust_mantle array'
+
+    num_elem_colors_crust_mantle(:) = num_of_elems_in_this_color(:)
+
+  case( IREGION_OUTER_CORE )
+    ! outer core domains
+    num_colors_outer_outer_core = nb_colors_outer_elements
+    num_colors_inner_outer_core = nb_colors_inner_elements
+
+    allocate(num_elem_colors_outer_core(num_colors_outer_outer_core + num_colors_inner_outer_core),stat=ier)
+    if( ier /= 0 ) stop 'error allocating num_elem_colors_outer_core array'
+
+    num_elem_colors_outer_core(:) = num_of_elems_in_this_color(:)
+
+  case( IREGION_INNER_CORE )
+    ! inner core domains
+    num_colors_outer_inner_core = nb_colors_outer_elements
+    num_colors_inner_inner_core = nb_colors_inner_elements
+
+    allocate(num_elem_colors_inner_core(num_colors_outer_inner_core + num_colors_inner_inner_core),stat=ier)
+    if( ier /= 0 ) stop 'error allocating num_elem_colors_inner_core array'
+
+    num_elem_colors_inner_core(:) = num_of_elems_in_this_color(:)
+
+  case default
+    stop 'error idomain not recognized'
+  end select
+
+  ! sets up elements for loops in simulations
+  ! (rebuilds phase_ispec lists: column 1 = outer elements, column 2 = inner elements)
+  ispec_inner = 0
+  ispec_outer = 0
+  do ispec = 1, nspec
+    ! only elements in this domain
+    if( ispec_is_d(ispec) ) then
+
+      ! sets phase_ispec arrays with ordering of elements
+      if( is_on_a_slice_edge(ispec) .eqv. .true. ) then
+        ! outer elements
+        ispec_outer = perm(ispec)
+
+        ! checks
+        ! NOTE(review): the label in the print below says 'ispec_inner' but the
+        ! value printed is ispec_outer — the label should likely read 'ispec_outer'
+        if( ispec_outer < 1 .or. ispec_outer > num_phase_ispec_d ) then
+          print*,'error outer permutation:',idomain
+          print*,'rank:',myrank,'  ispec_inner = ',ispec_outer
+          print*,'num_phase_ispec_d = ',num_phase_ispec_d
+          call exit_MPI(myrank,'error outer permutation')
+        endif
+
+        phase_ispec_inner_d(ispec_outer,1) = ispec
+
+      else
+        ! inner elements
+        ispec_inner = perm(ispec)
+
+        ! checks
+        if( ispec_inner < 1 .or. ispec_inner > num_phase_ispec_d ) then
+          print*,'error inner permutation:',idomain
+          print*,'rank:',myrank,'  ispec_inner = ',ispec_inner
+          print*,'num_phase_ispec_d = ',num_phase_ispec_d
+          call exit_MPI(myrank,'error inner permutation')
+        endif
+
+        phase_ispec_inner_d(ispec_inner,2) = ispec
+
+      endif
+    endif
+  enddo
+
+  ! total number of colors
+  ! (global reductions below return results on rank 0 only, for user output)
+  nb_colors = nb_colors_inner_elements + nb_colors_outer_elements
+  call min_all_i(nb_colors,nb_colors_min)
+  call max_all_i(nb_colors,nb_colors_max)
+
+  ! min/max of elements per color
+  call min_all_i(minval(num_of_elems_in_this_color(:)),min_elem_global)
+  call max_all_i(maxval(num_of_elems_in_this_color(:)),max_elem_global)
+
+  ! min/max of inner/outer elements
+  call min_all_i(nspec_inner,nspec_inner_min_global)
+  call max_all_i(nspec_inner,nspec_inner_max_global)
+  call min_all_i(nspec_outer,nspec_outer_min_global)
+  call max_all_i(nspec_outer,nspec_outer_max_global)
+
+  ! user output
+  if(myrank == 0) then
+    write(IMAIN,*) '     total colors:'
+    write(IMAIN,*) '       total colors min/max = ',nb_colors_min,nb_colors_max
+    write(IMAIN,*) '       elements per color min/max = ',min_elem_global,max_elem_global
+    write(IMAIN,*) '       inner elements min/max = ',nspec_inner_min_global,nspec_inner_max_global
+    write(IMAIN,*) '       outer elements min/max = ',nspec_outer_min_global,nspec_outer_max_global
+  endif
+
+  ! debug: outputs permutation array as vtk file
+  if( DEBUG .and. idomain == IREGION_CRUST_MANTLE ) then
+    call create_name_database(prname,myrank,idomain,LOCAL_PATH)
+    filename = prname(1:len_trim(prname))//'perm_'//str_domain(idomain)
+    call write_VTK_data_elem_i(nspec,nglob, &
+                              xstore_crust_mantle,ystore_crust_mantle,zstore_crust_mantle,ibool, &
+                              perm,filename)
+  endif
+
+  deallocate(ispec_is_d)
+  deallocate(num_of_elems_in_this_color)
+
+  end subroutine setup_color
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  subroutine setup_permutation(myrank,nspec,nglob,ibool, &
+                              idomain,perm, &
+                              num_colors_outer,num_colors_inner, &
+                              num_elem_colors, &
+                              num_phase_ispec_d,phase_ispec_inner_d, &
+                              SAVE_MESH_FILES)
+
+  use constants
+
+  use meshfem3D_models_par,only: &
+    TRANSVERSE_ISOTROPY,HETEROGEN_3D_MANTLE,ANISOTROPIC_3D_MANTLE, &
+    ANISOTROPIC_INNER_CORE,ATTENUATION,SAVE_BOUNDARY_MESH
+
+  use meshfem3D_par,only: &
+    ABSORBING_CONDITIONS, &
+    LOCAL_PATH, &
+    NCHUNKS,NSPEC2D_TOP,NSPEC2D_BOTTOM, &
+    xstore,ystore,zstore,idoubling
+
+  use create_regions_mesh_par2,only: &
+    xixstore,xiystore,xizstore,etaxstore,etaystore,etazstore, &
+    gammaxstore,gammaystore,gammazstore, &
+    rhostore,dvpstore,kappavstore,kappahstore,muvstore,muhstore,eta_anisostore, &
+    c11store,c12store,c13store,c14store,c15store,c16store,c22store, &
+    c23store,c24store,c25store,c26store,c33store,c34store,c35store, &
+    c36store,c44store,c45store,c46store,c55store,c56store,c66store, &
+    ibelm_xmin,ibelm_xmax,ibelm_ymin,ibelm_ymax,ibelm_bottom,ibelm_top, &
+    rho_vp,rho_vs, &
+    nspec2D_xmin,nspec2D_xmax,nspec2D_ymin,nspec2D_ymax, &
+    ispec_is_tiso,tau_e_store,Qmu_store, &
+    NSPEC2D_MOHO, NSPEC2D_400, NSPEC2D_670, &
+    ibelm_moho_top,ibelm_moho_bot,ibelm_400_top,ibelm_400_bot, &
+    ibelm_670_top,ibelm_670_bot
+
+  use MPI_crust_mantle_par,only: NSPEC_CRUST_MANTLE, &
+    xstore_crust_mantle,ystore_crust_mantle,zstore_crust_mantle
+
+  use MPI_outer_core_par,only: NSPEC_OUTER_CORE
+  use MPI_inner_core_par,only: NSPEC_INNER_CORE
+
+  implicit none
+
+  integer,intent(in) :: myrank,nspec,nglob
+  integer, dimension(NGLLX,NGLLY,NGLLZ,nspec) :: ibool
+
+  integer,intent(in) :: idomain
+  integer, dimension(nspec),intent(inout) :: perm
+
+  integer :: num_colors_outer,num_colors_inner
+  integer, dimension(num_colors_outer + num_colors_inner) :: num_elem_colors
+  integer :: num_phase_ispec_d
+  integer, dimension(num_phase_ispec_d,2) :: phase_ispec_inner_d
+
+  logical :: SAVE_MESH_FILES
+
+  ! local parameters
+  ! added for sorting
+  double precision, dimension(:,:,:,:), allocatable :: temp_array_dble,temp_array_dble1
+  double precision, dimension(:,:,:,:,:), allocatable :: temp_array_dble_sls,temp_array_dble_sls1
+  real(kind=CUSTOM_REAL), dimension(:,:,:,:), allocatable :: temp_array_real
+  integer, dimension(:,:,:,:), allocatable :: temp_array_int
+  integer, dimension(:), allocatable :: temp_array_int_1D
+  integer, dimension(:), allocatable :: temp_perm_global
+  logical, dimension(:), allocatable :: temp_array_logical_1D
+  logical, dimension(:), allocatable :: mask_global
+
+  integer :: icolor,icounter,ispec,ielem,ier,i
+  integer :: iface,old_ispec,new_ispec
+
+  character(len=256) :: filename
+  character(len=150) :: prname
+
+  ! debug file output
+  logical,parameter :: DEBUG = .false.
+
+  ! sorts array according to permutation
+  allocate(temp_perm_global(nspec),stat=ier)
+  if( ier /= 0 ) stop 'error temp_perm_global array'
+
+  ! global ordering
+  temp_perm_global(:) = 0
+  icounter = 0
+
+  ! fills global permutation array
+
+  ! first outer elements coloring
+  ! phase element counter
+  ielem = 0
+  do icolor = 1,num_colors_outer
+    ! loops through elements
+    do i = 1,num_elem_colors(icolor)
+      ielem = ielem + 1
+      ispec = phase_ispec_inner_d(ielem,1) ! 1 <-- first phase, outer elements
+      ! reorders elements
+      icounter = icounter + 1
+      temp_perm_global(ispec) = icounter
+      ! resets to new order
+      phase_ispec_inner_d(ielem,1) = icounter
+    enddo
+  enddo
+  ! inner elements coloring
+  ielem = 0
+  do icolor = num_colors_outer+1,num_colors_outer+num_colors_inner
+    ! loops through elements
+    do i = 1,num_elem_colors(icolor)
+      ielem = ielem + 1
+      ispec = phase_ispec_inner_d(ielem,2) ! 2 <-- second phase, inner elements
+      ! reorders elements
+      icounter = icounter + 1
+      temp_perm_global(ispec) = icounter
+      ! resets to new order
+      phase_ispec_inner_d(ielem,2) = icounter
+    enddo
+  enddo
+
+  ! handles fictitious cube elements for inner core
+  ! which contains ficticious elements not counted for
+  if( idomain == IREGION_INNER_CORE ) then
+    ! fills up permutation with ficticious numbering
+    do ispec = 1,nspec
+      if( temp_perm_global(ispec) == 0 ) then
+        icounter = icounter + 1
+        temp_perm_global(ispec) = icounter
+      endif
+    enddo
+  endif
+
+  ! checks counter
+  if( icounter /= nspec ) then
+    print*,'error temp perm: ',icounter,nspec
+    stop 'error temporary global permutation incomplete'
+  endif
+  ! checks values
+  if(minval(temp_perm_global) /= 1) call exit_MPI(myrank, 'minval(temp_perm_global) should be 1')
+  if(maxval(temp_perm_global) /= nspec) call exit_MPI(myrank, 'maxval(temp_perm_global) should be nspec')
+
+  ! checks if every element was uniquely set
+  allocate(mask_global(nspec),stat=ier)
+  if( ier /= 0 ) stop 'error allocating temporary mask_global'
+  mask_global(:) = .false.
+
+  icounter = 0 ! counts permutations
+  do ispec = 1, nspec
+    new_ispec = temp_perm_global(ispec)
+    ! checks bounds
+    if( new_ispec < 1 .or. new_ispec > nspec ) call exit_MPI(myrank,'error temp_perm_global ispec bounds')
+    ! checks if already set
+    if( mask_global(new_ispec) ) then
+      print*,'error temp_perm_global:',ispec,new_ispec,'element already set'
+      call exit_MPI(myrank,'error global permutation')
+    else
+      mask_global(new_ispec) = .true.
+    endif
+    ! counts permutations
+    if( new_ispec /= ispec ) icounter = icounter + 1
+  enddo
+
+  ! checks number of set elements
+  if( count(mask_global(:)) /= nspec ) then
+    print*,'error temp_perm_global:',count(mask_global(:)),nspec,'permutation incomplete'
+    call exit_MPI(myrank,'error global permutation incomplete')
+  endif
+  deallocate(mask_global)
+
+  ! user output
+  if(myrank == 0) then
+    write(IMAIN,*) '       number of permutations = ',icounter
+  endif
+
+  ! outputs permutation array as vtk file
+  if( SAVE_MESH_FILES .and. DEBUG .and. idomain == IREGION_CRUST_MANTLE ) then
+    call create_name_database(prname,myrank,idomain,LOCAL_PATH)
+    filename = prname(1:len_trim(prname))//'perm_global'
+    call write_VTK_data_elem_i(nspec,nglob, &
+                              xstore_crust_mantle,ystore_crust_mantle,zstore_crust_mantle,ibool, &
+                              temp_perm_global,filename)
+  endif
+
+  ! store as new permutation
+  perm(:) = temp_perm_global(:)
+  deallocate(temp_perm_global)
+
+  ! permutes all required mesh arrays according to new ordering
+
+  ! permutation of ibool
+  allocate(temp_array_int(NGLLX,NGLLY,NGLLZ,nspec))
+  call permute_elements_integer(ibool,temp_array_int,perm,nspec)
+  deallocate(temp_array_int)
+
+  ! element idoubling flags
+  allocate(temp_array_int_1D(nspec))
+  call permute_elements_integer1D(idoubling,temp_array_int_1D,perm,nspec)
+  deallocate(temp_array_int_1D)
+
+  ! element domain flags
+  allocate(temp_array_logical_1D(nspec))
+  call permute_elements_logical1D(ispec_is_tiso,temp_array_logical_1D,perm,nspec)
+  deallocate(temp_array_logical_1D)
+
+  ! mesh arrays
+  ! double precision
+  allocate(temp_array_dble(NGLLX,NGLLY,NGLLZ,nspec))
+  call permute_elements_dble(xstore,temp_array_dble,perm,nspec)
+  call permute_elements_dble(ystore,temp_array_dble,perm,nspec)
+  call permute_elements_dble(zstore,temp_array_dble,perm,nspec)
+  deallocate(temp_array_dble)
+  ! custom precision
+  allocate(temp_array_real(NGLLX,NGLLY,NGLLZ,nspec))
+  call permute_elements_real(xixstore,temp_array_real,perm,nspec)
+  call permute_elements_real(xiystore,temp_array_real,perm,nspec)
+  call permute_elements_real(xizstore,temp_array_real,perm,nspec)
+  call permute_elements_real(etaxstore,temp_array_real,perm,nspec)
+  call permute_elements_real(etaystore,temp_array_real,perm,nspec)
+  call permute_elements_real(etazstore,temp_array_real,perm,nspec)
+  call permute_elements_real(gammaxstore,temp_array_real,perm,nspec)
+  call permute_elements_real(gammaystore,temp_array_real,perm,nspec)
+  call permute_elements_real(gammazstore,temp_array_real,perm,nspec)
+
+  ! material parameters
+  call permute_elements_real(rhostore,temp_array_real,perm,nspec)
+  call permute_elements_real(kappavstore,temp_array_real,perm,nspec)
+  deallocate(temp_array_real)
+
+  ! boundary surfaces
+  ! note: only arrays pointing to ispec will have to be permutated since value of ispec will be different
+  !
+  ! xmin
+  do iface = 1,nspec2D_xmin
+      old_ispec = ibelm_xmin(iface)
+      new_ispec = perm(old_ispec)
+      ibelm_xmin(iface) = new_ispec
+  enddo
+  ! xmax
+  do iface = 1,nspec2D_xmax
+      old_ispec = ibelm_xmax(iface)
+      new_ispec = perm(old_ispec)
+      ibelm_xmax(iface) = new_ispec
+  enddo
+  ! ymin
+  do iface = 1,nspec2D_ymin
+      old_ispec = ibelm_ymin(iface)
+      new_ispec = perm(old_ispec)
+      ibelm_ymin(iface) = new_ispec
+  enddo
+  ! ymax
+  do iface = 1,nspec2D_ymax
+      old_ispec = ibelm_ymax(iface)
+      new_ispec = perm(old_ispec)
+      ibelm_ymax(iface) = new_ispec
+  enddo
+  ! bottom
+  do iface = 1,NSPEC2D_BOTTOM(idomain)
+      old_ispec = ibelm_bottom(iface)
+      new_ispec = perm(old_ispec)
+      ibelm_bottom(iface) = new_ispec
+  enddo
+  ! top
+  do iface = 1,NSPEC2D_TOP(idomain)
+      old_ispec = ibelm_top(iface)
+      new_ispec = perm(old_ispec)
+      ibelm_top(iface) = new_ispec
+  enddo
+
+  ! attenuation arrays
+  if (ATTENUATION) then
+    if (USE_3D_ATTENUATION_ARRAYS) then
+      allocate(temp_array_dble(NGLLX,NGLLY,NGLLZ,nspec))
+      allocate(temp_array_dble_sls(N_SLS,NGLLX,NGLLY,NGLLZ,nspec))
+      call permute_elements_dble(Qmu_store,temp_array_dble,perm,nspec)
+      call permute_elements_dble_sls(tau_e_store,temp_array_dble_sls,perm,nspec)
+      deallocate(temp_array_dble,temp_array_dble_sls)
+    else
+      allocate(temp_array_dble1(1,1,1,nspec))
+      allocate(temp_array_dble_sls1(N_SLS,1,1,1,nspec))
+      call permute_elements_dble1(Qmu_store,temp_array_dble1,perm,nspec)
+      call permute_elements_dble_sls1(tau_e_store,temp_array_dble_sls1,perm,nspec)
+      deallocate(temp_array_dble1,temp_array_dble_sls1)
+    endif
+  endif
+
+  select case( idomain )
+  case( IREGION_CRUST_MANTLE )
+    ! checks number of elements
+    if( nspec /= NSPEC_CRUST_MANTLE ) &
+      call exit_MPI(myrank,'error in permutation nspec should be NSPEC_CRUST_MANTLE')
+
+    allocate(temp_array_real(NGLLX,NGLLY,NGLLZ,nspec))
+
+    if(ANISOTROPIC_3D_MANTLE) then
+      call permute_elements_real(c11store,temp_array_real,perm,nspec)
+      call permute_elements_real(c12store,temp_array_real,perm,nspec)
+      call permute_elements_real(c13store,temp_array_real,perm,nspec)
+      call permute_elements_real(c14store,temp_array_real,perm,nspec)
+      call permute_elements_real(c15store,temp_array_real,perm,nspec)
+      call permute_elements_real(c16store,temp_array_real,perm,nspec)
+      call permute_elements_real(c22store,temp_array_real,perm,nspec)
+      call permute_elements_real(c23store,temp_array_real,perm,nspec)
+      call permute_elements_real(c24store,temp_array_real,perm,nspec)
+      call permute_elements_real(c25store,temp_array_real,perm,nspec)
+      call permute_elements_real(c26store,temp_array_real,perm,nspec)
+      call permute_elements_real(c33store,temp_array_real,perm,nspec)
+      call permute_elements_real(c34store,temp_array_real,perm,nspec)
+      call permute_elements_real(c35store,temp_array_real,perm,nspec)
+      call permute_elements_real(c36store,temp_array_real,perm,nspec)
+      call permute_elements_real(c44store,temp_array_real,perm,nspec)
+      call permute_elements_real(c45store,temp_array_real,perm,nspec)
+      call permute_elements_real(c46store,temp_array_real,perm,nspec)
+      call permute_elements_real(c55store,temp_array_real,perm,nspec)
+      call permute_elements_real(c56store,temp_array_real,perm,nspec)
+      call permute_elements_real(c66store,temp_array_real,perm,nspec)
+    else
+      call permute_elements_real(muvstore,temp_array_real,perm,nspec)
+
+      if(TRANSVERSE_ISOTROPY) then
+        call permute_elements_real(kappahstore,temp_array_real,perm,nspec)
+        call permute_elements_real(muhstore,temp_array_real,perm,nspec)
+        call permute_elements_real(eta_anisostore,temp_array_real,perm,nspec)
+      endif
+    endif
+
+    if(HETEROGEN_3D_MANTLE) then
+      call permute_elements_real(dvpstore,temp_array_real,perm,nspec)
+    endif
+
+    if(ABSORBING_CONDITIONS .and. NCHUNKS /= 6 ) then
+      call permute_elements_real(rho_vp,temp_array_real,perm,nspec)
+      call permute_elements_real(rho_vs,temp_array_real,perm,nspec)
+    endif
+
+    deallocate(temp_array_real)
+
+    ! discontinuities boundary surface
+    if( SAVE_BOUNDARY_MESH ) then
+      ! moho
+      do iface = 1,nspec2D_MOHO
+        ! top
+        old_ispec = ibelm_moho_top(iface)
+        new_ispec = perm(old_ispec)
+        ibelm_moho_top(iface) = new_ispec
+        ! bottom
+        old_ispec = ibelm_moho_bot(iface)
+        new_ispec = perm(old_ispec)
+        ibelm_moho_bot(iface) = new_ispec
+      enddo
+      ! 400
+      do iface = 1,nspec2D_400
+        ! top
+        old_ispec = ibelm_400_top(iface)
+        new_ispec = perm(old_ispec)
+        ibelm_400_top(iface) = new_ispec
+        ! bottom
+        old_ispec = ibelm_400_bot(iface)
+        new_ispec = perm(old_ispec)
+        ibelm_400_bot(iface) = new_ispec
+      enddo
+      ! 670
+      do iface = 1,nspec2D_670
+        ! top
+        old_ispec = ibelm_670_top(iface)
+        new_ispec = perm(old_ispec)
+        ibelm_670_top(iface) = new_ispec
+        ! bottom
+        old_ispec = ibelm_670_bot(iface)
+        new_ispec = perm(old_ispec)
+        ibelm_670_bot(iface) = new_ispec
+      enddo
+    endif
+
+  case( IREGION_OUTER_CORE )
+    ! checks number of elements
+    if( nspec /= NSPEC_OUTER_CORE ) &
+      call exit_MPI(myrank,'error in permutation nspec should be NSPEC_OUTER_CORE')
+
+    if(ABSORBING_CONDITIONS .and. NCHUNKS /= 6 ) then
+      allocate(temp_array_real(NGLLX,NGLLY,NGLLZ,nspec))
+
+      call permute_elements_real(rho_vp,temp_array_real,perm,nspec)
+
+      deallocate(temp_array_real)
+    endif
+
+  case( IREGION_INNER_CORE )
+    ! checks number of elements
+    if( nspec /= NSPEC_INNER_CORE ) &
+      call exit_MPI(myrank,'error in permutation nspec should be NSPEC_INNER_CORE')
+
+    allocate(temp_array_real(NGLLX,NGLLY,NGLLZ,nspec))
+
+    ! note: muvstore needed for attenuation also for anisotropic inner core
+    call permute_elements_real(muvstore,temp_array_real,perm,nspec)
+
+    !  anisotropy in the inner core only
+    if(ANISOTROPIC_INNER_CORE) then
+      call permute_elements_real(c11store,temp_array_real,perm,nspec)
+      call permute_elements_real(c33store,temp_array_real,perm,nspec)
+      call permute_elements_real(c12store,temp_array_real,perm,nspec)
+      call permute_elements_real(c13store,temp_array_real,perm,nspec)
+      call permute_elements_real(c44store,temp_array_real,perm,nspec)
+    endif
+
+    deallocate(temp_array_real)
+
+  case default
+    stop 'error idomain in setup_permutation'
+  end select
+
+  end subroutine setup_permutation
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+! deprecated ...
+!
+!  subroutine setup_color_perm(myrank,iregion_code,nspec,nglob, &
+!                              ibool,is_on_a_slice_edge,prname, &
+!                              npoin2D_xi,npoin2D_eta)
+!
+!  use constants
+!  use meshfem3D_par,only: NSTEP,DT,NPROC_XI,NPROC_ETA
+!  implicit none
+!
+!  ! standard include of the MPI library
+!  include 'mpif.h'
+!
+!  integer :: myrank
+!  integer :: iregion_code
+!
+!  integer :: nspec,nglob
+!  integer, dimension(NGLLX,NGLLY,NGLLZ,nspec) :: ibool
+!
+!  ! this for non blocking MPI
+!  logical, dimension(nspec) :: is_on_a_slice_edge
+!
+!  ! name of the database file
+!  character(len=150) :: prname
+!
+!  integer :: npoin2D_xi,npoin2D_eta
+!
+!  ! local parameters
+!  integer :: nb_colors_outer_elements,nb_colors_inner_elements,nspec_outer
+!  integer, dimension(:), allocatable :: perm
+!  integer, dimension(:), allocatable :: first_elem_number_in_this_color
+!  integer, dimension(:), allocatable :: num_of_elems_in_this_color
+!
+!  integer :: icolor,ispec_counter
+!  integer :: nspec_outer_min_global,nspec_outer_max_global
+!  integer :: ispec,ier
+!
+!  !!!! David Michea: detection of the edges, coloring and permutation separately
+!  allocate(perm(nspec))
+!
+!  ! implement mesh coloring for GPUs if needed, to create subsets of disconnected elements
+!  ! to remove dependencies and the need for atomic operations in the sum of elemental contributions in the solver
+!  if(USE_MESH_COLORING_GPU) then
+!
+!    ! user output
+!    if(myrank == 0 ) write(IMAIN,*) '  creating mesh coloring'
+!
+!    allocate(first_elem_number_in_this_color(MAX_NUMBER_OF_COLORS + 1))
+!
+!    call get_perm_color_faster(is_on_a_slice_edge,ibool,perm,nspec,nglob, &
+!                              nb_colors_outer_elements,nb_colors_inner_elements,nspec_outer, &
+!                              first_elem_number_in_this_color,myrank)
+!
+!    ! for the last color, the next color is fictitious and its first (fictitious) element number is nspec + 1
+!    first_elem_number_in_this_color(nb_colors_outer_elements + nb_colors_inner_elements + 1) = nspec + 1
+!
+!    allocate(num_of_elems_in_this_color(nb_colors_outer_elements + nb_colors_inner_elements))
+!
+!    ! save mesh coloring
+!    open(unit=99,file=prname(1:len_trim(prname))//'num_of_elems_in_this_color.dat', &
+!         status='unknown',iostat=ier)
+!    if( ier /= 0 ) call exit_mpi(myrank,'error opening num_of_elems_in_this_color file')
+!
+!    ! number of colors for outer elements
+!    write(99,*) nb_colors_outer_elements
+!
+!    ! number of colors for inner elements
+!    write(99,*) nb_colors_inner_elements
+!
+!    ! number of elements in each color
+!    do icolor = 1, nb_colors_outer_elements + nb_colors_inner_elements
+!      num_of_elems_in_this_color(icolor) = first_elem_number_in_this_color(icolor+1) &
+!                                          - first_elem_number_in_this_color(icolor)
+!      write(99,*) num_of_elems_in_this_color(icolor)
+!    enddo
+!    close(99)
+!
+!    ! check that the sum of all the numbers of elements found in each color is equal
+!    ! to the total number of elements in the mesh
+!    if(sum(num_of_elems_in_this_color) /= nspec) then
+!      print *,'nspec = ',nspec
+!      print *,'total number of elements in all the colors of the mesh = ',sum(num_of_elems_in_this_color)
+!      call exit_mpi(myrank,'incorrect total number of elements in all the colors of the mesh')
+!    endif
+!
+!    ! check that the sum of all the numbers of elements found in each color for the outer elements is equal
+!    ! to the total number of outer elements found in the mesh
+!    if(sum(num_of_elems_in_this_color(1:nb_colors_outer_elements)) /= nspec_outer) then
+!      print *,'nspec_outer = ',nspec_outer
+!      print *,'total number of elements in all the colors of the mesh for outer elements = ', &
+!        sum(num_of_elems_in_this_color)
+!      call exit_mpi(myrank,'incorrect total number of elements in all the colors of the mesh for outer elements')
+!    endif
+!
+!    call MPI_ALLREDUCE(nspec_outer,nspec_outer_min_global,1,MPI_INTEGER,MPI_MIN,MPI_COMM_WORLD,ier)
+!    call MPI_ALLREDUCE(nspec_outer,nspec_outer_max_global,1,MPI_INTEGER,MPI_MAX,MPI_COMM_WORLD,ier)
+!
+!    deallocate(first_elem_number_in_this_color)
+!    deallocate(num_of_elems_in_this_color)
+!
+!  else
+!
+!    !! DK DK for regular C + MPI version for CPUs: do not use colors but nonetheless put all the outer elements
+!    !! DK DK first in order to be able to overlap non-blocking MPI communications with calculations
+!
+!    !! DK DK nov 2010, for Rosa Badia / StarSs:
+!    !! no need for mesh coloring, but need to implement inner/outer subsets for non blocking MPI for StarSs
+!    ispec_counter = 0
+!    perm(:) = 0
+!
+!    ! first generate all the outer elements
+!    do ispec = 1,nspec
+!      if(is_on_a_slice_edge(ispec)) then
+!        ispec_counter = ispec_counter + 1
+!        perm(ispec) = ispec_counter
+!      endif
+!    enddo
+!
+!    ! make sure we have detected some outer elements
+!    if(ispec_counter <= 0) stop 'fatal error: no outer elements detected!'
+!
+!    ! store total number of outer elements
+!    nspec_outer = ispec_counter
+!
+!    ! then generate all the inner elements
+!    do ispec = 1,nspec
+!      if(.not. is_on_a_slice_edge(ispec)) then
+!        ispec_counter = ispec_counter + 1
+!        perm(ispec) = ispec_counter
+!      endif
+!    enddo
+!
+!    ! test that all the elements have been used once and only once
+!    if(ispec_counter /= nspec) stop 'fatal error: ispec_counter not equal to nspec'
+!
+!    ! do basic checks
+!    if(minval(perm) /= 1) stop 'minval(perm) should be 1'
+!    if(maxval(perm) /= nspec) stop 'maxval(perm) should be nspec'
+!
+!    call MPI_ALLREDUCE(nspec_outer,nspec_outer_min_global,1,MPI_INTEGER,MPI_MIN,MPI_COMM_WORLD,ier)
+!    call MPI_ALLREDUCE(nspec_outer,nspec_outer_max_global,1,MPI_INTEGER,MPI_MAX,MPI_COMM_WORLD,ier)
+!
+!  endif ! USE_MESH_COLORING_GPU
+!
+!  !! DK DK and Manh Ha, Nov 2011: added this to use the new mesher in the CUDA or C / StarSs test codes
+!
+!  if (myrank == 0 .and. iregion_code == IREGION_CRUST_MANTLE) then
+!    ! write a header file for the Fortran version of the solver
+!    open(unit=99,file=prname(1:len_trim(prname))//'values_from_mesher_f90.h', &
+!          status='unknown',iostat=ier)
+!    if( ier /= 0 ) call exit_mpi(myrank,'error opening file values_from_mesher_f90.h')
+!
+!    write(99,*) 'integer, parameter :: NSPEC = ',nspec
+!    write(99,*) 'integer, parameter :: NGLOB = ',nglob
+!    !!! DK DK use 1000 time steps only for the scaling tests
+!    write(99,*) 'integer, parameter :: NSTEP = 1000 !!!!!!!!!!! ',nstep
+!    write(99,*) 'real(kind=4), parameter :: deltat = ',DT
+!    write(99,*)
+!    write(99,*) 'integer, parameter ::  NGLOB2DMAX_XMIN_XMAX = ',npoin2D_xi
+!    write(99,*) 'integer, parameter ::  NGLOB2DMAX_YMIN_YMAX = ',npoin2D_eta
+!    write(99,*) 'integer, parameter ::  NGLOB2DMAX_ALL = ',max(npoin2D_xi,npoin2D_eta)
+!    write(99,*) 'integer, parameter ::  NPROC_XI = ',NPROC_XI
+!    write(99,*) 'integer, parameter ::  NPROC_ETA = ',NPROC_ETA
+!    write(99,*)
+!    write(99,*) '! element number of the source and of the station'
+!    write(99,*) '! after permutation of the elements by mesh coloring'
+!    write(99,*) '! and inner/outer set splitting in the mesher'
+!    write(99,*) 'integer, parameter :: NSPEC_SOURCE = ',perm(NSPEC/3)
+!    write(99,*) 'integer, parameter :: RANK_SOURCE = 0'
+!    write(99,*)
+!    write(99,*) 'integer, parameter :: RANK_STATION = (NPROC_XI*NPROC_ETA - 1)'
+!    write(99,*) 'integer, parameter :: NSPEC_STATION = ',perm(2*NSPEC/3)
+!
+!    ! save coordinates of the seismic source
+!    !   write(99,*) xstore(2,2,2,10);
+!    !   write(99,*) ystore(2,2,2,10);
+!    !   write(99,*) zstore(2,2,2,10);
+!
+!    ! save coordinates of the seismic station
+!    !   write(99,*) xstore(2,2,2,nspec-10);
+!    !   write(99,*) ystore(2,2,2,nspec-10);
+!    !   write(99,*) zstore(2,2,2,nspec-10);
+!    close(99)
+!
+!    !! write a header file for the C version of the solver
+!    open(unit=99,file=prname(1:len_trim(prname))//'values_from_mesher_C.h', &
+!          status='unknown',iostat=ier)
+!    if( ier /= 0 ) call exit_mpi(myrank,'error opening file values_from_mesher_C.h')
+!
+!    write(99,*) '#define NSPEC ',nspec
+!    write(99,*) '#define NGLOB ',nglob
+!    !!    write(99,*) '#define NSTEP ',nstep
+!    !!! DK DK use 1000 time steps only for the scaling tests
+!    write(99,*) '// #define NSTEP ',nstep
+!    write(99,*) '#define NSTEP 1000'
+!    ! put an "f" at the end to force single precision
+!    write(99,"('#define deltat ',e18.10,'f')") DT
+!    write(99,*) '#define NGLOB2DMAX_XMIN_XMAX ',npoin2D_xi
+!    write(99,*) '#define NGLOB2DMAX_YMIN_YMAX ',npoin2D_eta
+!    write(99,*) '#define NGLOB2DMAX_ALL ',max(npoin2D_xi,npoin2D_eta)
+!    write(99,*) '#define NPROC_XI ',NPROC_XI
+!    write(99,*) '#define NPROC_ETA ',NPROC_ETA
+!    write(99,*)
+!    write(99,*) '// element and MPI slice number of the source and the station'
+!    write(99,*) '// after permutation of the elements by mesh coloring'
+!    write(99,*) '// and inner/outer set splitting in the mesher'
+!    write(99,*) '#define RANK_SOURCE 0'
+!    write(99,*) '#define NSPEC_SOURCE ',perm(NSPEC/3)
+!    write(99,*)
+!    write(99,*) '#define RANK_STATION (NPROC_XI*NPROC_ETA - 1)'
+!    write(99,*) '#define NSPEC_STATION ',perm(2*NSPEC/3)
+!    close(99)
+!
+!    open(unit=99,file=prname(1:len_trim(prname))//'values_from_mesher_nspec_outer.h', &
+!          status='unknown',iostat=ier)
+!    if( ier /= 0 ) call exit_mpi(myrank,'error opening values_from_mesher_nspec_outer.h file')
+!
+!    write(99,*) '#define NSPEC_OUTER ',nspec_outer_max_global
+!    write(99,*) '// NSPEC_OUTER_min = ',nspec_outer_min_global
+!    write(99,*) '// NSPEC_OUTER_max = ',nspec_outer_max_global
+!    close(99)
+!
+!  endif
+!
+!  !! DK DK and Manh Ha, Nov 2011: added this to use the new mesher in the CUDA or C / StarSs test codes
+!
+!  deallocate(perm)
+!
+!
+!  end subroutine setup_color_perm

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/setup_counters.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/setup_counters.f90	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/setup_counters.f90	2013-07-01 01:33:15 UTC (rev 22469)
@@ -0,0 +1,145 @@
+!=====================================================================
+!
+!          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
+!          --------------------------------------------------
+!
+!          Main authors: Dimitri Komatitsch and Jeroen Tromp
+!                        Princeton University, USA
+!             and University of Pau / CNRS / INRIA, France
+! (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
+!                            April 2011
+!
+! This program is free software; you can redistribute it and/or modify
+! it under the terms of the GNU General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or
+! (at your option) any later version.
+!
+! This program is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+! GNU General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License along
+! with this program; if not, write to the Free Software Foundation, Inc.,
+! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+!
+!=====================================================================
+
+
+  subroutine setup_counters(myrank, &
+                        NSPEC1D_RADIAL,NSPEC2D_XI,NSPEC2D_ETA,NGLOB1D_RADIAL, &
+                        DIFF_NSPEC1D_RADIAL,DIFF_NSPEC2D_XI,DIFF_NSPEC2D_ETA, &
+                        CUT_SUPERBRICK_XI,CUT_SUPERBRICK_ETA, &
+                        NPROCTOT,iproc_xi_slice,iproc_eta_slice, &
+                        NSPEC1D_RADIAL_CORNER,NSPEC2D_XI_FACE, &
+                        NSPEC2D_ETA_FACE,NGLOB1D_RADIAL_CORNER)
+
+! initializes the per-corner / per-face element and point counters for all
+! regions, then corrects the outer-core counters when the superbrick is cut
+! in the xi and/or eta direction
+!
+! returns: NSPEC1D_RADIAL_CORNER,NSPEC2D_XI_FACE,
+!              NSPEC2D_ETA_FACE,NGLOB1D_RADIAL_CORNER
+
+  implicit none
+
+  include "constants.h"
+
+  integer myrank
+
+! this for all the regions
+  integer, dimension(MAX_NUM_REGIONS) :: NSPEC2D_XI,NSPEC2D_ETA, &
+                                         NSPEC1D_RADIAL,NGLOB1D_RADIAL
+
+  integer, dimension(NB_SQUARE_CORNERS,NB_CUT_CASE) :: DIFF_NSPEC1D_RADIAL
+  integer, dimension(NB_SQUARE_EDGES_ONEDIR,NB_CUT_CASE) :: DIFF_NSPEC2D_XI,DIFF_NSPEC2D_ETA
+
+  ! addressing for all the slices
+  integer :: NPROCTOT
+  integer, dimension(0:NPROCTOT-1) :: iproc_xi_slice,iproc_eta_slice
+
+  logical :: CUT_SUPERBRICK_XI,CUT_SUPERBRICK_ETA
+
+! this for the different corners of the slice (which are different if the superbrick is cut)
+! 1 : xi_min, eta_min
+! 2 : xi_max, eta_min
+! 3 : xi_max, eta_max
+! 4 : xi_min, eta_max
+  integer, dimension(MAX_NUM_REGIONS,NB_SQUARE_CORNERS) :: &
+    NSPEC1D_RADIAL_CORNER,NGLOB1D_RADIAL_CORNER
+! 1 -> min, 2 -> max
+  integer, dimension(MAX_NUM_REGIONS,NB_SQUARE_EDGES_ONEDIR) :: NSPEC2D_XI_FACE,NSPEC2D_ETA_FACE
+
+
+  ! local parameters
+  integer :: iregion
+  ! index of the superbrick cut case to apply (0 means no correction needed)
+  integer :: icase
+
+  ! default: uniform counters, identical on every corner/face of the slice
+  do iregion=1,MAX_NUM_REGIONS
+    NSPEC1D_RADIAL_CORNER(iregion,:) = NSPEC1D_RADIAL(iregion)
+    NSPEC2D_XI_FACE(iregion,:) = NSPEC2D_XI(iregion)
+    NSPEC2D_ETA_FACE(iregion,:) = NSPEC2D_ETA(iregion)
+    NGLOB1D_RADIAL_CORNER(iregion,:) = NGLOB1D_RADIAL(iregion)
+  enddo
+
+  ! selects the cut case from the parity of this slice's (xi,eta) position;
+  ! the four update statements below were previously duplicated in every branch,
+  ! they are now applied once with the selected case index
+  icase = 0
+  if (CUT_SUPERBRICK_XI) then
+    if (CUT_SUPERBRICK_ETA) then
+      ! cut in both directions: four distinct parity combinations
+      if (mod(iproc_xi_slice(myrank),2) == 0) then
+        if (mod(iproc_eta_slice(myrank),2) == 0) then
+          icase = 1
+        else
+          icase = 2
+        endif
+      else
+        if (mod(iproc_eta_slice(myrank),2) == 0) then
+          icase = 3
+        else
+          icase = 4
+        endif
+      endif
+    else
+      ! cut in xi only: case depends on xi parity alone
+      if (mod(iproc_xi_slice(myrank),2) == 0) then
+        icase = 1
+      else
+        icase = 2
+      endif
+    endif
+  else
+    if (CUT_SUPERBRICK_ETA) then
+      ! cut in eta only: case depends on eta parity alone
+      if (mod(iproc_eta_slice(myrank),2) == 0) then
+        icase = 1
+      else
+        icase = 2
+      endif
+    endif
+  endif
+
+  ! applies the outer-core correction for the selected cut case
+  ! (only the outer core region is affected by the superbrick cut)
+  if (icase /= 0) then
+    NSPEC1D_RADIAL_CORNER(IREGION_OUTER_CORE,:) = NSPEC1D_RADIAL_CORNER(IREGION_OUTER_CORE,:) + DIFF_NSPEC1D_RADIAL(:,icase)
+    NSPEC2D_XI_FACE(IREGION_OUTER_CORE,:) = NSPEC2D_XI_FACE(IREGION_OUTER_CORE,:) + DIFF_NSPEC2D_XI(:,icase)
+    NSPEC2D_ETA_FACE(IREGION_OUTER_CORE,:) = NSPEC2D_ETA_FACE(IREGION_OUTER_CORE,:) + DIFF_NSPEC2D_ETA(:,icase)
+    ! each radial element adds (NGLLZ-1) extra GLL points along the radius
+    NGLOB1D_RADIAL_CORNER(IREGION_OUTER_CORE,:) = NGLOB1D_RADIAL_CORNER(IREGION_OUTER_CORE,:) &
+                                                  + (DIFF_NSPEC1D_RADIAL(:,icase)*(NGLLZ-1))
+  endif
+
+  end subroutine setup_counters
+
+

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/setup_inner_outer.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/setup_inner_outer.f90	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/setup_inner_outer.f90	2013-07-01 01:33:15 UTC (rev 22469)
@@ -0,0 +1,188 @@
+!=====================================================================
+!
+!          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
+!          --------------------------------------------------
+!
+!          Main authors: Dimitri Komatitsch and Jeroen Tromp
+!                        Princeton University, USA
+!             and University of Pau / CNRS / INRIA, France
+! (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
+!                            April 2011
+!
+! This program is free software; you can redistribute it and/or modify
+! it under the terms of the GNU General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or
+! (at your option) any later version.
+!
+! This program is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+! GNU General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License along
+! with this program; if not, write to the Free Software Foundation, Inc.,
+! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+!
+!=====================================================================
+
+
+  subroutine setup_inner_outer(iregion_code)
+
+! splits the elements of the given region into "outer" elements (flagged in
+! is_on_a_slice_edge, i.e. contributing to an MPI interface) and "inner"
+! elements, and stores both lists in the region's phase_ispec_inner_* array
+! so that the solver can overlap MPI communications with computations
+
+  use meshfem3D_par,only: &
+    myrank,OUTPUT_FILES,IMAIN, &
+    IREGION_CRUST_MANTLE,IREGION_OUTER_CORE,IREGION_INNER_CORE
+
+  use meshfem3D_par,only: ibool,is_on_a_slice_edge
+
+  use MPI_crust_mantle_par
+  use MPI_outer_core_par
+  use MPI_inner_core_par
+
+  implicit none
+
+  integer,intent(in) :: iregion_code
+
+  ! local parameters
+  real :: percentage_edge
+  integer :: ier,ispec,iinner,iouter
+  ! debug file output
+  character(len=150) :: filename
+  logical,parameter :: DEBUG = .false.
+
+  ! stores inner / outer elements
+  !
+  ! note: arrays is_on_a_slice_edge_.. have flags set for elements which need to
+  !         communicate with other MPI processes
+  select case( iregion_code )
+  case( IREGION_CRUST_MANTLE )
+    ! crust_mantle
+    nspec_outer_crust_mantle = count( is_on_a_slice_edge )
+    nspec_inner_crust_mantle = NSPEC_CRUST_MANTLE - nspec_outer_crust_mantle
+
+    ! both phases share one array sized for the larger of the two sets
+    num_phase_ispec_crust_mantle = max(nspec_inner_crust_mantle,nspec_outer_crust_mantle)
+
+    allocate(phase_ispec_inner_crust_mantle(num_phase_ispec_crust_mantle,2),stat=ier)
+    if( ier /= 0 ) call exit_mpi(myrank,'error allocating array phase_ispec_inner_crust_mantle')
+
+    ! column 1 holds outer (edge) elements, column 2 holds inner elements
+    phase_ispec_inner_crust_mantle(:,:) = 0
+    iinner = 0
+    iouter = 0
+    do ispec=1,NSPEC_CRUST_MANTLE
+      if( is_on_a_slice_edge(ispec) ) then
+        ! outer element
+        iouter = iouter + 1
+        phase_ispec_inner_crust_mantle(iouter,1) = ispec
+      else
+        ! inner element
+        iinner = iinner + 1
+        phase_ispec_inner_crust_mantle(iinner,2) = ispec
+      endif
+    enddo
+
+    ! user output
+    if(myrank == 0) then
+      write(IMAIN,*)
+      write(IMAIN,*) 'for overlapping of communications with calculations:'
+      write(IMAIN,*)
+      percentage_edge = 100. * nspec_outer_crust_mantle / real(NSPEC_CRUST_MANTLE)
+      write(IMAIN,*) 'percentage of edge elements in crust/mantle ',percentage_edge,'%'
+      write(IMAIN,*) 'percentage of volume elements in crust/mantle ',100. - percentage_edge,'%'
+      write(IMAIN,*)
+    endif
+
+    ! debug: saves element flags
+    if( DEBUG ) then
+      write(filename,'(a,i6.6)') trim(OUTPUT_FILES)//'/MPI_innerouter_crust_mantle_proc',myrank
+      call write_VTK_data_elem_l(NSPEC_CRUST_MANTLE,NGLOB_CRUST_MANTLE, &
+                                xstore_crust_mantle,ystore_crust_mantle,zstore_crust_mantle, &
+                                ibool, &
+                                is_on_a_slice_edge,filename)
+    endif
+
+  case( IREGION_OUTER_CORE )
+    ! outer_core
+    nspec_outer_outer_core = count( is_on_a_slice_edge )
+    nspec_inner_outer_core = NSPEC_OUTER_CORE - nspec_outer_outer_core
+
+    num_phase_ispec_outer_core = max(nspec_inner_outer_core,nspec_outer_outer_core)
+
+    allocate(phase_ispec_inner_outer_core(num_phase_ispec_outer_core,2),stat=ier)
+    if( ier /= 0 ) call exit_mpi(myrank,'error allocating array phase_ispec_inner_outer_core')
+
+    phase_ispec_inner_outer_core(:,:) = 0
+    iinner = 0
+    iouter = 0
+    do ispec=1,NSPEC_OUTER_CORE
+      if( is_on_a_slice_edge(ispec) ) then
+        ! outer element
+        iouter = iouter + 1
+        phase_ispec_inner_outer_core(iouter,1) = ispec
+      else
+        ! inner element
+        iinner = iinner + 1
+        phase_ispec_inner_outer_core(iinner,2) = ispec
+      endif
+    enddo
+
+    ! user output
+    if(myrank == 0) then
+      percentage_edge = 100.* nspec_outer_outer_core / real(NSPEC_OUTER_CORE)
+      write(IMAIN,*) 'percentage of edge elements in outer core ',percentage_edge,'%'
+      write(IMAIN,*) 'percentage of volume elements in outer core ',100. - percentage_edge,'%'
+      write(IMAIN,*)
+    endif
+
+    ! debug: saves element flags
+    if( DEBUG ) then
+      write(filename,'(a,i6.6)') trim(OUTPUT_FILES)//'/MPI_innerouter_outer_core_proc',myrank
+      call write_VTK_data_elem_l(NSPEC_OUTER_CORE,NGLOB_OUTER_CORE, &
+                                xstore_outer_core,ystore_outer_core,zstore_outer_core, &
+                                ibool, &
+                                is_on_a_slice_edge,filename)
+    endif
+
+  case( IREGION_INNER_CORE )
+    ! inner_core
+    nspec_outer_inner_core = count( is_on_a_slice_edge )
+    nspec_inner_inner_core = NSPEC_INNER_CORE - nspec_outer_inner_core
+
+    num_phase_ispec_inner_core = max(nspec_inner_inner_core,nspec_outer_inner_core)
+
+    allocate(phase_ispec_inner_inner_core(num_phase_ispec_inner_core,2),stat=ier)
+    if( ier /= 0 ) call exit_mpi(myrank,'error allocating array phase_ispec_inner_inner_core')
+
+    phase_ispec_inner_inner_core(:,:) = 0
+    iinner = 0
+    iouter = 0
+    do ispec=1,NSPEC_INNER_CORE
+      if( is_on_a_slice_edge(ispec) ) then
+        ! outer element
+        iouter = iouter + 1
+        phase_ispec_inner_inner_core(iouter,1) = ispec
+      else
+        ! inner element
+        iinner = iinner + 1
+        phase_ispec_inner_inner_core(iinner,2) = ispec
+      endif
+    enddo
+
+    ! user output
+    if(myrank == 0) then
+      percentage_edge = 100. * nspec_outer_inner_core / real(NSPEC_INNER_CORE)
+      write(IMAIN,*) 'percentage of edge elements in inner core ',percentage_edge,'%'
+      write(IMAIN,*) 'percentage of volume elements in inner core ',100. - percentage_edge,'%'
+      write(IMAIN,*)
+    endif
+
+    ! debug: saves element flags
+    if( DEBUG ) then
+      write(filename,'(a,i6.6)') trim(OUTPUT_FILES)//'/MPI_innerouter_inner_core_proc',myrank
+      call write_VTK_data_elem_l(NSPEC_INNER_CORE,NGLOB_INNER_CORE, &
+                                xstore_inner_core,ystore_inner_core,zstore_inner_core, &
+                                ibool, &
+                                is_on_a_slice_edge,filename)
+    endif
+
+  end select
+
+  ! note: END name casing fixed to match the declared name setup_inner_outer
+  end subroutine setup_inner_outer

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/setup_model.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/setup_model.f90	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/setup_model.f90	2013-07-01 01:33:15 UTC (rev 22469)
@@ -0,0 +1,189 @@
+!=====================================================================
+!
+!          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
+!          --------------------------------------------------
+!
+!          Main authors: Dimitri Komatitsch and Jeroen Tromp
+!                        Princeton University, USA
+!             and University of Pau / CNRS / INRIA, France
+! (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
+!                            April 2011
+!
+! This program is free software; you can redistribute it and/or modify
+! it under the terms of the GNU General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or
+! (at your option) any later version.
+!
+! This program is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+! GNU General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License along
+! with this program; if not, write to the Free Software Foundation, Inc.,
+! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+!
+!=====================================================================
+
+  subroutine setup_model()
+
+! sets up the mesh model for this run:
+! prints setup info, allocates the slice-addressing arrays, creates the global
+! slice addressing, sets up the superbrick counters and broadcasts the 3D model.
+! note: contains collective calls, must be executed by all MPI processes.
+
+  use meshfem3D_par
+  implicit none
+
+  ! local parameters
+  integer :: ier
+
+  ! user output
+  if(myrank == 0) call sm_output_info()
+
+  ! dynamic allocation of mesh arrays
+  ! (checks allocation success, consistent with the other mesher routines)
+  allocate(addressing(NCHUNKS,0:NPROC_XI-1,0:NPROC_ETA-1),stat=ier)
+  if( ier /= 0 ) call exit_mpi(myrank,'error allocating array addressing')
+  allocate(ichunk_slice(0:NPROCTOT-1),stat=ier)
+  if( ier /= 0 ) call exit_mpi(myrank,'error allocating array ichunk_slice')
+  allocate(iproc_xi_slice(0:NPROCTOT-1),stat=ier)
+  if( ier /= 0 ) call exit_mpi(myrank,'error allocating array iproc_xi_slice')
+  allocate(iproc_eta_slice(0:NPROCTOT-1),stat=ier)
+  if( ier /= 0 ) call exit_mpi(myrank,'error allocating array iproc_eta_slice')
+
+  ! creates global slice addressing for solver
+  call create_addressing(myrank,NCHUNKS,NPROC,NPROC_ETA,NPROC_XI,NPROCTOT, &
+                        addressing,ichunk_slice,iproc_xi_slice,iproc_eta_slice, &
+                        OUTPUT_FILES)
+
+  ! this for the different counters (which are now different if the superbrick is cut in the outer core)
+  call setup_counters(myrank, &
+                        NSPEC1D_RADIAL,NSPEC2D_XI,NSPEC2D_ETA,NGLOB1D_RADIAL, &
+                        DIFF_NSPEC1D_RADIAL,DIFF_NSPEC2D_XI,DIFF_NSPEC2D_ETA, &
+                        CUT_SUPERBRICK_XI,CUT_SUPERBRICK_ETA, &
+                        NPROCTOT,iproc_xi_slice,iproc_eta_slice, &
+                        NSPEC1D_RADIAL_CORNER,NSPEC2D_XI_FACE, &
+                        NSPEC2D_ETA_FACE,NGLOB1D_RADIAL_CORNER)
+
+  ! distributes 3D models
+  call meshfem3D_models_broadcast(myrank,NSPEC, &
+                                MIN_ATTENUATION_PERIOD,MAX_ATTENUATION_PERIOD,&
+                                R80,R220,R670,RCMB,RICB, &
+                                LOCAL_PATH)
+
+  ! user output
+  if(myrank == 0 ) then
+    write(IMAIN,*)
+    write(IMAIN,*)
+  endif
+  call sync_all()
+
+  end subroutine setup_model
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  subroutine sm_output_info()
+
+! prints the mesher setup information to the main output file IMAIN:
+! MPI process layout, mesh dimensions, model settings and physics flags.
+! intended to be called by the master process only (caller guards on myrank == 0)
+
+  use meshfem3D_models_par
+  use meshfem3D_par,only: &
+    MODEL,sizeprocs,NEX_XI,NEX_ETA, &
+    NPROC_XI,NPROC_ETA,NPROC,NCHUNKS,NPROCTOT, &
+    R_CENTRAL_CUBE
+
+  implicit none
+
+  ! user output: MPI process and mesh slice layout
+  write(IMAIN,*)
+  write(IMAIN,*) 'There are ',sizeprocs,' MPI processes'
+  write(IMAIN,*) 'Processes are numbered from 0 to ',sizeprocs-1
+  write(IMAIN,*)
+  write(IMAIN,*) 'There are ',NEX_XI,' elements along xi in each chunk'
+  write(IMAIN,*) 'There are ',NEX_ETA,' elements along eta in each chunk'
+  write(IMAIN,*)
+  write(IMAIN,*) 'There are ',NPROC_XI,' slices along xi in each chunk'
+  write(IMAIN,*) 'There are ',NPROC_ETA,' slices along eta in each chunk'
+  write(IMAIN,*) 'There is a total of ',NPROC,' slices in each chunk'
+  write(IMAIN,*) 'There are ',NCHUNKS,' chunks in the global mesh'
+  write(IMAIN,*) 'There is a total of ',NPROCTOT,' slices in the global mesh'
+  write(IMAIN,*)
+  write(IMAIN,*) 'NGLLX = ',NGLLX
+  write(IMAIN,*) 'NGLLY = ',NGLLY
+  write(IMAIN,*) 'NGLLZ = ',NGLLZ
+  write(IMAIN,*)
+  write(IMAIN,*) 'Shape functions defined by NGNOD = ',NGNOD,' control nodes'
+  write(IMAIN,*) 'Surface shape functions defined by NGNOD2D = ',NGNOD2D,' control nodes'
+  write(IMAIN,*)
+
+  ! model user parameters (one line per physics flag)
+  write(IMAIN,*) 'model: ',trim(MODEL)
+  if(OCEANS) then
+    write(IMAIN,*) '  incorporating the oceans using equivalent load'
+  else
+    write(IMAIN,*) '  no oceans'
+  endif
+  if(ELLIPTICITY) then
+    write(IMAIN,*) '  incorporating ellipticity'
+  else
+    write(IMAIN,*) '  no ellipticity'
+  endif
+  if(TOPOGRAPHY) then
+    write(IMAIN,*) '  incorporating surface topography'
+  else
+    write(IMAIN,*) '  no surface topography'
+  endif
+  if(GRAVITY) then
+    write(IMAIN,*) '  incorporating self-gravitation (Cowling approximation)'
+  else
+    write(IMAIN,*) '  no self-gravitation'
+  endif
+  if(ROTATION) then
+    write(IMAIN,*) '  incorporating rotation'
+  else
+    write(IMAIN,*) '  no rotation'
+  endif
+  if(ATTENUATION) then
+    write(IMAIN,*) '  incorporating attenuation using ',N_SLS,' standard linear solids'
+    if(ATTENUATION_3D) write(IMAIN,*)'  using 3D attenuation model'
+  else
+    write(IMAIN,*) '  no attenuation'
+  endif
+  write(IMAIN,*)
+
+  ! model mesh parameters (lateral variations, crust and anisotropy settings)
+  if(ISOTROPIC_3D_MANTLE) then
+    write(IMAIN,*) '  incorporating 3-D lateral variations'
+  else
+    write(IMAIN,*) '  no 3-D lateral variations'
+  endif
+  if(HETEROGEN_3D_MANTLE) then
+    write(IMAIN,*) '  incorporating heterogeneities in the mantle'
+  else
+    write(IMAIN,*) '  no heterogeneities in the mantle'
+  endif
+  if(CRUSTAL) then
+    write(IMAIN,*) '  incorporating crustal variations'
+  else
+    write(IMAIN,*) '  no crustal variations'
+  endif
+  if(ONE_CRUST) then
+    write(IMAIN,*) '  using one layer only in PREM crust'
+  else
+    write(IMAIN,*) '  using unmodified 1D crustal model with two layers'
+  endif
+  if(TRANSVERSE_ISOTROPY) then
+    write(IMAIN,*) '  incorporating anisotropy'
+  else
+    write(IMAIN,*) '  no anisotropy'
+  endif
+  if(ANISOTROPIC_INNER_CORE) then
+    write(IMAIN,*) '  incorporating anisotropic inner core'
+  else
+    write(IMAIN,*) '  no inner-core anisotropy'
+  endif
+  if(ANISOTROPIC_3D_MANTLE) then
+    write(IMAIN,*) '  incorporating anisotropic mantle'
+  else
+    write(IMAIN,*) '  no general mantle anisotropy'
+  endif
+  write(IMAIN,*)
+  write(IMAIN,*) 'Reference radius of the Earth used is ',R_EARTH_KM,' km'
+  write(IMAIN,*)
+  write(IMAIN,*) 'Central cube is at a radius of ',R_CENTRAL_CUBE/1000.d0,' km'
+
+  ! flushes I/O buffer
+  call flush_IMAIN()
+
+  end subroutine sm_output_info

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/test_MPI_interfaces.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/test_MPI_interfaces.f90	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/test_MPI_interfaces.f90	2013-07-01 01:33:15 UTC (rev 22469)
@@ -0,0 +1,575 @@
+!=====================================================================
+!
+!          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
+!          --------------------------------------------------
+!
+!          Main authors: Dimitri Komatitsch and Jeroen Tromp
+!                        Princeton University, USA
+!             and University of Pau / CNRS / INRIA, France
+! (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
+!                            April 2011
+!
+! This program is free software; you can redistribute it and/or modify
+! it under the terms of the GNU General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or
+! (at your option) any later version.
+!
+! This program is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+! GNU General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License along
+! with this program; if not, write to the Free Software Foundation, Inc.,
+! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+!
+!=====================================================================
+
+
+  subroutine test_MPI_neighbours(iregion_code, &
+                                     num_interfaces,max_nibool_interfaces, &
+                                     my_neighbours,nibool_interfaces, &
+                                     ibool_interfaces)
+
+! checks the MPI interface setup of one earth region:
+! - interface ibool entries must be valid (non-zero) and unique within an interface
+! - neighbour addressing must be symmetric: if rank a lists rank b with n points,
+!   then rank b must list rank a with the same n points
+! aborts the run with an explicit error message on any inconsistency
+
+  use constants
+  use meshfem3D_par,only: NPROCTOT,myrank
+  use MPI_crust_mantle_par,only: NGLOB_CRUST_MANTLE
+  use MPI_outer_core_par,only: NGLOB_OUTER_CORE
+  use MPI_inner_core_par,only: NGLOB_INNER_CORE
+
+  implicit none
+
+  integer,intent(in) :: iregion_code
+  integer,intent(in) :: num_interfaces,max_nibool_interfaces
+  integer,dimension(num_interfaces),intent(in) :: my_neighbours,nibool_interfaces
+  integer,dimension(max_nibool_interfaces,num_interfaces),intent(in):: ibool_interfaces
+
+  ! local parameters
+  integer,dimension(:),allocatable :: dummy_i
+  integer,dimension(:,:),allocatable :: test_interfaces
+  integer,dimension(:,:),allocatable :: test_interfaces_nibool
+  integer :: ineighbour,iproc,inum,i,j,ier,ipoints,max_num,iglob
+  logical :: is_okay
+  logical,dimension(:),allocatable :: mask
+
+  ! debug output
+  !do iproc=0,NPROCTOT-1
+  !  if( myrank == iproc ) then
+  !    print*, 'mpi rank',myrank,'interfaces : ',num_interfaces,'region',iregion_code
+  !    do j=1,num_interfaces
+  !      print*, '  my_neighbours: ',my_neighbours(j),nibool_interfaces(j)
+  !    enddo
+  !    print*
+  !  endif
+  !  call sync_all()
+  !enddo
+
+  ! checks maximum number of interface points
+  ! (any multi-process run must have at least one interface point)
+  if( max_nibool_interfaces == 0 .and. NPROCTOT > 1 ) then
+    print*,'test MPI: rank ',myrank,'max_nibool_interfaces is zero'
+    call exit_mpi(myrank,'error test max_nibool_interfaces zero')
+  endif
+
+  ! allocates global mask over all global points of this region
+  select case(iregion_code)
+  case( IREGION_CRUST_MANTLE )
+    allocate(mask(NGLOB_CRUST_MANTLE),stat=ier)
+  case( IREGION_OUTER_CORE )
+    allocate(mask(NGLOB_OUTER_CORE),stat=ier)
+  case( IREGION_INNER_CORE )
+    allocate(mask(NGLOB_INNER_CORE),stat=ier)
+  case default
+    call exit_mpi(myrank,'error test MPI: iregion_code not recognized')
+  end select
+  if( ier /= 0 ) call exit_mpi(myrank,'error test MPI: allocating mask failed')
+
+  ! test ibool entries
+  ! (must be non-zero and unique within each interface)
+  do i = 1,num_interfaces
+    ! number of interface points must fit the declared maximum
+    if( nibool_interfaces(i) > max_nibool_interfaces ) then
+      print*,'error test MPI: rank',myrank,'nibool values:',nibool_interfaces(i),max_nibool_interfaces
+      call exit_mpi(myrank,'error test MPI: nibool exceeds max_nibool_interfaces')
+    endif
+
+    mask(:) = .false.
+
+    ! ibool entries
+    do j = 1,nibool_interfaces(i)
+      iglob = ibool_interfaces(j,i)
+
+      ! checks zero entry
+      if( iglob <= 0 ) then
+        print*,'error test MPI: rank ',myrank,'ibool value:',iglob,'interface:',i,'point:',j
+        call exit_mpi(myrank,'error test MPI: ibool values invalid')
+      endif
+
+      ! checks consecutive duplicate (fast path; the mask below catches
+      ! non-consecutive duplicates as well)
+      if( j < nibool_interfaces(i) ) then
+        if( iglob == ibool_interfaces(j+1,i) ) then
+          print*,'error test MPI: rank',myrank,'ibool duplicate:',iglob,'interface:',i,'point:',j
+          call exit_mpi(myrank,'error test MPI: ibool duplicates')
+        endif
+      endif
+
+      ! checks if unique global value within this interface
+      if( .not. mask(iglob) ) then
+        mask(iglob) = .true.
+      else
+        print*,'error test MPI: rank',myrank,'ibool masked:',iglob,'interface:',i,'point:',j
+        call exit_mpi(myrank,'error test MPI: ibool masked already')
+      endif
+    enddo
+  enddo
+  deallocate(mask)
+
+  ! checks neighbors
+  ! gets maximum interfaces from all processes
+  call max_all_i(num_interfaces,max_num)
+
+  ! master gathers infos
+  if( myrank == 0 ) then
+    ! array for gathering infos
+    ! (dimensioned 0:NPROCTOT; only 0:NPROCTOT-1 is used)
+    allocate(test_interfaces(max_num,0:NPROCTOT),stat=ier)
+    if( ier /= 0 ) call exit_mpi(myrank,'error allocating test_interfaces')
+    test_interfaces = -1
+
+    allocate(test_interfaces_nibool(max_num,0:NPROCTOT),stat=ier)
+    if( ier /= 0 ) call exit_mpi(myrank,'error allocating test_interfaces_nibool')
+    test_interfaces_nibool = 0
+
+    ! used to store number of interfaces per proc
+    allocate(dummy_i(0:NPROCTOT),stat=ier)
+    if( ier /= 0 ) call exit_mpi(myrank,'error allocating dummy_i for test interfaces')
+    dummy_i = 0
+
+    ! sets infos for master process
+    test_interfaces(1:num_interfaces,0) = my_neighbours(1:num_interfaces)
+    test_interfaces_nibool(1:num_interfaces,0) = nibool_interfaces(1:num_interfaces)
+    dummy_i(0) = num_interfaces
+
+    ! collects from other processes
+    do iproc=1,NPROCTOT-1
+      ! gets number of interfaces
+      call recv_singlei(inum,iproc,itag)
+      dummy_i(iproc) = inum
+      if( inum > 0 ) then
+        ! neighbour ranks of this process
+        call recv_i(test_interfaces(1:inum,iproc),inum,iproc,itag)
+
+        ! number of interface points per neighbour
+        call recv_i(test_interfaces_nibool(1:inum,iproc),inum,iproc,itag)
+      endif
+    enddo
+  else
+    ! sends infos to master process
+    call send_singlei(num_interfaces,0,itag)
+    if( num_interfaces > 0 ) then
+      ! neighbour ranks of this process
+      call send_i(my_neighbours(1:num_interfaces),num_interfaces,0,itag)
+
+      ! number of interface points per neighbour
+      call send_i(nibool_interfaces(1:num_interfaces),num_interfaces,0,itag)
+
+    endif
+  endif
+  call sync_all()
+
+  ! checks if addressing is okay (master process only, using the gathered infos)
+  if( myrank == 0 ) then
+    ! for each process
+    do iproc=0,NPROCTOT-1
+      ! loops over all neighbors
+      do i=1,dummy_i(iproc)
+        ! gets neighbour rank and number of points on interface with it
+        ineighbour = test_interfaces(i,iproc)
+        ipoints = test_interfaces_nibool(i,iproc)
+
+        ! checks values
+        if( ineighbour < 0 .or. ineighbour > NPROCTOT-1 ) then
+          print*,'error neighbour:',iproc,ineighbour
+          call exit_mpi(myrank,'error ineighbour')
+        endif
+        if( ipoints <= 0 ) then
+          print*,'error neighbour points:',iproc,ipoints
+          call exit_mpi(myrank,'error ineighbour points')
+        endif
+
+        ! looks up corresponding entry in neighbour array
+        ! (symmetry check: the neighbour must list iproc with the same point count)
+        is_okay = .false.
+        do j=1,dummy_i(ineighbour)
+          if( test_interfaces(j,ineighbour) == iproc ) then
+            ! checks if same number of interface points with this neighbour
+            if( test_interfaces_nibool(j,ineighbour) == ipoints ) then
+              is_okay = .true.
+            else
+              print*,'error ',iproc,'neighbour ',ineighbour,' points =',ipoints
+              print*,'  ineighbour has points = ',test_interfaces_nibool(j,ineighbour)
+              print*
+              call exit_mpi(myrank,'error ineighbour points differ')
+            endif
+            exit
+          endif
+        enddo
+        if( .not. is_okay ) then
+          print*,'error ',iproc,' neighbour not found: ',ineighbour
+          print*,'iproc ',iproc,' interfaces:'
+          print*,test_interfaces(1:dummy_i(iproc),iproc)
+          print*,'ineighbour ',ineighbour,' interfaces:'
+          print*,test_interfaces(1:dummy_i(ineighbour),ineighbour)
+          print*
+          call exit_mpi(myrank,'error ineighbour not found')
+        endif
+      enddo
+    enddo
+
+    ! user output
+    write(IMAIN,*) '  mpi addressing maximum interfaces:',maxval(dummy_i)
+    write(IMAIN,*) '  mpi addressing : all interfaces okay'
+    write(IMAIN,*)
+
+    ! frees all gathered arrays (test_interfaces_nibool was previously leaked here)
+    deallocate(dummy_i)
+    deallocate(test_interfaces)
+    deallocate(test_interfaces_nibool)
+  endif
+  call sync_all()
+
+  end subroutine test_MPI_neighbours
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  subroutine test_MPI_cm()
+
+! checks the crust/mantle MPI interfaces by assembling a vector test field:
+! after assembly, every interface point must carry its valence (the number
+! of times it appears on the interfaces of this slice) in all NDIM components,
+! and the local/global interface point counts must be consistent
+
+  use meshfem3D_par,only: NPROCTOT,myrank
+  use create_MPI_interfaces_par
+  use MPI_crust_mantle_par
+
+  implicit none
+
+  ! local parameters
+  real(kind=CUSTOM_REAL),dimension(:,:),allocatable :: test_flag_vector
+  integer :: i,j,iglob,ier
+  integer :: inum,icount,ival
+  integer :: num_unique,num_max_valence
+  integer,dimension(:),allocatable :: valence
+
+  ! crust mantle
+  allocate(test_flag_vector(NDIM,NGLOB_CRUST_MANTLE),stat=ier)
+  if( ier /= 0 ) stop 'error allocating array test_flag etc.'
+  allocate(valence(NGLOB_CRUST_MANTLE),stat=ier)
+  if( ier /= 0 ) stop 'error allocating array valence'
+
+  ! points defined by interfaces:
+  ! flags each interface point (first component only) and counts its valence
+  valence(:) = 0
+  test_flag_vector(:,:) = 0.0
+  do i=1,num_interfaces_crust_mantle
+    do j=1,nibool_interfaces_crust_mantle(i)
+      iglob = ibool_interfaces_crust_mantle(j,i)
+      ! sets flag on
+      test_flag_vector(1,iglob) = 1.0_CUSTOM_REAL
+      ! counts valence (occurrences over all interfaces of this slice)
+      valence(iglob) = valence(iglob) + 1
+    enddo
+  enddo
+  ! total number of interface points (with multiplicity)
+  i = sum(nibool_interfaces_crust_mantle)
+  call sum_all_i(i,inum)
+
+  ! total number of unique points (some could be shared between different processes)
+  i = nint( sum(test_flag_vector) )
+  num_unique = i
+  call sum_all_i(i,icount)
+
+  ! maximum valence
+  i = maxval( valence(:) )
+  num_max_valence = i
+  call max_all_i(i,ival)
+
+  ! user output
+  if( myrank == 0 ) then
+    write(IMAIN,*) '  total MPI interface points : ',inum
+    write(IMAIN,*) '  unique MPI interface points: ',icount
+    write(IMAIN,*) '  maximum valence            : ',ival
+  endif
+
+  ! initializes for assembly: assembling a field of ones leaves
+  ! 1 + valence at every interface point
+  test_flag_vector(:,:) = 1.0_CUSTOM_REAL
+
+  ! adds contributions from different partitions to flag arrays
+  call assemble_MPI_vector(NPROCTOT,NGLOB_CRUST_MANTLE, &
+                      test_flag_vector, &
+                      num_interfaces_crust_mantle,max_nibool_interfaces_cm, &
+                      nibool_interfaces_crust_mantle,ibool_interfaces_crust_mantle,&
+                      my_neighbours_crust_mantle)
+
+  ! removes initial flag so only the MPI contributions remain
+  test_flag_vector(:,:) = test_flag_vector(:,:) - 1.0_CUSTOM_REAL
+
+  ! checks number of interface points
+  i = 0
+  do iglob=1,NGLOB_CRUST_MANTLE
+    ! only counts flags with MPI contributions
+    if( test_flag_vector(1,iglob) > 0.0 ) i = i + 1
+
+    ! checks valence: every component must have received exactly
+    ! as many contributions as the point's valence
+    if( valence(iglob) /= nint(test_flag_vector(1,iglob)) .or. &
+       valence(iglob) /= nint(test_flag_vector(2,iglob)) .or. &
+       valence(iglob) /= nint(test_flag_vector(3,iglob)) ) then
+      ! bug fix: prints only the failing point's flags (was the whole array)
+      print*,'error test MPI: rank',myrank,'valence:',valence(iglob),'flag:',test_flag_vector(:,iglob)
+      call exit_mpi(myrank,'error test MPI crust mantle valence')
+    endif
+  enddo
+
+  ! checks within slice
+  if( i /= num_unique ) then
+    print*,'error test crust mantle : rank',myrank,'unique mpi points:',i,num_unique
+    call exit_mpi(myrank,'error MPI assembly crust mantle')
+  endif
+
+  ! total number of assembly points
+  call sum_all_i(i,inum)
+
+  ! points defined by interfaces
+  if( myrank == 0 ) then
+    ! checks global totals agree before/after assembly
+    if( inum /= icount ) then
+      print*,'error crust mantle : total mpi points:',myrank,'total: ',inum,icount
+      call exit_mpi(myrank,'error MPI assembly crust mantle')
+    endif
+
+    ! user output
+    write(IMAIN,*) '  total unique MPI interface points:',inum
+    write(IMAIN,*)
+  endif
+
+  deallocate(test_flag_vector)
+  deallocate(valence)
+
+  call sync_all()
+
+  end subroutine test_MPI_cm
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  subroutine test_MPI_oc()
+
+! verifies the outer core MPI interfaces by assembling a scalar test field:
+! after assembly, every interface point must carry its valence (the number
+! of times it appears on the interfaces of this slice), and the local/global
+! interface point counts must be consistent
+
+  use meshfem3D_par,only: NPROCTOT,myrank
+  use create_MPI_interfaces_par
+  use MPI_outer_core_par
+
+  implicit none
+
+  ! local parameters
+  real(kind=CUSTOM_REAL),dimension(:),allocatable :: flags
+  integer,dimension(:),allocatable :: node_valence
+  integer :: iface,ipoin,iglob,ier
+  integer :: npoin_local,nunique,max_valence_local,ncontrib
+  integer :: inum,icount,ival
+
+  ! work arrays over all outer core global points
+  allocate(flags(NGLOB_OUTER_CORE),stat=ier)
+  if( ier /= 0 ) stop 'error allocating array test_flag etc.'
+  allocate(node_valence(NGLOB_OUTER_CORE),stat=ier)
+  if( ier /= 0 ) stop 'error allocating array valence'
+
+  ! flags every point lying on an MPI interface and counts how often each
+  ! point appears over all interfaces of this slice (its valence)
+  node_valence(:) = 0
+  flags(:) = 0.0_CUSTOM_REAL
+  do iface = 1,num_interfaces_outer_core
+    do ipoin = 1,nibool_interfaces_outer_core(iface)
+      iglob = ibool_interfaces_outer_core(ipoin,iface)
+      flags(iglob) = 1.0_CUSTOM_REAL
+      node_valence(iglob) = node_valence(iglob) + 1
+    enddo
+  enddo
+
+  ! total number of interface points (with multiplicity) over all processes
+  npoin_local = sum(nibool_interfaces_outer_core)
+  call sum_all_i(npoin_local,inum)
+
+  ! number of distinct interface points in this slice and globally
+  nunique = nint( sum(flags) )
+  call sum_all_i(nunique,icount)
+
+  ! maximum valence over all processes
+  max_valence_local = maxval( node_valence(:) )
+  call max_all_i(max_valence_local,ival)
+
+  ! user output
+  if( myrank == 0 ) then
+    write(IMAIN,*) '  total MPI interface points : ',inum
+    write(IMAIN,*) '  unique MPI interface points: ',icount
+    write(IMAIN,*) '  maximum valence            : ',ival
+  endif
+
+  ! assembles a field of ones: afterwards each interface point
+  ! holds 1 + (number of MPI contributions it received)
+  flags(:) = 1.0_CUSTOM_REAL
+
+  call assemble_MPI_scalar(NPROCTOT,NGLOB_OUTER_CORE, &
+                                flags, &
+                                num_interfaces_outer_core,max_nibool_interfaces_oc, &
+                                nibool_interfaces_outer_core,ibool_interfaces_outer_core,&
+                                my_neighbours_outer_core)
+
+  ! subtracts the initial value so only the MPI contributions remain
+  flags(:) = flags(:) - 1.0_CUSTOM_REAL
+
+  ! every point must have received exactly as many contributions as its valence
+  ncontrib = 0
+  do iglob = 1,NGLOB_OUTER_CORE
+    if( flags(iglob) > 0.0 ) ncontrib = ncontrib + 1
+
+    if( node_valence(iglob) /= nint(flags(iglob)) ) then
+      print*,'error test MPI: rank',myrank,'valence:',node_valence(iglob),'flag:',flags(iglob)
+      call exit_mpi(myrank,'error test outer core valence')
+    endif
+  enddo
+
+  ! the number of points touched by assembly must match the unique count
+  if( ncontrib /= nunique ) then
+    print*,'error test outer core : rank',myrank,'unique mpi points:',ncontrib,nunique
+    call exit_mpi(myrank,'error MPI assembly outer core')
+  endif
+  call sum_all_i(ncontrib,inum)
+
+  ! master cross-checks the global totals before/after assembly
+  if( myrank == 0 ) then
+    if( inum /= icount ) then
+      print*,'error outer core : total mpi points:',myrank,'total: ',inum,icount
+      call exit_mpi(myrank,'error MPI assembly outer_core')
+    endif
+
+    ! user output
+    write(IMAIN,*) '  total assembled MPI interface points:',inum
+    write(IMAIN,*)
+  endif
+
+  deallocate(flags)
+  deallocate(node_valence)
+
+  call sync_all()
+
+  end subroutine test_MPI_oc
+
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  subroutine test_MPI_ic()
+
+! checks the inner core MPI interfaces by assembling a vector test field:
+! after assembly, every interface point must carry its valence (the number
+! of times it appears on the interfaces of this slice) in all NDIM components,
+! and the local/global interface point counts must be consistent
+
+  use meshfem3D_par,only: NPROCTOT,myrank
+  use create_MPI_interfaces_par
+  use MPI_inner_core_par
+
+  implicit none
+
+  ! local parameters
+  real(kind=CUSTOM_REAL),dimension(:,:),allocatable :: test_flag_vector
+  integer :: i,j,iglob,ier
+  integer :: inum,icount,ival
+  integer :: num_unique,num_max_valence
+  integer,dimension(:),allocatable :: valence
+
+  ! inner core
+  allocate(test_flag_vector(NDIM,NGLOB_INNER_CORE),stat=ier)
+  if( ier /= 0 ) stop 'error allocating array test_flag etc.'
+  allocate(valence(NGLOB_INNER_CORE),stat=ier)
+  if( ier /= 0 ) stop 'error allocating array valence'
+
+  ! points defined by interfaces:
+  ! flags each interface point (first component only) and counts its valence
+  valence(:) = 0
+  test_flag_vector(:,:) = 0.0
+  do i=1,num_interfaces_inner_core
+    do j=1,nibool_interfaces_inner_core(i)
+      iglob = ibool_interfaces_inner_core(j,i)
+      ! sets flag on
+      test_flag_vector(1,iglob) = 1.0_CUSTOM_REAL
+      ! counts valence (occurrences over all interfaces of this slice)
+      valence(iglob) = valence(iglob) + 1
+    enddo
+  enddo
+  ! total number of interface points (with multiplicity)
+  i = sum(nibool_interfaces_inner_core)
+  call sum_all_i(i,inum)
+
+  ! total number of unique points (some could be shared between different processes)
+  i = nint( sum(test_flag_vector) )
+  num_unique = i
+  call sum_all_i(i,icount)
+
+  ! maximum valence
+  i = maxval( valence(:) )
+  num_max_valence = i
+  call max_all_i(i,ival)
+
+  ! user output
+  if( myrank == 0 ) then
+    write(IMAIN,*) '  total MPI interface points : ',inum
+    write(IMAIN,*) '  unique MPI interface points: ',icount
+    write(IMAIN,*) '  maximum valence            : ',ival
+  endif
+
+  ! initializes for assembly: assembling a field of ones leaves
+  ! 1 + valence at every interface point
+  test_flag_vector = 1.0_CUSTOM_REAL
+
+  ! adds contributions from different partitions to flag arrays
+  call assemble_MPI_vector(NPROCTOT,NGLOB_INNER_CORE, &
+                      test_flag_vector, &
+                      num_interfaces_inner_core,max_nibool_interfaces_ic, &
+                      nibool_interfaces_inner_core,ibool_interfaces_inner_core,&
+                      my_neighbours_inner_core)
+
+  ! removes initial flag so only the MPI contributions remain
+  test_flag_vector(:,:) = test_flag_vector(:,:) - 1.0_CUSTOM_REAL
+
+  ! checks number of interface points
+  i = 0
+  do iglob=1,NGLOB_INNER_CORE
+    ! only counts flags with MPI contributions
+    if( test_flag_vector(1,iglob) > 0.0 ) i = i + 1
+
+    ! checks valence: every component must have received exactly
+    ! as many contributions as the point's valence
+    if( valence(iglob) /= nint(test_flag_vector(1,iglob)) .or. &
+       valence(iglob) /= nint(test_flag_vector(2,iglob)) .or. &
+       valence(iglob) /= nint(test_flag_vector(3,iglob)) ) then
+      ! bug fix: prints only the failing point's flags (was the whole array)
+      print*,'error test MPI: rank',myrank,'valence:',valence(iglob),'flag:',test_flag_vector(:,iglob)
+      call exit_mpi(myrank,'error test MPI inner core valence')
+    endif
+
+  enddo
+
+  ! checks within slice
+  if( i /= num_unique ) then
+    print*,'error test inner core : rank',myrank,'unique mpi points:',i,num_unique
+    call exit_mpi(myrank,'error MPI assembly inner core')
+  endif
+  call sum_all_i(i,inum)
+
+  if( myrank == 0 ) then
+    ! checks global totals agree before/after assembly
+    if( inum /= icount ) then
+      print*,'error inner core : total mpi points:',myrank,'total: ',inum,icount
+      call exit_mpi(myrank,'error MPI assembly inner core')
+    endif
+
+    ! user output
+    write(IMAIN,*) '  total assembled MPI interface points:',inum
+    write(IMAIN,*)
+  endif
+
+  deallocate(test_flag_vector)
+  deallocate(valence)
+
+  call sync_all()
+
+  end subroutine test_MPI_ic

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/write_AVS_DX_global_chunks_data_adios.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/write_AVS_DX_global_chunks_data_adios.f90	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/write_AVS_DX_global_chunks_data_adios.f90	2013-07-01 01:33:15 UTC (rev 22469)
@@ -0,0 +1,1145 @@
+!=====================================================================
+!
+!          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
+!          --------------------------------------------------
+!
+!          Main authors: Dimitri Komatitsch and Jeroen Tromp
+!                        Princeton University, USA
+!             and University of Pau / CNRS / INRIA, France
+! (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
+!                            April 2011
+!
+! This program is free software; you can redistribute it and/or modify
+! it under the terms of the GNU General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or
+! (at your option) any later version.
+!
+! This program is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+! GNU General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License along
+! with this program; if not, write to the Free Software Foundation, Inc.,
+! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+!
+!=====================================================================
+
+! create AVS or DX 2D data for the faces of the global chunks,
+! to be recombined in postprocessing
+
+module AVS_DX_global_chunks_mod
+
+  implicit none
+
+  type avs_dx_global_chunks_t
+    integer(kind=4) :: npoin, nspecface
+    real(kind=4), dimension(:), allocatable :: x_adios, y_adios, z_adios
+    integer(kind=4), dimension(:), allocatable :: idoubling, iglob1, iglob2, &
+        iglob3, iglob4
+    real, dimension(:), allocatable :: vmin, vmax 
+    real, dimension(:), allocatable :: dvp, dvs
+  endtype
+
+contains
+
+
+subroutine define_AVS_DX_global_chunks_data(adios_group, &
+    myrank,prname,nspec,iboun,ibool, &
+    idoubling,xstore,ystore,zstore,num_ibool_AVS_DX,mask_ibool, &
+    npointot,rhostore,kappavstore,muvstore,nspl,rspl,espl,espl2, &
+    ELLIPTICITY,ISOTROPIC_3D_MANTLE, &
+    RICB,RCMB,RTOPDDOUBLEPRIME,R600,R670,R220,R771,R400,R120,R80,RMOHO, &
+    RMIDDLE_CRUST,ROCEAN,iregion_code, &
+    group_size_inc, avs_dx_adios)
+
+! Counts the AVS/DX points and element faces lying on the chunk boundaries
+! (xi_min, xi_max, eta_min, eta_max), allocates the temporary arrays inside
+! 'avs_dx_adios' accordingly, and registers the matching variables in the
+! ADIOS group 'adios_group', incrementing 'group_size_inc' for each one.
+! Actual values are filled in later by prepare_AVS_DX_global_chunks_data_adios.
+
+  use mpi
+  use adios_write_mod
+
+  implicit none
+
+  include "constants.h"
+
+  integer(kind=8), intent(in) :: adios_group
+  integer(kind=8), intent(inout) :: group_size_inc
+
+  integer :: myrank
+
+  ! processor identification
+  character(len=150) :: prname
+
+  integer :: nspec
+
+  ! iboun(iface,ispec) is .true. when face 'iface' of element 'ispec'
+  ! lies on a chunk boundary
+  logical iboun(6,nspec)
+
+  integer,dimension(NGLLX,NGLLY,NGLLZ,nspec) :: ibool
+  integer idoubling(nspec)
+
+  double precision,dimension(NGLLX,NGLLY,NGLLZ,nspec) :: xstore,ystore,zstore
+
+  integer :: npointot
+  ! numbering of global AVS or DX points
+  integer num_ibool_AVS_DX(npointot)
+  ! logical mask used to output global points only once
+  logical mask_ibool(npointot)
+
+  real(kind=CUSTOM_REAL) kappavstore(NGLLX,NGLLY,NGLLZ,nspec)
+  real(kind=CUSTOM_REAL) muvstore(NGLLX,NGLLY,NGLLZ,nspec)
+  real(kind=CUSTOM_REAL) rhostore(NGLLX,NGLLY,NGLLZ,nspec)
+
+  ! for ellipticity
+  integer nspl
+  double precision rspl(NR),espl(NR),espl2(NR)
+
+  logical ELLIPTICITY,ISOTROPIC_3D_MANTLE
+
+  double precision RICB,RCMB,RTOPDDOUBLEPRIME,R600,R670,R220,R771, &
+    R400,R120,R80,RMOHO,RMIDDLE_CRUST,ROCEAN
+
+  integer iregion_code
+
+  ! local parameters
+  integer ispec
+  integer i,j,k,np
+  ! global indices of the 8 corners of the current element
+  integer, dimension(8) :: iglobval
+  integer npoin,numpoin,nspecface,ispecface
+
+  real(kind=CUSTOM_REAL) vmin,vmax
+
+  double precision r,rho,vp,vs,Qkappa,Qmu
+  double precision vpv,vph,vsv,vsh,eta_aniso
+  double precision x,y,z,theta,phi_dummy,cost,p20,ell,factor
+  real(kind=CUSTOM_REAL) dvp,dvs
+
+  type(avs_dx_global_chunks_t), intent(inout) :: avs_dx_adios
+
+  integer :: ierr
+
+  mask_ibool(:) = .false.
+
+  nspecface = 0
+
+  ! mark global AVS or DX points: a corner is flagged once even when it
+  ! is shared by several boundary faces, so that npoin counts unique points
+  do ispec=1,nspec
+  ! only if on face
+    if(iboun(1,ispec) .or. iboun(2,ispec) .or. &
+                iboun(3,ispec) .or. iboun(4,ispec)) then
+      iglobval(1)=ibool(1,1,1,ispec)
+      iglobval(2)=ibool(NGLLX,1,1,ispec)
+      iglobval(3)=ibool(NGLLX,NGLLY,1,ispec)
+      iglobval(4)=ibool(1,NGLLY,1,ispec)
+      iglobval(5)=ibool(1,1,NGLLZ,ispec)
+      iglobval(6)=ibool(NGLLX,1,NGLLZ,ispec)
+      iglobval(7)=ibool(NGLLX,NGLLY,NGLLZ,ispec)
+      iglobval(8)=ibool(1,NGLLY,NGLLZ,ispec)
+
+      ! face xi = xi_min
+      if(iboun(1,ispec)) then
+        nspecface = nspecface + 1
+        mask_ibool(iglobval(1)) = .true.
+        mask_ibool(iglobval(4)) = .true.
+        mask_ibool(iglobval(8)) = .true.
+        mask_ibool(iglobval(5)) = .true.
+      endif
+
+      ! face xi = xi_max
+      if(iboun(2,ispec)) then
+        nspecface = nspecface + 1
+        mask_ibool(iglobval(2)) = .true.
+        mask_ibool(iglobval(3)) = .true.
+        mask_ibool(iglobval(7)) = .true.
+        mask_ibool(iglobval(6)) = .true.
+      endif
+
+      ! face eta = eta_min
+      if(iboun(3,ispec)) then
+        nspecface = nspecface + 1
+        mask_ibool(iglobval(1)) = .true.
+        mask_ibool(iglobval(2)) = .true.
+        mask_ibool(iglobval(6)) = .true.
+        mask_ibool(iglobval(5)) = .true.
+      endif
+
+      ! face eta = eta_max
+      if(iboun(4,ispec)) then
+        nspecface = nspecface + 1
+        mask_ibool(iglobval(4)) = .true.
+        mask_ibool(iglobval(3)) = .true.
+        mask_ibool(iglobval(7)) = .true.
+        mask_ibool(iglobval(8)) = .true.
+      endif
+
+    endif
+  enddo
+
+  ! count global number of AVS or DX points
+  npoin = count(mask_ibool(:))
+
+  avs_dx_adios%npoin = npoin
+  avs_dx_adios%nspecface = nspecface
+
+  ! Allocate temporary arrays for AVS/DX points.
+  allocate(avs_dx_adios%x_adios(npoin), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating x_adios.")
+  allocate(avs_dx_adios%y_adios(npoin), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating y_adios.")
+  allocate(avs_dx_adios%z_adios(npoin), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating z_adios.")
+
+  allocate(avs_dx_adios%vmin(npoin), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating vmin.")
+  allocate(avs_dx_adios%vmax(npoin), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating vmax.")
+
+  ! Allocate temporary arrays for AVS/DX elements.
+  allocate(avs_dx_adios%idoubling(nspecface), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating idoubling.")
+  allocate(avs_dx_adios%iglob1(nspecface), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating iglob1.")
+  allocate(avs_dx_adios%iglob2(nspecface), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating iglob2.")
+  allocate(avs_dx_adios%iglob3(nspecface), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating iglob3.")
+  allocate(avs_dx_adios%iglob4(nspecface), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating iglob4.")
+
+  !--- Variables for '...AVS_DXpointschunk.txt'
+  call define_adios_global_real_1d_array(adios_group, "points_chunks/x_value", &
+      npoin, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, "points_chunks/y_value", &
+      npoin, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, "points_chunks/z_value", &
+      npoin, group_size_inc)
+  !--- Variables for '...AVS_DXpointschunk_stability.txt'
+  call define_adios_global_real_1d_array(adios_group, &
+      "points_chunks_stability/vmin", npoin, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, &
+      "points_chunks_stability/vmax", npoin, group_size_inc)
+  !--- Variables for AVS_DXelementschunks.txt
+  call define_adios_global_real_1d_array(adios_group, &
+      "elements_chunks/idoubling", nspecface, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, &
+      "elements_chunks/num_ibool_AVS_DX_iglob1", nspecface, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, &
+      "elements_chunks/num_ibool_AVS_DX_iglob2", nspecface, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, &
+      "elements_chunks/num_ibool_AVS_DX_iglob3", nspecface, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, &
+      "elements_chunks/num_ibool_AVS_DX_iglob4", nspecface, group_size_inc)
+
+  !--- Variables for AVS_DXelementschunks_dvp_dvs.txt
+  ! dvp/dvs perturbation arrays are only produced for 3D isotropic mantle models
+  if(ISOTROPIC_3D_MANTLE) then
+    allocate(avs_dx_adios%dvp(nspecface), stat=ierr)
+    if (ierr /= 0) call exit_MPI(myrank, "Error allocating dvp.")
+    allocate(avs_dx_adios%dvs(nspecface), stat=ierr)
+    if (ierr /= 0) call exit_MPI(myrank, "Error allocating dvs.")
+    ! bug fix: register one variable per array (the dvs variable was previously
+    ! registered under the duplicate name "elements_chunks/dvp"), and pass the
+    ! array size 'nspecface' -- not the uninitialized scalars dvp/dvs -- as the
+    ! size argument, consistent with every other registration above
+    call define_adios_global_real_1d_array(adios_group, &
+        "elements_chunks/dvp", nspecface, group_size_inc)
+    call define_adios_global_real_1d_array(adios_group, &
+        "elements_chunks/dvs", nspecface, group_size_inc)
+  endif
+
+end subroutine define_AVS_DX_global_chunks_data
+
+!===============================================================================
+subroutine prepare_AVS_DX_global_chunks_data_adios(myrank,prname,nspec, &
+    iboun,ibool, idoubling,xstore,ystore,zstore,num_ibool_AVS_DX,mask_ibool, &
+    npointot,rhostore,kappavstore,muvstore,nspl,rspl,espl,espl2, &
+    ELLIPTICITY,ISOTROPIC_3D_MANTLE, &
+    RICB,RCMB,RTOPDDOUBLEPRIME,R600,R670,R220,R771,R400,R120,R80,RMOHO, &
+    RMIDDLE_CRUST,ROCEAN,iregion_code, &
+    avs_dx_adios)
+
+  implicit none
+
+  include "constants.h"
+
+  integer :: myrank
+
+  ! processor identification
+  character(len=150) :: prname
+
+  integer :: nspec
+
+  logical iboun(6,nspec)
+
+  integer,dimension(NGLLX,NGLLY,NGLLZ,nspec) :: ibool
+  integer idoubling(nspec)
+
+  double precision,dimension(NGLLX,NGLLY,NGLLZ,nspec) :: xstore,ystore,zstore
+
+  integer :: npointot
+  ! numbering of global AVS or DX points
+  integer num_ibool_AVS_DX(npointot)
+  ! logical mask used to output global points only once
+  logical mask_ibool(npointot)
+
+  real(kind=CUSTOM_REAL) kappavstore(NGLLX,NGLLY,NGLLZ,nspec)
+  real(kind=CUSTOM_REAL) muvstore(NGLLX,NGLLY,NGLLZ,nspec)
+  real(kind=CUSTOM_REAL) rhostore(NGLLX,NGLLY,NGLLZ,nspec)
+
+  ! for ellipticity
+  integer nspl
+  double precision rspl(NR),espl(NR),espl2(NR)
+
+  logical ELLIPTICITY,ISOTROPIC_3D_MANTLE
+
+  double precision RICB,RCMB,RTOPDDOUBLEPRIME,R600,R670,R220,R771, &
+    R400,R120,R80,RMOHO,RMIDDLE_CRUST,ROCEAN
+
+  integer iregion_code
+
+  ! local parameters
+  integer ispec
+  integer i,j,k,np
+  integer, dimension(8) :: iglobval
+  integer npoin,numpoin,nspecface,ispecface
+
+  real(kind=CUSTOM_REAL) vmin,vmax
+
+  double precision r,rho,vp,vs,Qkappa,Qmu
+  double precision vpv,vph,vsv,vsh,eta_aniso
+  double precision x,y,z,theta,phi_dummy,cost,p20,ell,factor
+  real(kind=CUSTOM_REAL) dvp,dvs
+
+  type(avs_dx_global_chunks_t), intent(inout) :: avs_dx_adios ! out for adios_write
+
+
+  ! erase the logical mask used to mark points already found
+  mask_ibool(:) = .false.
+
+  nspecface = 0
+
+  ! mark global AVS or DX points
+  do ispec=1,nspec
+  ! only if on face
+    if(iboun(1,ispec) .or. iboun(2,ispec) .or. &
+                iboun(3,ispec) .or. iboun(4,ispec)) then
+      iglobval(1)=ibool(1,1,1,ispec)
+      iglobval(2)=ibool(NGLLX,1,1,ispec)
+      iglobval(3)=ibool(NGLLX,NGLLY,1,ispec)
+      iglobval(4)=ibool(1,NGLLY,1,ispec)
+      iglobval(5)=ibool(1,1,NGLLZ,ispec)
+      iglobval(6)=ibool(NGLLX,1,NGLLZ,ispec)
+      iglobval(7)=ibool(NGLLX,NGLLY,NGLLZ,ispec)
+      iglobval(8)=ibool(1,NGLLY,NGLLZ,ispec)
+
+      ! face xi = xi_min
+      if(iboun(1,ispec)) then
+        nspecface = nspecface + 1
+        mask_ibool(iglobval(1)) = .true.
+        mask_ibool(iglobval(4)) = .true.
+        mask_ibool(iglobval(8)) = .true.
+        mask_ibool(iglobval(5)) = .true.
+      endif
+
+      ! face xi = xi_max
+      if(iboun(2,ispec)) then
+        nspecface = nspecface + 1
+        mask_ibool(iglobval(2)) = .true.
+        mask_ibool(iglobval(3)) = .true.
+        mask_ibool(iglobval(7)) = .true.
+        mask_ibool(iglobval(6)) = .true.
+      endif
+
+      ! face eta = eta_min
+      if(iboun(3,ispec)) then
+        nspecface = nspecface + 1
+        mask_ibool(iglobval(1)) = .true.
+        mask_ibool(iglobval(2)) = .true.
+        mask_ibool(iglobval(6)) = .true.
+        mask_ibool(iglobval(5)) = .true.
+      endif
+
+      ! face eta = eta_max
+      if(iboun(4,ispec)) then
+        nspecface = nspecface + 1
+        mask_ibool(iglobval(4)) = .true.
+        mask_ibool(iglobval(3)) = .true.
+        mask_ibool(iglobval(7)) = .true.
+        mask_ibool(iglobval(8)) = .true.
+      endif
+
+    endif
+  enddo
+
+  ! count global number of AVS or DX points
+  npoin = count(mask_ibool(:))
+
+  ! number of points in AVS or DX file
+  write(10,*) npoin
+
+  ! erase the logical mask used to mark points already found
+  mask_ibool(:) = .false.
+
+  ! output global AVS or DX points
+  numpoin = 0
+  do ispec=1,nspec
+  ! only if on face
+    if(iboun(1,ispec) .or. iboun(2,ispec) .or. &
+                iboun(3,ispec) .or. iboun(4,ispec)) then
+      iglobval(1)=ibool(1,1,1,ispec)
+      iglobval(2)=ibool(NGLLX,1,1,ispec)
+      iglobval(3)=ibool(NGLLX,NGLLY,1,ispec)
+      iglobval(4)=ibool(1,NGLLY,1,ispec)
+      iglobval(5)=ibool(1,1,NGLLZ,ispec)
+      iglobval(6)=ibool(NGLLX,1,NGLLZ,ispec)
+      iglobval(7)=ibool(NGLLX,NGLLY,NGLLZ,ispec)
+      iglobval(8)=ibool(1,NGLLY,NGLLZ,ispec)
+
+      ! face xi = xi_min
+      if(iboun(1,ispec)) then
+
+        if(.not. mask_ibool(iglobval(1))) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglobval(1)) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(1,1,1,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(1,1,1,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(1,1,1,ispec))
+
+          vmax = sqrt((kappavstore(1,1,1,ispec) &
+              + 4.*muvstore(1,1,1,ispec)/3.)/rhostore(1,1,1,ispec))
+          vmin = sqrt(muvstore(1,1,1,ispec)/rhostore(1,1,1,ispec))
+          ! particular case of the outer core (muvstore contains 1/rho)
+          if(idoubling(ispec) == IFLAG_OUTER_CORE_NORMAL) then
+            r = dsqrt(xstore(1,1,1,ispec)**2 + ystore(1,1,1,ispec)**2 &
+                + zstore(1,1,1,ispec)**2)
+            call prem_display_outer_core(myrank,r,rho,vp,vs, &
+                Qkappa,Qmu,idoubling(ispec))
+            vmax = vp
+            vmin = vp
+          endif
+          if(vmin == 0.0) vmin=vmax
+          avs_dx_adios%vmin(numpoin) = vmin
+          avs_dx_adios%vmax(numpoin) = vmax
+        endif
+
+        if(.not. mask_ibool(iglobval(4))) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglobval(4)) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(1,NGLLY,1,ispec)) 
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(1,NGLLY,1,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(1,NGLLY,1,ispec))
+
+          vmax = sqrt((kappavstore(1,NGLLY,1,ispec) &
+              +4.*muvstore(1,NGLLY,1,ispec)/3.)/rhostore(1,NGLLY,1,ispec))
+          vmin = sqrt(muvstore(1,NGLLY,1,ispec)/rhostore(1,NGLLY,1,ispec))
+          ! particular case of the outer core (muvstore contains 1/rho)
+          if(idoubling(ispec) == IFLAG_OUTER_CORE_NORMAL) then
+            r = dsqrt(xstore(1,NGLLY,1,ispec)**2 + ystore(1,NGLLY,1,ispec)**2 &
+                + zstore(1,NGLLY,1,ispec)**2)
+            call prem_display_outer_core(myrank,r,rho,vp,vs, &
+                Qkappa,Qmu,idoubling(ispec))
+            vmax = vp
+            vmin = vp
+          endif
+          if(vmin == 0.0) vmin=vmax
+          avs_dx_adios%vmin(numpoin) = vmin
+          avs_dx_adios%vmax(numpoin) = vmax
+        endif
+
+        if(.not. mask_ibool(iglobval(8))) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglobval(8)) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(1,NGLLY,NGLLZ,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(1,NGLLY,NGLLZ,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(1,NGLLY,NGLLZ,ispec))
+
+          vmax = sqrt((kappavstore(1,NGLLY,NGLLZ,ispec) &
+              +4.*muvstore(1,NGLLY,NGLLZ,ispec)/3.) &
+              / rhostore(1,NGLLY,NGLLZ,ispec))
+          vmin = sqrt(muvstore(1,NGLLY,NGLLZ,ispec) &
+              / rhostore(1,NGLLY,NGLLZ,ispec))
+          ! particular case of the outer core (muvstore contains 1/rho)
+          if(idoubling(ispec) == IFLAG_OUTER_CORE_NORMAL) then
+            r = dsqrt(xstore(1,NGLLY,NGLLZ,ispec)**2 &
+                + ystore(1,NGLLY,NGLLZ,ispec)**2 &
+                + zstore(1,NGLLY,NGLLZ,ispec)**2)
+            call prem_display_outer_core(myrank,r,rho,vp,vs, &
+                Qkappa,Qmu,idoubling(ispec))
+            vmax = vp
+            vmin = vp
+          endif
+          if(vmin == 0.0) vmin=vmax
+
+          avs_dx_adios%vmin(numpoin) = vmin
+          avs_dx_adios%vmax(numpoin) = vmax
+        endif
+
+        if(.not. mask_ibool(iglobval(5))) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglobval(5)) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(1,1,NGLLZ,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(1,1,NGLLZ,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(1,1,NGLLZ,ispec))
+
+          vmax = sqrt((kappavstore(1,1,NGLLZ,ispec) &
+              +4.*muvstore(1,1,NGLLZ,ispec)/3.)/rhostore(1,1,NGLLZ,ispec))
+          vmin = sqrt(muvstore(1,1,NGLLZ,ispec)/rhostore(1,1,NGLLZ,ispec))
+          ! particular case of the outer core (muvstore contains 1/rho)
+          if(idoubling(ispec) == IFLAG_OUTER_CORE_NORMAL) then
+            r = dsqrt(xstore(1,1,NGLLZ,ispec)**2 + ystore(1,1,NGLLZ,ispec)**2 &
+                + zstore(1,1,NGLLZ,ispec)**2)
+            call prem_display_outer_core(myrank,r,rho,vp,vs, &
+                Qkappa,Qmu,idoubling(ispec))
+            vmax = vp
+            vmin = vp
+          endif
+          if(vmin == 0.0) vmin=vmax
+          avs_dx_adios%vmin(numpoin) = vmin
+          avs_dx_adios%vmax(numpoin) = vmax
+        endif
+
+        mask_ibool(iglobval(1)) = .true.
+        mask_ibool(iglobval(4)) = .true.
+        mask_ibool(iglobval(8)) = .true.
+        mask_ibool(iglobval(5)) = .true.
+      endif
+
+      ! face xi = xi_max
+      if(iboun(2,ispec)) then
+
+        if(.not. mask_ibool(iglobval(2))) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglobval(2)) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(NGLLX,1,1,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(NGLLX,1,1,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(NGLLX,1,1,ispec))
+
+          vmax = sqrt((kappavstore(NGLLX,1,1,ispec) & 
+              +4.*muvstore(NGLLX,1,1,ispec)/3.)/rhostore(NGLLX,1,1,ispec))
+          vmin = sqrt(muvstore(NGLLX,1,1,ispec)/rhostore(NGLLX,1,1,ispec))
+          ! particular case of the outer core (muvstore contains 1/rho)
+          if(idoubling(ispec) == IFLAG_OUTER_CORE_NORMAL) then
+            r = dsqrt(xstore(NGLLX,1,1,ispec)**2 + ystore(NGLLX,1,1,ispec)**2 &
+                + zstore(NGLLX,1,1,ispec)**2)
+            call prem_display_outer_core(myrank,r,rho,vp,vs, &
+                Qkappa,Qmu,idoubling(ispec))
+            vmax = vp
+            vmin = vp
+          endif
+          if(vmin == 0.0) vmin=vmax
+          avs_dx_adios%vmin(numpoin) = vmin
+          avs_dx_adios%vmax(numpoin) = vmax
+        endif
+
+        if(.not. mask_ibool(iglobval(3))) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglobval(3)) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(NGLLX,NGLLY,1,ispec)) 
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(NGLLX,NGLLY,1,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(NGLLX,NGLLY,1,ispec))
+
+          vmax = sqrt((kappavstore(NGLLX,NGLLY,1,ispec) &
+              + 4.*muvstore(NGLLX,NGLLY,1,ispec)/3.) &
+              / rhostore(NGLLX,NGLLY,1,ispec))
+          vmin = sqrt(muvstore(NGLLX,NGLLY,1,ispec) &
+              / rhostore(NGLLX,NGLLY,1,ispec))
+          ! particular case of the outer core (muvstore contains 1/rho)
+          if(idoubling(ispec) == IFLAG_OUTER_CORE_NORMAL) then
+            r = dsqrt(xstore(NGLLX,NGLLY,1,ispec)**2 &
+                + ystore(NGLLX,NGLLY,1,ispec)**2 &
+                + zstore(NGLLX,NGLLY,1,ispec)**2)
+            call prem_display_outer_core(myrank,r,rho,vp,vs, &
+                Qkappa,Qmu,idoubling(ispec))
+            vmax = vp
+            vmin = vp
+          endif
+          if(vmin == 0.0) vmin=vmax
+          avs_dx_adios%vmin(numpoin) = vmin
+          avs_dx_adios%vmax(numpoin) = vmax
+        endif
+
+        if(.not. mask_ibool(iglobval(7))) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglobval(7)) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(NGLLX,NGLLY,NGLLZ,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(NGLLX,NGLLY,NGLLZ,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(NGLLX,NGLLY,NGLLZ,ispec))
+
+          vmax = sqrt((kappavstore(NGLLX,NGLLY,NGLLZ,ispec) &
+              + 4.*muvstore(NGLLX,NGLLY,NGLLZ,ispec)/3.) &
+              / rhostore(NGLLX,NGLLY,NGLLZ,ispec))
+          vmin = sqrt(muvstore(NGLLX,NGLLY,NGLLZ,ispec) &
+              / rhostore(NGLLX,NGLLY,NGLLZ,ispec))
+          ! particular case of the outer core (muvstore contains 1/rho)
+          if(idoubling(ispec) == IFLAG_OUTER_CORE_NORMAL) then
+            r = dsqrt(xstore(NGLLX,NGLLY,NGLLZ,ispec)**2 &
+                + ystore(NGLLX,NGLLY,NGLLZ,ispec)**2 &
+                + zstore(NGLLX,NGLLY,NGLLZ,ispec)**2)
+            call prem_display_outer_core(myrank,r,rho,vp,vs, &
+                Qkappa,Qmu,idoubling(ispec))
+            vmax = vp
+            vmin = vp
+          endif
+          if(vmin == 0.0) vmin=vmax
+          avs_dx_adios%vmin(numpoin) = vmin
+          avs_dx_adios%vmax(numpoin) = vmax
+        endif
+
+        if(.not. mask_ibool(iglobval(6))) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglobval(6)) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(NGLLX,1,NGLLZ,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(NGLLX,1,NGLLZ,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(NGLLX,1,NGLLZ,ispec))
+
+          vmax = sqrt((kappavstore(NGLLX,1,NGLLZ,ispec) &
+              + 4.*muvstore(NGLLX,1,NGLLZ,ispec)/3.) &
+              / rhostore(NGLLX,1,NGLLZ,ispec))
+          vmin = sqrt(muvstore(NGLLX,1,NGLLZ,ispec) &
+              / rhostore(NGLLX,1,NGLLZ,ispec))
+          ! particular case of the outer core (muvstore contains 1/rho)
+          if(idoubling(ispec) == IFLAG_OUTER_CORE_NORMAL) then
+            r = dsqrt(xstore(NGLLX,1,NGLLZ,ispec)**2 &
+                + ystore(NGLLX,1,NGLLZ,ispec)**2 &
+                + zstore(NGLLX,1,NGLLZ,ispec)**2)
+            call prem_display_outer_core(myrank,r,rho,vp,vs, &
+                Qkappa,Qmu,idoubling(ispec))
+            vmax = vp
+            vmin = vp
+          endif
+          if(vmin == 0.0) vmin=vmax
+          avs_dx_adios%vmin(numpoin) = vmin
+          avs_dx_adios%vmax(numpoin) = vmax
+        endif
+
+        mask_ibool(iglobval(2)) = .true.
+        mask_ibool(iglobval(3)) = .true.
+        mask_ibool(iglobval(7)) = .true.
+        mask_ibool(iglobval(6)) = .true.
+      endif
+
+      ! face eta = eta_min
+      if(iboun(3,ispec)) then
+
+        if(.not. mask_ibool(iglobval(1))) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglobval(1)) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(1,1,1,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(1,1,1,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(1,1,1,ispec))
+
+          vmax = sqrt((kappavstore(1,1,1,ispec) &
+              + 4.*muvstore(1,1,1,ispec)/3.)/rhostore(1,1,1,ispec))
+          vmin = sqrt(muvstore(1,1,1,ispec)/rhostore(1,1,1,ispec))
+          ! particular case of the outer core (muvstore contains 1/rho)
+          if(idoubling(ispec) == IFLAG_OUTER_CORE_NORMAL) then
+            r = dsqrt(xstore(1,1,1,ispec)**2 &
+                + ystore(1,1,1,ispec)**2 + zstore(1,1,1,ispec)**2)
+            call prem_display_outer_core(myrank,r,rho,vp,vs, &
+                Qkappa,Qmu,idoubling(ispec))
+            vmax = vp
+            vmin = vp
+          endif
+          if(vmin == 0.0) vmin=vmax
+          avs_dx_adios%vmin(numpoin) = vmin
+          avs_dx_adios%vmax(numpoin) = vmax
+        endif
+
+        if(.not. mask_ibool(iglobval(2))) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglobval(2)) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(NGLLX,1,1,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(NGLLX,1,1,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(NGLLX,1,1,ispec))
+
+          vmax = sqrt((kappavstore(NGLLX,1,1,ispec) &
+              +4.*muvstore(NGLLX,1,1,ispec)/3.)/rhostore(NGLLX,1,1,ispec))
+          vmin = sqrt(muvstore(NGLLX,1,1,ispec)/rhostore(NGLLX,1,1,ispec))
+          ! particular case of the outer core (muvstore contains 1/rho)
+          if(idoubling(ispec) == IFLAG_OUTER_CORE_NORMAL) then
+            r = dsqrt(xstore(NGLLX,1,1,ispec)**2 &
+                + ystore(NGLLX,1,1,ispec)**2 + zstore(NGLLX,1,1,ispec)**2)
+            call prem_display_outer_core(myrank,r,rho,vp,vs, &
+                Qkappa,Qmu,idoubling(ispec))
+            vmax = vp
+            vmin = vp
+          endif
+          if(vmin == 0.0) vmin=vmax
+          avs_dx_adios%vmin = vmin
+          avs_dx_adios%vmax = vmax
+        endif
+
+        if(.not. mask_ibool(iglobval(6))) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglobval(6)) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(NGLLX,1,NGLLZ,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(NGLLX,1,NGLLZ,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(NGLLX,1,NGLLZ,ispec))
+
+          vmax = sqrt((kappavstore(NGLLX,1,NGLLZ,ispec) &
+              + 4.*muvstore(NGLLX,1,NGLLZ,ispec)/3.) &
+              / rhostore(NGLLX,1,NGLLZ,ispec))
+          vmin = sqrt(muvstore(NGLLX,1,NGLLZ,ispec) &
+              / rhostore(NGLLX,1,NGLLZ,ispec))
+          ! particular case of the outer core (muvstore contains 1/rho)
+          if(idoubling(ispec) == IFLAG_OUTER_CORE_NORMAL) then
+            r = dsqrt(xstore(NGLLX,1,NGLLZ,ispec)**2 &
+                + ystore(NGLLX,1,NGLLZ,ispec)**2 &
+                + zstore(NGLLX,1,NGLLZ,ispec)**2)
+            call prem_display_outer_core(myrank,r,rho,vp,vs, &
+                Qkappa,Qmu,idoubling(ispec))
+            vmax = vp
+            vmin = vp
+          endif
+          if(vmin == 0.0) vmin=vmax
+          avs_dx_adios%vmin(numpoin) = vmin
+          avs_dx_adios%vmax(numpoin) = vmax
+        endif
+
+        if(.not. mask_ibool(iglobval(5))) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglobval(5)) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(1,1,NGLLZ,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(1,1,NGLLZ,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(1,1,NGLLZ,ispec))
+
+          vmax = sqrt((kappavstore(1,1,NGLLZ,ispec) &
+              + 4.*muvstore(1,1,NGLLZ,ispec)/3.) &
+              / rhostore(1,1,NGLLZ,ispec))
+          vmin = sqrt(muvstore(1,1,NGLLZ,ispec)/rhostore(1,1,NGLLZ,ispec))
+          ! particular case of the outer core (muvstore contains 1/rho)
+          if(idoubling(ispec) == IFLAG_OUTER_CORE_NORMAL) then
+            r = dsqrt(xstore(1,1,NGLLZ,ispec)**2 &
+                + ystore(1,1,NGLLZ,ispec)**2 + zstore(1,1,NGLLZ,ispec)**2)
+            call prem_display_outer_core(myrank,r,rho,vp,vs, &
+                Qkappa,Qmu,idoubling(ispec))
+            vmax = vp
+            vmin = vp
+          endif
+          if(vmin == 0.0) vmin=vmax
+          avs_dx_adios%vmin(numpoin) = vmin
+          avs_dx_adios%vmax(numpoin) = vmax
+        endif
+
+        mask_ibool(iglobval(1)) = .true.
+        mask_ibool(iglobval(2)) = .true.
+        mask_ibool(iglobval(6)) = .true.
+        mask_ibool(iglobval(5)) = .true.
+      endif
+
+      ! face eta = eta_max
+      if(iboun(4,ispec)) then
+
+        if(.not. mask_ibool(iglobval(4))) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglobval(4)) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(1,NGLLY,1,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(1,NGLLY,1,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(1,NGLLY,1,ispec))
+
+          vmax = sqrt((kappavstore(1,NGLLY,1,ispec) &
+              + 4.*muvstore(1,NGLLY,1,ispec)/3.)/rhostore(1,NGLLY,1,ispec))
+          vmin = sqrt(muvstore(1,NGLLY,1,ispec)/rhostore(1,NGLLY,1,ispec))
+          ! particular case of the outer core (muvstore contains 1/rho)
+          if(idoubling(ispec) == IFLAG_OUTER_CORE_NORMAL) then
+            r = dsqrt(xstore(1,NGLLY,1,ispec)**2 &
+                + ystore(1,NGLLY,1,ispec)**2 + zstore(1,NGLLY,1,ispec)**2)
+            call prem_display_outer_core(myrank,r,rho,vp,vs, &
+                Qkappa,Qmu,idoubling(ispec))
+            vmax = vp
+            vmin = vp
+          endif
+          if(vmin == 0.0) vmin=vmax
+          avs_dx_adios%vmin(numpoin) = vmin
+          avs_dx_adios%vmax(numpoin) = vmax
+        endif
+
+        if(.not. mask_ibool(iglobval(3))) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglobval(3)) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(NGLLX,NGLLY,1,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(NGLLX,NGLLY,1,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(NGLLX,NGLLY,1,ispec))
+
+          vmax = sqrt((kappavstore(NGLLX,NGLLY,1,ispec) &
+              + 4.*muvstore(NGLLX,NGLLY,1,ispec)/3.) &
+              / rhostore(NGLLX,NGLLY,1,ispec))
+          vmin = sqrt(muvstore(NGLLX,NGLLY,1,ispec) &
+              / rhostore(NGLLX,NGLLY,1,ispec))
+          ! particular case of the outer core (muvstore contains 1/rho)
+          if(idoubling(ispec) == IFLAG_OUTER_CORE_NORMAL) then
+            r = dsqrt(xstore(NGLLX,NGLLY,1,ispec)**2 &
+                + ystore(NGLLX,NGLLY,1,ispec)**2 &
+                + zstore(NGLLX,NGLLY,1,ispec)**2)
+            call prem_display_outer_core(myrank,r,rho,vp,vs, &
+                Qkappa,Qmu,idoubling(ispec))
+            vmax = vp
+            vmin = vp
+          endif
+
+          if(vmin == 0.0) vmin=vmax
+
+          avs_dx_adios%vmin(numpoin) = vmin
+          avs_dx_adios%vmax(numpoin) = vmax
+        endif
+
+        if(.not. mask_ibool(iglobval(7))) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglobval(7)) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(NGLLX,NGLLY,NGLLZ,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(NGLLX,NGLLY,NGLLZ,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(NGLLX,NGLLY,NGLLZ,ispec))
+
+          vmax = sqrt((kappavstore(NGLLX,NGLLY,NGLLZ,ispec) &
+              + 4.*muvstore(NGLLX,NGLLY,NGLLZ,ispec)/3.) &
+              / rhostore(NGLLX,NGLLY,NGLLZ,ispec))
+          vmin = sqrt(muvstore(NGLLX,NGLLY,NGLLZ,ispec) &
+              / rhostore(NGLLX,NGLLY,NGLLZ,ispec))
+          ! particular case of the outer core (muvstore contains 1/rho)
+          if(idoubling(ispec) == IFLAG_OUTER_CORE_NORMAL) then
+            r = dsqrt(xstore(NGLLX,NGLLY,NGLLZ,ispec)**2 &
+                + ystore(NGLLX,NGLLY,NGLLZ,ispec)**2 &
+                + zstore(NGLLX,NGLLY,NGLLZ,ispec)**2)
+            call prem_display_outer_core(myrank,r,rho,vp,vs, &
+                Qkappa,Qmu,idoubling(ispec))
+            vmax = vp
+          vmin = vp
+          endif
+          if(vmin == 0.0) vmin=vmax
+          avs_dx_adios%vmin(numpoin) = vmin
+          avs_dx_adios%vmax(numpoin) = vmax
+        endif
+
+        if(.not. mask_ibool(iglobval(8))) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglobval(8)) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(1,NGLLY,NGLLZ,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(1,NGLLY,NGLLZ,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(1,NGLLY,NGLLZ,ispec))
+
+          vmax = sqrt((kappavstore(1,NGLLY,NGLLZ,ispec) &
+              + 4.*muvstore(1,NGLLY,NGLLZ,ispec)/3.) &
+              / rhostore(1,NGLLY,NGLLZ,ispec))
+          vmin = sqrt(muvstore(1,NGLLY,NGLLZ,ispec) & 
+              / rhostore(1,NGLLY,NGLLZ,ispec))
+          ! particular case of the outer core (muvstore contains 1/rho)
+          if(idoubling(ispec) == IFLAG_OUTER_CORE_NORMAL) then
+            r = dsqrt(xstore(1,NGLLY,NGLLZ,ispec)**2 &
+                + ystore(1,NGLLY,NGLLZ,ispec)**2 &
+                + zstore(1,NGLLY,NGLLZ,ispec)**2)
+            call prem_display_outer_core(myrank,r,rho,vp,vs, &
+                Qkappa,Qmu,idoubling(ispec))
+            vmax = vp
+            vmin = vp
+          endif
+          if(vmin == 0.0) vmin=vmax
+
+          avs_dx_adios%vmin(numpoin) = vmin
+          avs_dx_adios%vmax(numpoin) = vmax
+        endif
+
+        mask_ibool(iglobval(4)) = .true.
+        mask_ibool(iglobval(3)) = .true.
+        mask_ibool(iglobval(7)) = .true.
+        mask_ibool(iglobval(8)) = .true.
+      endif
+
+    endif
+  enddo
+
+! check that number of global points output is okay
+  if(numpoin /= npoin) &
+    call exit_MPI(myrank,&
+        'incorrect number of global points in AVS or DX file creation')
+
+  ! output global AVS or DX elements
+  ispecface = 0
+  do ispec=1,nspec
+    ! only if on face
+    if(iboun(1,ispec) .or. iboun(2,ispec) .or. &
+        iboun(3,ispec) .or. iboun(4,ispec)) then
+      iglobval(1)=ibool(1,1,1,ispec)
+      iglobval(2)=ibool(NGLLX,1,1,ispec)
+      iglobval(3)=ibool(NGLLX,NGLLY,1,ispec)
+      iglobval(4)=ibool(1,NGLLY,1,ispec)
+      iglobval(5)=ibool(1,1,NGLLZ,ispec)
+      iglobval(6)=ibool(NGLLX,1,NGLLZ,ispec)
+      iglobval(7)=ibool(NGLLX,NGLLY,NGLLZ,ispec)
+      iglobval(8)=ibool(1,NGLLY,NGLLZ,ispec)
+
+      ! include lateral variations if needed
+
+      if(ISOTROPIC_3D_MANTLE) then
+        !   pick a point within the element and get its radius
+        r=dsqrt(xstore(2,2,2,ispec)**2+ystore(2,2,2,ispec)**2 &
+            +zstore(2,2,2,ispec)**2)
+
+        if(r > RCMB/R_EARTH .and. r < R_UNIT_SPHERE) then
+          !     average over the element
+          dvp = 0.0
+          dvs = 0.0
+          np =0
+          do k=2,NGLLZ-1
+            do j=2,NGLLY-1
+              do i=2,NGLLX-1
+                np=np+1
+                x=xstore(i,j,k,ispec)
+                y=ystore(i,j,k,ispec)
+                z=zstore(i,j,k,ispec)
+                r=dsqrt(x*x+y*y+z*z)
+                ! take out ellipticity
+                if(ELLIPTICITY) then
+                  call xyz_2_rthetaphi_dble(x,y,z,r,theta,phi_dummy)
+                  cost=dcos(theta)
+                  p20=0.5d0*(3.0d0*cost*cost-1.0d0)
+                  call spline_evaluation(rspl,espl,espl2,nspl,r,ell)
+                  factor=ONE-(TWO/3.0d0)*ell*p20
+                  r=r/factor
+                endif
+
+
+                ! get reference model values: rho,vpv,vph,vsv,vsh and eta_aniso
+                call meshfem3D_models_get1D_val(myrank,iregion_code, &
+                    idoubling(ispec), &
+                    r,rho,vpv,vph,vsv,vsh,eta_aniso, &
+                    Qkappa,Qmu,RICB,RCMB, &
+                    RTOPDDOUBLEPRIME,R80,R120,R220,R400,R600,R670,R771, &
+                    RMOHO,RMIDDLE_CRUST,ROCEAN)
+
+                ! calculates isotropic values
+                vp = sqrt(((8.d0+4.d0*eta_aniso)*vph*vph + 3.d0*vpv*vpv &
+                        + (8.d0 - 8.d0*eta_aniso)*vsv*vsv)/15.d0)
+                vs = sqrt(((1.d0-2.d0*eta_aniso)*vph*vph + vpv*vpv &
+                        + 5.d0*vsh*vsh + (6.d0+4.d0*eta_aniso)*vsv*vsv)/15.d0)
+
+                if( abs(rhostore(i,j,k,ispec))< 1.e-20 ) then
+                  print*,' attention: rhostore close to zero', &
+                      rhostore(i,j,k,ispec),r,i,j,k,ispec
+                  dvp = 0.0
+                  dvs = 0.0
+                else if( abs(sngl(vp))< 1.e-20 ) then
+                  print*,' attention: vp close to zero', &
+                      sngl(vp),r,i,j,k,ispec
+                  dvp = 0.0
+                else if( abs(sngl(vs))< 1.e-20 ) then
+                  print*,' attention: vs close to zero', &
+                      sngl(vs),r,i,j,k,ispec
+                  dvs = 0.0
+                else
+                  dvp = dvp + (sqrt((kappavstore(i,j,k,ispec) &
+                      +4.*muvstore(i,j,k,ispec)/3.)/rhostore(i,j,k,ispec)) &
+                      - sngl(vp))/sngl(vp)
+                  dvs = dvs + (sqrt(muvstore(i,j,k,ispec)/rhostore(i,j,k,ispec)) &
+                      - sngl(vs))/sngl(vs)
+                endif
+
+              enddo
+            enddo
+          enddo
+          dvp = dvp / np
+          dvs = dvs / np
+        else
+          dvp = 0.0
+          dvs = 0.0
+        endif
+      endif
+
+      ! face xi = xi_min
+      if(iboun(1,ispec)) then
+        ispecface = ispecface + 1
+        avs_dx_adios%idoubling(ispecface) = idoubling(ispec)
+        avs_dx_adios%iglob1(ispecface) = num_ibool_AVS_DX(iglobval(1))
+        avs_dx_adios%iglob2(ispecface) = num_ibool_AVS_DX(iglobval(4))
+        avs_dx_adios%iglob3(ispecface) = num_ibool_AVS_DX(iglobval(8))
+        avs_dx_adios%iglob4(ispecface) = num_ibool_AVS_DX(iglobval(5))
+        if(ISOTROPIC_3D_MANTLE) then
+          avs_dx_adios%dvp(ispecface) = dvp
+          avs_dx_adios%dvs(ispecface) = dvs
+        endif
+      endif
+
+      ! face xi = xi_max
+      if(iboun(2,ispec)) then
+        ispecface = ispecface + 1
+        avs_dx_adios%idoubling(ispecface) = idoubling(ispec)
+        avs_dx_adios%iglob1(ispecface)= num_ibool_AVS_DX(iglobval(2))
+        avs_dx_adios%iglob2(ispecface) = num_ibool_AVS_DX(iglobval(3))
+        avs_dx_adios%iglob3(ispecface) = num_ibool_AVS_DX(iglobval(7))
+        avs_dx_adios%iglob4(ispecface) = num_ibool_AVS_DX(iglobval(6))
+        if(ISOTROPIC_3D_MANTLE) then
+          avs_dx_adios%dvp(ispecface) = dvp
+          avs_dx_adios%dvs(ispecface) = dvs
+        endif
+      endif
+
+    ! face eta = eta_min
+      if(iboun(3,ispec)) then
+        ispecface = ispecface + 1
+        avs_dx_adios%idoubling(ispecface) = idoubling(ispec)
+        avs_dx_adios%iglob1(ispecface) = num_ibool_AVS_DX(iglobval(1))
+        avs_dx_adios%iglob2(ispecface) = num_ibool_AVS_DX(iglobval(2))
+        avs_dx_adios%iglob3(ispecface) = num_ibool_AVS_DX(iglobval(6))
+        avs_dx_adios%iglob4(ispecface) = num_ibool_AVS_DX(iglobval(5))
+        if(ISOTROPIC_3D_MANTLE) then
+          avs_dx_adios%dvp(ispecface) = dvp
+          avs_dx_adios%dvs(ispecface) = dvs
+        endif
+      endif
+
+      ! face eta = eta_max
+      if(iboun(4,ispec)) then
+        ispecface = ispecface + 1
+        avs_dx_adios%idoubling(ispecface) = idoubling(ispec)
+        avs_dx_adios%iglob1(ispecface) = num_ibool_AVS_DX(iglobval(4))
+        avs_dx_adios%iglob2(ispecface) = num_ibool_AVS_DX(iglobval(3))
+        avs_dx_adios%iglob3(ispecface) = num_ibool_AVS_DX(iglobval(7))
+        avs_dx_adios%iglob4(ispecface) = num_ibool_AVS_DX(iglobval(8))
+        if(ISOTROPIC_3D_MANTLE) then
+          avs_dx_adios%dvp(ispecface) = dvp
+          avs_dx_adios%dvs(ispecface) = dvs
+        endif
+      endif
+
+    endif
+  enddo
+
+  ! check that number of surface elements output is okay
+  if(ispecface /= nspecface) &
+    call exit_MPI(myrank, &
+        'incorrect number of surface elements in AVS or DX file creation')
+
+end subroutine prepare_AVS_DX_global_chunks_data_adios
+
+!===============================================================================
+!> Schedule writes of the chunk-face AVS/DX arrays (points and surface
+!! elements) to the ADIOS file.
+!! \param adios_handle Handle to the opened ADIOS file.
+!! \param myrank MPI rank of the current process.
+!! \param sizeprocs Total number of MPI processes.
+!! \param avs_dx_adios Structure holding the data to write (inout for adios_write).
+!! \param ISOTROPIC_3D_MANTLE If .true., also write the dvp/dvs perturbations.
+subroutine write_AVS_DX_global_chunks_data_adios(adios_handle, myrank, &
+    sizeprocs, avs_dx_adios, ISOTROPIC_3D_MANTLE)
+  use mpi
+  use adios_write_mod
+  implicit none
+  !--- Arguments
+  integer(kind=8), intent(in) :: adios_handle
+  integer, intent(in) :: myrank, sizeprocs
+  type(avs_dx_global_chunks_t), intent(inout) :: avs_dx_adios ! out for adios_write
+  logical, intent(in) :: ISOTROPIC_3D_MANTLE
+  !--- Variables
+  integer :: npoin, nspec
+  integer :: ierr
+
+  npoin = avs_dx_adios%npoin
+  nspec = avs_dx_adios%nspecface
+
+  ! coordinates of the points on the chunk faces
+  call adios_set_path(adios_handle, "points_chunks/x_value", ierr)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      npoin, sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%x_adios, ierr)
+
+  call adios_set_path(adios_handle, "points_chunks/y_value", ierr)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      npoin, sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%y_adios, ierr)
+
+  call adios_set_path(adios_handle, "points_chunks/z_value", ierr)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      npoin, sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%z_adios, ierr)
+
+  ! minimum and maximum velocities stored at each point
+  call adios_set_path(adios_handle, "points_chunks/vmin", ierr)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      npoin, sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%vmin, ierr)
+
+  call adios_set_path(adios_handle, "points_chunks/vmax", ierr)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      npoin, sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%vmax, ierr)
+
+  ! per-face element data
+  call adios_set_path(adios_handle, "elements_chunks/idoubling", ierr)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      nspec, sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%idoubling, ierr)
+
+  ! corner point numbers of each surface element
+  call adios_set_path(adios_handle, &
+      "elements_chunks/num_ibool_AVS_DX_iglob1", ierr)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      nspec, sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%iglob1, ierr)
+
+  call adios_set_path(adios_handle, &
+      "elements_chunks/num_ibool_AVS_DX_iglob2", ierr)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      nspec, sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%iglob2, ierr)
+
+  call adios_set_path(adios_handle, &
+      "elements_chunks/num_ibool_AVS_DX_iglob3", ierr)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      nspec, sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%iglob3, ierr)
+
+  call adios_set_path(adios_handle, &
+      "elements_chunks/num_ibool_AVS_DX_iglob4", ierr)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      nspec, sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%iglob4, ierr)
+
+  ! velocity perturbations, only meaningful for a 3D isotropic mantle model
+  if(ISOTROPIC_3D_MANTLE) then
+    call adios_set_path(adios_handle, "elements_chunks/dvp", ierr)
+    call write_1D_global_array_adios_dims(adios_handle, myrank, &
+        nspec, sizeprocs)
+    call adios_write(adios_handle, "array", avs_dx_adios%dvp, ierr)
+    call adios_set_path(adios_handle, "elements_chunks/dvs", ierr)
+    call write_1D_global_array_adios_dims(adios_handle, myrank, &
+        nspec, sizeprocs)
+    call adios_write(adios_handle, "array", avs_dx_adios%dvs, ierr)
+  endif
+
+end subroutine write_AVS_DX_global_chunks_data_adios
+
+!===============================================================================
+!> Free the temporary chunk-face AVS/DX structure after writing.
+!! \param myrank MPI rank of the process (used for error reporting).
+!! \param avs_dx_adios Structure whose arrays are deallocated.
+!! \param ISOTROPIC_3D_MANTLE If .true., dvp/dvs were allocated and are freed too.
+subroutine free_AVS_DX_global_chunks_data_adios(myrank, avs_dx_adios, &
+    ISOTROPIC_3D_MANTLE)
+  implicit none
+  !--- Arguments
+  integer, intent(in) :: myrank
+  type(avs_dx_global_chunks_t), intent(inout) :: avs_dx_adios
+  logical, intent(in) :: ISOTROPIC_3D_MANTLE
+  !--- Variables
+  integer :: ierr
+
+  ! free the point arrays
+  deallocate(avs_dx_adios%x_adios, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error deallocating x_adios.")
+  deallocate(avs_dx_adios%y_adios, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error deallocating y_adios.")
+  deallocate(avs_dx_adios%z_adios, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error deallocating z_adios.")
+
+  deallocate(avs_dx_adios%vmin, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error deallocating vmin.")
+  deallocate(avs_dx_adios%vmax, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error deallocating vmax.")
+
+  ! free the element arrays
+  deallocate(avs_dx_adios%idoubling, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, &
+      "Error deallocating idoubling.")
+  deallocate(avs_dx_adios%iglob1, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, &
+      "Error deallocating num_ibool_AVS_DX_iglob1.")
+  deallocate(avs_dx_adios%iglob2, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, &
+      "Error deallocating num_ibool_AVS_DX_iglob2.")
+  deallocate(avs_dx_adios%iglob3, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, &
+      "Error deallocating num_ibool_AVS_DX_iglob3.")
+  deallocate(avs_dx_adios%iglob4, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, &
+      "Error deallocating num_ibool_AVS_DX_iglob4.")
+
+  ! dvp/dvs only exist for a 3D isotropic mantle model
+  if(ISOTROPIC_3D_MANTLE) then
+    deallocate(avs_dx_adios%dvp, stat=ierr)
+    if (ierr /= 0) call exit_MPI(myrank, &
+        "Error deallocating dvp.")
+    deallocate(avs_dx_adios%dvs, stat=ierr)
+    if (ierr /= 0) call exit_MPI(myrank, &
+        "Error deallocating dvs.")
+  endif
+
+  ! reset counters so a stale structure cannot be reused by mistake
+  avs_dx_adios%npoin = 0
+  avs_dx_adios%nspecface = 0
+end subroutine free_AVS_DX_global_chunks_data_adios
+
+end module

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/write_AVS_DX_global_data_adios.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/write_AVS_DX_global_data_adios.f90	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/write_AVS_DX_global_data_adios.f90	2013-07-01 01:33:15 UTC (rev 22469)
@@ -0,0 +1,470 @@
+!=====================================================================
+!
+!          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
+!          --------------------------------------------------
+!
+!          Main authors: Dimitri Komatitsch and Jeroen Tromp
+!                        Princeton University, USA
+!             and University of Pau / CNRS / INRIA, France
+! (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
+!                            April 2011
+!
+! This program is free software; you can redistribute it and/or modify
+! it under the terms of the GNU General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or
+! (at your option) any later version.
+!
+! This program is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+! GNU General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License along
+! with this program; if not, write to the Free Software Foundation, Inc.,
+! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+!
+!=====================================================================
+
+!-------------------------------------------------------------------------------
+!! \file write_AVS_DX_global_data_adios.f90
+!! \brief Define a module to hold global AVS/DX data (points and elements) and
+!!        provides function to deal with them.
+!! \author MPBL      
+!-------------------------------------------------------------------------------
+
+!===============================================================================
+!> AVS_DX_global_mod module. Hold and write to ADIOS file global data (points
+!! and elements).
+module AVS_DX_global_mod
+
+  implicit none
+
+  ! ADIOS Arrays to write down
+  type avs_dx_global_t
+    integer(kind=4) :: npoin, nspec
+    real(kind=4), dimension(:), allocatable :: x_adios, y_adios, z_adios
+    integer(kind=4), dimension(:), allocatable :: idoubling, iglob1, iglob2, &
+        iglob3, iglob4, iglob5, iglob6, iglob7, iglob8
+  endtype
+
+contains
+
+!===============================================================================
+!> Allocate the structure that hold data to be written; initialize adios vars.
+!! \param adios_group ADIOS group where the variables belong
+!! \param group_size_inc The size of the ADIOS group to increment
+!! \param avs_dx_adios The structure holding the data to be allocated
+!! \param myrank MPI rank (used only for error reporting via exit_MPI)
+!! \param nspec Number of spectral elements in this slice
+!! \param ibool Local (i,j,k,ispec) to global point index mapping
+!! \param npointot Total number of mesh points (size of mask_ibool)
+!! \param mask_ibool Work array; on return marks which global points are used
+subroutine define_AVS_DX_global_data_adios(adios_group, myrank, nspec, ibool, &
+    npointot, mask_ibool, group_size_inc, avs_dx_adios)
+  use mpi
+  use adios_write_mod
+  implicit none
+  include "constants.h"
+  !--- Arguments -------------------------------------------
+  integer(kind=8), intent(in) :: adios_group
+  integer(kind=4), intent(in) :: nspec, npointot, myrank
+  integer(kind=4), intent(in) :: ibool(NGLLX,NGLLY,NGLLZ,nspec)
+  logical, intent(inout) :: mask_ibool(npointot)
+  integer(kind=8), intent(inout) :: group_size_inc
+  type(avs_dx_global_t), intent(inout) :: avs_dx_adios
+  !--- Variables -------------------------------------------
+  integer ispec, npoin, ierr
+  integer iglob1,iglob2,iglob3,iglob4,iglob5,iglob6,iglob7,iglob8
+
+  mask_ibool(:) = .false.
+
+  ! mark global AVS or DX points
+  ! (only the 8 corners of each element are output to AVS/DX)
+  do ispec=1,nspec
+    iglob1=ibool(1,1,1,ispec)
+    iglob2=ibool(NGLLX,1,1,ispec)
+    iglob3=ibool(NGLLX,NGLLY,1,ispec)
+    iglob4=ibool(1,NGLLY,1,ispec)
+    iglob5=ibool(1,1,NGLLZ,ispec)
+    iglob6=ibool(NGLLX,1,NGLLZ,ispec)
+    iglob7=ibool(NGLLX,NGLLY,NGLLZ,ispec)
+    iglob8=ibool(1,NGLLY,NGLLZ,ispec)
+    mask_ibool(iglob1) = .true.
+    mask_ibool(iglob2) = .true.
+    mask_ibool(iglob3) = .true.
+    mask_ibool(iglob4) = .true.
+    mask_ibool(iglob5) = .true.
+    mask_ibool(iglob6) = .true.
+    mask_ibool(iglob7) = .true.
+    mask_ibool(iglob8) = .true.
+  enddo
+
+  ! count global number of AVS or DX points
+  npoin = count(mask_ibool(:))
+
+  avs_dx_adios%npoin = npoin
+  avs_dx_adios%nspec = nspec
+  ! Allocate temporary arrays for AVS/DX points
+  allocate(avs_dx_adios%x_adios(npoin), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating x_adios.")
+  allocate(avs_dx_adios%y_adios(npoin), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating y_adios.")
+  allocate(avs_dx_adios%z_adios(npoin), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating z_adios.")
+
+  ! Allocate temporary arrays for AVS/DX elements.
+  allocate(avs_dx_adios%idoubling(nspec), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating idoubling.")
+  allocate(avs_dx_adios%iglob1(nspec), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating iglob1.")
+  allocate(avs_dx_adios%iglob2(nspec), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating iglob2.")
+  allocate(avs_dx_adios%iglob3(nspec), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating iglob3.")
+  allocate(avs_dx_adios%iglob4(nspec), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating iglob4.")
+  allocate(avs_dx_adios%iglob5(nspec), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating iglob5.")
+  allocate(avs_dx_adios%iglob6(nspec), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating iglob6.")
+  allocate(avs_dx_adios%iglob7(nspec), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating iglob7.")
+  allocate(avs_dx_adios%iglob8(nspec), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating iglob8.")
+
+  ! each define_adios_* call below increments group_size_inc by the size of
+  ! the variable being declared in the ADIOS group
+  !--- Variables for '...AVS_DXpoints.txt'
+  call define_adios_global_real_1d_array(adios_group, "points/x_value", &
+      npoin, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, "points/y_value", &
+      npoin, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, "points/z_value", &
+      npoin, group_size_inc)
+  ! NOTE(review): idoubling and the iglob* arrays are integer(kind=4) but are
+  ! declared with the *real* 1D array helper below -- confirm that
+  ! define_adios_global_real_1d_array is also intended for 4-byte integers,
+  ! or whether an integer variant of the helper should be used instead.
+  !--- Variables for AVS_DXelements.txt
+  call define_adios_global_real_1d_array(adios_group, "elements/idoubling", &
+      nspec, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, &
+      "elements/num_ibool_AVS_DX_iglob1", nspec, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, &
+      "elements/num_ibool_AVS_DX_iglob2", nspec, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, &
+      "elements/num_ibool_AVS_DX_iglob3", nspec, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, &
+      "elements/num_ibool_AVS_DX_iglob4", nspec, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, &
+      "elements/num_ibool_AVS_DX_iglob5", nspec, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, &
+      "elements/num_ibool_AVS_DX_iglob6", nspec, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, &
+      "elements/num_ibool_AVS_DX_iglob7", nspec, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, &
+      "elements/num_ibool_AVS_DX_iglob8", nspec, group_size_inc)
+
+end subroutine define_AVS_DX_global_data_adios
+
+
+!===============================================================================
+!> Prepare the global AVS/DX data to be written; fill the structure.
+!! \param adios_handle The handle to the ADIOS file to be written.
+!! \param myrank The MPI rank of the current process.
+!! \param avs_dx_adios The structure to be filled.
+!!
+!! Create AVS or DX 3D data for the slice, to be recombined in postprocessing.
+subroutine prepare_AVS_DX_global_data_adios(adios_handle, myrank, &
+    nspec, ibool, idoubling, xstore, ystore, zstore, num_ibool_AVS_DX, &
+    mask_ibool, npointot, avs_dx_adios)
+  use mpi
+  use adios_write_mod
+
+  implicit none
+
+  include "constants.h"
+
+  !--- Arguments
+  ! adios_handle is currently unused here; kept for interface consistency
+  ! with the other prepare_* routines
+  integer(kind=8), intent(in)    :: adios_handle
+  integer, intent(in) :: nspec, myrank
+  integer, intent(in) :: ibool(NGLLX,NGLLY,NGLLZ,nspec)
+
+  integer, intent(in) :: idoubling(nspec)
+
+  double precision, intent(in) :: xstore(NGLLX,NGLLY,NGLLZ,nspec)
+  double precision, intent(in) :: ystore(NGLLX,NGLLY,NGLLZ,nspec)
+  double precision, intent(in) :: zstore(NGLLX,NGLLY,NGLLZ,nspec)
+
+  ! logical mask used to output global points only once
+  integer, intent(in) :: npointot
+  logical, intent(inout) :: mask_ibool(npointot)
+
+  ! numbering of global AVS or DX points
+  integer, intent(inout) :: num_ibool_AVS_DX(npointot)
+
+  type(avs_dx_global_t), intent(inout) :: avs_dx_adios
+
+  !--- Variables
+  integer ispec
+  integer iglob1,iglob2,iglob3,iglob4,iglob5,iglob6,iglob7,iglob8
+  integer npoin,numpoin
+
+! erase the logical mask used to mark points already found
+  mask_ibool(:) = .false.
+
+! mark global AVS or DX points (the 8 corners of each element)
+  do ispec=1,nspec
+    iglob1=ibool(1,1,1,ispec)
+    iglob2=ibool(NGLLX,1,1,ispec)
+    iglob3=ibool(NGLLX,NGLLY,1,ispec)
+    iglob4=ibool(1,NGLLY,1,ispec)
+    iglob5=ibool(1,1,NGLLZ,ispec)
+    iglob6=ibool(NGLLX,1,NGLLZ,ispec)
+    iglob7=ibool(NGLLX,NGLLY,NGLLZ,ispec)
+    iglob8=ibool(1,NGLLY,NGLLZ,ispec)
+    mask_ibool(iglob1) = .true.
+    mask_ibool(iglob2) = .true.
+    mask_ibool(iglob3) = .true.
+    mask_ibool(iglob4) = .true.
+    mask_ibool(iglob5) = .true.
+    mask_ibool(iglob6) = .true.
+    mask_ibool(iglob7) = .true.
+    mask_ibool(iglob8) = .true.
+  enddo
+
+  ! count global number of AVS or DX points
+  npoin = count(mask_ibool(:))
+
+  ! erase the logical mask used to mark points already found
+  mask_ibool(:) = .false.
+
+  ! fill the structure with global AVS or DX points;
+  ! each shared corner point is output only once (mask_ibool)
+  numpoin = 0
+  do ispec=1,nspec
+    iglob1=ibool(1,1,1,ispec)
+    iglob2=ibool(NGLLX,1,1,ispec)
+    iglob3=ibool(NGLLX,NGLLY,1,ispec)
+    iglob4=ibool(1,NGLLY,1,ispec)
+    iglob5=ibool(1,1,NGLLZ,ispec)
+    iglob6=ibool(NGLLX,1,NGLLZ,ispec)
+    iglob7=ibool(NGLLX,NGLLY,NGLLZ,ispec)
+    iglob8=ibool(1,NGLLY,NGLLZ,ispec)
+    if(.not. mask_ibool(iglob1)) then
+      numpoin = numpoin + 1
+      num_ibool_AVS_DX(iglob1) = numpoin
+      avs_dx_adios%x_adios(numpoin) = sngl(xstore(1,1,1,ispec))
+      avs_dx_adios%y_adios(numpoin) = sngl(ystore(1,1,1,ispec))
+      avs_dx_adios%z_adios(numpoin) = sngl(zstore(1,1,1,ispec))
+    endif
+    if(.not. mask_ibool(iglob2)) then
+      numpoin = numpoin + 1
+      num_ibool_AVS_DX(iglob2) = numpoin
+      avs_dx_adios%x_adios(numpoin) = sngl(xstore(NGLLX,1,1,ispec))
+      avs_dx_adios%y_adios(numpoin) = sngl(ystore(NGLLX,1,1,ispec))
+      avs_dx_adios%z_adios(numpoin) = sngl(zstore(NGLLX,1,1,ispec))
+    endif
+    if(.not. mask_ibool(iglob3)) then
+      numpoin = numpoin + 1
+      num_ibool_AVS_DX(iglob3) = numpoin
+      avs_dx_adios%x_adios(numpoin) = sngl(xstore(NGLLX,NGLLY,1,ispec))
+      avs_dx_adios%y_adios(numpoin) = sngl(ystore(NGLLX,NGLLY,1,ispec))
+      avs_dx_adios%z_adios(numpoin) = sngl(zstore(NGLLX,NGLLY,1,ispec))
+    endif
+    if(.not. mask_ibool(iglob4)) then
+      numpoin = numpoin + 1
+      num_ibool_AVS_DX(iglob4) = numpoin
+      avs_dx_adios%x_adios(numpoin) = sngl(xstore(1,NGLLY,1,ispec))
+      avs_dx_adios%y_adios(numpoin) = sngl(ystore(1,NGLLY,1,ispec))
+      avs_dx_adios%z_adios(numpoin) = sngl(zstore(1,NGLLY,1,ispec))
+    endif
+    if(.not. mask_ibool(iglob5)) then
+      numpoin = numpoin + 1
+      num_ibool_AVS_DX(iglob5) = numpoin
+      avs_dx_adios%x_adios(numpoin) = sngl(xstore(1,1,NGLLZ,ispec))
+      avs_dx_adios%y_adios(numpoin) = sngl(ystore(1,1,NGLLZ,ispec))
+      avs_dx_adios%z_adios(numpoin) = sngl(zstore(1,1,NGLLZ,ispec))
+    endif
+    if(.not. mask_ibool(iglob6)) then
+      numpoin = numpoin + 1
+      num_ibool_AVS_DX(iglob6) = numpoin
+      avs_dx_adios%x_adios(numpoin) = sngl(xstore(NGLLX,1,NGLLZ,ispec))
+      avs_dx_adios%y_adios(numpoin) = sngl(ystore(NGLLX,1,NGLLZ,ispec))
+      avs_dx_adios%z_adios(numpoin) = sngl(zstore(NGLLX,1,NGLLZ,ispec))
+    endif
+    if(.not. mask_ibool(iglob7)) then
+      numpoin = numpoin + 1
+      num_ibool_AVS_DX(iglob7) = numpoin
+      avs_dx_adios%x_adios(numpoin) = sngl(xstore(NGLLX,NGLLY,NGLLZ,ispec))
+      avs_dx_adios%y_adios(numpoin) = sngl(ystore(NGLLX,NGLLY,NGLLZ,ispec))
+      avs_dx_adios%z_adios(numpoin) = sngl(zstore(NGLLX,NGLLY,NGLLZ,ispec))
+    endif
+    if(.not. mask_ibool(iglob8)) then
+      numpoin = numpoin + 1
+      num_ibool_AVS_DX(iglob8) = numpoin
+      avs_dx_adios%x_adios(numpoin) = sngl(xstore(1,NGLLY,NGLLZ,ispec))
+      avs_dx_adios%y_adios(numpoin) = sngl(ystore(1,NGLLY,NGLLZ,ispec))
+      avs_dx_adios%z_adios(numpoin) = sngl(zstore(1,NGLLY,NGLLZ,ispec))
+    endif
+    mask_ibool(iglob1) = .true.
+    mask_ibool(iglob2) = .true.
+    mask_ibool(iglob3) = .true.
+    mask_ibool(iglob4) = .true.
+    mask_ibool(iglob5) = .true.
+    mask_ibool(iglob6) = .true.
+    mask_ibool(iglob7) = .true.
+    mask_ibool(iglob8) = .true.
+  enddo
+
+  ! check that number of global points output is okay
+  if(numpoin /= npoin) &
+    call exit_MPI(myrank, &
+        'incorrect number of global points in AVS or DX file creation')
+
+  ! AVS or DX elements: store the renumbered corner points of each element
+  do ispec=1,nspec
+    iglob1=ibool(1,1,1,ispec)
+    iglob2=ibool(NGLLX,1,1,ispec)
+    iglob3=ibool(NGLLX,NGLLY,1,ispec)
+    iglob4=ibool(1,NGLLY,1,ispec)
+    iglob5=ibool(1,1,NGLLZ,ispec)
+    iglob6=ibool(NGLLX,1,NGLLZ,ispec)
+    iglob7=ibool(NGLLX,NGLLY,NGLLZ,ispec)
+    iglob8=ibool(1,NGLLY,NGLLZ,ispec)
+
+    ! bugfix: the original code assigned to the whole arrays (no (ispec)
+    ! subscript), which overwrote every element with the current element's
+    ! corner numbers on each iteration; element-wise assignment is intended
+    avs_dx_adios%iglob1(ispec) = num_ibool_AVS_DX(iglob1)
+    avs_dx_adios%iglob2(ispec) = num_ibool_AVS_DX(iglob2)
+    avs_dx_adios%iglob3(ispec) = num_ibool_AVS_DX(iglob3)
+    avs_dx_adios%iglob4(ispec) = num_ibool_AVS_DX(iglob4)
+    avs_dx_adios%iglob5(ispec) = num_ibool_AVS_DX(iglob5)
+    avs_dx_adios%iglob6(ispec) = num_ibool_AVS_DX(iglob6)
+    avs_dx_adios%iglob7(ispec) = num_ibool_AVS_DX(iglob7)
+    avs_dx_adios%iglob8(ispec) = num_ibool_AVS_DX(iglob8)
+  enddo
+  avs_dx_adios%idoubling = idoubling
+end subroutine prepare_AVS_DX_global_data_adios
+
+!===============================================================================
+!> Schedule write to ADIOS file for global AVS/DX data
+!! \param adios_handle The handle to the ADIOS file we want to write into
+!! \param nspec Number of spectral elements
+!! \param avs_dx_adios Structure with the data that has to be written
+subroutine write_AVS_DX_global_data_adios(adios_handle, myrank, &
+    sizeprocs, avs_dx_adios)
+  use mpi
+  use adios_write_mod
+  implicit none
+  !--- Arguments
+  integer(kind=8), intent(in) :: adios_handle
+  integer, intent(in) :: myrank, sizeprocs
+  type(avs_dx_global_t), intent(inout) :: avs_dx_adios ! out for adios_write
+  !--- Variables
+  integer :: npoin, nspec
+  integer :: ierr
+
+  npoin = avs_dx_adios%npoin
+  nspec = avs_dx_adios%nspec
+
+  ! point coordinates
+  call adios_set_path(adios_handle, "points/x_value", ierr)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      npoin, sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%x_adios, ierr)
+
+  call adios_set_path(adios_handle, "points/y_value", ierr)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      npoin, sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%y_adios, ierr)
+
+  call adios_set_path(adios_handle, "points/z_value", ierr)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      npoin, sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%z_adios, ierr)
+
+  ! per-element data
+  call adios_set_path(adios_handle, "elements/idoubling", ierr)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      nspec, sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%idoubling, ierr)
+
+  ! corner point numbers of each element
+  call adios_set_path(adios_handle, "elements/num_ibool_AVS_DX_iglob1", ierr)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      nspec, sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%iglob1, ierr)
+
+  call adios_set_path(adios_handle, "elements/num_ibool_AVS_DX_iglob2", ierr)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      nspec, sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%iglob2, ierr)
+
+  call adios_set_path(adios_handle, "elements/num_ibool_AVS_DX_iglob3", ierr)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      nspec, sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%iglob3, ierr)
+
+  call adios_set_path(adios_handle, "elements/num_ibool_AVS_DX_iglob4", ierr)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      nspec, sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%iglob4, ierr)
+
+  call adios_set_path(adios_handle, "elements/num_ibool_AVS_DX_iglob5", ierr)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      nspec, sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%iglob5, ierr)
+
+  call adios_set_path(adios_handle, "elements/num_ibool_AVS_DX_iglob6", ierr)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      nspec, sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%iglob6, ierr)
+
+  call adios_set_path(adios_handle, "elements/num_ibool_AVS_DX_iglob7", ierr)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      nspec, sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%iglob7, ierr)
+
+  ! bugfix: the path was mistakenly "..._iglob1" here (copy/paste error),
+  ! so iglob8 was written under the iglob1 path and the iglob8 variable
+  ! defined in define_AVS_DX_global_data_adios was never written
+  call adios_set_path(adios_handle, "elements/num_ibool_AVS_DX_iglob8", ierr)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, &
+      nspec, sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%iglob8, ierr)
+end subroutine write_AVS_DX_global_data_adios
+
+!===============================================================================
+!> Free temporary structure filled to write AVS/DX global variable to file.
+!! \param myrank The MPI rank of the process
+!! \param avs_dx_adios The structure holding AVS/DX information
+subroutine free_AVS_DX_global_data_adios(myrank, avs_dx_adios)
+  implicit none
+  !--- Arguments
+  integer, intent(in) :: myrank
+  type(avs_dx_global_t), intent(inout) :: avs_dx_adios
+  !--- Variables
+  integer :: ierr
+
+  ! free the point arrays
+  deallocate(avs_dx_adios%x_adios, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error deallocating x_adios.")
+  deallocate(avs_dx_adios%y_adios, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error deallocating y_adios.")
+  deallocate(avs_dx_adios%z_adios, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error deallocating z_adios.")
+
+  ! free the element arrays
+  deallocate(avs_dx_adios%idoubling, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, &
+      "Error deallocating idoubling.")
+  deallocate(avs_dx_adios%iglob1, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, &
+      "Error deallocating num_ibool_AVS_DX_iglob1.")
+  deallocate(avs_dx_adios%iglob2, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, &
+      "Error deallocating num_ibool_AVS_DX_iglob2.")
+  deallocate(avs_dx_adios%iglob3, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, &
+      "Error deallocating num_ibool_AVS_DX_iglob3.")
+  deallocate(avs_dx_adios%iglob4, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, &
+      "Error deallocating num_ibool_AVS_DX_iglob4.")
+  deallocate(avs_dx_adios%iglob5, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, &
+      "Error deallocating num_ibool_AVS_DX_iglob5.")
+  deallocate(avs_dx_adios%iglob6, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, &
+      "Error deallocating num_ibool_AVS_DX_iglob6.")
+  deallocate(avs_dx_adios%iglob7, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, &
+      "Error deallocating num_ibool_AVS_DX_iglob7.")
+  deallocate(avs_dx_adios%iglob8, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, &
+      "Error deallocating num_ibool_AVS_DX_iglob8.")
+
+  ! reset counters so a stale structure cannot be reused by mistake
+  avs_dx_adios%npoin = 0
+  avs_dx_adios%nspec = 0
+end subroutine free_AVS_DX_global_data_adios
+
+end module AVS_DX_global_mod

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/write_AVS_DX_global_faces_data_adios.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/write_AVS_DX_global_faces_data_adios.f90	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/write_AVS_DX_global_faces_data_adios.f90	2013-07-01 01:33:15 UTC (rev 22469)
@@ -0,0 +1,825 @@
+!=====================================================================
+!
+!          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
+!          --------------------------------------------------
+!
+!          Main authors: Dimitri Komatitsch and Jeroen Tromp
+!                        Princeton University, USA
+!             and University of Pau / CNRS / INRIA, France
+! (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
+!                            April 2011
+!
+! This program is free software; you can redistribute it and/or modify
+! it under the terms of the GNU General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or
+! (at your option) any later version.
+!
+! This program is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+! GNU General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License along
+! with this program; if not, write to the Free Software Foundation, Inc.,
+! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+!
+!=====================================================================
+
+!-------------------------------------------------------------------------------
+!> \file write_AVS_DX_global_faces_data_adios.f90
+!! \brief create AVS or DX 2D data for the faces of the slice,
+!!        to be recombined in postprocessing
+!! \author MPBL
+!-------------------------------------------------------------------------------
+
+!===============================================================================
+module AVS_DX_global_faces_mod
+
+  implicit none
+
+  ! Per-rank buffers for the AVS/DX face output (points and quad elements
+  ! lying on the MPI cut planes of this slice).
+  type avs_dx_global_faces_t
+    integer(kind=4) :: npoin, nspecface  ! number of points / of face elements
+    ! point coordinates, single precision for output
+    real(kind=4), dimension(:), allocatable :: x_adios, y_adios, z_adios
+    ! per-face doubling flag and the four corner point indices
+    integer(kind=4), dimension(:), allocatable :: idoubling, iglob1, iglob2, &
+        iglob3, iglob4
+    ! relative velocity perturbations; only allocated when ISOTROPIC_3D_MANTLE
+    real, dimension(:), allocatable :: dvp, dvs
+  endtype
+
+contains
+
+!===============================================================================
+!===============================================================================
+!> Count the AVS/DX points and face elements on the MPI cut planes of this
+!! slice, allocate the matching buffers in \p avs_dx_adios, and register one
+!! ADIOS variable per output array (incrementing \p group_size_inc).
+!! The buffers are filled later by prepare_AVS_DX_global_faces_data_adios()
+!! and written by write_AVS_DX_global_faces_data_adios().
+subroutine define_AVS_DX_global_faces_data_adios (adios_group, &
+    myrank, prname, nspec, iMPIcut_xi,iMPIcut_eta, &
+    ibool,idoubling,xstore,ystore,zstore,num_ibool_AVS_DX,mask_ibool, &
+    npointot,rhostore,kappavstore,muvstore,nspl,rspl,espl,espl2, &
+    ELLIPTICITY,ISOTROPIC_3D_MANTLE, &
+    RICB,RCMB,RTOPDDOUBLEPRIME,R600,R670,R220,R771,R400,R120,R80,RMOHO, &
+    RMIDDLE_CRUST,ROCEAN,iregion_code, &
+    group_size_inc, avs_dx_adios)
+  use mpi
+  use adios_write_mod
+
+  implicit none
+  include "constants.h"
+
+  integer(kind=8), intent(in) :: adios_group
+  integer(kind=8), intent(inout) :: group_size_inc
+
+  integer nspec,myrank
+  integer ibool(NGLLX,NGLLY,NGLLZ,nspec)
+
+  integer idoubling(nspec)
+
+  logical ELLIPTICITY,ISOTROPIC_3D_MANTLE
+
+  logical iMPIcut_xi(2,nspec)
+  logical iMPIcut_eta(2,nspec)
+
+  double precision RICB,RCMB,RTOPDDOUBLEPRIME,R600,R670,R220,R771, &
+      R400,R120,R80,RMOHO,RMIDDLE_CRUST,ROCEAN
+
+  double precision xstore(NGLLX,NGLLY,NGLLZ,nspec)
+  double precision ystore(NGLLX,NGLLY,NGLLZ,nspec)
+  double precision zstore(NGLLX,NGLLY,NGLLZ,nspec)
+
+  real(kind=CUSTOM_REAL) kappavstore(NGLLX,NGLLY,NGLLZ,nspec)
+  real(kind=CUSTOM_REAL) muvstore(NGLLX,NGLLY,NGLLZ,nspec)
+  real(kind=CUSTOM_REAL) rhostore(NGLLX,NGLLY,NGLLZ,nspec)
+
+! logical mask used to output global points only once
+  integer npointot
+  logical mask_ibool(npointot)
+
+! numbering of global AVS or DX points
+  integer num_ibool_AVS_DX(npointot)
+
+  integer ispec
+  integer i,j,k,np
+  integer iglob1,iglob2,iglob3,iglob4,iglob5,iglob6,iglob7,iglob8
+  integer npoin,numpoin,nspecface,ispecface
+
+  double precision r,rho,vp,vs,Qkappa,Qmu
+  double precision vpv,vph,vsv,vsh,eta_aniso
+  double precision x,y,z,theta,phi_dummy,cost,p20,ell,factor
+  real(kind=CUSTOM_REAL) dvp,dvs
+
+! for ellipticity
+  integer nspl
+  double precision rspl(NR),espl(NR),espl2(NR)
+
+! processor identification
+  character(len=150) prname
+
+  integer iregion_code
+
+  type(avs_dx_global_faces_t), intent(inout) :: avs_dx_adios
+
+  integer :: ierr
+
+  ! erase the logical mask used to mark points already found
+  mask_ibool(:) = .false.
+
+  nspecface = 0
+
+  ! mark global AVS or DX points
+  do ispec=1,nspec
+    ! only if on face
+    if(iMPIcut_xi(1,ispec) .or. iMPIcut_xi(2,ispec) .or. &
+        iMPIcut_eta(1,ispec) .or. iMPIcut_eta(2,ispec)) then
+      iglob1=ibool(1,1,1,ispec)
+      iglob2=ibool(NGLLX,1,1,ispec)
+      iglob3=ibool(NGLLX,NGLLY,1,ispec)
+      iglob4=ibool(1,NGLLY,1,ispec)
+      iglob5=ibool(1,1,NGLLZ,ispec)
+      iglob6=ibool(NGLLX,1,NGLLZ,ispec)
+      iglob7=ibool(NGLLX,NGLLY,NGLLZ,ispec)
+      iglob8=ibool(1,NGLLY,NGLLZ,ispec)
+
+      ! face xi = xi_min
+      if(iMPIcut_xi(1,ispec)) then
+        nspecface = nspecface + 1
+        mask_ibool(iglob1) = .true.
+        mask_ibool(iglob4) = .true.
+        mask_ibool(iglob8) = .true.
+        mask_ibool(iglob5) = .true.
+      endif
+
+    ! face xi = xi_max
+      if(iMPIcut_xi(2,ispec)) then
+        nspecface = nspecface + 1
+        mask_ibool(iglob2) = .true.
+        mask_ibool(iglob3) = .true.
+        mask_ibool(iglob7) = .true.
+        mask_ibool(iglob6) = .true.
+      endif
+
+    ! face eta = eta_min
+      if(iMPIcut_eta(1,ispec)) then
+        nspecface = nspecface + 1
+        mask_ibool(iglob1) = .true.
+        mask_ibool(iglob2) = .true.
+        mask_ibool(iglob6) = .true.
+        mask_ibool(iglob5) = .true.
+      endif
+
+    ! face eta = eta_max
+      if(iMPIcut_eta(2,ispec)) then
+        nspecface = nspecface + 1
+        mask_ibool(iglob4) = .true.
+        mask_ibool(iglob3) = .true.
+        mask_ibool(iglob7) = .true.
+        mask_ibool(iglob8) = .true.
+      endif
+    endif
+  enddo
+  ! count global number of AVS or DX points
+  npoin = count(mask_ibool(:))
+
+  avs_dx_adios%npoin = npoin
+  avs_dx_adios%nspecface = nspecface
+
+  allocate(avs_dx_adios%x_adios(npoin), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating x_adios.")
+  allocate(avs_dx_adios%y_adios(npoin), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating y_adios.")
+  allocate(avs_dx_adios%z_adios(npoin), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating z_adios.")
+
+  ! Allocate temporary arrays for AVS/DX elements.
+  allocate(avs_dx_adios%idoubling(nspecface), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating idoubling.")
+  allocate(avs_dx_adios%iglob1(nspecface), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating iglob1.")
+  allocate(avs_dx_adios%iglob2(nspecface), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating iglob2.")
+  allocate(avs_dx_adios%iglob3(nspecface), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating iglob3.")
+  allocate(avs_dx_adios%iglob4(nspecface), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating iglob4.")
+
+  !--- Variables for '...AVS_DXpointsfaces.txt'
+  call define_adios_global_real_1d_array(adios_group, "points_faces/x_value", &
+      npoin, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, "points_faces/y_value", &
+      npoin, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, "points_faces/z_value", &
+      npoin, group_size_inc)
+  !--- Variables for AVS_DXelementsfaces.txt
+  call define_adios_global_real_1d_array(adios_group, &
+      "elements_faces/idoubling", nspecface, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, &
+      "elements_faces/num_ibool_AVS_DX_iglob1", nspecface, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, &
+      "elements_faces/num_ibool_AVS_DX_iglob2", nspecface, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, &
+      "elements_faces/num_ibool_AVS_DX_iglob3", nspecface, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, &
+      "elements_faces/num_ibool_AVS_DX_iglob4", nspecface, group_size_inc)
+
+  if(ISOTROPIC_3D_MANTLE) then
+    allocate(avs_dx_adios%dvp(nspecface), stat=ierr)
+    if (ierr /= 0) call exit_MPI(myrank, "Error allocating dvp.")
+    allocate(avs_dx_adios%dvs(nspecface), stat=ierr)
+    if (ierr /= 0) call exit_MPI(myrank, "Error allocating dvs.")
+    ! bugfix: the element count must be nspecface (the uninitialized real
+    ! scalars dvp/dvs were passed before), and dvs must be registered under
+    ! its own path "elements_faces/dvs" (it was previously defined a second
+    ! time under "elements_faces/dvp"); these paths match the ones used in
+    ! write_AVS_DX_global_faces_data_adios().
+    call define_adios_global_real_1d_array(adios_group, &
+        "elements_faces/dvp", nspecface, group_size_inc)
+    call define_adios_global_real_1d_array(adios_group, &
+        "elements_faces/dvs", nspecface, group_size_inc)
+  endif
+
+end subroutine define_AVS_DX_global_faces_data_adios
+
+!===============================================================================
+!===============================================================================
+!> Fill the AVS/DX face buffers of \p avs_dx_adios for this slice.
+!! Three passes over the elements on the MPI cut planes:
+!!  1) mark the global points on the cut faces and count them (npoin),
+!!  2) store the coordinates of each marked point exactly once and build the
+!!     AVS/DX point numbering num_ibool_AVS_DX,
+!!  3) store, per face element, the doubling flag, the four corner point
+!!     numbers and (if ISOTROPIC_3D_MANTLE) the element-averaged velocity
+!!     perturbations dvp/dvs relative to the 1D reference model.
+!! NOTE(review): the buffers must have been allocated beforehand by
+!! define_AVS_DX_global_faces_data_adios() with matching face/point counts.
+subroutine prepare_AVS_DX_global_faces_data_adios (myrank, prname, nspec, &
+        iMPIcut_xi,iMPIcut_eta, &
+        ibool,idoubling,xstore,ystore,zstore,num_ibool_AVS_DX,mask_ibool, &
+        npointot,rhostore,kappavstore,muvstore,nspl,rspl,espl,espl2, &
+        ELLIPTICITY,ISOTROPIC_3D_MANTLE, &
+        RICB,RCMB,RTOPDDOUBLEPRIME,R600,R670,R220,R771,R400,R120,R80,RMOHO, &
+        RMIDDLE_CRUST,ROCEAN,iregion_code, &
+        avs_dx_adios)
+
+  implicit none
+
+  include "constants.h"
+
+  integer nspec,myrank
+  integer ibool(NGLLX,NGLLY,NGLLZ,nspec)
+
+  integer idoubling(nspec)
+
+  logical ELLIPTICITY,ISOTROPIC_3D_MANTLE
+
+  logical iMPIcut_xi(2,nspec)
+  logical iMPIcut_eta(2,nspec)
+
+  double precision RICB,RCMB,RTOPDDOUBLEPRIME,R600,R670,R220,R771, &
+    R400,R120,R80,RMOHO,RMIDDLE_CRUST,ROCEAN
+
+  double precision xstore(NGLLX,NGLLY,NGLLZ,nspec)
+  double precision ystore(NGLLX,NGLLY,NGLLZ,nspec)
+  double precision zstore(NGLLX,NGLLY,NGLLZ,nspec)
+
+  real(kind=CUSTOM_REAL) kappavstore(NGLLX,NGLLY,NGLLZ,nspec)
+  real(kind=CUSTOM_REAL) muvstore(NGLLX,NGLLY,NGLLZ,nspec)
+  real(kind=CUSTOM_REAL) rhostore(NGLLX,NGLLY,NGLLZ,nspec)
+
+! logical mask used to output global points only once
+  integer npointot
+  logical mask_ibool(npointot)
+
+! numbering of global AVS or DX points
+  integer num_ibool_AVS_DX(npointot)
+
+  integer ispec
+  integer i,j,k,np
+  integer iglob1,iglob2,iglob3,iglob4,iglob5,iglob6,iglob7,iglob8
+  integer npoin,numpoin,nspecface,ispecface
+
+  double precision r,rho,vp,vs,Qkappa,Qmu
+  double precision vpv,vph,vsv,vsh,eta_aniso
+  double precision x,y,z,theta,phi_dummy,cost,p20,ell,factor
+  real(kind=CUSTOM_REAL) dvp,dvs
+
+! for ellipticity
+  integer nspl
+  double precision rspl(NR),espl(NR),espl2(NR)
+
+! processor identification
+  character(len=150) prname
+
+  integer iregion_code
+
+  type(avs_dx_global_faces_t), intent(inout) :: avs_dx_adios
+
+  ! erase the logical mask used to mark points already found
+  mask_ibool(:) = .false.
+
+  nspecface = 0
+
+! mark global AVS or DX points
+  do ispec=1,nspec
+  ! only if on face
+    if(iMPIcut_xi(1,ispec) .or. iMPIcut_xi(2,ispec) .or. &
+                iMPIcut_eta(1,ispec) .or. iMPIcut_eta(2,ispec)) then
+      ! the eight corner points of the element
+      iglob1=ibool(1,1,1,ispec)
+      iglob2=ibool(NGLLX,1,1,ispec)
+      iglob3=ibool(NGLLX,NGLLY,1,ispec)
+      iglob4=ibool(1,NGLLY,1,ispec)
+      iglob5=ibool(1,1,NGLLZ,ispec)
+      iglob6=ibool(NGLLX,1,NGLLZ,ispec)
+      iglob7=ibool(NGLLX,NGLLY,NGLLZ,ispec)
+      iglob8=ibool(1,NGLLY,NGLLZ,ispec)
+
+      ! face xi = xi_min
+      if(iMPIcut_xi(1,ispec)) then
+        nspecface = nspecface + 1
+        mask_ibool(iglob1) = .true.
+        mask_ibool(iglob4) = .true.
+        mask_ibool(iglob8) = .true.
+        mask_ibool(iglob5) = .true.
+      endif
+
+      ! face xi = xi_max
+      if(iMPIcut_xi(2,ispec)) then
+        nspecface = nspecface + 1
+        mask_ibool(iglob2) = .true.
+        mask_ibool(iglob3) = .true.
+        mask_ibool(iglob7) = .true.
+        mask_ibool(iglob6) = .true.
+      endif
+
+      ! face eta = eta_min
+      if(iMPIcut_eta(1,ispec)) then
+        nspecface = nspecface + 1
+        mask_ibool(iglob1) = .true.
+        mask_ibool(iglob2) = .true.
+        mask_ibool(iglob6) = .true.
+        mask_ibool(iglob5) = .true.
+      endif
+
+      ! face eta = eta_max
+      if(iMPIcut_eta(2,ispec)) then
+        nspecface = nspecface + 1
+        mask_ibool(iglob4) = .true.
+        mask_ibool(iglob3) = .true.
+        mask_ibool(iglob7) = .true.
+        mask_ibool(iglob8) = .true.
+      endif
+
+    endif
+  enddo
+
+  ! count global number of AVS or DX points
+  npoin = count(mask_ibool(:))
+
+  ! erase the logical mask used to mark points already found
+  mask_ibool(:) = .false.
+
+  ! output global AVS or DX points
+  ! second pass: store each masked corner point once and assign it the next
+  ! AVS/DX point number in num_ibool_AVS_DX
+  numpoin = 0
+  do ispec=1,nspec
+    ! only if on face
+    if(iMPIcut_xi(1,ispec) .or. iMPIcut_xi(2,ispec) .or. &
+                iMPIcut_eta(1,ispec) .or. iMPIcut_eta(2,ispec)) then
+      iglob1=ibool(1,1,1,ispec)
+      iglob2=ibool(NGLLX,1,1,ispec)
+      iglob3=ibool(NGLLX,NGLLY,1,ispec)
+      iglob4=ibool(1,NGLLY,1,ispec)
+      iglob5=ibool(1,1,NGLLZ,ispec)
+      iglob6=ibool(NGLLX,1,NGLLZ,ispec)
+      iglob7=ibool(NGLLX,NGLLY,NGLLZ,ispec)
+      iglob8=ibool(1,NGLLY,NGLLZ,ispec)
+
+      ! face xi = xi_min
+      if(iMPIcut_xi(1,ispec)) then
+        if(.not. mask_ibool(iglob1)) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglob1) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(1,1,1,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(1,1,1,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(1,1,1,ispec))
+        endif
+        if(.not. mask_ibool(iglob4)) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglob4) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(1,NGLLY,1,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(1,NGLLY,1,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(1,NGLLY,1,ispec))
+        endif
+        if(.not. mask_ibool(iglob8)) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglob8) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(1,NGLLY,NGLLZ,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(1,NGLLY,NGLLZ,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(1,NGLLY,NGLLZ,ispec))
+        endif
+        if(.not. mask_ibool(iglob5)) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglob5) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(1,1,NGLLZ,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(1,1,NGLLZ,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(1,1,NGLLZ,ispec))
+        endif
+        mask_ibool(iglob1) = .true.
+        mask_ibool(iglob4) = .true.
+        mask_ibool(iglob8) = .true.
+        mask_ibool(iglob5) = .true.
+      endif
+
+      ! face xi = xi_max
+      if(iMPIcut_xi(2,ispec)) then
+        if(.not. mask_ibool(iglob2)) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglob2) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(NGLLX,1,1,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(NGLLX,1,1,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(NGLLX,1,1,ispec))
+        endif
+        if(.not. mask_ibool(iglob3)) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglob3) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(NGLLX,NGLLY,1,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(NGLLX,NGLLY,1,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(NGLLX,NGLLY,1,ispec))
+        endif
+        if(.not. mask_ibool(iglob7)) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglob7) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(NGLLX,NGLLY,NGLLZ,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(NGLLX,NGLLY,NGLLZ,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(NGLLX,NGLLY,NGLLZ,ispec))
+        endif
+        if(.not. mask_ibool(iglob6)) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglob6) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(NGLLX,1,NGLLZ,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(NGLLX,1,NGLLZ,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(NGLLX,1,NGLLZ,ispec))
+        endif
+        mask_ibool(iglob2) = .true.
+        mask_ibool(iglob3) = .true.
+        mask_ibool(iglob7) = .true.
+        mask_ibool(iglob6) = .true.
+      endif
+
+      ! face eta = eta_min
+      if(iMPIcut_eta(1,ispec)) then
+        if(.not. mask_ibool(iglob1)) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglob1) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(1,1,1,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(1,1,1,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(1,1,1,ispec))
+        endif
+        if(.not. mask_ibool(iglob2)) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglob2) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(NGLLX,1,1,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(NGLLX,1,1,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(NGLLX,1,1,ispec))
+        endif
+        if(.not. mask_ibool(iglob6)) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglob6) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(NGLLX,1,NGLLZ,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(NGLLX,1,NGLLZ,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(NGLLX,1,NGLLZ,ispec))
+        endif
+        if(.not. mask_ibool(iglob5)) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglob5) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(1,1,NGLLZ,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(1,1,NGLLZ,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(1,1,NGLLZ,ispec))
+        endif
+        mask_ibool(iglob1) = .true.
+        mask_ibool(iglob2) = .true.
+        mask_ibool(iglob6) = .true.
+        mask_ibool(iglob5) = .true.
+      endif
+
+      ! face eta = eta_max
+      if(iMPIcut_eta(2,ispec)) then
+        if(.not. mask_ibool(iglob4)) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglob4) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(1,NGLLY,1,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(1,NGLLY,1,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(1,NGLLY,1,ispec))
+        endif
+        if(.not. mask_ibool(iglob3)) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglob3) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(NGLLX,NGLLY,1,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(NGLLX,NGLLY,1,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(NGLLX,NGLLY,1,ispec))
+        endif
+        if(.not. mask_ibool(iglob7)) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglob7) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(NGLLX,NGLLY,NGLLZ,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(NGLLX,NGLLY,NGLLZ,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(NGLLX,NGLLY,NGLLZ,ispec))
+        endif
+        if(.not. mask_ibool(iglob8)) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglob8) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(1,NGLLY,NGLLZ,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(1,NGLLY,NGLLZ,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(1,NGLLY,NGLLZ,ispec))
+        endif
+        mask_ibool(iglob4) = .true.
+        mask_ibool(iglob3) = .true.
+        mask_ibool(iglob7) = .true.
+        mask_ibool(iglob8) = .true.
+      endif
+
+    endif
+  enddo
+
+  ! check that number of global points output is okay
+  if(numpoin /= npoin) &
+    call exit_MPI(myrank, &
+        'incorrect number of global points in AVS or DX file creation')
+
+  ! output global AVS or DX elements
+  ! third pass: fill the per-face element arrays
+
+  ispecface = 0
+  do ispec=1,nspec
+!    print *, ispecface, nspecface
+    ! only if on face
+    if(iMPIcut_xi(1,ispec) .or. iMPIcut_xi(2,ispec) .or. &
+                iMPIcut_eta(1,ispec) .or. iMPIcut_eta(2,ispec)) then
+      iglob1=ibool(1,1,1,ispec)
+      iglob2=ibool(NGLLX,1,1,ispec)
+      iglob3=ibool(NGLLX,NGLLY,1,ispec)
+      iglob4=ibool(1,NGLLY,1,ispec)
+      iglob5=ibool(1,1,NGLLZ,ispec)
+      iglob6=ibool(NGLLX,1,NGLLZ,ispec)
+      iglob7=ibool(NGLLX,NGLLY,NGLLZ,ispec)
+      iglob8=ibool(1,NGLLY,NGLLZ,ispec)
+! NOTE(review): leftover debug sanity checks on the ibool indices; kept as-is
+if (iglob1 > npointot) print *, myrank, "problem with iglob1", iglob1, npointot
+if (iglob2 > npointot) print *, myrank, "problem with iglob2", iglob2, npointot
+if (iglob3 > npointot) print *, myrank, "problem with iglob3", iglob3, npointot
+if (iglob4 > npointot) print *, myrank, "problem with iglob4", iglob4, npointot
+if (iglob5 > npointot) print *, myrank, "problem with iglob5", iglob5, npointot
+if (iglob6 > npointot) print *, myrank, "problem with iglob6", iglob6, npointot
+if (iglob7 > npointot) print *, myrank, "problem with iglob7", iglob7, npointot
+if (iglob8 > npointot) print *, myrank, "problem with iglob8", iglob8, npointot
+
+if (iglob1 < 0) print *, myrank, "problem with iglob1", iglob1, npointot
+if (iglob2 < 0) print *, myrank, "problem with iglob2", iglob2, npointot
+if (iglob3 < 0) print *, myrank, "problem with iglob3", iglob3, npointot
+if (iglob4 < 0) print *, myrank, "problem with iglob4", iglob4, npointot
+if (iglob5 < 0) print *, myrank, "problem with iglob5", iglob5, npointot
+if (iglob6 < 0) print *, myrank, "problem with iglob6", iglob6, npointot
+if (iglob7 < 0) print *, myrank, "problem with iglob7", iglob7, npointot
+if (iglob8 < 0) print *, myrank, "problem with iglob8", iglob8, npointot
+
+      ! include lateral variations if needed
+      if(ISOTROPIC_3D_MANTLE) then
+        ! pick a point within the element and get its radius
+        r = dsqrt(xstore(2,2,2,ispec)**2 &
+            + ystore(2,2,2,ispec)**2 &
+            + zstore(2,2,2,ispec)**2)
+
+        ! only for elements in the mantle (between CMB and surface)
+        if(r > RCMB/R_EARTH .and. r < R_UNIT_SPHERE) then
+          ! average over the element
+          ! average dvp/dvs over the interior GLL points of the element
+          dvp = 0.0
+          dvs = 0.0
+          np =0
+          do k=2,NGLLZ-1
+            do j=2,NGLLY-1
+              do i=2,NGLLX-1
+                np=np+1
+                x=xstore(i,j,k,ispec)
+                y=ystore(i,j,k,ispec)
+                z=zstore(i,j,k,ispec)
+                r=dsqrt(x*x+y*y+z*z)
+                ! take out ellipticity
+                if(ELLIPTICITY) then
+                  call xyz_2_rthetaphi_dble(x,y,z,r,theta,phi_dummy)
+                  cost=dcos(theta)
+                  p20=0.5d0*(3.0d0*cost*cost-1.0d0)
+                  call spline_evaluation(rspl,espl,espl2,nspl,r,ell)
+                  factor=ONE-(TWO/3.0d0)*ell*p20
+                  r=r/factor
+                endif
+
+                ! gets reference model values:rho,vpv,vph,vsv,vsh and eta_aniso
+                call meshfem3D_models_get1D_val(myrank, iregion_code, &
+                    idoubling(ispec), r, rho, vpv, vph, vsv, vsh, eta_aniso, &
+                    Qkappa, Qmu, RICB, RCMB, RTOPDDOUBLEPRIME, R80, R120, &
+                    R220, R400, R600, R670, R771, RMOHO, RMIDDLE_CRUST, ROCEAN)
+
+                 ! calculates isotropic values
+                vp = sqrt(((8.d0+4.d0*eta_aniso)*vph*vph + 3.d0*vpv*vpv &
+                        + (8.d0 - 8.d0*eta_aniso)*vsv*vsv)/15.d0)
+                vs = sqrt(((1.d0-2.d0*eta_aniso)*vph*vph + vpv*vpv &
+                        + 5.d0*vsh*vsh + (6.d0+4.d0*eta_aniso)*vsv*vsv)/15.d0)
+
+                ! guard against division by (near-)zero reference values
+                if( abs(rhostore(i,j,k,ispec))< 1.e-20 ) then
+                  print*,'attention: rhostore close to zero', &
+                      rhostore(i,j,k,ispec),r,i,j,k,ispec
+                  dvp = 0.0
+                  dvs = 0.0
+                else if( abs(sngl(vp))< 1.e-20 ) then
+                  print*,' attention: vp close to zero',sngl(vp),r,i,j,k,ispec
+                  dvp = 0.0
+                else if( abs(sngl(vs))< 1.e-20 ) then
+                  print*,' attention: vs close to zero',sngl(vs),r,i,j,k,ispec
+                  dvs = 0.0
+                else
+                  ! accumulate the relative perturbation of the mesh model
+                  ! with respect to the 1D reference values
+                  dvp = dvp + (sqrt((kappavstore(i,j,k,ispec) &
+                      +4.*muvstore(i,j,k,ispec)/3.)/rhostore(i,j,k,ispec)) &
+                      - sngl(vp))/sngl(vp)
+                  dvs = dvs &
+                      + (sqrt(muvstore(i,j,k,ispec)/rhostore(i,j,k,ispec)) &
+                      - sngl(vs))/sngl(vs)
+                endif
+
+              enddo
+            enddo
+          enddo
+          dvp = dvp / np
+          dvs = dvs / np
+        else
+          dvp = 0.0
+          dvs = 0.0
+        endif
+      endif
+
+      ! face xi = xi_min
+      if(iMPIcut_xi(1,ispec)) then
+        ispecface = ispecface + 1
+        avs_dx_adios%idoubling(ispecface) = idoubling(ispec)
+        avs_dx_adios%iglob1(ispecface) = num_ibool_AVS_DX(iglob1)
+        avs_dx_adios%iglob2(ispecface) = num_ibool_AVS_DX(iglob4)
+        avs_dx_adios%iglob3(ispecface) = num_ibool_AVS_DX(iglob8)
+        avs_dx_adios%iglob4(ispecface) = num_ibool_AVS_DX(iglob5)
+        if(ISOTROPIC_3D_MANTLE)  then
+          avs_dx_adios%dvp(ispecface) = dvp
+          avs_dx_adios%dvs(ispecface) = dvs
+        endif
+      endif
+
+      ! face xi = xi_max
+      if(iMPIcut_xi(2,ispec)) then
+        ispecface = ispecface + 1
+        avs_dx_adios%idoubling(ispecface) = idoubling(ispec)
+        avs_dx_adios%iglob1(ispecface)= num_ibool_AVS_DX(iglob2)
+        avs_dx_adios%iglob2(ispecface) = num_ibool_AVS_DX(iglob3)
+        avs_dx_adios%iglob3(ispecface) = num_ibool_AVS_DX(iglob7)
+        avs_dx_adios%iglob4(ispecface) = num_ibool_AVS_DX(iglob6)
+        if(ISOTROPIC_3D_MANTLE) then
+          avs_dx_adios%dvp(ispecface) = dvp
+          avs_dx_adios%dvs(ispecface) = dvs
+        endif
+      endif
+
+      ! face eta = eta_min
+      if(iMPIcut_eta(1,ispec)) then
+        ispecface = ispecface + 1
+        avs_dx_adios%idoubling(ispecface) = idoubling(ispec)
+        avs_dx_adios%iglob1(ispecface) = num_ibool_AVS_DX(iglob1)
+        avs_dx_adios%iglob2(ispecface) = num_ibool_AVS_DX(iglob2)
+        avs_dx_adios%iglob3(ispecface) = num_ibool_AVS_DX(iglob6)
+        avs_dx_adios%iglob4(ispecface) = num_ibool_AVS_DX(iglob5)
+        if(ISOTROPIC_3D_MANTLE) then
+          avs_dx_adios%dvp(ispecface) = dvp
+          avs_dx_adios%dvs(ispecface) = dvs
+        endif
+      endif
+
+      ! face eta = eta_max
+      if(iMPIcut_eta(2,ispec)) then
+        ispecface = ispecface + 1
+        avs_dx_adios%idoubling(ispecface) = idoubling(ispec)
+        avs_dx_adios%iglob1(ispecface) = num_ibool_AVS_DX(iglob4)
+        avs_dx_adios%iglob2(ispecface) = num_ibool_AVS_DX(iglob3)
+        avs_dx_adios%iglob3(ispecface) = num_ibool_AVS_DX(iglob7)
+        avs_dx_adios%iglob4(ispecface) = num_ibool_AVS_DX(iglob8)
+        if(ISOTROPIC_3D_MANTLE) then
+          avs_dx_adios%dvp(ispecface) = dvp
+          avs_dx_adios%dvs(ispecface) = dvs
+        endif
+      endif
+
+    endif
+  enddo
+
+  ! check that number of surface elements output is okay
+  if(ispecface /= nspecface) &
+    call exit_MPI(myrank,&
+        'incorrect number of surface elements in AVS or DX file creation')
+
+end subroutine prepare_AVS_DX_global_faces_data_adios
+
+!===============================================================================
+!===============================================================================
+!> Write the prepared AVS/DX face arrays of this rank into the open ADIOS
+!! file: the point coordinates, the per-face doubling flag and corner point
+!! numbers, and (if ISOTROPIC_3D_MANTLE) the dvp/dvs perturbations.
+subroutine write_AVS_DX_global_faces_data_adios(adios_handle, myrank, &
+    sizeprocs, avs_dx_adios, ISOTROPIC_3D_MANTLE)
+  use mpi
+  use adios_write_mod
+  implicit none
+  !--- Arguments
+  integer(kind=8), intent(in) :: adios_handle
+  integer, intent(in) :: myrank, sizeprocs
+  type(avs_dx_global_faces_t), intent(inout) :: avs_dx_adios ! out for adios_write
+  logical ISOTROPIC_3D_MANTLE
+  !--- Local variables
+  integer :: num_points, num_faces
+  integer :: adios_err
+
+  num_points = avs_dx_adios%npoin
+  num_faces  = avs_dx_adios%nspecface
+
+  ! --- point coordinates ------------------------------------------------
+  call adios_set_path(adios_handle, "points_faces/x_value", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, num_points, &
+      sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%x_adios, adios_err)
+
+  call adios_set_path(adios_handle, "points_faces/y_value", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, num_points, &
+      sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%y_adios, adios_err)
+
+  call adios_set_path(adios_handle, "points_faces/z_value", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, num_points, &
+      sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%z_adios, adios_err)
+
+  ! --- per-face element data --------------------------------------------
+  call adios_set_path(adios_handle, "elements_faces/idoubling", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, num_faces, &
+      sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%idoubling, adios_err)
+
+  call adios_set_path(adios_handle, &
+      "elements_faces/num_ibool_AVS_DX_iglob1", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, num_faces, &
+      sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%iglob1, adios_err)
+
+  call adios_set_path(adios_handle, &
+      "elements_faces/num_ibool_AVS_DX_iglob2", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, num_faces, &
+      sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%iglob2, adios_err)
+
+  call adios_set_path(adios_handle, &
+      "elements_faces/num_ibool_AVS_DX_iglob3", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, num_faces, &
+      sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%iglob3, adios_err)
+
+  call adios_set_path(adios_handle, &
+      "elements_faces/num_ibool_AVS_DX_iglob4", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, num_faces, &
+      sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%iglob4, adios_err)
+
+  ! --- optional velocity perturbations ----------------------------------
+  if(ISOTROPIC_3D_MANTLE) then
+    call adios_set_path(adios_handle, "elements_faces/dvp", adios_err)
+    call write_1D_global_array_adios_dims(adios_handle, myrank, num_faces, &
+        sizeprocs)
+    call adios_write(adios_handle, "array", avs_dx_adios%dvp, adios_err)
+    call adios_set_path(adios_handle, "elements_faces/dvs", adios_err)
+    call write_1D_global_array_adios_dims(adios_handle, myrank, num_faces, &
+        sizeprocs)
+    call adios_write(adios_handle, "array", avs_dx_adios%dvs, adios_err)
+  endif
+
+end subroutine write_AVS_DX_global_faces_data_adios
+
+!===============================================================================
+!===============================================================================
+!> Release all buffers of \p avs_dx_adios allocated by
+!! define_AVS_DX_global_faces_data_adios() and reset the counters.
+!! dvp/dvs are only deallocated when ISOTROPIC_3D_MANTLE is set, matching
+!! the conditional allocation.
+subroutine free_AVS_DX_global_faces_data_adios(myrank, avs_dx_adios, &
+    ISOTROPIC_3D_MANTLE)
+  implicit none
+  !--- Arguments
+  integer, intent(in) :: myrank
+  type(avs_dx_global_faces_t), intent(inout) :: avs_dx_adios
+  logical ISOTROPIC_3D_MANTLE
+  !--- Variables
+  integer :: ierr
+
+  deallocate(avs_dx_adios%x_adios, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error deallocating x_adios.")
+  deallocate(avs_dx_adios%y_adios, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error deallocating y_adios.")
+  deallocate(avs_dx_adios%z_adios, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error deallocating z_adios.")
+
+  ! bugfix: the message previously referred to num_ibool_AVS_DX_iglob1
+  deallocate(avs_dx_adios%idoubling, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, &
+      "Error deallocating idoubling.")
+  deallocate(avs_dx_adios%iglob1, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, &
+      "Error deallocating num_ibool_AVS_DX_iglob1.")
+  deallocate(avs_dx_adios%iglob2, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, &
+      "Error deallocating num_ibool_AVS_DX_iglob2.")
+  deallocate(avs_dx_adios%iglob3, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, &
+      "Error deallocating num_ibool_AVS_DX_iglob3.")
+  deallocate(avs_dx_adios%iglob4, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, &
+      "Error deallocating num_ibool_AVS_DX_iglob4.")
+
+  if(ISOTROPIC_3D_MANTLE) then
+    deallocate(avs_dx_adios%dvp, stat=ierr)
+    if (ierr /= 0) call exit_MPI(myrank, &
+        "Error deallocating dvp.")
+    deallocate(avs_dx_adios%dvs, stat=ierr)
+    if (ierr /= 0) call exit_MPI(myrank, &
+        "Error deallocating dvs.")
+  endif
+
+  ! reset counters so the structure can be safely reused
+  avs_dx_adios%npoin = 0
+  avs_dx_adios%nspecface = 0
+end subroutine free_AVS_DX_global_faces_data_adios
+
+end module

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/write_AVS_DX_surface_data_adios.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/write_AVS_DX_surface_data_adios.f90	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/meshfem3D/write_AVS_DX_surface_data_adios.f90	2013-07-01 01:33:15 UTC (rev 22469)
@@ -0,0 +1,577 @@
+!=====================================================================
+!
+!          S p e c f e m 3 D  G l o b e  V e r s i o n  5 . 1
+!          --------------------------------------------------
+!
+!          Main authors: Dimitri Komatitsch and Jeroen Tromp
+!                        Princeton University, USA
+!             and University of Pau / CNRS / INRIA, France
+! (c) Princeton University / California Institute of Technology and University of Pau / CNRS / INRIA
+!                            April 2011
+!
+! This program is free software; you can redistribute it and/or modify
+! it under the terms of the GNU General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or
+! (at your option) any later version.
+!
+! This program is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+! GNU General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License along
+! with this program; if not, write to the Free Software Foundation, Inc.,
+! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+!
+!=====================================================================
+
+! create AVS or DX 2D data for the surface of the model
+! to be recombined in postprocessing
+
+module AVS_DX_surface_mod
+
+  implicit none
+
+  ! Staging container for AVS/DX surface data written through ADIOS:
+  ! filled by prepare_AVS_DX_surfaces_data_adios() and written out by
+  ! write_AVS_DX_surfaces_data_adios().
+  type avs_dx_surface_t
+    integer(kind=4) :: npoin, nspecface  ! number of global surface points / surface faces
+    ! coordinates of the global points at the surface (single precision for AVS/DX)
+    real(kind=4), dimension(:), allocatable :: x_adios, y_adios, z_adios
+    ! per-face doubling flag and the AVS/DX numbers of the four top-face corner points
+    integer(kind=4), dimension(:), allocatable :: idoubling, iglob1, iglob2, &
+        iglob3, iglob4
+    ! per-face relative vp/vs perturbations; only allocated when ISOTROPIC_3D_MANTLE
+    real, dimension(:), allocatable :: dvp, dvs
+  endtype
+
+contains
+
+!> Count the global points and faces at the model surface, allocate the
+!> staging arrays of avs_dx_adios accordingly, and register the matching
+!> ADIOS variables in adios_group (incrementing group_size_inc).
+!>
+!> Most mesh arguments (xstore/ystore/zstore, material stores, ellipticity
+!> spline, reference radii) are unused here but kept so the interface matches
+!> prepare_AVS_DX_surfaces_data_adios(), which does use them.
+subroutine define_AVS_DX_surfaces_data_adios(adios_group, &
+    myrank,prname,nspec,iboun, &
+    ibool,idoubling,xstore,ystore,zstore,num_ibool_AVS_DX,mask_ibool,npointot,&
+    rhostore,kappavstore,muvstore,nspl,rspl,espl,espl2, &
+    ELLIPTICITY,ISOTROPIC_3D_MANTLE, &
+    RICB,RCMB,RTOPDDOUBLEPRIME,R600,R670,R220,R771,R400,R120,R80,RMOHO, &
+    RMIDDLE_CRUST,ROCEAN,iregion_code, &
+    group_size_inc, avs_dx_adios)
+  use mpi
+  use adios_write_mod
+
+  implicit none
+
+  include "constants.h"
+
+  integer(kind=8), intent(in) :: adios_group
+  integer(kind=8), intent(inout) :: group_size_inc
+
+  integer nspec,myrank
+  integer ibool(NGLLX,NGLLY,NGLLZ,nspec)
+
+  integer idoubling(nspec)
+
+  logical iboun(6,nspec)
+  logical ELLIPTICITY,ISOTROPIC_3D_MANTLE
+
+  double precision RICB,RCMB,RTOPDDOUBLEPRIME,R600,R670,R220,R771, &
+       R400,R120,R80,RMOHO,RMIDDLE_CRUST,ROCEAN
+
+  double precision xstore(NGLLX,NGLLY,NGLLZ,nspec)
+  double precision ystore(NGLLX,NGLLY,NGLLZ,nspec)
+  double precision zstore(NGLLX,NGLLY,NGLLZ,nspec)
+
+  real(kind=CUSTOM_REAL) kappavstore(NGLLX,NGLLY,NGLLZ,nspec)
+  real(kind=CUSTOM_REAL) muvstore(NGLLX,NGLLY,NGLLZ,nspec)
+  real(kind=CUSTOM_REAL) rhostore(NGLLX,NGLLY,NGLLZ,nspec)
+
+! logical mask used to output global points only once
+  integer npointot
+  logical mask_ibool(npointot)
+
+! numbering of global AVS or DX points
+  integer num_ibool_AVS_DX(npointot)
+
+! for ellipticity
+  integer nspl
+  double precision rspl(NR),espl(NR),espl2(NR)
+
+! processor identification
+  character(len=150) prname
+
+  integer iregion_code
+
+  type(avs_dx_surface_t), intent(inout) :: avs_dx_adios
+
+  ! local variables
+  integer ispec
+  integer, dimension(8) :: iglobval
+  integer npoin,nspecface
+  integer :: ierr
+
+  ! erase the logical mask used to mark points already found
+  mask_ibool(:) = .false.
+
+  nspecface = 0
+
+  ! mark global AVS or DX points
+  do ispec=1,nspec
+    ! only if at the surface (top plane)
+    if(iboun(6,ispec)) then
+
+      ! corners of the top face of the element
+      iglobval(5)=ibool(1,1,NGLLZ,ispec)
+      iglobval(6)=ibool(NGLLX,1,NGLLZ,ispec)
+      iglobval(7)=ibool(NGLLX,NGLLY,NGLLZ,ispec)
+      iglobval(8)=ibool(1,NGLLY,NGLLZ,ispec)
+
+      ! element is at the surface
+      nspecface = nspecface + 1
+      mask_ibool(iglobval(5)) = .true.
+      mask_ibool(iglobval(6)) = .true.
+      mask_ibool(iglobval(7)) = .true.
+      mask_ibool(iglobval(8)) = .true.
+    endif
+  enddo
+
+! count global number of AVS or DX points
+  npoin = count(mask_ibool(:))
+
+  avs_dx_adios%npoin = npoin
+  avs_dx_adios%nspecface = nspecface
+
+  ! Allocate temporary arrays for AVS/DX points.
+  allocate(avs_dx_adios%x_adios(npoin), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating x_adios.")
+  allocate(avs_dx_adios%y_adios(npoin), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating y_adios.")
+  allocate(avs_dx_adios%z_adios(npoin), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating z_adios.")
+
+  ! Allocate temporary arrays for AVS/DX elements.
+  allocate(avs_dx_adios%idoubling(nspecface), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating idoubling.")
+  allocate(avs_dx_adios%iglob1(nspecface), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating iglob1.")
+  allocate(avs_dx_adios%iglob2(nspecface), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating iglob2.")
+  allocate(avs_dx_adios%iglob3(nspecface), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating iglob3.")
+  allocate(avs_dx_adios%iglob4(nspecface), stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error allocating iglob4.")
+
+  !--- Variables for '...AVS_DXpointschunk.txt'
+  call define_adios_global_real_1d_array(adios_group, &
+      "points_surfaces/x_value", npoin, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, &
+      "points_surfaces/y_value", npoin, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, &
+      "points_surfaces/z_value", npoin, group_size_inc)
+  !--- Variables for AVS_DXelementschunks.txt
+  call define_adios_global_real_1d_array(adios_group, &
+      "elements_surfaces/idoubling", nspecface, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, &
+      "elements_surfaces/num_ibool_AVS_DX_iglob1", nspecface, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, &
+      "elements_surfaces/num_ibool_AVS_DX_iglob2", nspecface, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, &
+      "elements_surfaces/num_ibool_AVS_DX_iglob3", nspecface, group_size_inc)
+  call define_adios_global_real_1d_array(adios_group, &
+      "elements_surfaces/num_ibool_AVS_DX_iglob4", nspecface, group_size_inc)
+
+  !--- Variables for AVS_DXelementschunks_dvp_dvs.txt
+  if(ISOTROPIC_3D_MANTLE) then
+    allocate(avs_dx_adios%dvp(nspecface), stat=ierr)
+    if (ierr /= 0) call exit_MPI(myrank, "Error allocating dvp.")
+    allocate(avs_dx_adios%dvs(nspecface), stat=ierr)
+    if (ierr /= 0) call exit_MPI(myrank, "Error allocating dvs.")
+    ! BUG FIX: the second variable was registered as "dvp" twice, and the
+    ! uninitialized real scalars dvp/dvs were passed as the array size
+    ! instead of the element count nspecface used by every other call.
+    call define_adios_global_real_1d_array(adios_group, &
+        "elements_surfaces/dvp", nspecface, group_size_inc)
+    call define_adios_global_real_1d_array(adios_group, &
+        "elements_surfaces/dvs", nspecface, group_size_inc)
+  endif
+
+end subroutine define_AVS_DX_surfaces_data_adios
+
+!===============================================================================
+!> Fill the AVS/DX staging arrays of avs_dx_adios for the model surface:
+!> coordinates of the global points of the top faces, the face connectivity
+!> (AVS/DX point numbers of the four corners), and, when ISOTROPIC_3D_MANTLE
+!> is set, element-averaged relative vp/vs perturbations with respect to the
+!> 1D reference model. The arrays must have been allocated beforehand by
+!> define_AVS_DX_surfaces_data_adios().
+subroutine prepare_AVS_DX_surfaces_data_adios(myrank,prname,nspec,iboun, &
+    ibool,idoubling,xstore,ystore,zstore,num_ibool_AVS_DX,mask_ibool,npointot,&
+    rhostore,kappavstore,muvstore,nspl,rspl,espl,espl2, &
+    ELLIPTICITY,ISOTROPIC_3D_MANTLE, &
+    RICB,RCMB,RTOPDDOUBLEPRIME,R600,R670,R220,R771,R400,R120,R80,RMOHO, &
+    RMIDDLE_CRUST,ROCEAN,iregion_code, &
+    avs_dx_adios)
+
+  implicit none
+
+  include "constants.h"
+
+  integer nspec,myrank
+  integer ibool(NGLLX,NGLLY,NGLLZ,nspec)
+
+  integer idoubling(nspec)
+
+  logical iboun(6,nspec)
+  logical ELLIPTICITY,ISOTROPIC_3D_MANTLE
+
+  double precision RICB,RCMB,RTOPDDOUBLEPRIME,R600,R670,R220,R771, &
+       R400,R120,R80,RMOHO,RMIDDLE_CRUST,ROCEAN
+
+  double precision r,rho,vp,vs,Qkappa,Qmu
+  double precision vpv,vph,vsv,vsh,eta_aniso
+  double precision x,y,z,theta,phi_dummy,cost,p20,ell,factor
+  real(kind=CUSTOM_REAL) dvp,dvs
+
+  double precision xstore(NGLLX,NGLLY,NGLLZ,nspec)
+  double precision ystore(NGLLX,NGLLY,NGLLZ,nspec)
+  double precision zstore(NGLLX,NGLLY,NGLLZ,nspec)
+
+  real(kind=CUSTOM_REAL) kappavstore(NGLLX,NGLLY,NGLLZ,nspec)
+  real(kind=CUSTOM_REAL) muvstore(NGLLX,NGLLY,NGLLZ,nspec)
+  real(kind=CUSTOM_REAL) rhostore(NGLLX,NGLLY,NGLLZ,nspec)
+
+! logical mask used to output global points only once
+  integer npointot
+  logical mask_ibool(npointot)
+
+! numbering of global AVS or DX points
+  integer num_ibool_AVS_DX(npointot)
+
+  integer ispec
+  integer i,j,k,np
+  integer, dimension(8) :: iglobval
+  integer npoin,numpoin,nspecface,ispecface
+
+! for ellipticity
+  integer nspl
+  double precision rspl(NR),espl(NR),espl2(NR)
+
+! processor identification
+  character(len=150) prname
+
+  integer iregion_code
+
+  type(avs_dx_surface_t), intent(inout) :: avs_dx_adios
+
+  ! erase the logical mask used to mark points already found
+  mask_ibool(:) = .false.
+
+  nspecface = 0
+
+  ! mark global AVS or DX points
+  do ispec=1,nspec
+    ! only if at the surface (top plane)
+    if(iboun(6,ispec)) then
+
+      iglobval(5)=ibool(1,1,NGLLZ,ispec)
+      iglobval(6)=ibool(NGLLX,1,NGLLZ,ispec)
+      iglobval(7)=ibool(NGLLX,NGLLY,NGLLZ,ispec)
+      iglobval(8)=ibool(1,NGLLY,NGLLZ,ispec)
+
+  ! element is at the surface
+      nspecface = nspecface + 1
+      mask_ibool(iglobval(5)) = .true.
+      mask_ibool(iglobval(6)) = .true.
+      mask_ibool(iglobval(7)) = .true.
+      mask_ibool(iglobval(8)) = .true.
+
+    endif
+  enddo
+
+  ! count global number of AVS or DX points
+  npoin = count(mask_ibool(:))
+
+  ! erase the logical mask used to mark points already found
+  mask_ibool(:) = .false.
+
+  ! output global AVS or DX points
+  numpoin = 0
+  do ispec=1,nspec
+  ! only if at the surface
+    if(iboun(6,ispec)) then
+
+      iglobval(5)=ibool(1,1,NGLLZ,ispec)
+      iglobval(6)=ibool(NGLLX,1,NGLLZ,ispec)
+      iglobval(7)=ibool(NGLLX,NGLLY,NGLLZ,ispec)
+      iglobval(8)=ibool(1,NGLLY,NGLLZ,ispec)
+
+      ! top face
+      if(iboun(6,ispec)) then
+
+        if(.not. mask_ibool(iglobval(5))) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglobval(5)) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(1,1,NGLLZ,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(1,1,NGLLZ,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(1,1,NGLLZ,ispec))
+        endif
+
+        if(.not. mask_ibool(iglobval(6))) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglobval(6)) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(NGLLX,1,NGLLZ,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(NGLLX,1,NGLLZ,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(NGLLX,1,NGLLZ,ispec))
+        endif
+
+        if(.not. mask_ibool(iglobval(7))) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglobval(7)) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(NGLLX,NGLLY,NGLLZ,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(NGLLX,NGLLY,NGLLZ,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(NGLLX,NGLLY,NGLLZ,ispec))
+        endif
+
+        if(.not. mask_ibool(iglobval(8))) then
+          numpoin = numpoin + 1
+          num_ibool_AVS_DX(iglobval(8)) = numpoin
+          avs_dx_adios%x_adios(numpoin) = sngl(xstore(1,NGLLY,NGLLZ,ispec))
+          avs_dx_adios%y_adios(numpoin) = sngl(ystore(1,NGLLY,NGLLZ,ispec))
+          avs_dx_adios%z_adios(numpoin) = sngl(zstore(1,NGLLY,NGLLZ,ispec))
+        endif
+
+        mask_ibool(iglobval(5)) = .true.
+        mask_ibool(iglobval(6)) = .true.
+        mask_ibool(iglobval(7)) = .true.
+        mask_ibool(iglobval(8)) = .true.
+      endif
+
+    endif
+  enddo
+
+  ! check that number of global points output is okay
+  if(numpoin /= npoin) &
+      call exit_MPI(myrank, &
+          'incorrect number of global points in AVS or DX file creation')
+
+  ! output global AVS or DX elements
+  ispecface = 0
+  do ispec=1,nspec
+    ! only if at the surface
+    if(iboun(6,ispec)) then
+
+      iglobval(5)=ibool(1,1,NGLLZ,ispec)
+      iglobval(6)=ibool(NGLLX,1,NGLLZ,ispec)
+      iglobval(7)=ibool(NGLLX,NGLLY,NGLLZ,ispec)
+      iglobval(8)=ibool(1,NGLLY,NGLLZ,ispec)
+
+      if(ISOTROPIC_3D_MANTLE) then
+        !   pick a point within the element and get its radius
+        r=dsqrt(xstore(2,2,2,ispec)**2 &
+            + ystore(2,2,2,ispec)**2+zstore(2,2,2,ispec)**2)
+
+        if(r > RCMB/R_EARTH .and. r < R_UNIT_SPHERE) then
+          !     average over the element (interior GLL points only)
+          dvp = 0.0
+          dvs = 0.0
+          np =0
+          do k=2,NGLLZ-1
+            do j=2,NGLLY-1
+              do i=2,NGLLX-1
+                np=np+1
+                x=xstore(i,j,k,ispec)
+                y=ystore(i,j,k,ispec)
+                z=zstore(i,j,k,ispec)
+                r=dsqrt(x*x+y*y+z*z)
+                ! take out ellipticity
+                if(ELLIPTICITY) then
+                  call xyz_2_rthetaphi_dble(x,y,z,r,theta,phi_dummy)
+                  cost=dcos(theta)
+                  p20=0.5d0*(3.0d0*cost*cost-1.0d0)
+                  call spline_evaluation(rspl,espl,espl2,nspl,r,ell)
+                  factor=ONE-(TWO/3.0d0)*ell*p20
+                  r=r/factor
+                endif
+
+                ! gets reference model values: rho,vpv,vph,vsv,vsh and eta_aniso
+                call meshfem3D_models_get1D_val(myrank,iregion_code, &
+                            idoubling(ispec), &
+                            r,rho,vpv,vph,vsv,vsh,eta_aniso, &
+                            Qkappa,Qmu,RICB,RCMB, &
+                            RTOPDDOUBLEPRIME,R80,R120,R220,R400,R600,R670,R771, &
+                            RMOHO,RMIDDLE_CRUST,ROCEAN)
+
+                ! calculates isotropic values
+                vp = sqrt(((8.d0+4.d0*eta_aniso)*vph*vph + 3.d0*vpv*vpv &
+                    + (8.d0 - 8.d0*eta_aniso)*vsv*vsv)/15.d0)
+                vs = sqrt(((1.d0-2.d0*eta_aniso)*vph*vph + vpv*vpv &
+                    + 5.d0*vsh*vsh + (6.d0+4.d0*eta_aniso)*vsv*vsv)/15.d0)
+
+                ! guard against division by (near-)zero reference values
+                if( abs(rhostore(i,j,k,ispec))< 1.e-20 ) then
+                  print*,' attention: rhostore close to zero', &
+                      rhostore(i,j,k,ispec),r,i,j,k,ispec
+                  dvp = 0.0
+                  dvs = 0.0
+                else if( abs(sngl(vp))< 1.e-20 ) then
+                  print*,' attention: vp close to zero',sngl(vp),r,i,j,k,ispec
+                  dvp = 0.0
+                else if( abs(sngl(vs))< 1.e-20 ) then
+                  print*,' attention: vs close to zero',sngl(vs),r,i,j,k,ispec
+                  dvs = 0.0
+                else
+                  dvp = dvp + (sqrt((kappavstore(i,j,k,ispec) &
+                      + 4.*muvstore(i,j,k,ispec)/3.) &
+                      / rhostore(i,j,k,ispec)) - sngl(vp))/sngl(vp)
+                  dvs = dvs &
+                      + (sqrt(muvstore(i,j,k,ispec)/rhostore(i,j,k,ispec)) &
+                      - sngl(vs))/sngl(vs)
+                endif
+
+              enddo
+            enddo
+          enddo
+          dvp = dvp / np
+          dvs = dvs / np
+        else
+          dvp = 0.0
+          dvs = 0.0
+        endif
+      endif
+
+      ! top face
+      ispecface = ispecface + 1
+      avs_dx_adios%idoubling(ispecface) = idoubling(ispec)
+      avs_dx_adios%iglob1(ispecface) = num_ibool_AVS_DX(iglobval(5))
+      avs_dx_adios%iglob2(ispecface) = num_ibool_AVS_DX(iglobval(6))
+      avs_dx_adios%iglob3(ispecface) = num_ibool_AVS_DX(iglobval(7))
+      avs_dx_adios%iglob4(ispecface) = num_ibool_AVS_DX(iglobval(8))
+      if(ISOTROPIC_3D_MANTLE) then
+        avs_dx_adios%dvp(ispecface) = dvp
+        avs_dx_adios%dvs(ispecface) = dvs
+      endif
+
+    endif
+  enddo
+
+  ! check that number of surface elements output is okay
+  ! BUG FIX: the character literal was continued without a leading '&' on the
+  ! continuation line, which is invalid free-form Fortran and would embed the
+  ! indentation spaces in the message; now matches the npoin check above.
+  if(ispecface /= nspecface) &
+      call exit_MPI(myrank, &
+          'incorrect number of surface elements in AVS or DX file creation')
+
+end subroutine prepare_AVS_DX_surfaces_data_adios
+
+!===============================================================================
+!> Write the staged surface data of avs_dx_adios into the opened ADIOS file:
+!> point coordinates first, then face connectivity, then (optionally) the
+!> dvp/dvs perturbations. Each variable is written as a 1D global array whose
+!> local dimensions are registered by write_1D_global_array_adios_dims().
+subroutine write_AVS_DX_surfaces_data_adios(adios_handle, myrank, &
+    sizeprocs, avs_dx_adios, ISOTROPIC_3D_MANTLE)
+  use mpi
+  use adios_write_mod
+  implicit none
+
+  !--- Arguments
+  integer(kind=8), intent(in) :: adios_handle
+  integer, intent(in) :: myrank, sizeprocs
+  type(avs_dx_surface_t), intent(inout) :: avs_dx_adios ! out for adios_write
+  logical ISOTROPIC_3D_MANTLE
+
+  !--- Local variables
+  integer :: num_points, num_faces
+  integer :: adios_err
+
+  num_points = avs_dx_adios%npoin
+  num_faces  = avs_dx_adios%nspecface
+
+  !--- coordinates of the surface points
+  call adios_set_path(adios_handle, "points_surfaces/x_value", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, num_points, sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%x_adios, adios_err)
+
+  call adios_set_path(adios_handle, "points_surfaces/y_value", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, num_points, sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%y_adios, adios_err)
+
+  call adios_set_path(adios_handle, "points_surfaces/z_value", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, num_points, sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%z_adios, adios_err)
+
+  !--- per-face doubling flag
+  call adios_set_path(adios_handle, "elements_surfaces/idoubling", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, num_faces, sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%idoubling, adios_err)
+
+  !--- face connectivity: AVS/DX point numbers of the four corners
+  call adios_set_path(adios_handle, &
+      "elements_surfaces/num_ibool_AVS_DX_iglob1", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, num_faces, sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%iglob1, adios_err)
+
+  call adios_set_path(adios_handle, &
+      "elements_surfaces/num_ibool_AVS_DX_iglob2", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, num_faces, sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%iglob2, adios_err)
+
+  call adios_set_path(adios_handle, &
+      "elements_surfaces/num_ibool_AVS_DX_iglob3", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, num_faces, sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%iglob3, adios_err)
+
+  call adios_set_path(adios_handle, &
+      "elements_surfaces/num_ibool_AVS_DX_iglob4", adios_err)
+  call write_1D_global_array_adios_dims(adios_handle, myrank, num_faces, sizeprocs)
+  call adios_write(adios_handle, "array", avs_dx_adios%iglob4, adios_err)
+
+  !--- velocity perturbations, only staged when ISOTROPIC_3D_MANTLE
+  if(ISOTROPIC_3D_MANTLE) then
+    call adios_set_path(adios_handle, "elements_surfaces/dvp", adios_err)
+    call write_1D_global_array_adios_dims(adios_handle, myrank, num_faces, sizeprocs)
+    call adios_write(adios_handle, "array", avs_dx_adios%dvp, adios_err)
+
+    call adios_set_path(adios_handle, "elements_surfaces/dvs", adios_err)
+    call write_1D_global_array_adios_dims(adios_handle, myrank, num_faces, sizeprocs)
+    call adios_write(adios_handle, "array", avs_dx_adios%dvs, adios_err)
+  endif
+end subroutine write_AVS_DX_surfaces_data_adios
+
+!===============================================================================
+!> Release all staging arrays of avs_dx_adios and reset its counters so the
+!> structure can be reused. dvp/dvs are only deallocated when
+!> ISOTROPIC_3D_MANTLE is set, mirroring their conditional allocation.
+subroutine free_AVS_DX_surfaces_data_adios(myrank, avs_dx_adios, &
+    ISOTROPIC_3D_MANTLE)
+  implicit none
+  !--- Arguments
+  integer, intent(in) :: myrank
+  type(avs_dx_surface_t), intent(inout) :: avs_dx_adios
+  logical ISOTROPIC_3D_MANTLE
+  !--- Variables
+  integer :: ierr
+
+  deallocate(avs_dx_adios%x_adios, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error deallocating x_adios.")
+  deallocate(avs_dx_adios%y_adios, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error deallocating y_adios.")
+  deallocate(avs_dx_adios%z_adios, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, "Error deallocating z_adios.")
+
+  ! BUG FIX: the idoubling failure message wrongly reported
+  ! "num_ibool_AVS_DX_iglob1" (copy-paste error).
+  deallocate(avs_dx_adios%idoubling, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, &
+      "Error deallocating idoubling.")
+  deallocate(avs_dx_adios%iglob1, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, &
+      "Error deallocating num_ibool_AVS_DX_iglob1.")
+  deallocate(avs_dx_adios%iglob2, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, &
+      "Error deallocating num_ibool_AVS_DX_iglob2.")
+  deallocate(avs_dx_adios%iglob3, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, &
+      "Error deallocating num_ibool_AVS_DX_iglob3.")
+  deallocate(avs_dx_adios%iglob4, stat=ierr)
+  if (ierr /= 0) call exit_MPI(myrank, &
+      "Error deallocating num_ibool_AVS_DX_iglob4.")
+
+  if(ISOTROPIC_3D_MANTLE) then
+    deallocate(avs_dx_adios%dvp, stat=ierr)
+    if (ierr /= 0) call exit_MPI(myrank, &
+        "Error deallocating dvp.")
+    deallocate(avs_dx_adios%dvs, stat=ierr)
+    if (ierr /= 0) call exit_MPI(myrank, &
+        "Error deallocating dvs.")
+  endif
+
+  ! reset counters so stale sizes cannot be reused after the free
+  avs_dx_adios%npoin = 0
+  avs_dx_adios%nspecface = 0
+end subroutine free_AVS_DX_surfaces_data_adios
+
+
+end module



More information about the CIG-COMMITS mailing list