[cig-commits] r20048 - in seismo/3D/SPECFEM3D_GLOBE/trunk: setup src/shared src/specfem3D

danielpeter at geodynamics.org
Mon May 7 13:37:29 PDT 2012


Author: danielpeter
Date: 2012-05-07 13:37:29 -0700 (Mon, 07 May 2012)
New Revision: 20048

Modified:
   seismo/3D/SPECFEM3D_GLOBE/trunk/setup/constants.h.in
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/shared/exit_mpi.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/read_arrays_buffers_solver.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/read_mesh_databases.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/specfem3D.F90
Log:
adds synchronizations when reading mesh files

Modified: seismo/3D/SPECFEM3D_GLOBE/trunk/setup/constants.h.in
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/setup/constants.h.in	2012-05-07 17:22:04 UTC (rev 20047)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/setup/constants.h.in	2012-05-07 20:37:29 UTC (rev 20048)
@@ -88,6 +88,9 @@
   integer, parameter :: IOVTK = 98
 
 
+! serial I/O access on IBM Blue Gene systems
+  logical, parameter :: IO_BLUEGENE_SERIAL = .false.
+
 ! R_EARTH is the radius of the bottom of the oceans (radius of Earth in m)
   double precision, parameter :: R_EARTH = 6371000.d0
 ! uncomment line below for PREM with oceans

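The IO_BLUEGENE_SERIAL flag is only declared here; nothing in this revision reads it yet. On Blue Gene-class machines the point of such a flag is to let ranks take turns touching the file system instead of all opening their mesh files at once. A minimal sketch of that staging pattern, using the sync_all() barrier added below; the helper read_my_database() is a hypothetical stand-in, not a routine from this commit:

  ! sketch only: serialize mesh-file reads across ranks when requested
  if( IO_BLUEGENE_SERIAL ) then
    do iproc = 0,NPROCTOT-1
      ! exactly one rank reads at a time; the barrier releases the next
      if( myrank == iproc ) call read_my_database()
      call sync_all()
    enddo
  else
    ! default: all ranks read their own files concurrently
    call read_my_database()
  endif
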
Modified: seismo/3D/SPECFEM3D_GLOBE/trunk/src/shared/exit_mpi.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/shared/exit_mpi.f90	2012-05-07 17:22:04 UTC (rev 20047)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/shared/exit_mpi.f90	2012-05-07 20:37:29 UTC (rev 20048)
@@ -105,3 +105,25 @@
 
   end subroutine exit_MPI_without_rank
 
+
+!
+!-------------------------------------------------------------------------------------------------
+!
+
+  subroutine sync_all()
+
+  implicit none
+
+  ! standard include of the MPI library
+  include 'mpif.h'
+
+  integer :: ier,rank
+
+  ! gets the caller's rank
+  call MPI_COMM_RANK(MPI_COMM_WORLD,rank,ier)
+
+  ! synchronizes MPI processes
+  call MPI_BARRIER(MPI_COMM_WORLD,ier)
+  if( ier /= 0 ) call exit_mpi(rank,'error synchronizing MPI processes')
+
+  end subroutine sync_all

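sync_all() is a collective wrapper around MPI_BARRIER on MPI_COMM_WORLD, so every rank must reach each call site or the run deadlocks; the calls added throughout this revision therefore sit on paths that all ranks execute. The test against the literal 0 is correct because the MPI standard requires MPI_SUCCESS to be zero, and under the default MPI_ERRORS_ARE_FATAL handler the branch is rarely taken anyway. A sketch of the same check written against the named constant, assuming nothing beyond standard MPI:

  call MPI_BARRIER(MPI_COMM_WORLD,ier)
  ! MPI_SUCCESS is guaranteed to be 0, but the named constant reads more clearly
  if( ier /= MPI_SUCCESS ) call exit_MPI(rank,'error synchronizing MPI processes')
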
Modified: seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/read_arrays_buffers_solver.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/read_arrays_buffers_solver.f90	2012-05-07 17:22:04 UTC (rev 20047)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/read_arrays_buffers_solver.f90	2012-05-07 20:37:29 UTC (rev 20048)
@@ -32,7 +32,8 @@
      iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
      iboolfaces,npoin2D_faces,iboolcorner, &
      NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX,NGLOB2DMAX_XY,NGLOB1D_RADIAL, &
-     NUMMSGS_FACES,NCORNERSCHUNKS,NPROCTOT,NPROC_XI,NPROC_ETA,LOCAL_PATH,NCHUNKS)
+     NUMMSGS_FACES,NCORNERSCHUNKS,NPROCTOT,NPROC_XI,NPROC_ETA, &
+     LOCAL_PATH,NCHUNKS)
 
   implicit none
 
@@ -41,7 +42,7 @@
 
   include "constants.h"
 
-  integer iregion_code,myrank,NCHUNKS,ier
+  integer iregion_code,myrank,NCHUNKS
 
   integer, dimension(NB_SQUARE_EDGES_ONEDIR) :: npoin2D_xi,npoin2D_eta
   integer NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX,NGLOB2DMAX_XY,NGLOB1D_RADIAL
@@ -61,29 +62,137 @@
 ! allocate array for messages for corners
   integer, dimension(NCORNERSCHUNKS) :: iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners
 
+  ! local parameters
+  integer ier
+  integer imsg
+
+
+! base pathname for output files
+  character(len=150) OUTPUT_FILES
+
+! $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
+
+
+! read 2-D addressing for summation between slices along xi with MPI
+  call read_arrays_buffers_1(iregion_code,myrank, &
+     iboolleft_xi,iboolright_xi,iboolleft_eta,iboolright_eta, &
+     npoin2D_xi,npoin2D_eta, &
+     NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX,LOCAL_PATH)
+
+
+! read chunk messages only if more than one chunk
+  if(NCHUNKS /= 1) then
+
+    ! read messages to assemble between chunks with MPI
+    if(myrank == 0) then
+
+      ! get the base pathname for output files
+      call get_value_string(OUTPUT_FILES, 'OUTPUT_FILES', 'OUTPUT_FILES')
+
+      ! file with the list of processors for each message for faces
+      open(unit=IIN,file=trim(OUTPUT_FILES)//'/list_messages_faces.txt',status='old',action='read')
+      do imsg = 1,NUMMSGS_FACES
+      read(IIN,*) imsg_type(imsg),iprocfrom_faces(imsg),iprocto_faces(imsg)
+      if      (iprocfrom_faces(imsg) < 0 &
+          .or. iprocto_faces(imsg) < 0 &
+          .or. iprocfrom_faces(imsg) > NPROCTOT-1 &
+          .or. iprocto_faces(imsg) > NPROCTOT-1) &
+        call exit_MPI(myrank,'incorrect chunk faces numbering')
+      if (imsg_type(imsg) < 1 .or. imsg_type(imsg) > 3) &
+        call exit_MPI(myrank,'incorrect message type labeling')
+      enddo
+      close(IIN)
+
+
+      ! file with the list of processors for each message for corners
+      open(unit=IIN,file=trim(OUTPUT_FILES)//'/list_messages_corners.txt',status='old',action='read')
+      do imsg = 1,NCORNERSCHUNKS
+      read(IIN,*) iproc_master_corners(imsg),iproc_worker1_corners(imsg), &
+                              iproc_worker2_corners(imsg)
+      if    (iproc_master_corners(imsg) < 0 &
+        .or. iproc_worker1_corners(imsg) < 0 &
+        .or. iproc_worker2_corners(imsg) < 0 &
+        .or. iproc_master_corners(imsg) > NPROCTOT-1 &
+        .or. iproc_worker1_corners(imsg) > NPROCTOT-1 &
+        .or. iproc_worker2_corners(imsg) > NPROCTOT-1) &
+          call exit_MPI(myrank,'incorrect chunk corner numbering')
+      enddo
+      close(IIN)
+
+    endif
+
+    ! broadcast the information read on the master to the nodes
+    call MPI_BCAST(imsg_type,NUMMSGS_FACES,MPI_INTEGER,0,MPI_COMM_WORLD,ier)
+    call MPI_BCAST(iprocfrom_faces,NUMMSGS_FACES,MPI_INTEGER,0,MPI_COMM_WORLD,ier)
+    call MPI_BCAST(iprocto_faces,NUMMSGS_FACES,MPI_INTEGER,0,MPI_COMM_WORLD,ier)
+
+    call MPI_BCAST(iproc_master_corners,NCORNERSCHUNKS,MPI_INTEGER,0,MPI_COMM_WORLD,ier)
+    call MPI_BCAST(iproc_worker1_corners,NCORNERSCHUNKS,MPI_INTEGER,0,MPI_COMM_WORLD,ier)
+    call MPI_BCAST(iproc_worker2_corners,NCORNERSCHUNKS,MPI_INTEGER,0,MPI_COMM_WORLD,ier)
+
+    ! synchronizes processes
+    call sync_all()
+
+    call read_arrays_buffers_2(iregion_code,myrank, &
+     iprocfrom_faces,iprocto_faces, &
+     iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
+     iboolfaces,npoin2D_faces,iboolcorner, &
+     NGLOB2DMAX_XY,NGLOB1D_RADIAL, &
+     NUMMSGS_FACES,NCORNERSCHUNKS,NPROC_XI,NPROC_ETA,LOCAL_PATH)
+
+
+  endif
+
+
+  end subroutine read_arrays_buffers_solver
+
+!
+!------------------------------------------------------------------------------------------------------
+!
+
+
+  subroutine read_arrays_buffers_1(iregion_code,myrank, &
+     iboolleft_xi,iboolright_xi,iboolleft_eta,iboolright_eta, &
+     npoin2D_xi,npoin2D_eta, &
+     NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX,LOCAL_PATH)
+
+  implicit none
+
+! standard include of the MPI library
+  include 'mpif.h'
+
+  include "constants.h"
+
+  integer iregion_code,myrank
+
+  integer, dimension(NB_SQUARE_EDGES_ONEDIR) :: npoin2D_xi,npoin2D_eta
+  integer NGLOB2DMAX_XMIN_XMAX,NGLOB2DMAX_YMIN_YMAX
+
+  character(len=150) LOCAL_PATH
+
+  integer, dimension(NGLOB2DMAX_XMIN_XMAX) :: iboolleft_xi,iboolright_xi
+  integer, dimension(NGLOB2DMAX_YMIN_YMAX) :: iboolleft_eta,iboolright_eta
+
+  ! local parameters
+  integer ier
   integer npoin2D_xi_mesher,npoin2D_eta_mesher
-  integer npoin1D_corner
 
-  integer imsg,icount_faces,icount_corners
-  integer ipoin1D,ipoin2D
-
   double precision xdummy,ydummy,zdummy
 
 ! processor identification
-  character(len=150) OUTPUT_FILES,prname,filename
+  character(len=150) OUTPUT_FILES,prname
 
-! $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
-
 ! get the base pathname for output files
   call get_value_string(OUTPUT_FILES, 'OUTPUT_FILES', 'OUTPUT_FILES')
 
 ! create the name for the database of the current slice and region
   call create_name_database(prname,myrank,iregion_code,LOCAL_PATH)
 
-! read 2-D addressing for summation between slices along xi with MPI
-
 ! read iboolleft_xi of this slice
-  open(unit=IIN,file=prname(1:len_trim(prname))//'iboolleft_xi.txt',status='old',action='read')
+  open(unit=IIN,file=prname(1:len_trim(prname))//'iboolleft_xi.txt', &
+        status='old',action='read',iostat=ier)
+  if( ier /= 0 ) call exit_mpi(myrank,'error opening iboolleft_xi file')
+
   npoin2D_xi(1) = 1
  350  continue
   read(IIN,*) iboolleft_xi(npoin2D_xi(1)),xdummy,ydummy,zdummy
@@ -99,6 +208,9 @@
       call exit_MPI(myrank,'incorrect iboolleft_xi read')
   close(IIN)
 
+  ! synchronizes processes
+  call sync_all()
+
 ! read iboolright_xi of this slice
   open(unit=IIN,file=prname(1:len_trim(prname))//'iboolright_xi.txt',status='old',action='read')
   npoin2D_xi(2) = 1
@@ -125,6 +237,10 @@
     write(IMAIN,*)
   endif
 
+  ! synchronizes processes
+  call sync_all()
+
+
 ! $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
 
 ! read 2-D addressing for summation between slices along eta with MPI
@@ -146,6 +262,9 @@
       call exit_MPI(myrank,'incorrect iboolleft_eta read')
   close(IIN)
 
+  ! synchronizes processes
+  call sync_all()
+
 ! read iboolright_eta of this slice
   open(unit=IIN,file=prname(1:len_trim(prname))//'iboolright_eta.txt',status='old',action='read')
   npoin2D_eta(2) = 1
@@ -172,81 +291,94 @@
     write(IMAIN,*)
   endif
 
-
+  ! synchronizes processes
+  call sync_all()
+
 !! $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
 
-! read chunk messages only if more than one chunk
-  if(NCHUNKS /= 1) then
+  end subroutine read_arrays_buffers_1
 
-! read messages to assemble between chunks with MPI
 
-  if(myrank == 0) then
+!
+!-----------------------------------------------------------------------------------------------
+!
 
-! file with the list of processors for each message for faces
-  open(unit=IIN,file=trim(OUTPUT_FILES)//'/list_messages_faces.txt',status='old',action='read')
-  do imsg = 1,NUMMSGS_FACES
-  read(IIN,*) imsg_type(imsg),iprocfrom_faces(imsg),iprocto_faces(imsg)
-  if      (iprocfrom_faces(imsg) < 0 &
-      .or. iprocto_faces(imsg) < 0 &
-      .or. iprocfrom_faces(imsg) > NPROCTOT-1 &
-      .or. iprocto_faces(imsg) > NPROCTOT-1) &
-    call exit_MPI(myrank,'incorrect chunk faces numbering')
-  if (imsg_type(imsg) < 1 .or. imsg_type(imsg) > 3) &
-    call exit_MPI(myrank,'incorrect message type labeling')
-  enddo
-  close(IIN)
 
-! file with the list of processors for each message for corners
-  open(unit=IIN,file=trim(OUTPUT_FILES)//'/list_messages_corners.txt',status='old',action='read')
-  do imsg = 1,NCORNERSCHUNKS
-  read(IIN,*) iproc_master_corners(imsg),iproc_worker1_corners(imsg), &
-                          iproc_worker2_corners(imsg)
-  if    (iproc_master_corners(imsg) < 0 &
-    .or. iproc_worker1_corners(imsg) < 0 &
-    .or. iproc_worker2_corners(imsg) < 0 &
-    .or. iproc_master_corners(imsg) > NPROCTOT-1 &
-    .or. iproc_worker1_corners(imsg) > NPROCTOT-1 &
-    .or. iproc_worker2_corners(imsg) > NPROCTOT-1) &
-      call exit_MPI(myrank,'incorrect chunk corner numbering')
-  enddo
-  close(IIN)
+  subroutine read_arrays_buffers_2(iregion_code,myrank, &
+     iprocfrom_faces,iprocto_faces, &
+     iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners, &
+     iboolfaces,npoin2D_faces,iboolcorner, &
+     NGLOB2DMAX_XY,NGLOB1D_RADIAL, &
+     NUMMSGS_FACES,NCORNERSCHUNKS,NPROC_XI,NPROC_ETA,LOCAL_PATH)
 
-  endif
+  implicit none
 
-! broadcast the information read on the master to the nodes
-  call MPI_BCAST(imsg_type,NUMMSGS_FACES,MPI_INTEGER,0,MPI_COMM_WORLD,ier)
-  call MPI_BCAST(iprocfrom_faces,NUMMSGS_FACES,MPI_INTEGER,0,MPI_COMM_WORLD,ier)
-  call MPI_BCAST(iprocto_faces,NUMMSGS_FACES,MPI_INTEGER,0,MPI_COMM_WORLD,ier)
+! standard include of the MPI library
+  include 'mpif.h'
 
-  call MPI_BCAST(iproc_master_corners,NCORNERSCHUNKS,MPI_INTEGER,0,MPI_COMM_WORLD,ier)
-  call MPI_BCAST(iproc_worker1_corners,NCORNERSCHUNKS,MPI_INTEGER,0,MPI_COMM_WORLD,ier)
-  call MPI_BCAST(iproc_worker2_corners,NCORNERSCHUNKS,MPI_INTEGER,0,MPI_COMM_WORLD,ier)
+  include "constants.h"
 
+  integer iregion_code,myrank
+
+  integer NGLOB2DMAX_XY,NGLOB1D_RADIAL
+  integer NUMMSGS_FACES,NCORNERSCHUNKS,NPROC_XI,NPROC_ETA
+
+  integer npoin2D_faces(NUMFACES_SHARED)
+
+  character(len=150) LOCAL_PATH
+
+  integer, dimension(NGLOB2DMAX_XY,NUMFACES_SHARED) :: iboolfaces
+  integer, dimension(NGLOB1D_RADIAL,NUMCORNERS_SHARED) :: iboolcorner
+
+  integer, dimension(NUMMSGS_FACES) :: iprocfrom_faces,iprocto_faces
+
+! allocate array for messages for corners
+  integer, dimension(NCORNERSCHUNKS) :: iproc_master_corners,iproc_worker1_corners,iproc_worker2_corners
+
+  ! local parameters
+  double precision xdummy,ydummy,zdummy
+  integer imsg
+  integer npoin1D_corner
+
+  integer icount_faces,icount_corners
+  integer ipoin1D,ipoin2D
+
+! processor identification
+  character(len=150) OUTPUT_FILES,prname,filename
+
+! get the base pathname for output files
+  call get_value_string(OUTPUT_FILES, 'OUTPUT_FILES', 'OUTPUT_FILES')
+
+! create the name for the database of the current slice and region
+  call create_name_database(prname,myrank,iregion_code,LOCAL_PATH)
+
 !---- read indirect addressing for each message for faces of the chunks
 !---- a given slice can belong to at most two faces
   icount_faces = 0
   do imsg = 1,NUMMSGS_FACES
-  if(myrank == iprocfrom_faces(imsg) .or. myrank == iprocto_faces(imsg)) then
-    icount_faces = icount_faces + 1
-    if(icount_faces>NUMFACES_SHARED) call exit_MPI(myrank,'more than NUMFACES_SHARED faces for this slice')
-    if(icount_faces>2 .and. (NPROC_XI > 1 .or. NPROC_ETA > 1)) call exit_MPI(myrank,'more than two faces for this slice')
+    if(myrank == iprocfrom_faces(imsg) .or. myrank == iprocto_faces(imsg)) then
+      icount_faces = icount_faces + 1
+      if(icount_faces>NUMFACES_SHARED) &
+        call exit_MPI(myrank,'more than NUMFACES_SHARED faces for this slice')
+      if(icount_faces>2 .and. (NPROC_XI > 1 .or. NPROC_ETA > 1)) &
+        call exit_MPI(myrank,'more than two faces for this slice')
 
-! read file with 2D buffer for faces
-    if(myrank == iprocfrom_faces(imsg)) then
-      write(filename,"('buffer_faces_chunks_sender_msg',i6.6,'.txt')") imsg
-    else if(myrank == iprocto_faces(imsg)) then
-      write(filename,"('buffer_faces_chunks_receiver_msg',i6.6,'.txt')") imsg
+      ! read file with 2D buffer for faces
+      if(myrank == iprocfrom_faces(imsg)) then
+        write(filename,"('buffer_faces_chunks_sender_msg',i6.6,'.txt')") imsg
+      else if(myrank == iprocto_faces(imsg)) then
+        write(filename,"('buffer_faces_chunks_receiver_msg',i6.6,'.txt')") imsg
+      endif
+
+      open(unit=IIN,file=prname(1:len_trim(prname))//filename,status='old',action='read')
+      read(IIN,*) npoin2D_faces(icount_faces)
+      if(npoin2D_faces(icount_faces) > NGLOB2DMAX_XY) &
+        call exit_MPI(myrank,'incorrect nb of points in face buffer')
+      do ipoin2D = 1,npoin2D_faces(icount_faces)
+        read(IIN,*) iboolfaces(ipoin2D,icount_faces),xdummy,ydummy,zdummy
+      enddo
+      close(IIN)
     endif
-
-    open(unit=IIN,file=prname(1:len_trim(prname))//filename,status='old',action='read')
-    read(IIN,*) npoin2D_faces(icount_faces)
-    if(npoin2D_faces(icount_faces) > NGLOB2DMAX_XY) &
-      call exit_MPI(myrank,'incorrect nb of points in face buffer')
-    do ipoin2D = 1,npoin2D_faces(icount_faces)
-      read(IIN,*) iboolfaces(ipoin2D,icount_faces),xdummy,ydummy,zdummy
-    enddo
-    close(IIN)
-  endif
   enddo
 
 
@@ -283,7 +415,5 @@
   endif
   enddo
 
-  endif
+  end subroutine read_arrays_buffers_2
 
-  end subroutine read_arrays_buffers_solver
-

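The restructuring above splits the old monolithic reader in two: read_arrays_buffers_1 handles the per-slice xi/eta buffers on every rank, and read_arrays_buffers_2 the face and corner buffers between chunks. The chunk message lists are, as before, read by rank 0 alone and broadcast; the revision moves that logic into the driver and inserts a sync_all() before read_arrays_buffers_2 is entered. Reduced to its core, the read-on-master-then-broadcast idiom used here looks like this (listfile, values and n are placeholders, not names from this file):

  if( myrank == 0 ) then
    ! rank 0 alone touches the shared file system
    open(unit=IIN,file=listfile,status='old',action='read')
    read(IIN,*) values(1:n)
    close(IIN)
  endif

  ! collective: every other rank receives rank 0's copy
  call MPI_BCAST(values,n,MPI_INTEGER,0,MPI_COMM_WORLD,ier)

  ! make sure all ranks hold the data before anyone proceeds
  call sync_all()
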
Modified: seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/read_mesh_databases.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/read_mesh_databases.f90	2012-05-07 17:22:04 UTC (rev 20047)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/read_mesh_databases.f90	2012-05-07 20:37:29 UTC (rev 20048)
@@ -153,7 +153,13 @@
   logical, dimension(NSPEC_INNER_CORE) :: is_on_a_slice_edge_inner_core
 
   ! start reading the databases
+
   ! read arrays created by the mesher
+  ! user output
+  if( myrank == 0 ) write(IMAIN,*) 'reading mesh databases...'
+
+  ! synchronizes processes
+  call sync_all()
 
   ! crust and mantle
   if(ANISOTROPIC_3D_MANTLE_VAL) then
@@ -197,6 +203,9 @@
             READ_KAPPA_MU,READ_TISO,TRANSVERSE_ISOTROPY_VAL,ANISOTROPIC_3D_MANTLE_VAL, &
             ANISOTROPIC_INNER_CORE_VAL,OCEANS_VAL,LOCAL_PATH,ABSORBING_CONDITIONS)
 
+  ! synchronizes processes
+  call sync_all()
+
   ! outer core (no anisotropy nor S velocity)
   ! rmass_ocean_load is not used in this routine because it is meaningless in the outer core
   READ_KAPPA_MU = .false.
@@ -227,6 +236,9 @@
             READ_KAPPA_MU,READ_TISO,TRANSVERSE_ISOTROPY_VAL,ANISOTROPIC_3D_MANTLE_VAL, &
             ANISOTROPIC_INNER_CORE_VAL,OCEANS_VAL,LOCAL_PATH,ABSORBING_CONDITIONS)
 
+  ! synchronizes processes
+  call sync_all()
+
   ! inner core (no anisotropy)
   ! rmass_ocean_load is not used in this routine because it is meaningless in the inner core
   READ_KAPPA_MU = .true.
@@ -273,6 +285,9 @@
   if(minval(ibool_inner_core(:,:,:,:)) /= 1 .or. maxval(ibool_inner_core(:,:,:,:)) /= NGLOB_INNER_CORE) &
     call exit_MPI(myrank,'incorrect global numbering: iboolmax does not equal nglob in inner core')
 
+  ! synchronizes processes
+  call sync_all()
+
   end subroutine read_mesh_databases
 
 !
@@ -357,6 +372,12 @@
   integer :: ier,iproc,iproc_read
   integer :: NUM_FACES,NPROC_ONE_DIRECTION
 
+  ! user output
+  if( myrank == 0 ) write(IMAIN,*) 'reading MPI addressing...'
+
+  ! synchronizes processes
+  call sync_all()
+
   ! open file with global slice number addressing
   if(myrank == 0) then
     open(unit=IIN,file=trim(OUTPUT_FILES)//'/addressing.txt',status='old',action='read',iostat=ier)
@@ -382,7 +403,8 @@
 !!!!!!!  if (myrank == 0 .and. NCHUNKS_VAL == 6) then
 !!!!!!! commented out because crashes when run on a very large machine
 !!!!!!! because the records become too long
-  if (.false.) then
+  if ( myrank == 0 .and. NCHUNKS_VAL == 6 .and. NPROCTOT_VAL < 500 ) then
+    write(IMAIN,*)
     write(IMAIN,*) 'Spatial distribution of the slices'
     do iproc_xi = NPROC_XI_VAL-1, 0, -1
       write(IMAIN,'(20x)',advance='no')
@@ -431,7 +453,10 @@
     enddo
     write(IMAIN, *) ' '
   endif
-
+
+  ! synchronizes processes
+  call sync_all()
+
   ! determine chunk number and local slice coordinates using addressing
   ichunk = ichunk_slice(myrank)
   iproc_xi = iproc_xi_slice(myrank)
@@ -482,6 +507,9 @@
      NGLOB2DMAX_YMIN_YMAX(IREGION_CRUST_MANTLE),NGLOB2DMAX_XY,NGLOB1D_RADIAL(IREGION_CRUST_MANTLE), &
      NUMMSGS_FACES,NCORNERSCHUNKS,NPROCTOT_VAL,NPROC_XI_VAL,NPROC_ETA_VAL,LOCAL_PATH,NCHUNKS_VAL)
 
+  ! synchronizes processes
+  call sync_all()
+
   ! outer core
   call read_arrays_buffers_solver(IREGION_OUTER_CORE,myrank, &
      iboolleft_xi_outer_core,iboolright_xi_outer_core,iboolleft_eta_outer_core,iboolright_eta_outer_core, &
@@ -494,6 +522,9 @@
      NGLOB2DMAX_YMIN_YMAX(IREGION_OUTER_CORE),NGLOB2DMAX_XY,NGLOB1D_RADIAL(IREGION_OUTER_CORE), &
      NUMMSGS_FACES,NCORNERSCHUNKS,NPROCTOT_VAL,NPROC_XI_VAL,NPROC_ETA_VAL,LOCAL_PATH,NCHUNKS_VAL)
 
+  ! synchronizes processes
+  call sync_all()
+
   ! inner core
   call read_arrays_buffers_solver(IREGION_INNER_CORE,myrank, &
      iboolleft_xi_inner_core,iboolright_xi_inner_core,iboolleft_eta_inner_core,iboolright_eta_inner_core, &
@@ -506,7 +537,9 @@
      NGLOB2DMAX_YMIN_YMAX(IREGION_INNER_CORE),NGLOB2DMAX_XY,NGLOB1D_RADIAL(IREGION_INNER_CORE), &
      NUMMSGS_FACES,NCORNERSCHUNKS,NPROCTOT_VAL,NPROC_XI_VAL,NPROC_ETA_VAL,LOCAL_PATH,NCHUNKS_VAL)
 
-
+  ! synchronizes processes
+  call sync_all()
+
   end subroutine read_mesh_databases_addressing
 
 
@@ -620,6 +653,11 @@
   integer njunk1,njunk2,njunk3
   character(len=150) prname
 
+  ! user output
+  if( myrank == 0 ) write(IMAIN,*) 'reading coupling surfaces...'
+
+  ! synchronizes processes
+  call sync_all()
 
   ! crust and mantle
   ! create name of database
@@ -658,7 +696,10 @@
   read(27) jacobian2D_top_crust_mantle
   close(27)
 
+  ! synchronizes processes
+  call sync_all()
 
+
   ! read parameters to couple fluid and solid regions
   !
   ! outer core
@@ -700,6 +741,8 @@
   read(27) jacobian2D_top_outer_core
   close(27)
 
+  ! synchronizes processes
+  call sync_all()
 
   !
   ! inner core
@@ -727,6 +770,8 @@
   read(27) ibelm_top_inner_core
   close(27)
 
+  ! synchronizes processes
+  call sync_all()
 
   ! -- Boundary Mesh for crust and mantle ---
   if (SAVE_BOUNDARY_MESH .and. SIMULATION_TYPE == 3) then
@@ -756,6 +801,9 @@
     moho_kl = 0.; d400_kl = 0.; d670_kl = 0.; cmb_kl = 0.; icb_kl = 0.
   endif
 
+  ! synchronizes processes
+  call sync_all()
+
   end subroutine read_mesh_databases_coupling
 
 
@@ -820,7 +868,13 @@
   integer(kind=8) :: filesize
   character(len=150) prname
 
+  ! user output
+  if( myrank == 0 ) write(IMAIN,*) 'reading Stacey boundaries...'
+
+  ! synchronizes processes
+  call sync_all()
 
+
   ! crust and mantle
   ! create name of database
   call create_name_database(prname,myrank,IREGION_CRUST_MANTLE,LOCAL_PATH)
@@ -945,8 +999,12 @@
     endif
   endif
 
+  ! synchronizes processes
+  call sync_all()
 
+
   ! outer core
+
   ! create name of database
   call create_name_database(prname,myrank,IREGION_OUTER_CORE,LOCAL_PATH)
 
@@ -1099,4 +1157,7 @@
     endif
   endif
 
+  ! synchronizes processes
+  call sync_all()
+
   end subroutine read_mesh_databases_stacey

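Each reading phase in read_mesh_databases.f90 now follows the same shape: rank 0 announces the phase, all ranks synchronize, read, then synchronize again before moving on, so a rank stuck on slow I/O cannot silently fall behind the others. Schematically, with read_phase_files() as a placeholder for the actual reader calls above:

  ! announce the phase on the master only
  if( myrank == 0 ) write(IMAIN,*) 'reading <phase>...'

  ! enter the phase together ...
  call sync_all()

  call read_phase_files()

  ! ... and leave it together: no rank runs ahead of the file system
  call sync_all()
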
Modified: seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/specfem3D.F90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/specfem3D.F90	2012-05-07 17:22:04 UTC (rev 20047)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/specfem3D.F90	2012-05-07 20:37:29 UTC (rev 20048)
@@ -1057,7 +1057,7 @@
               ibool_inner_core,idoubling_inner_core,ispec_is_tiso_inner_core, &
               is_on_a_slice_edge_inner_core,rmass_inner_core, &
               ABSORBING_CONDITIONS,LOCAL_PATH)
-
+
   ! read 2-D addressing for summation between slices with MPI
   call read_mesh_databases_addressing(myrank, &
               iboolleft_xi_crust_mantle,iboolright_xi_crust_mantle, &


