[cig-commits] r19112 - seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D

dkomati1 at geodynamics.org
Mon Oct 24 05:50:49 PDT 2011


Author: dkomati1
Date: 2011-10-24 05:50:49 -0700 (Mon, 24 Oct 2011)
New Revision: 19112

Modified:
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/assemble_MPI_central_cube.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/assemble_MPI_scalar.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/assemble_MPI_vector.f90
Log:
fixed a tiny bug on IBM BlueGene: expressions such as call MPI_IRECV(buffer_all_cube_from_slices(:,:,nb_msgs_theor_in_cube), ...) were not recognized as pointing to a contiguous block of memory, so the compiler passed a temporary copy, which gives incorrect results for non-blocking MPI calls. Changed these calls to call MPI_IRECV(buffer_all_cube_from_slices(1,1,nb_msgs_theor_in_cube), ...) to force the compiler to pass the address of the beginning of the memory block instead of making a temporary copy.
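
For context, below is a minimal sketch (not taken from the repository) of the pattern this commit fixes; the names post_receives, buf, ndim, npts, nslices, sender and itag are illustrative, not actual SPECFEM3D_GLOBE identifiers. With the old mpif.h interface, an array section passed to a non-blocking call may be copied into a compiler-generated temporary that is released as soon as the call returns, so the receive never reaches the real array; passing the first element hands MPI the base address of the contiguous slab instead.

! minimal sketch of the buggy vs. fixed call pattern (illustrative names only)
subroutine post_receives(buf, ndim, npts, nslices, sender, itag, requests)
  implicit none
  include 'mpif.h'
  integer, intent(in) :: ndim, npts, nslices, sender, itag
  double precision, intent(inout) :: buf(ndim, npts, nslices)
  integer, intent(out) :: requests(nslices)
  integer :: imsg, ier

  do imsg = 1, nslices
! buggy form: the array section may be copied into a compiler temporary,
! so the non-blocking receive writes into memory that is gone once the
! temporary is released:
!   call MPI_IRECV(buf(:,:,imsg), ndim*npts, MPI_DOUBLE_PRECISION, sender, &
!                  itag, MPI_COMM_WORLD, requests(imsg), ier)

! fixed form: pass the first element, i.e. the base address of the
! contiguous slab buf(:,:,imsg), so MPI fills the actual array:
    call MPI_IRECV(buf(1,1,imsg), ndim*npts, MPI_DOUBLE_PRECISION, sender, &
                   itag, MPI_COMM_WORLD, requests(imsg), ier)
  enddo
end subroutine post_receives

The same reasoning applies to MPI_ISEND: if the section is copied into a temporary, the send may read from memory that has already been reused by the time the message is actually transmitted.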


Modified: seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/assemble_MPI_central_cube.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/assemble_MPI_central_cube.f90	2011-10-21 20:36:09 UTC (rev 19111)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/assemble_MPI_central_cube.f90	2011-10-24 12:50:49 UTC (rev 19112)
@@ -88,7 +88,7 @@
     do imsg = 1,nb_msgs_theor_in_cube-1
 ! receive buffers from slices
       sender = sender_from_slices_to_cube(imsg)
-      call MPI_IRECV(buffer_all_cube_from_slices(:,:,imsg), &
+      call MPI_IRECV(buffer_all_cube_from_slices(1,1,imsg), &
                 ndim_assemble*npoin2D_cube_from_slices,MPI_DOUBLE_PRECISION,sender, &
                 itag,MPI_COMM_WORLD,request_receive_array(imsg),ier)
     enddo
@@ -153,7 +153,7 @@
 !       itag,buffer_slices2,ndim_assemble*npoin2D_cube_from_slices,&
 !       MPI_DOUBLE_PRECISION,sender,itag,MPI_COMM_WORLD,msg_status,ier)
 
-    call MPI_IRECV(buffer_all_cube_from_slices(:,:,nb_msgs_theor_in_cube), &
+    call MPI_IRECV(buffer_all_cube_from_slices(1,1,nb_msgs_theor_in_cube), &
         ndim_assemble*npoin2D_cube_from_slices,MPI_DOUBLE_PRECISION,sender,itag,MPI_COMM_WORLD,request_receive,ier)
 !! DK DK this merged with previous statement
 !   buffer_all_cube_from_slices(:,:,nb_msgs_theor_in_cube) = buffer_slices2(:,:)
@@ -273,7 +273,7 @@
     do imsg = 1,nb_msgs_theor_in_cube-1
 ! send buffers to slices
       receiver = sender_from_slices_to_cube(imsg)
-      call MPI_ISEND(buffer_all_cube_from_slices(:,:,imsg),ndim_assemble*npoin2D_cube_from_slices, &
+      call MPI_ISEND(buffer_all_cube_from_slices(1,1,imsg),ndim_assemble*npoin2D_cube_from_slices, &
               MPI_DOUBLE_PRECISION,receiver,itag,MPI_COMM_WORLD,request_send_array(imsg),ier)
     enddo
   endif

Modified: seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/assemble_MPI_scalar.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/assemble_MPI_scalar.f90	2011-10-21 20:36:09 UTC (rev 19111)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/assemble_MPI_scalar.f90	2011-10-24 12:50:49 UTC (rev 19112)
@@ -331,7 +331,7 @@
   if(myrank==iprocto_faces(imsg)) then
     sender = iprocfrom_faces(imsg)
     npoin2D_chunks = npoin2D_faces(icount_faces)
-    call MPI_IRECV(buffer_received_faces_scalar(:,icount_faces), &
+    call MPI_IRECV(buffer_received_faces_scalar(1,icount_faces), &
               npoin2D_chunks,CUSTOM_MPI_TYPE,sender, &
               itag,MPI_COMM_WORLD,request_receive_array(icount_faces),ier)
 !   do ipoin2D=1,npoin2D_chunks
@@ -352,7 +352,7 @@
     do ipoin2D=1,npoin2D_chunks
       buffer_send_faces_scalar(ipoin2D,icount_faces) = array_val(iboolfaces(ipoin2D,icount_faces))
     enddo
-    call MPI_ISEND(buffer_send_faces_scalar(:,icount_faces),npoin2D_chunks, &
+    call MPI_ISEND(buffer_send_faces_scalar(1,icount_faces),npoin2D_chunks, &
               CUSTOM_MPI_TYPE,receiver,itag,MPI_COMM_WORLD,request_send_array(icount_faces),ier)
   endif
   enddo
@@ -406,7 +406,7 @@
   if(myrank==iprocfrom_faces(imsg)) then
     sender = iprocto_faces(imsg)
     npoin2D_chunks = npoin2D_faces(icount_faces)
-    call MPI_IRECV(buffer_received_faces_scalar(:,icount_faces), &
+    call MPI_IRECV(buffer_received_faces_scalar(1,icount_faces), &
               npoin2D_chunks,CUSTOM_MPI_TYPE,sender, &
               itag,MPI_COMM_WORLD,request_receive_array(icount_faces),ier)
 !   do ipoin2D=1,npoin2D_chunks
@@ -426,7 +426,7 @@
     do ipoin2D=1,npoin2D_chunks
       buffer_send_faces_scalar(ipoin2D,icount_faces) = array_val(iboolfaces(ipoin2D,icount_faces))
     enddo
-    call MPI_ISEND(buffer_send_faces_scalar(:,icount_faces),npoin2D_chunks, &
+    call MPI_ISEND(buffer_send_faces_scalar(1,icount_faces),npoin2D_chunks, &
               CUSTOM_MPI_TYPE,receiver,itag,MPI_COMM_WORLD,request_send_array(icount_faces),ier)
   endif
   enddo

Modified: seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/assemble_MPI_vector.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/assemble_MPI_vector.f90	2011-10-21 20:36:09 UTC (rev 19111)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/assemble_MPI_vector.f90	2011-10-24 12:50:49 UTC (rev 19112)
@@ -468,7 +468,7 @@
 ! size of buffers is the sum of two sizes because we handle two regions in the same MPI call
     npoin2D_chunks_all = npoin2D_faces_crust_mantle(icount_faces) + npoin2D_faces_inner_core(icount_faces)
 
-    call MPI_IRECV(buffer_received_faces_vector(:,:,icount_faces),NDIM*npoin2D_chunks_all,CUSTOM_MPI_TYPE,sender, &
+    call MPI_IRECV(buffer_received_faces_vector(1,1,icount_faces),NDIM*npoin2D_chunks_all,CUSTOM_MPI_TYPE,sender, &
               itag,MPI_COMM_WORLD,request_receive_array(icount_faces),ier)
 
 !   do ipoin2D = 1,npoin2D_faces_crust_mantle(icount_faces)
@@ -524,7 +524,7 @@
       buffer_send_faces_vector(3,ioffset + ipoin2D,icount_faces) = accel_inner_core(3,iboolfaces_inner_core(ipoin2D,icount_faces))
     enddo
 
-    call MPI_ISEND(buffer_send_faces_vector(:,:,icount_faces),NDIM*npoin2D_chunks_all,CUSTOM_MPI_TYPE,receiver,itag, &
+    call MPI_ISEND(buffer_send_faces_vector(1,1,icount_faces),NDIM*npoin2D_chunks_all,CUSTOM_MPI_TYPE,receiver,itag, &
                      MPI_COMM_WORLD,request_send_array(icount_faces),ier)
   endif
   enddo
@@ -602,7 +602,7 @@
 ! size of buffers is the sum of two sizes because we handle two regions in the same MPI call
     npoin2D_chunks_all = npoin2D_faces_crust_mantle(icount_faces) + npoin2D_faces_inner_core(icount_faces)
 
-    call MPI_IRECV(buffer_received_faces_vector(:,:,icount_faces),NDIM*npoin2D_chunks_all,CUSTOM_MPI_TYPE,sender, &
+    call MPI_IRECV(buffer_received_faces_vector(1,1,icount_faces),NDIM*npoin2D_chunks_all,CUSTOM_MPI_TYPE,sender, &
               itag,MPI_COMM_WORLD,request_receive_array(icount_faces),ier)
 
 !   do ipoin2D = 1,npoin2D_faces_crust_mantle(icount_faces)
@@ -652,7 +652,7 @@
       buffer_send_faces_vector(3,ioffset + ipoin2D,icount_faces) = accel_inner_core(3,iboolfaces_inner_core(ipoin2D,icount_faces))
     enddo
 
-    call MPI_ISEND(buffer_send_faces_vector(:,:,icount_faces),NDIM*npoin2D_chunks_all,CUSTOM_MPI_TYPE,receiver,itag, &
+    call MPI_ISEND(buffer_send_faces_vector(1,1,icount_faces),NDIM*npoin2D_chunks_all,CUSTOM_MPI_TYPE,receiver,itag, &
                      MPI_COMM_WORLD,request_send_array(icount_faces),ier)
   endif
   enddo
