[cig-commits] [commit] devel: added a stop statement in src/auxiliaries/smooth_vol_data.f90 and commented out all MPI statements because they break the build system; developers should use only the routines in src/shared/parallel.f90 instead of calling MPI directly (6235d33)

cig_noreply at geodynamics.org
Sat Apr 26 06:40:08 PDT 2014


Repository : ssh://geoshell/specfem3d

On branch  : devel
Link       : https://github.com/geodynamics/specfem3d/compare/aa19d657e7ebf41f81216672453c6411d6ab3fdf...251b0d9bed902304e83f0a37ffdc0a1376e3780d

>---------------------------------------------------------------

commit 6235d334be0497735126f24d0b7816dd5f5111c0
Author: Dimitri Komatitsch <komatitsch at lma.cnrs-mrs.fr>
Date:   Sat Apr 26 15:37:33 2014 +0200

    added a stop statement in src/auxiliaries/smooth_vol_data.f90 and commented out all MPI statements because they break the build system; developers should use only the routines in src/shared/parallel.f90 instead of calling MPI directly


>---------------------------------------------------------------

6235d334be0497735126f24d0b7816dd5f5111c0
 src/auxiliaries/smooth_vol_data.f90 | 24 +++++++++++++-----------
 1 file changed, 13 insertions(+), 11 deletions(-)

diff --git a/src/auxiliaries/smooth_vol_data.f90 b/src/auxiliaries/smooth_vol_data.f90
index 315dbe3..95f87c4 100644
--- a/src/auxiliaries/smooth_vol_data.f90
+++ b/src/auxiliaries/smooth_vol_data.f90
@@ -60,7 +60,7 @@ program smooth_vol_data
 ! NOTE:  smoothing can be different in vertical & horizontal directions; mesh is in Cartesian geometry.
 !              algorithm uses vertical as Z, horizontal as X/Y direction
 
-  use :: mpi
+!!!! DK DK this breaks the build system, use routines in src/shared/parallel.f90 instead  use :: mpi
 
   implicit none
   include "constants.h"
@@ -144,13 +144,15 @@ program smooth_vol_data
   integer :: myrank,sizeprocs,rcl
 !------------------
 
+  stop 'DK DK program src/auxiliaries/smooth_vol_data.f90 currently does not work because some modifications broke the build system'
+
   ! initialize the MPI communicator and start the NPROCTOT MPI processes
-  call MPI_INIT(ier)
-  call MPI_COMM_SIZE(MPI_COMM_WORLD,sizeprocs,ier)
-  call MPI_COMM_RANK(MPI_COMM_WORLD,myrank,ier)
+!!!! DK DK this breaks the build system, use routines in src/shared/parallel.f90 instead  call MPI_INIT(ier)
+!!!! DK DK this breaks the build system, use routines in src/shared/parallel.f90 instead  call MPI_COMM_SIZE(MPI_COMM_WORLD,sizeprocs,ier)
+!!!! DK DK this breaks the build system, use routines in src/shared/parallel.f90 instead  call MPI_COMM_RANK(MPI_COMM_WORLD,myrank,ier)
 
   if (myrank == 0) print*,"smooth_vol_data:"
-  call mpi_barrier(MPI_COMM_WORLD,ier)
+!!!! DK DK this breaks the build system, use routines in src/shared/parallel.f90 instead  call mpi_barrier(MPI_COMM_WORLD,ier)
 
   ! reads arguments
   do i = 1, 5
@@ -238,7 +240,7 @@ program smooth_vol_data
     endif
     call exit_mpi(myrank,'Error total number of slices')
   endif
-  call mpi_barrier(MPI_COMM_WORLD,ier)
+!!!! DK DK this breaks the build system, use routines in src/shared/parallel.f90 instead  call mpi_barrier(MPI_COMM_WORLD,ier)
 
   ! GLL points weights
   call zwgljd(xigll,wxgll,NGLLX,GAUSSALPHA,GAUSSBETA)
@@ -565,7 +567,7 @@ program smooth_vol_data
   node_list(num_interfaces_ext_mesh+1) = myrank
 
   ! synchronizes
-  call mpi_barrier(MPI_COMM_WORLD,ier)
+!!!! DK DK this breaks the build system, use routines in src/shared/parallel.f90 instead  call mpi_barrier(MPI_COMM_WORLD,ier)
 
 
 !----------------------
@@ -772,13 +774,13 @@ program smooth_vol_data
   deallocate(dat_smooth)
 
   ! synchronizes
-  call mpi_barrier(MPI_COMM_WORLD,ier)
+!!!! DK DK this breaks the build system, use routines in src/shared/parallel.f90 instead  call mpi_barrier(MPI_COMM_WORLD,ier)
 
   ! the maximum value for the smoothed kernel
   norm = max_old
-  call mpi_reduce(norm,max_old,1,CUSTOM_MPI_TYPE,MPI_MAX,0,MPI_COMM_WORLD,ier)
+!!!! DK DK this breaks the build system, use routines in src/shared/parallel.f90 instead  call mpi_reduce(norm,max_old,1,CUSTOM_MPI_TYPE,MPI_MAX,0,MPI_COMM_WORLD,ier)
   norm = max_new
-  call mpi_reduce(norm,max_new,1,CUSTOM_MPI_TYPE,MPI_MAX,0,MPI_COMM_WORLD,ier)
+!!!! DK DK this breaks the build system, use routines in src/shared/parallel.f90 instead  call mpi_reduce(norm,max_new,1,CUSTOM_MPI_TYPE,MPI_MAX,0,MPI_COMM_WORLD,ier)
   if( myrank == 0 ) then
     print *
     print *,'  Maximum data value before smoothing = ', max_old
@@ -787,7 +789,7 @@ program smooth_vol_data
   endif
 
   ! stop all the MPI processes, and exit
-  call MPI_FINALIZE(ier)
+!!!! DK DK this breaks the build system, use routines in src/shared/parallel.f90 instead  call MPI_FINALIZE(ier)
 
 end program smooth_vol_data
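
For reference, here is a minimal sketch of what the wrapper-based version of the code above could look like. The wrapper names used below (init, world_size, world_rank, sync_all, max_all_cr, finalize) are assumptions based on src/shared/parallel.f90 and must be checked against that file; the sketch would be compiled and linked together with parallel.f90 rather than with the MPI module directly.

! Hedged sketch, not the actual fix: assumes src/shared/parallel.f90
! provides wrappers named init, world_size, world_rank, sync_all,
! max_all_cr and finalize; verify the names and interfaces in that file.
! Compile and link together with src/shared/parallel.f90.
program smooth_vol_data_sketch

  implicit none

  integer :: myrank, sizeprocs
  ! note: max_all_cr in SPECFEM3D operates on the CUSTOM_REAL kind from
  ! constants.h; plain real is used here only to keep the sketch self-contained
  real :: norm, max_old, max_new

  ! replaces call MPI_INIT(ier), MPI_COMM_SIZE(...) and MPI_COMM_RANK(...)
  call init()
  call world_size(sizeprocs)
  call world_rank(myrank)

  if (myrank == 0) print *, 'smooth_vol_data:'

  ! replaces call mpi_barrier(MPI_COMM_WORLD,ier)
  call sync_all()

  ! ... the actual smoothing work would go here ...
  max_old = 1.0
  max_new = 0.5

  ! replaces call mpi_reduce(norm,...,CUSTOM_MPI_TYPE,MPI_MAX,0,MPI_COMM_WORLD,ier)
  norm = max_old
  call max_all_cr(norm, max_old)
  norm = max_new
  call max_all_cr(norm, max_new)

  if (myrank == 0) then
    print *, '  Maximum data value before smoothing = ', max_old
    print *, '  Maximum data value after smoothing  = ', max_new
  endif

  ! replaces call MPI_FINALIZE(ier)
  call finalize()

end program smooth_vol_data_sketch

The likely rationale for the policy: keeping every direct MPI call inside src/shared/parallel.f90 gives the build system a single point at which to switch between serial and parallel configurations, instead of having to treat every source file that says "use mpi" specially.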
 


