[cig-commits] [commit] QA: Fix serial build for optional programs. (2800b40)

cig_noreply at geodynamics.org
Mon Jan 20 07:11:49 PST 2014


Repository : ssh://geoshell/specfem3d

On branch  : QA
Link       : https://github.com/geodynamics/specfem3d/compare/4359ed56c14ef8f87387f46c8b705a1d395a40ba...8e7fca259d3e520c105549cfacfb92b6c83f8971

>---------------------------------------------------------------

commit 2800b409b1059f19fa22c5b8a8c7b04399fb4c44
Author: Elliott Sales de Andrade <esalesde at physics.utoronto.ca>
Date:   Sat Jan 18 16:07:22 2014 -0500

    Fix serial build for optional programs.


>---------------------------------------------------------------

2800b409b1059f19fa22c5b8a8c7b04399fb4c44
 src/shared/combine_vol_data.f90 |  10 ++--
 src/shared/smooth_vol_data.f90  |  25 ++++----
 src/shared/sum_kernels.f90      |  39 +++++--------
 src/specfem3D/Makefile.in       |  14 ++---
 src/specfem3D/model_update.f90  | 124 +++++++++++++++++++---------------------
 5 files changed, 94 insertions(+), 118 deletions(-)
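
A note on the pattern applied throughout this diff: every direct MPI call (MPI_Init, MPI_Comm_size, mpi_barrier, mpi_reduce, ...) is replaced by one of the project's communication wrappers (init, world_size, world_rank, sync_all, the *_all_cr reductions, finalize). The optional programs can then be linked either against the MPI implementation of those wrappers or against serial stubs, selected in the Makefile via $(COND_MPI_OBJECTS). The following is only an illustrative sketch of what such serial stubs could look like, assuming simple no-op semantics; it is not the actual contents of the project's serial.f90.

    ! sketch of serial stand-ins for the wrapper routines (assumed, for illustration)

    subroutine init()
    ! nothing to initialize without MPI
    end subroutine init

    subroutine finalize()
    ! nothing to shut down without MPI
    end subroutine finalize

    subroutine world_size(sizeprocs)
    implicit none
    integer, intent(out) :: sizeprocs
    sizeprocs = 1        ! a serial build always runs as a single process
    end subroutine world_size

    subroutine world_rank(myrank)
    implicit none
    integer, intent(out) :: myrank
    myrank = 0           ! the single process plays the role of rank 0
    end subroutine world_rank

    subroutine sync_all()
    ! no other processes to synchronize with
    end subroutine sync_all

    subroutine stop_all()
    ! abort the run; without MPI a plain stop is enough
    stop 'error: aborting'
    end subroutine stop_all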

diff --git a/src/shared/combine_vol_data.f90 b/src/shared/combine_vol_data.f90
index cee2157..13693da 100644
--- a/src/shared/combine_vol_data.f90
+++ b/src/shared/combine_vol_data.f90
@@ -57,7 +57,6 @@
 !
 ! works for external, unregular meshes
 
-  use mpi
   use combine_vol_data_mod
   use combine_vol_data_adios_mod
 
@@ -114,7 +113,6 @@
              ADIOS_FOR_FORWARD_ARRAYS, ADIOS_FOR_KERNELS
 
   ! Variables to read ADIOS files
-  integer :: mpier
   integer :: sizeprocs, sel_num
   character(len=256) :: var_name , value_file_name, mesh_file_name
   integer(kind=8) :: value_handle, mesh_handle
@@ -123,11 +121,11 @@
   integer :: ibool_offset, x_global_offset
   integer(kind=8), dimension(1) :: start, count_ad
 
-  call MPI_Init(ier)
-  call MPI_Comm_size(MPI_COMM_WORLD, sizeprocs, ier)
+  call init()
+  call world_size(sizeprocs)
   if (sizeprocs .ne. 1) then
     print *, "sequential program. Only mpirun -np 1 ..."
-    call MPI_Abort(MPI_COMM_WORLD, ier, mpier)
+    call stop_all()
   endif
 
 ! checks given arguments
@@ -402,7 +400,7 @@
     call clean_adios(mesh_handle, value_handle)
   endif
 
-  call MPI_Finalize(ier)
+  call finalize()
 
   print *, 'Done writing '//trim(mesh_file)
 
diff --git a/src/shared/smooth_vol_data.f90 b/src/shared/smooth_vol_data.f90
index e4012c6..67e467c 100644
--- a/src/shared/smooth_vol_data.f90
+++ b/src/shared/smooth_vol_data.f90
@@ -60,11 +60,8 @@ program smooth_vol_data
 ! NOTE:  smoothing can be different in vertical & horizontal directions; mesh is in Cartesian geometry.
 !              algorithm uses vertical as Z, horizontal as X/Y direction
 
-  use :: mpi
-
   implicit none
   include "constants.h"
-  include "precision.h"
 
  ! data must be of dimension: (NGLLX,NGLLY,NGLLZ,NSPEC_AB)
   real(kind=CUSTOM_REAL), dimension(:,:,:,:),allocatable :: dat,dat_smooth
@@ -143,12 +140,12 @@ program smooth_vol_data
 !------------------
 
   ! initialize the MPI communicator and start the NPROCTOT MPI processes
-  call MPI_INIT(ier)
-  call MPI_COMM_SIZE(MPI_COMM_WORLD,sizeprocs,ier)
-  call MPI_COMM_RANK(MPI_COMM_WORLD,myrank,ier)
+  call init()
+  call world_size(sizeprocs)
+  call world_rank(myrank)
 
   if (myrank == 0) print*,"smooth_vol_data:"
-  call mpi_barrier(MPI_COMM_WORLD,ier)
+  call sync_all()
 
   ! reads arguments
   do i = 1, 5
@@ -237,7 +234,7 @@ program smooth_vol_data
     endif
     call exit_mpi(myrank,'Error total number of slices')
   endif
-  call mpi_barrier(MPI_COMM_WORLD,ier)
+  call sync_all()
 
   ! GLL points weights
   call zwgljd(xigll,wxgll,NGLLX,GAUSSALPHA,GAUSSBETA)
@@ -526,7 +523,7 @@ program smooth_vol_data
   !enddo
 
   ! synchronizes
-  call mpi_barrier(MPI_COMM_WORLD,ier)
+  call sync_all()
 
 
 !----------------------
@@ -735,13 +732,13 @@ program smooth_vol_data
   deallocate(dat_smooth)
 
   ! synchronizes
-  call mpi_barrier(MPI_COMM_WORLD,ier)
+  call sync_all()
 
   ! the maximum value for the smoothed kernel
   norm = max_old
-  call mpi_reduce(norm,max_old,1,CUSTOM_MPI_TYPE,MPI_MAX,0,MPI_COMM_WORLD,ier)
+  call max_all_cr(norm, max_old)
   norm = max_new
-  call mpi_reduce(norm,max_new,1,CUSTOM_MPI_TYPE,MPI_MAX,0,MPI_COMM_WORLD,ier)
+  call max_all_cr(norm, max_new)
   if( myrank == 0 ) then
     print *
     print *,'  Maximum data value before smoothing = ', max_old
@@ -749,8 +746,8 @@ program smooth_vol_data
     print *
   endif
 
-  ! stop all the MPI processes, and exit
-  call MPI_FINALIZE(ier)
+  ! stop all the processes, and exit
+  call finalize()
 
 end program smooth_vol_data
 
diff --git a/src/shared/sum_kernels.f90 b/src/shared/sum_kernels.f90
index 20a2ac4..0f8532d 100644
--- a/src/shared/sum_kernels.f90
+++ b/src/shared/sum_kernels.f90
@@ -89,16 +89,12 @@ end module sum_par
 
 program sum_kernels
 
-  use :: mpi
   use sum_par
   implicit none
 
-  include 'precision.h'
-
-
   character(len=150) :: kernel_list(MAX_NUM_NODES)
   character(len=150) :: sline, kernel_name,prname_lp
-  integer :: nker, myrank, sizeprocs,  ier
+  integer :: nker, myrank, sizeprocs
   integer :: ios
 
   double precision :: DT
@@ -119,16 +115,16 @@ program sum_kernels
 
   ! ============ program starts here =====================
   ! initialize the MPI communicator and start the NPROCTOT MPI processes
-  call MPI_INIT(ier)
-  call MPI_COMM_SIZE(MPI_COMM_WORLD,sizeprocs,ier)
-  call MPI_COMM_RANK(MPI_COMM_WORLD,myrank,ier)
+  call init()
+  call world_size(sizeprocs)
+  call world_rank(myrank)
 
   if(myrank==0) then
     write(*,*) 'sum_preconditioned_kernels:'
     write(*,*)
     write(*,*) 'reading kernel list: '
   endif
-  call mpi_barrier(MPI_COMM_WORLD,ier)
+  call sync_all()
 
   ! reads in event list
   nker=0
@@ -175,7 +171,7 @@ program sum_kernels
     endif
     call exit_mpi(myrank,'Error total number of slices')
   endif
-  call mpi_barrier(MPI_COMM_WORLD,ier)
+  call sync_all()
 
   ! reads mesh file
   !
@@ -185,7 +181,7 @@ program sum_kernels
   write(prname_lp,'(a,i6.6,a)') trim(LOCAL_PATH)//'/proc',myrank,'_'//'external_mesh.bin'
   open(unit=27,file=trim(prname_lp),&
           status='old',action='read',form='unformatted',iostat=ios)
-  if( ier /= 0 ) then
+  if( ios /= 0 ) then
     print*,'error: could not open database '
     print*,'path: ',trim(prname_lp)
     call exit_mpi(myrank, 'error reading external mesh file')
@@ -206,7 +202,7 @@ program sum_kernels
   endif
 
   ! synchronizes
-  call mpi_barrier(MPI_COMM_WORLD,ier)
+  call sync_all()
 
   ! sums up kernels
   if( USE_ISO_KERNELS ) then
@@ -258,8 +254,8 @@ program sum_kernels
 
   if(myrank==0) write(*,*) 'done writing all kernels'
 
-  ! stop all the MPI processes, and exit
-  call MPI_FINALIZE(ier)
+  ! stop all the processes, and exit
+  call finalize()
 
 end program sum_kernels
 
@@ -269,15 +265,12 @@ end program sum_kernels
 
 subroutine sum_kernel_pre(kernel_name,kernel_list,nker,myrank)
 
-  use :: mpi
   use sum_par
   implicit none
 
-  include 'precision.h'
-
   real(kind=CUSTOM_REAL) :: norm,norm_sum
   character(len=150) :: kernel_name,kernel_list(MAX_NUM_NODES)
-  integer :: nker,myrank,ier
+  integer :: nker,myrank
 
   ! local parameters
   character(len=150) :: k_file
@@ -327,7 +320,7 @@ subroutine sum_kernel_pre(kernel_name,kernel_list,nker,myrank)
 
     ! outputs norm of kernel
     norm = sum( kernel * kernel )
-    call mpi_reduce(norm,norm_sum,1,CUSTOM_MPI_TYPE,MPI_SUM,0,MPI_COMM_WORLD,ier)
+    call sum_all_cr(norm, norm_sum)
     if( myrank == 0 ) then
       print*,'  norm kernel: ',sqrt(norm_sum)
     endif
@@ -348,7 +341,7 @@ subroutine sum_kernel_pre(kernel_name,kernel_list,nker,myrank)
 
     ! outputs norm of preconditioner
     norm = sum( hess * hess )
-    call mpi_reduce(norm,norm_sum,1,CUSTOM_MPI_TYPE,MPI_SUM,0,MPI_COMM_WORLD,ier)
+    call sum_all_cr(norm, norm_sum)
     if( myrank == 0 ) then
       print*,'  norm preconditioner: ',sqrt(norm_sum)
       print*
@@ -440,25 +433,21 @@ subroutine invert_hess( myrank,hess_matrix )
 ! H_nn = \frac{ \partial^2 \chi }{ \partial \rho_n \partial \rho_n }
 ! on all GLL points, which are indexed (i,j,k,ispec)
 
-  use :: mpi
   use sum_par
   implicit none
 
-  include 'precision.h'
-
   integer :: myrank
   real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLY,NGLLZ,NSPEC_AB) :: &
     hess_matrix
 
   ! local parameters
   real(kind=CUSTOM_REAL) :: maxh,maxh_all
-  integer :: ier
 
   ! maximum value of hessian
   maxh = maxval( abs(hess_matrix) )
 
   ! determines maximum from all slices on master
-  call mpi_allreduce(maxh,maxh_all,1,CUSTOM_MPI_TYPE,MPI_MAX,MPI_COMM_WORLD,ier)
+  call max_all_all_cr(maxh, maxh_all)
 
   ! user output
   if( myrank == 0 ) then
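
The reduction wrappers used above (sum_all_cr, max_all_cr, max_all_all_cr) hide the CUSTOM_MPI_TYPE, root, and communicator arguments that the removed lines passed explicitly. Inferred from those removed calls, the MPI-side versions presumably look roughly like the sketch below; the bodies are an assumption for illustration, not the project's actual wrapper code.

    subroutine sum_all_cr(sendbuf, recvbuf)
    use mpi
    implicit none
    include "constants.h"     ! assumed to define CUSTOM_REAL
    include "precision.h"     ! assumed to define CUSTOM_MPI_TYPE
    real(kind=CUSTOM_REAL) :: sendbuf, recvbuf
    integer :: ier
    ! sum over all processes; the result is only meaningful on the root process
    call MPI_REDUCE(sendbuf, recvbuf, 1, CUSTOM_MPI_TYPE, MPI_SUM, 0, MPI_COMM_WORLD, ier)
    end subroutine sum_all_cr

    subroutine max_all_all_cr(sendbuf, recvbuf)
    use mpi
    implicit none
    include "constants.h"
    include "precision.h"
    real(kind=CUSTOM_REAL) :: sendbuf, recvbuf
    integer :: ier
    ! maximum over all processes; the result is returned on every process
    call MPI_ALLREDUCE(sendbuf, recvbuf, 1, CUSTOM_MPI_TYPE, MPI_MAX, MPI_COMM_WORLD, ier)
    end subroutine max_all_all_cr

In a serial build the same routines would simply copy sendbuf into recvbuf.
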
diff --git a/src/specfem3D/Makefile.in b/src/specfem3D/Makefile.in
index 497801a..02979b3 100644
--- a/src/specfem3D/Makefile.in
+++ b/src/specfem3D/Makefile.in
@@ -396,13 +396,13 @@ $E/xconvolve_source_timefunction: $O/convolve_source_timefunction.shared.o
 	${FCCOMPILE_CHECK} -o  ${E}/xconvolve_source_timefunction $O/convolve_source_timefunction.shared.o
 
 @COND_PYRE_FALSE@$E/xcreate_movie_shakemap_AVS_DX_GMT: $O/create_movie_shakemap_AVS_DX_GMT.shared.o $(LIBSPECFEM) $(OUTPUT)/surface_from_mesher.h
-@COND_PYRE_FALSE@	${FCCOMPILE_CHECK} -o  ${E}/xcreate_movie_shakemap_AVS_DX_GMT $O/create_movie_shakemap_AVS_DX_GMT.shared.o $(LIBSPECFEM) -I$(OUTPUT)
+@COND_PYRE_FALSE@	${FCLINK} -o  ${E}/xcreate_movie_shakemap_AVS_DX_GMT $O/create_movie_shakemap_AVS_DX_GMT.shared.o $(LIBSPECFEM) -I$(OUTPUT)
 
-$E/xcombine_vol_data: $(COMBINE_VOL_DATA_OBJECTS)
-	${MPIFCCOMPILE_CHECK} -o  ${E}/xcombine_vol_data $(COMBINE_VOL_DATA_OBJECTS) $(ADIOS_INC) $(ADIOS_LINK)
+$E/xcombine_vol_data: $(COMBINE_VOL_DATA_OBJECTS) $(COND_MPI_OBJECTS)
+	${FCLINK} -o  ${E}/xcombine_vol_data $(COMBINE_VOL_DATA_OBJECTS) $(ADIOS_LINK) $(COND_MPI_OBJECTS) $(MPILIBS)
 
 $E/xcombine_surf_data: $O/combine_surf_data.shared.o $O/write_c_binary.cc.o $O/param_reader.cc.o
-	${FCCOMPILE_CHECK} -o  ${E}/xcombine_surf_data  $O/combine_surf_data.shared.o $O/write_c_binary.cc.o $O/param_reader.cc.o
+	${FCLINK} -o  ${E}/xcombine_surf_data  $O/combine_surf_data.shared.o $O/write_c_binary.cc.o $O/param_reader.cc.o
 
 $E/xsmooth_vol_data: $O/smooth_vol_data.o $O/read_parameter_file.shared.o $O/read_value_parameters.shared.o $O/get_value_parameters.shared.o $O/param_reader.cc.o $O/gll_library.shared.o $O/exit_mpi.shared.o $(COND_MPI_OBJECTS)
 	${FCLINK} -o  ${E}/xsmooth_vol_data  $O/smooth_vol_data.o $O/read_parameter_file.shared.o $O/read_value_parameters.shared.o $O/get_value_parameters.shared.o $O/param_reader.cc.o $O/gll_library.shared.o $O/exit_mpi.shared.o $(COND_MPI_OBJECTS) $(MPILIBS)
@@ -612,10 +612,10 @@ $O/serial.o: $(SHARED)constants.h $(SHARED)serial.f90
 ##
 
 $O/smooth_vol_data.o: $(SHARED)constants.h $(SHARED)smooth_vol_data.f90
-	${MPIFCCOMPILE_CHECK} -c -o $O/smooth_vol_data.o $(SHARED)smooth_vol_data.f90
+	${FCCOMPILE_CHECK} -c -o $O/smooth_vol_data.o $(SHARED)smooth_vol_data.f90
 
 $O/sum_kernels.o: $(SHARED)constants.h $(SHARED)sum_kernels.f90
-	${MPIFCCOMPILE_CHECK} -c -o $O/sum_kernels.o $(SHARED)sum_kernels.f90
+	${FCCOMPILE_CHECK} -c -o $O/sum_kernels.o $(SHARED)sum_kernels.f90
 
 
 #######################################
@@ -625,7 +625,7 @@ $O/sum_kernels.o: $(SHARED)constants.h $(SHARED)sum_kernels.f90
 ##
 
 $O/model_update.o: $(SHARED)constants.h model_update.f90
-	${MPIFCCOMPILE_CHECK} -c -o $O/model_update.o model_update.f90
+	${FCCOMPILE_CHECK} -c -o $O/model_update.o model_update.f90
 
 $O/save_external_bin_m_up.o:  $(SHARED)constants.h save_external_bin_m_up.f90
 	${FCCOMPILE_CHECK} -c -o $O/save_external_bin_m_up.o save_external_bin_m_up.f90
diff --git a/src/specfem3D/model_update.f90 b/src/specfem3D/model_update.f90
index 1e16fb1..8198a43 100644
--- a/src/specfem3D/model_update.f90
+++ b/src/specfem3D/model_update.f90
@@ -27,15 +27,12 @@
 
 program model_update
 
-  use :: mpi
   use specfem_par
   use specfem_par_elastic
   use specfem_par_acoustic
   use specfem_par_poroelastic
   implicit none
 
-  include 'precision.h'
-
   ! ======================================================
   ! USER PARAMETERS
 
@@ -143,10 +140,9 @@ program model_update
 
   ! ============ program starts here =====================
   ! initialize the MPI communicator and start the NPROCTOT MPI processes
-  call MPI_INIT(ier)
-  call MPI_COMM_SIZE(MPI_COMM_WORLD,sizeprocs,ier)
-  call MPI_COMM_RANK(MPI_COMM_WORLD,myrank,ier)
-
+  call init()
+  call world_size(sizeprocs)
+  call world_rank(myrank)
 
   ! subjective step length to multiply to the gradient
   ! e.g. step_fac = 0.03
@@ -304,12 +300,12 @@ program model_update
   ! compute minmax values of current model
   ! NOTE: mpi_reduce operates on the values from all procs,
   !       but the reduced value only exists on the root proc.
-  call mpi_reduce(minval(model_vs(:,:,:,1:nspec)), vsmin_before, 1, CUSTOM_MPI_TYPE, MPI_MIN, 0, MPI_COMM_WORLD,ier)
-  call mpi_reduce(maxval(model_vs(:,:,:,1:nspec)), vsmax_before, 1, CUSTOM_MPI_TYPE, MPI_MAX, 0, MPI_COMM_WORLD,ier)
-  call mpi_reduce(minval(model_vp(:,:,:,1:nspec)), vpmin_before, 1, CUSTOM_MPI_TYPE, MPI_MIN, 0, MPI_COMM_WORLD,ier)
-  call mpi_reduce(maxval(model_vp(:,:,:,1:nspec)), vpmax_before, 1, CUSTOM_MPI_TYPE, MPI_MAX, 0, MPI_COMM_WORLD,ier)
-  call mpi_reduce(minval(model_rho(:,:,:,1:nspec)), rhomin_before,1,CUSTOM_MPI_TYPE, MPI_MIN, 0, MPI_COMM_WORLD,ier)
-  call mpi_reduce(maxval(model_rho(:,:,:,1:nspec)), rhomax_before,1,CUSTOM_MPI_TYPE, MPI_MAX, 0, MPI_COMM_WORLD,ier)
+  call min_all_cr(minval(model_vs(:,:,:,1:nspec)), vsmin_before)
+  call max_all_cr(maxval(model_vs(:,:,:,1:nspec)), vsmax_before)
+  call min_all_cr(minval(model_vp(:,:,:,1:nspec)), vpmin_before)
+  call max_all_cr(maxval(model_vp(:,:,:,1:nspec)), vpmax_before)
+  call min_all_cr(minval(model_rho(:,:,:,1:nspec)), rhomin_before)
+  call max_all_cr(maxval(model_rho(:,:,:,1:nspec)), rhomax_before)
 
 
   if( PRINT_OUT_FILES ) then
@@ -353,12 +349,12 @@ program model_update
      enddo
 
      ! compute minmax values of the thresholded current model
-     call mpi_reduce(minval(model_vs(:,:,:,1:nspec)), vsmin_after, 1, CUSTOM_MPI_TYPE, MPI_MIN, 0, MPI_COMM_WORLD,ier)
-     call mpi_reduce(maxval(model_vs(:,:,:,1:nspec)), vsmax_after, 1, CUSTOM_MPI_TYPE, MPI_MAX, 0, MPI_COMM_WORLD,ier)
-     call mpi_reduce(minval(model_vp(:,:,:,1:nspec)), vpmin_after, 1, CUSTOM_MPI_TYPE, MPI_MIN, 0, MPI_COMM_WORLD,ier)
-     call mpi_reduce(maxval(model_vp(:,:,:,1:nspec)), vpmax_after, 1, CUSTOM_MPI_TYPE, MPI_MAX, 0, MPI_COMM_WORLD,ier)
-!      call mpi_reduce(minval(model_rho(:,:,:,1:nspec)), rhomin_after,1,CUSTOM_MPI_TYPE, MPI_MIN, 0, MPI_COMM_WORLD,ier)
-!      call mpi_reduce(maxval(model_rho(:,:,:,1:nspec)), rhomax_after,1,CUSTOM_MPI_TYPE, MPI_MAX, 0, MPI_COMM_WORLD,ier)
+     call min_all_cr(minval(model_vs(:,:,:,1:nspec)), vsmin_after)
+     call max_all_cr(maxval(model_vs(:,:,:,1:nspec)), vsmax_after)
+     call min_all_cr(minval(model_vp(:,:,:,1:nspec)), vpmin_after)
+     call max_all_cr(maxval(model_vp(:,:,:,1:nspec)), vpmax_after)
+!      call min_all_cr(minval(model_rho(:,:,:,1:nspec)), rhomin_after)
+!      call max_all_cr(maxval(model_rho(:,:,:,1:nspec)), rhomax_after)
 
 
     if( PRINT_OUT_FILES ) then
@@ -463,13 +459,12 @@ program model_update
   endif
 
   ! statistics
-  call mpi_reduce(minval(kernel_a),min_vp_k,1,CUSTOM_MPI_TYPE,MPI_MIN,0,MPI_COMM_WORLD,ier)
-  call mpi_reduce(maxval(kernel_a),max_vp_k,1,CUSTOM_MPI_TYPE,MPI_MAX,0,MPI_COMM_WORLD,ier)
-  call mpi_reduce(minval(kernel_b),min_vs_k,1,CUSTOM_MPI_TYPE,MPI_MIN,0,MPI_COMM_WORLD,ier)
-  call mpi_reduce(maxval(kernel_b),max_vs_k,1,CUSTOM_MPI_TYPE,MPI_MAX,0,MPI_COMM_WORLD,ier)
-  call mpi_reduce(minval(kernel_rho),min_rho_k,1,CUSTOM_MPI_TYPE,MPI_MIN,0,MPI_COMM_WORLD,ier)
-  call mpi_reduce(maxval(kernel_rho),max_rho_k,1,CUSTOM_MPI_TYPE,MPI_MAX,0,MPI_COMM_WORLD,ier)
-
+  call min_all_cr(minval(kernel_a), min_vp_k)
+  call max_all_cr(maxval(kernel_a), max_vp_k)
+  call min_all_cr(minval(kernel_b), min_vs_k)
+  call max_all_cr(maxval(kernel_b), max_vs_k)
+  call min_all_cr(minval(kernel_rho), min_rho_k)
+  call max_all_cr(maxval(kernel_rho), max_rho_k)
 
   if( PRINT_OUT_FILES ) then
    if (myrank == 0) then
@@ -514,13 +509,12 @@ program model_update
   enddo
 
   ! statistics
-  call mpi_reduce(minval(model_dA),min_vp_g,1,CUSTOM_MPI_TYPE,MPI_MIN,0,MPI_COMM_WORLD,ier)
-  call mpi_reduce(maxval(model_dA),max_vp_g,1,CUSTOM_MPI_TYPE,MPI_MAX,0,MPI_COMM_WORLD,ier)
-  call mpi_reduce(minval(model_dB),min_vs_g,1,CUSTOM_MPI_TYPE,MPI_MIN,0,MPI_COMM_WORLD,ier)
-  call mpi_reduce(maxval(model_dB),max_vs_g,1,CUSTOM_MPI_TYPE,MPI_MAX,0,MPI_COMM_WORLD,ier)
-  call mpi_reduce(minval(model_dR),min_rho_g,1,CUSTOM_MPI_TYPE,MPI_MIN,0,MPI_COMM_WORLD,ier)
-  call mpi_reduce(maxval(model_dR),max_rho_g,1,CUSTOM_MPI_TYPE,MPI_MAX,0,MPI_COMM_WORLD,ier)
-
+  call min_all_cr(minval(model_dA), min_vp_g)
+  call max_all_cr(maxval(model_dA), max_vp_g)
+  call min_all_cr(minval(model_dB), min_vs_g)
+  call max_all_cr(maxval(model_dB), max_vs_g)
+  call min_all_cr(minval(model_dR), min_rho_g)
+  call max_all_cr(maxval(model_dR), max_rho_g)
 
   if( PRINT_OUT_FILES ) then
    if (myrank == 0) then
@@ -556,7 +550,7 @@ program model_update
     print*,'  step length : ',step_length,max
     print*
   endif
-  call mpi_bcast(step_length,1,CUSTOM_MPI_TYPE,0,MPI_COMM_WORLD,ier)
+  call bcast_all_cr(step_length, 1)
 
 
   !---------------------------------------------------------------------------------------------
@@ -571,10 +565,9 @@ program model_update
   max_rho = sqrt(max_rho)
 
   ! statistics
-  call mpi_reduce(max_vp,vp_sum,1,CUSTOM_MPI_TYPE,MPI_SUM,0,MPI_COMM_WORLD,ier)
-  call mpi_reduce(max_vs,vs_sum,1,CUSTOM_MPI_TYPE,MPI_SUM,0,MPI_COMM_WORLD,ier)
-  call mpi_reduce(max_rho,rho_sum,1,CUSTOM_MPI_TYPE,MPI_SUM,0,MPI_COMM_WORLD,ier)
-
+  call sum_all_cr(max_vp, vp_sum)
+  call sum_all_cr(max_vs, vs_sum)
+  call sum_all_cr(max_rho, rho_sum)
 
   if( PRINT_OUT_FILES ) then
    if (myrank == 0) then
@@ -604,12 +597,12 @@ program model_update
 
 
   ! statistics
-  call mpi_reduce(minval(model_dA),min_vp,1,CUSTOM_MPI_TYPE,MPI_MIN,0,MPI_COMM_WORLD,ier)
-  call mpi_reduce(maxval(model_dA),max_vp,1,CUSTOM_MPI_TYPE,MPI_MAX,0,MPI_COMM_WORLD,ier)
-  call mpi_reduce(minval(model_dB),min_vs,1,CUSTOM_MPI_TYPE,MPI_MIN,0,MPI_COMM_WORLD,ier)
-  call mpi_reduce(maxval(model_dB),max_vs,1,CUSTOM_MPI_TYPE,MPI_MAX,0,MPI_COMM_WORLD,ier)
-  call mpi_reduce(minval(model_dR),min_rho,1,CUSTOM_MPI_TYPE,MPI_MIN,0,MPI_COMM_WORLD,ier)
-  call mpi_reduce(maxval(model_dR),max_rho,1,CUSTOM_MPI_TYPE,MPI_MAX,0,MPI_COMM_WORLD,ier)
+  call min_all_cr(minval(model_dA), min_vp)
+  call max_all_cr(maxval(model_dA), max_vp)
+  call min_all_cr(minval(model_dB), min_vs)
+  call max_all_cr(maxval(model_dB), max_vs)
+  call min_all_cr(minval(model_dR), min_rho)
+  call max_all_cr(maxval(model_dR), max_rho)
 
 
   if( PRINT_OUT_FILES ) then
@@ -655,15 +648,14 @@ program model_update
   model_rho_new = 0._CUSTOM_REAL
   model_rho_new = model_rho * exp( model_dR )
 
-  ! statistcs
+  ! statistics
   ! compute minmax values of new model
-  call mpi_reduce(minval(model_vs_new(:,:,:,1:nspec)), vsmin_new_before, 1, CUSTOM_MPI_TYPE, MPI_MIN, 0, MPI_COMM_WORLD,ier)
-  call mpi_reduce(maxval(model_vs_new(:,:,:,1:nspec)), vsmax_new_before, 1, CUSTOM_MPI_TYPE, MPI_MAX, 0, MPI_COMM_WORLD,ier)
-  call mpi_reduce(minval(model_vp_new(:,:,:,1:nspec)), vpmin_new_before, 1, CUSTOM_MPI_TYPE, MPI_MIN, 0, MPI_COMM_WORLD,ier)
-  call mpi_reduce(maxval(model_vp_new(:,:,:,1:nspec)), vpmax_new_before, 1, CUSTOM_MPI_TYPE, MPI_MAX, 0, MPI_COMM_WORLD,ier)
-  call mpi_reduce(minval(model_rho_new(:,:,:,1:nspec)),rhomin_new_before,1, CUSTOM_MPI_TYPE, MPI_MIN, 0, MPI_COMM_WORLD,ier)
-  call mpi_reduce(maxval(model_rho_new(:,:,:,1:nspec)),rhomax_new_before,1, CUSTOM_MPI_TYPE, MPI_MAX, 0, MPI_COMM_WORLD,ier)
-
+  call min_all_cr(minval(model_vs_new(:,:,:,1:nspec)), vsmin_new_before)
+  call max_all_cr(maxval(model_vs_new(:,:,:,1:nspec)), vsmax_new_before)
+  call min_all_cr(minval(model_vp_new(:,:,:,1:nspec)), vpmin_new_before)
+  call max_all_cr(maxval(model_vp_new(:,:,:,1:nspec)), vpmax_new_before)
+  call min_all_cr(minval(model_rho_new(:,:,:,1:nspec)), rhomin_new_before)
+  call max_all_cr(maxval(model_rho_new(:,:,:,1:nspec)), rhomax_new_before)
 
   if( PRINT_OUT_FILES ) then
 !    if (myrank == 0) then
@@ -711,12 +703,12 @@ program model_update
 
   ! write out new models and their global minmax values
   ! compute minmax values of new model
-  call mpi_reduce(minval(model_vs_new(:,:,:,1:nspec)), vsmin_new_after, 1, CUSTOM_MPI_TYPE, MPI_MIN, 0, MPI_COMM_WORLD,ier)
-  call mpi_reduce(maxval(model_vs_new(:,:,:,1:nspec)), vsmax_new_after, 1, CUSTOM_MPI_TYPE, MPI_MAX, 0, MPI_COMM_WORLD,ier)
-  call mpi_reduce(minval(model_vp_new(:,:,:,1:nspec)), vpmin_new_after, 1, CUSTOM_MPI_TYPE, MPI_MIN, 0, MPI_COMM_WORLD,ier)
-  call mpi_reduce(maxval(model_vp_new(:,:,:,1:nspec)), vpmax_new_after, 1, CUSTOM_MPI_TYPE, MPI_MAX, 0, MPI_COMM_WORLD,ier)
-  call mpi_reduce(minval(model_rho_new(:,:,:,1:nspec)), rhomin_new_after,1,CUSTOM_MPI_TYPE, MPI_MIN, 0, MPI_COMM_WORLD,ier)
-  call mpi_reduce(maxval(model_rho_new(:,:,:,1:nspec)), rhomax_new_after,1,CUSTOM_MPI_TYPE, MPI_MAX, 0, MPI_COMM_WORLD,ier)
+  call min_all_cr(minval(model_vs_new(:,:,:,1:nspec)), vsmin_new_after)
+  call max_all_cr(maxval(model_vs_new(:,:,:,1:nspec)), vsmax_new_after)
+  call min_all_cr(minval(model_vp_new(:,:,:,1:nspec)), vpmin_new_after)
+  call max_all_cr(maxval(model_vp_new(:,:,:,1:nspec)), vpmax_new_after)
+  call min_all_cr(minval(model_rho_new(:,:,:,1:nspec)), rhomin_new_after)
+  call max_all_cr(maxval(model_rho_new(:,:,:,1:nspec)), rhomax_new_after)
 
 
   ! this should only be different if using MINMAX_THRESHOLD_NEW
@@ -1030,8 +1022,8 @@ program model_update
   open(12,file=trim(m_file),form='unformatted',action='write')
   write(12) model_vp_rel(:,:,:,1:nspec)
   close(12)
-  call mpi_reduce(maxval(model_vp_rel),max_vp,1,CUSTOM_MPI_TYPE,MPI_MAX,0,MPI_COMM_WORLD,ier)
-  call mpi_reduce(minval(model_vp_rel),min_vp,1,CUSTOM_MPI_TYPE,MPI_MIN,0,MPI_COMM_WORLD,ier)
+  call min_all_cr(minval(model_vp_rel), min_vp)
+  call max_all_cr(maxval(model_vp_rel), max_vp)
 
   ! relative S model perturbations
   where ( model_vs > 1.e-10 ) model_vs_rel = ( model_vs_new - model_vs) / model_vs
@@ -1039,8 +1031,8 @@ program model_update
   open(12,file=trim(m_file),form='unformatted',action='write')
   write(12) model_vs_rel(:,:,:,1:nspec)
   close(12)
-  call mpi_reduce(maxval(model_vs_rel),max_vs,1,CUSTOM_MPI_TYPE,MPI_MAX,0,MPI_COMM_WORLD,ier)
-  call mpi_reduce(minval(model_vs_rel),min_vs,1,CUSTOM_MPI_TYPE,MPI_MIN,0,MPI_COMM_WORLD,ier)
+  call min_all_cr(minval(model_vs_rel), min_vs)
+  call max_all_cr(maxval(model_vs_rel), max_vs)
 
   ! relative rho model perturbations
 !   where ( model_rho > 1.e-10 ) model_rho_rel = ( model_rho_new - model_rho) / model_rho
@@ -1048,8 +1040,8 @@ program model_update
 !   open(12,file=trim(m_file),form='unformatted',action='write')
 !   write(12) model_rho_rel(:,:,:,1:nspec)
 !   close(12)
-!   call mpi_reduce(maxval(model_rho_rel),max_rho,1,CUSTOM_MPI_TYPE,MPI_MAX,0,MPI_COMM_WORLD,ier)
-!   call mpi_reduce(minval(model_rho_rel),min_rho,1,CUSTOM_MPI_TYPE,MPI_MIN,0,MPI_COMM_WORLD,ier)
+!   call min_all_cr(minval(model_rho_rel), min_rho)
+!   call max_all_cr(maxval(model_rho_rel), max_rho)
 
 
   if( PRINT_OUT_FILES ) then
@@ -1144,7 +1136,7 @@ program model_update
 
   !-----------------------------------------------------
 
-  ! stop all the MPI processes, and exit
-  call MPI_FINALIZE(ier)
+  ! stop all the processes, and exit
+  call finalize()
 
 end program model_update
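
model_update broadcasts the chosen step length through bcast_all_cr(step_length, 1). As a final illustration of why the wrapper layer makes the serial build work, here is a hedged sketch of the two flavors that a single call site could resolve to at link time; the dummy-argument names and the bodies are assumptions, shown for the scalar, count-1 case used above.

    ! serial flavor: one process already holds the value, so this is a no-op
    subroutine bcast_all_cr(buffer, countval)
    implicit none
    include "constants.h"     ! assumed to define CUSTOM_REAL
    integer :: countval
    real(kind=CUSTOM_REAL) :: buffer
    end subroutine bcast_all_cr

    ! MPI flavor: broadcast the root's value to every other process
    subroutine bcast_all_cr(buffer, countval)
    use mpi
    implicit none
    include "constants.h"
    include "precision.h"     ! assumed to define CUSTOM_MPI_TYPE
    integer :: countval
    real(kind=CUSTOM_REAL) :: buffer
    integer :: ier
    call MPI_BCAST(buffer, countval, CUSTOM_MPI_TYPE, 0, MPI_COMM_WORLD, ier)
    end subroutine bcast_all_cr

Which flavor gets linked is decided purely by the Makefile rules above (via $(COND_MPI_OBJECTS) and $(MPILIBS)), so the Fortran sources no longer need "use mpi" at all.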


