[cig-commits] [commit] QA: Use MPI_STATUS_IGNORE in MPI calls. (45deed9)
cig_noreply at geodynamics.org
Mon Jan 20 11:49:35 PST 2014
Repository : ssh://geoshell/specfem2d
On branch : QA
Link : https://github.com/geodynamics/specfem2d/compare/28743f19b9f9fdb75d359c135053825c0ffd05b3...5e8aa55e68fd17b6f475fb65531b84195e497aa1
>---------------------------------------------------------------
commit 45deed9644a3d0e78631d92ec9d84898da5886d8
Author: Elliott Sales de Andrade <esalesde at physics.utoronto.ca>
Date: Mon Jan 6 20:21:02 2014 -0500
Use MPI_STATUS_IGNORE in MPI calls.
The status is not used, so this saves us an extra variable or two. As
suggested by @jedbrown in another PR.
>---------------------------------------------------------------
45deed9644a3d0e78631d92ec9d84898da5886d8
src/specfem2D/assemble_MPI.F90 | 18 +++++----------
src/specfem2D/checkgrid.F90 | 26 +++++++++------------
src/specfem2D/plotpost.F90 | 43 ++++++++++++++++-------------------
src/specfem2D/prepare_color_image.F90 | 5 ++--
src/specfem2D/specfem2D.F90 | 15 ++++++------
src/specfem2D/write_seismograms.F90 | 9 ++++----
6 files changed, 50 insertions(+), 66 deletions(-)
diff --git a/src/specfem2D/assemble_MPI.F90 b/src/specfem2D/assemble_MPI.F90
index e45da91..39f47be 100644
--- a/src/specfem2D/assemble_MPI.F90
+++ b/src/specfem2D/assemble_MPI.F90
@@ -100,7 +100,6 @@
2*max_ibool_interfaces_size_po, ninterface) :: &
buffer_send_faces_scalar, &
buffer_recv_faces_scalar
- integer, dimension(MPI_STATUS_SIZE) :: msg_status
integer, dimension(ninterface) :: msg_requests
buffer_send_faces_scalar(:,:) = 0.d0
@@ -162,7 +161,7 @@
2*nibool_interfaces_poroelastic(num_interface), &
MPI_DOUBLE_PRECISION, &
my_neighbours(num_interface), 11, &
- MPI_COMM_WORLD, msg_status(1), ier)
+ MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
ipoin = 0
do i = 1, nibool_interfaces_acoustic(num_interface)
@@ -255,7 +254,6 @@
! local parameters
integer :: ipoin, num_interface,iinterface,ier,iglob
- integer, dimension(MPI_STATUS_SIZE) :: status_acoustic
! initializes buffers
buffer_send_faces_vector_ac(:,:) = 0._CUSTOM_REAL
@@ -309,8 +307,8 @@
! waits for MPI requests to complete (recv)
! each wait returns once the specified MPI request completed
do iinterface = 1, ninterface_acoustic
- call MPI_Wait (tab_requests_send_recv_acoustic(ninterface_acoustic+iinterface), &
- status_acoustic, ier)
+ call MPI_Wait(tab_requests_send_recv_acoustic(ninterface_acoustic+iinterface), &
+ MPI_STATUS_IGNORE, ier)
enddo
! assembles the array values
@@ -332,7 +330,7 @@
! waits for MPI requests to complete (send)
! just to make sure that all sending is done
do iinterface = 1, ninterface_acoustic
- call MPI_Wait (tab_requests_send_recv_acoustic(iinterface), status_acoustic, ier)
+ call MPI_Wait(tab_requests_send_recv_acoustic(iinterface), MPI_STATUS_IGNORE, ier)
enddo
@@ -384,8 +382,6 @@
integer, dimension(ninterface), intent(in) :: my_neighbours
integer :: ipoin, num_interface, iinterface, ier, i
- integer, dimension(MPI_STATUS_SIZE) :: status_elastic
-
do iinterface = 1, ninterface_elastic
@@ -426,7 +422,7 @@
do iinterface = 1, ninterface_elastic*2
- call MPI_Wait (tab_requests_send_recv_elastic(iinterface), status_elastic, ier)
+ call MPI_Wait(tab_requests_send_recv_elastic(iinterface), MPI_STATUS_IGNORE, ier)
enddo
@@ -492,8 +488,6 @@
integer, dimension(ninterface), intent(in) :: my_neighbours
integer :: ipoin, num_interface, iinterface, ier, i
- integer, dimension(MPI_STATUS_SIZE) :: status_poroelastic
-
do iinterface = 1, ninterface_poroelastic
@@ -559,7 +553,7 @@
do iinterface = 1, ninterface_poroelastic*4
- call MPI_Wait (tab_requests_send_recv_poro(iinterface), status_poroelastic, ier)
+ call MPI_Wait (tab_requests_send_recv_poro(iinterface), MPI_STATUS_IGNORE, ier)
enddo
diff --git a/src/specfem2D/checkgrid.F90 b/src/specfem2D/checkgrid.F90
index 4e990ec..aaf9a70 100644
--- a/src/specfem2D/checkgrid.F90
+++ b/src/specfem2D/checkgrid.F90
@@ -166,10 +166,6 @@
logical :: create_wavelength_histogram
double precision :: current_percent,total_percent
-#ifdef USE_MPI
- integer, dimension(MPI_STATUS_SIZE) :: request_mpi_status
-#endif
-
! check
if(UPPER_LIMIT_DISPLAY > nspec) &
call exit_MPI('cannot have UPPER_LIMIT_DISPLAY > nspec in checkgrid.F90')
@@ -1150,13 +1146,13 @@
do iproc = 1, nproc-1
call MPI_RECV (nspec_recv, 1, MPI_INTEGER, &
- iproc, 42, MPI_COMM_WORLD, request_mpi_status, ier)
+ iproc, 42, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
allocate(coorg_recv(2,nspec_recv*5))
allocate(RGB_recv(nspec_recv))
call MPI_RECV (coorg_recv(1,1), nspec_recv*5*2, MPI_DOUBLE_PRECISION, &
- iproc, 42, MPI_COMM_WORLD, request_mpi_status, ier)
+ iproc, 42, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
call MPI_RECV (RGB_recv(1), nspec_recv, MPI_INTEGER, &
- iproc, 42, MPI_COMM_WORLD, request_mpi_status, ier)
+ iproc, 42, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
do ispec = 1, nspec_recv
num_ispec = num_ispec + 1
@@ -1568,13 +1564,13 @@
do iproc = 1, nproc-1
call MPI_RECV (nspec_recv, 1, MPI_INTEGER, &
- iproc, 42, MPI_COMM_WORLD, request_mpi_status, ier)
+ iproc, 42, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
allocate(coorg_recv(2,nspec_recv*5))
allocate(RGB_recv(nspec_recv))
call MPI_RECV (coorg_recv(1,1), nspec_recv*5*2, MPI_DOUBLE_PRECISION, &
- iproc, 42, MPI_COMM_WORLD, request_mpi_status, ier)
+ iproc, 42, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
call MPI_RECV (RGB_recv(1), nspec_recv, MPI_INTEGER, &
- iproc, 42, MPI_COMM_WORLD, request_mpi_status, ier)
+ iproc, 42, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
do ispec = 1, nspec_recv
num_ispec = num_ispec + 1
@@ -1883,13 +1879,13 @@ endif
do iproc = 1, nproc-1
call MPI_RECV (nspec_recv, 1, MPI_INTEGER, &
- iproc, 42, MPI_COMM_WORLD, request_mpi_status, ier)
+ iproc, 42, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
allocate(coorg_recv(2,nspec_recv*5))
allocate(greyscale_recv(nspec_recv))
call MPI_RECV (coorg_recv(1,1), nspec_recv*5*2, MPI_DOUBLE_PRECISION, &
- iproc, 42, MPI_COMM_WORLD, request_mpi_status, ier)
+ iproc, 42, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
call MPI_RECV (greyscale_recv(1), nspec_recv, MPI_REAL, &
- iproc, 42, MPI_COMM_WORLD, request_mpi_status, ier)
+ iproc, 42, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
do ispec = 1, nspec_recv
num_ispec = num_ispec + 1
@@ -2144,10 +2140,10 @@ endif
icol = mod(iproc, NUM_COLORS) + 1
call MPI_RECV (nspec_recv, 1, MPI_INTEGER, &
- iproc, 42, MPI_COMM_WORLD, request_mpi_status, ier)
+ iproc, 42, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
allocate(coorg_recv(2,nspec_recv*5))
call MPI_RECV (coorg_recv(1,1), nspec_recv*5*2, MPI_DOUBLE_PRECISION, &
- iproc, 42, MPI_COMM_WORLD, request_mpi_status, ier)
+ iproc, 42, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
do ispec = 1, nspec_recv
num_ispec = num_ispec + 1
diff --git a/src/specfem2D/plotpost.F90 b/src/specfem2D/plotpost.F90
index cae7384..1b87e60 100644
--- a/src/specfem2D/plotpost.F90
+++ b/src/specfem2D/plotpost.F90
@@ -180,9 +180,6 @@
integer :: ier
logical :: anyabs_glob, coupled_acoustic_elastic_glob, coupled_acoustic_poro_glob, &
coupled_elastic_poro_glob
-#ifdef USE_MPI
- integer, dimension(MPI_STATUS_SIZE) :: request_mpi_status
-#endif
integer :: myrank, nproc
! plotpost arrays for postscript output
@@ -1798,13 +1795,13 @@ coorg_recv_ps_vector_field
if (myrank == 0 ) then
do iproc = 1, nproc-1
- call MPI_RECV (nspec_recv, 1, MPI_INTEGER, iproc, 42, MPI_COMM_WORLD, request_mpi_status, ier)
+ call MPI_RECV (nspec_recv, 1, MPI_INTEGER, iproc, 42, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
call MPI_RECV (coorg_recv_ps_velocity_model(1,1), &
2*nspec_recv*((NGLLX-subsamp_postscript)/subsamp_postscript)*((NGLLX-subsamp_postscript)/subsamp_postscript)*4, &
- MPI_DOUBLE_PRECISION, iproc, 42, MPI_COMM_WORLD, request_mpi_status, ier)
+ MPI_DOUBLE_PRECISION, iproc, 42, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
call MPI_RECV (RGB_recv_ps_velocity_model(1,1), nspec_recv*((NGLLX-subsamp_postscript)/subsamp_postscript)* &
((NGLLX-subsamp_postscript)/subsamp_postscript), &
- MPI_DOUBLE_PRECISION, iproc, 42, MPI_COMM_WORLD, request_mpi_status, ier)
+ MPI_DOUBLE_PRECISION, iproc, 42, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
buffer_offset = 0
RGB_offset = 0
@@ -2084,7 +2081,7 @@ coorg_recv_ps_vector_field
if (myrank == 0 ) then
do iproc = 1, nproc-1
- call MPI_RECV (nspec_recv, 1, MPI_INTEGER, iproc, 43, MPI_COMM_WORLD, request_mpi_status, ier)
+ call MPI_RECV (nspec_recv, 1, MPI_INTEGER, iproc, 43, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
nb_coorg_per_elem = 1
if ( numbers == 1 ) then
nb_coorg_per_elem = nb_coorg_per_elem + 1
@@ -2103,9 +2100,9 @@ coorg_recv_ps_vector_field
endif
call MPI_RECV (coorg_recv_ps_element_mesh(1,1), 2*nspec_recv*nb_coorg_per_elem, &
- MPI_DOUBLE_PRECISION, iproc, 43, MPI_COMM_WORLD, request_mpi_status, ier)
+ MPI_DOUBLE_PRECISION, iproc, 43, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
call MPI_RECV (color_recv_ps_element_mesh(1), nspec_recv*nb_coorg_per_elem, &
- MPI_INTEGER, iproc, 43, MPI_COMM_WORLD, request_mpi_status, ier)
+ MPI_INTEGER, iproc, 43, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
buffer_offset = 0
RGB_offset = 0
@@ -2296,10 +2293,10 @@ coorg_recv_ps_vector_field
if (myrank == 0 ) then
do iproc = 1, nproc-1
- call MPI_RECV (nspec_recv, 1, MPI_INTEGER, iproc, 44, MPI_COMM_WORLD, request_mpi_status, ier)
+ call MPI_RECV (nspec_recv, 1, MPI_INTEGER, iproc, 44, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
if ( nspec_recv > 0 ) then
call MPI_RECV (coorg_recv_ps_abs(1,1), 4*nspec_recv, &
- MPI_DOUBLE_PRECISION, iproc, 44, MPI_COMM_WORLD, request_mpi_status, ier)
+ MPI_DOUBLE_PRECISION, iproc, 44, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
buffer_offset = 0
do ispec = 1, nspec_recv
@@ -2373,10 +2370,10 @@ coorg_recv_ps_vector_field
if (myrank == 0 ) then
do iproc = 1, nproc-1
- call MPI_RECV (nspec_recv, 1, MPI_INTEGER, iproc, 44, MPI_COMM_WORLD, request_mpi_status, ier)
+ call MPI_RECV (nspec_recv, 1, MPI_INTEGER, iproc, 44, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
if ( nspec_recv > 0 ) then
call MPI_RECV (coorg_recv_ps_free_surface(1,1), 4*nspec_recv, &
- MPI_DOUBLE_PRECISION, iproc, 44, MPI_COMM_WORLD, request_mpi_status, ier)
+ MPI_DOUBLE_PRECISION, iproc, 44, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
buffer_offset = 0
do ispec = 1, nspec_recv
@@ -2473,11 +2470,11 @@ coorg_recv_ps_vector_field
if (myrank == 0 ) then
do iproc = 1, nproc-1
- call MPI_RECV (nspec_recv, 1, MPI_INTEGER, iproc, 45, MPI_COMM_WORLD, request_mpi_status, ier)
+ call MPI_RECV (nspec_recv, 1, MPI_INTEGER, iproc, 45, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
if ( nspec_recv > 0 ) then
allocate(coorg_recv(4,nspec_recv))
call MPI_RECV (coorg_recv(1,1), 4*nspec_recv, &
- MPI_DOUBLE_PRECISION, iproc, 45, MPI_COMM_WORLD, request_mpi_status, ier)
+ MPI_DOUBLE_PRECISION, iproc, 45, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
buffer_offset = 0
do ispec = 1, nspec_recv
@@ -2578,11 +2575,11 @@ coorg_recv_ps_vector_field
if (myrank == 0 ) then
do iproc = 1, nproc-1
- call MPI_RECV (nspec_recv, 1, MPI_INTEGER, iproc, 45, MPI_COMM_WORLD, request_mpi_status, ier)
+ call MPI_RECV (nspec_recv, 1, MPI_INTEGER, iproc, 45, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
if ( nspec_recv > 0 ) then
allocate(coorg_recv(4,nspec_recv))
call MPI_RECV (coorg_recv(1,1), 4*nspec_recv, &
- MPI_DOUBLE_PRECISION, iproc, 45, MPI_COMM_WORLD, request_mpi_status, ier)
+ MPI_DOUBLE_PRECISION, iproc, 45, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
buffer_offset = 0
do ispec = 1, nspec_recv
@@ -2683,11 +2680,11 @@ coorg_recv_ps_vector_field
if (myrank == 0 ) then
do iproc = 1, nproc-1
- call MPI_RECV (nspec_recv, 1, MPI_INTEGER, iproc, 45, MPI_COMM_WORLD, request_mpi_status, ier)
+ call MPI_RECV (nspec_recv, 1, MPI_INTEGER, iproc, 45, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
if ( nspec_recv > 0 ) then
allocate(coorg_recv(4,nspec_recv))
call MPI_RECV (coorg_recv(1,1), 4*nspec_recv, &
- MPI_DOUBLE_PRECISION, iproc, 45, MPI_COMM_WORLD, request_mpi_status, ier)
+ MPI_DOUBLE_PRECISION, iproc, 45, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
buffer_offset = 0
do ispec = 1, nspec_recv
@@ -2865,10 +2862,10 @@ coorg_recv_ps_vector_field
if (myrank == 0 ) then
do iproc = 1, nproc-1
- call MPI_RECV (nspec_recv, 1, MPI_INTEGER, iproc, 46, MPI_COMM_WORLD, request_mpi_status, ier)
+ call MPI_RECV (nspec_recv, 1, MPI_INTEGER, iproc, 46, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
if ( nspec_recv > 0 ) then
call MPI_RECV (coorg_recv_ps_vector_field(1,1), 8*nspec_recv, &
- MPI_DOUBLE_PRECISION, iproc, 46, MPI_COMM_WORLD, request_mpi_status, ier)
+ MPI_DOUBLE_PRECISION, iproc, 46, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
buffer_offset = 0
do ispec = 1, nspec_recv
@@ -2996,10 +2993,10 @@ coorg_recv_ps_vector_field
if (myrank == 0 ) then
do iproc = 1, nproc-1
- call MPI_RECV (nspec_recv, 1, MPI_INTEGER, iproc, 47, MPI_COMM_WORLD, request_mpi_status, ier)
+ call MPI_RECV (nspec_recv, 1, MPI_INTEGER, iproc, 47, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
if ( nspec_recv > 0 ) then
call MPI_RECV (coorg_recv_ps_vector_field(1,1), 8*nspec_recv, &
- MPI_DOUBLE_PRECISION, iproc, 47, MPI_COMM_WORLD, request_mpi_status, ier)
+ MPI_DOUBLE_PRECISION, iproc, 47, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
buffer_offset = 0
do ispec = 1, nspec_recv
diff --git a/src/specfem2D/prepare_color_image.F90 b/src/specfem2D/prepare_color_image.F90
index e9cfe85..f2e090b 100644
--- a/src/specfem2D/prepare_color_image.F90
+++ b/src/specfem2D/prepare_color_image.F90
@@ -344,7 +344,6 @@
double precision, dimension(:), allocatable :: data_pixel_send
integer, dimension(:,:), allocatable :: num_pixel_recv
integer, dimension(:), allocatable :: nb_pixel_per_proc
- integer, dimension(MPI_STATUS_SIZE) :: request_mpi_status
integer :: ier,iproc
#else
integer :: dummy
@@ -483,10 +482,10 @@
do iproc = 1, nproc-1
call MPI_RECV(num_pixel_recv(1,iproc+1),nb_pixel_per_proc(iproc+1), MPI_INTEGER, &
- iproc, 42, MPI_COMM_WORLD, request_mpi_status, ier)
+ iproc, 42, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
call MPI_RECV(data_pixel_recv(1),nb_pixel_per_proc(iproc+1), MPI_DOUBLE_PRECISION, &
- iproc, 43, MPI_COMM_WORLD, request_mpi_status, ier)
+ iproc, 43, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
do k = 1, nb_pixel_per_proc(iproc+1)
j = ceiling(real(num_pixel_recv(k,iproc+1)) / real(NX_IMAGE_color))
diff --git a/src/specfem2D/specfem2D.F90 b/src/specfem2D/specfem2D.F90
index b30c192..88d2c91 100644
--- a/src/specfem2D/specfem2D.F90
+++ b/src/specfem2D/specfem2D.F90
@@ -734,7 +734,6 @@
character(len=150) :: wavefield_file
#ifdef USE_MPI
- integer, dimension(MPI_STATUS_SIZE) :: request_mpi_status
integer, dimension(:), allocatable :: nb_pixel_per_proc
integer, dimension(:,:), allocatable :: num_pixel_recv
double precision, dimension(:), allocatable :: data_pixel_recv
@@ -2440,9 +2439,9 @@
else if ( myrank == 0 ) then
do i = 1, nb_proc_source(i_source) - is_proc_source(i_source)
call MPI_recv(source_courbe_eros(i_source),1,MPI_INTEGER, &
- MPI_ANY_SOURCE,42,MPI_COMM_WORLD,request_mpi_status,ier)
+ MPI_ANY_SOURCE,42,MPI_COMM_WORLD,MPI_STATUS_IGNORE,ier)
call MPI_recv(anglesource_recv,1,MPI_DOUBLE_PRECISION, &
- MPI_ANY_SOURCE,43,MPI_COMM_WORLD,request_mpi_status,ier)
+ MPI_ANY_SOURCE,43,MPI_COMM_WORLD,MPI_STATUS_IGNORE,ier)
enddo
else if ( is_proc_source(i_source) == 1 ) then
call MPI_send(n1_tangential_detection_curve,1,MPI_INTEGER,0,42,MPI_COMM_WORLD,ier)
@@ -2515,11 +2514,11 @@
else
call MPI_RECV(n1_tangential_detection_curve,1,MPI_INTEGER,&
- which_proc_receiver(irec),irec,MPI_COMM_WORLD,request_mpi_status,ier)
+ which_proc_receiver(irec),irec,MPI_COMM_WORLD,MPI_STATUS_IGNORE,ier)
call MPI_RECV(x_final_receiver_dummy,1,MPI_DOUBLE_PRECISION,&
- which_proc_receiver(irec),irec,MPI_COMM_WORLD,request_mpi_status,ier)
+ which_proc_receiver(irec),irec,MPI_COMM_WORLD,MPI_STATUS_IGNORE,ier)
call MPI_RECV(z_final_receiver_dummy,1,MPI_DOUBLE_PRECISION,&
- which_proc_receiver(irec),irec,MPI_COMM_WORLD,request_mpi_status,ier)
+ which_proc_receiver(irec),irec,MPI_COMM_WORLD,MPI_STATUS_IGNORE,ier)
#endif
endif
@@ -3582,7 +3581,7 @@
do iproc = 1, nproc-1
call MPI_RECV(num_pixel_recv(1,iproc+1),nb_pixel_per_proc(iproc+1), MPI_INTEGER, &
- iproc, 42, MPI_COMM_WORLD, request_mpi_status, ier)
+ iproc, 42, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
do k = 1, nb_pixel_per_proc(iproc+1)
j = ceiling(real(num_pixel_recv(k,iproc+1)) / real(NX_IMAGE_color))
i = num_pixel_recv(k,iproc+1) - (j-1)*NX_IMAGE_color
@@ -8638,7 +8637,7 @@ if(coupled_elastic_poro) then
if (myrank == 0) then
do iproc = 1, nproc-1
call MPI_RECV(data_pixel_recv(1),nb_pixel_per_proc(iproc+1), MPI_DOUBLE_PRECISION, &
- iproc, 43, MPI_COMM_WORLD, request_mpi_status, ier)
+ iproc, 43, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ier)
do k = 1, nb_pixel_per_proc(iproc+1)
j = ceiling(real(num_pixel_recv(k,iproc+1)) / real(NX_IMAGE_color))
diff --git a/src/specfem2D/write_seismograms.F90 b/src/specfem2D/write_seismograms.F90
index f6a2c60..e12e044 100644
--- a/src/specfem2D/write_seismograms.F90
+++ b/src/specfem2D/write_seismograms.F90
@@ -104,7 +104,6 @@
#ifdef USE_MPI
integer :: ierror
- integer, dimension(MPI_STATUS_SIZE) :: status
#endif
!----
@@ -233,16 +232,16 @@
#ifdef USE_MPI
else
call MPI_RECV(buffer_binary(1,1),NSTEP_BETWEEN_OUTPUT_SEISMOS/subsamp_seismos,MPI_DOUBLE_PRECISION,&
- which_proc_receiver(irec),irec,MPI_COMM_WORLD,status,ierror)
+ which_proc_receiver(irec),irec,MPI_COMM_WORLD,MPI_STATUS_IGNORE,ierror)
if ( number_of_components == 2 ) then
call MPI_RECV(buffer_binary(1,2),NSTEP_BETWEEN_OUTPUT_SEISMOS/subsamp_seismos,MPI_DOUBLE_PRECISION,&
- which_proc_receiver(irec),irec,MPI_COMM_WORLD,status,ierror)
+ which_proc_receiver(irec),irec,MPI_COMM_WORLD,MPI_STATUS_IGNORE,ierror)
endif
if ( number_of_components == 3 ) then
call MPI_RECV(buffer_binary(1,2),NSTEP_BETWEEN_OUTPUT_SEISMOS/subsamp_seismos,MPI_DOUBLE_PRECISION,&
- which_proc_receiver(irec),irec,MPI_COMM_WORLD,status,ierror)
+ which_proc_receiver(irec),irec,MPI_COMM_WORLD,MPI_STATUS_IGNORE,ierror)
call MPI_RECV(buffer_binary(1,3),NSTEP_BETWEEN_OUTPUT_SEISMOS/subsamp_seismos,MPI_DOUBLE_PRECISION,&
- which_proc_receiver(irec),irec,MPI_COMM_WORLD,status,ierror)
+ which_proc_receiver(irec),irec,MPI_COMM_WORLD,MPI_STATUS_IGNORE,ierror)
endif
#endif
endif
More information about the CIG-COMMITS mailing list