[cig-commits] [commit] Hiro_latest: Change MPI communicator in solver_SRs (778087d)

cig_noreply at geodynamics.org
Mon Nov 18 16:20:37 PST 2013


Repository : ssh://geoshell/calypso

On branch  : Hiro_latest
Link       : https://github.com/geodynamics/calypso/compare/93e9f8f974c7a247c8f02e54ec18de063f86c8fb...3c548304673360ddedd7d68c8095b3fb74a2b9ce

>---------------------------------------------------------------

commit 778087dfcaa363a75962672b9b0b7a291734a81c
Author: Hiroaki Matsui <h_kemono at mac.com>
Date:   Fri Sep 27 21:34:08 2013 -0700

    Change MPI communicator in solver_SRs


>---------------------------------------------------------------

778087dfcaa363a75962672b9b0b7a291734a81c
 .../PARALLEL_src/COMM_src/hdf5_file_IO.F90         |  1 +
 .../PARALLEL_src/COMM_src/m_merged_ucd_data.f90    |  5 ++-
 .../PARALLEL_src/COMM_src/m_work_time.f90          |  1 +
 .../COMM_src/nodal_vector_send_recv.f90            |  8 ++--
 .../PARALLEL_src/COMM_src/solver_SR.f90            | 40 +++++++-----------
 .../PARALLEL_src/COMM_src/solver_SR_3.f90          | 49 +++++++++-------------
 .../PARALLEL_src/COMM_src/solver_SR_6.f90          | 25 ++++-------
 .../PARALLEL_src/COMM_src/solver_SR_N.f90          | 30 ++++---------
 .../PARALLEL_src/COMM_src/solver_SR_int.f90        | 21 +++-------
 .../SPH_SHELL_src/sum_sph_rms_data.f90             |  1 +
 10 files changed, 65 insertions(+), 116 deletions(-)
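
In short, the solver send/receive routines now obtain the communicator
from the shared calypso_mpi module (CALYPSO_COMM) instead of taking
SOLVER_COMM and my_rank as dummy arguments. A minimal before/after
sketch of a call site, using the variable names from the callers
updated below:

      ! before: communicator and rank passed explicitly
      call SOLVER_SEND_RECV(numnod, num_neib, id_neib,                  &
     &                      istack_import, item_import,                 &
     &                      istack_export, item_export,                 &
     &                      x_vec(1), SOLVER_COMM, my_rank)

      ! after: CALYPSO_COMM is picked up via "use calypso_mpi"
      call SOLVER_SEND_RECV(numnod, num_neib, id_neib,                  &
     &                      istack_import, item_import,                 &
     &                      istack_export, item_export, x_vec(1) )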

diff --git a/src/Fortran_libraries/PARALLEL_src/COMM_src/hdf5_file_IO.F90 b/src/Fortran_libraries/PARALLEL_src/COMM_src/hdf5_file_IO.F90
index 4d69b14..b6d4ad2 100644
--- a/src/Fortran_libraries/PARALLEL_src/COMM_src/hdf5_file_IO.F90
+++ b/src/Fortran_libraries/PARALLEL_src/COMM_src/hdf5_file_IO.F90
@@ -23,6 +23,7 @@
       module hdf5_file_IO
 !
       use m_precision
+      use calypso_mpi
       use m_constants
       use m_ucd_data
       use m_parallel_var_dof
diff --git a/src/Fortran_libraries/PARALLEL_src/COMM_src/m_merged_ucd_data.f90 b/src/Fortran_libraries/PARALLEL_src/COMM_src/m_merged_ucd_data.f90
index 9c35aba..2dbeeb8 100644
--- a/src/Fortran_libraries/PARALLEL_src/COMM_src/m_merged_ucd_data.f90
+++ b/src/Fortran_libraries/PARALLEL_src/COMM_src/m_merged_ucd_data.f90
@@ -17,6 +17,7 @@
 !
       use m_precision
       use m_constants
+      use calypso_mpi
       use m_parallel_var_dof
 !
       implicit none
@@ -197,10 +198,10 @@
 !
       call solver_send_recv_i(nnod_ucd_local, NEIBPETOT, NEIBPE,        &
      &            STACK_IMPORT, NOD_IMPORT, STACK_EXPORT, NOD_EXPORT,   &
-     &            inod_local_ucd, SOLVER_COMM, my_rank)
+     &            inod_local_ucd)
       call solver_send_recv_i(nnod_ucd_local, NEIBPETOT, NEIBPE,        &
      &            STACK_IMPORT, NOD_IMPORT, STACK_EXPORT, NOD_EXPORT,   &
-     &            ihome_pe_ucd, SOLVER_COMM, my_rank)
+     &            ihome_pe_ucd)
 !
       end subroutine set_node_double_address
 !
diff --git a/src/Fortran_libraries/PARALLEL_src/COMM_src/m_work_time.f90 b/src/Fortran_libraries/PARALLEL_src/COMM_src/m_work_time.f90
index a5d3bda..bf16d94 100644
--- a/src/Fortran_libraries/PARALLEL_src/COMM_src/m_work_time.f90
+++ b/src/Fortran_libraries/PARALLEL_src/COMM_src/m_work_time.f90
@@ -120,6 +120,7 @@
 !
       subroutine output_elapsed_times
 !
+      use calypso_mpi
       use m_parallel_var_dof
 !
       integer(kind = kint) :: i
diff --git a/src/Fortran_libraries/PARALLEL_src/COMM_src/nodal_vector_send_recv.f90 b/src/Fortran_libraries/PARALLEL_src/COMM_src/nodal_vector_send_recv.f90
index 82d3194..ebd5aed 100644
--- a/src/Fortran_libraries/PARALLEL_src/COMM_src/nodal_vector_send_recv.f90
+++ b/src/Fortran_libraries/PARALLEL_src/COMM_src/nodal_vector_send_recv.f90
@@ -60,8 +60,7 @@
       START_TIME= MPI_WTIME()
       call SOLVER_SEND_RECV(numnod, num_neib, id_neib,                  &
      &                      istack_import, item_import,                 &
-     &                      istack_export, item_export,                 &
-     &                      x_vec(1), SOLVER_COMM, my_rank )
+     &                      istack_export, item_export, x_vec(1) )
       END_TIME= MPI_WTIME()
       COMMtime = COMMtime + END_TIME - START_TIME
 !
@@ -94,8 +93,7 @@
       START_TIME= MPI_WTIME()
       call SOLVER_SEND_RECV_3(numnod, num_neib, id_neib,                &
      &                        istack_import, item_import,               &
-     &                        istack_export, item_export,               &
-     &                        x_vec(1), SOLVER_COMM, my_rank )
+     &                        istack_export, item_export, x_vec(1) )
       END_TIME= MPI_WTIME()
       COMMtime = COMMtime + END_TIME - START_TIME
 !
@@ -134,7 +132,7 @@
       call SOLVER_SEND_RECV_6(numnod, num_neib, id_neib,                &
      &                        istack_import, item_import,               &
      &                        istack_export, item_export,               &
-     &                        x_vec(1), SOLVER_COMM, my_rank )
+     &                        x_vec(1))
       END_TIME= MPI_WTIME()
       COMMtime = COMMtime + END_TIME - START_TIME
 !
diff --git a/src/Fortran_libraries/PARALLEL_src/COMM_src/solver_SR.f90 b/src/Fortran_libraries/PARALLEL_src/COMM_src/solver_SR.f90
index 726a3a2..3fe0f69 100644
--- a/src/Fortran_libraries/PARALLEL_src/COMM_src/solver_SR.f90
+++ b/src/Fortran_libraries/PARALLEL_src/COMM_src/solver_SR.f90
@@ -4,19 +4,19 @@
 !!@author coded by K.Nakajima (RIST)
 !!@date coded by K.Nakajima (RIST) on jul. 1999 (ver 1.0)
 !!@n    modified by H. Matsui (U. of Chicago) on july 2007 (ver 1.1)
+!!@n    modified by H. Matsui (UC Davis) on Sep. 2013 (ver 1.2)
 !
 !>@brief  MPI SEND and RECEIVE routine for scalar fields
 !!        in overlapped partitioning
 !!
 !!@verbatim
 !!      subroutine  SOLVER_SEND_RECV                                    &
-!!     &                (N, NEIBPETOT, NEIBPE, STACK_IMPORT, NOD_IMPORT,&
-!!     &                                       STACK_EXPORT, NOD_EXPORT,&
-!!     &                 X, SOLVER_COMM,my_rank)
+!!     &          (N, NEIBPETOT, NEIBPE, STACK_IMPORT, NOD_IMPORT,      &
+!!     &                                 STACK_EXPORT, NOD_EXPORT, X)
 !!      subroutine  SOLVER_SEND_RECVx3                                  &
-!!     &                (N, NEIBPETOT, NEIBPE, STACK_IMPORT, NOD_IMPORT,&
-!!     &                                       STACK_EXPORT, NOD_EXPORT,&
-!!     &                 X1, X2, X3, SOLVER_COMM,my_rank)
+!!     &          (N, NEIBPETOT, NEIBPE, STACK_IMPORT, NOD_IMPORT,      &
+!!     &                                 STACK_EXPORT, NOD_EXPORT,      &
+!!     &           X1, X2, X3)
 !!@endverbatim
 !!
 !!@n @param  N     Number of data points
@@ -36,14 +36,12 @@
 !!@n @param  X1(N)  1st scalar field data
 !!@n @param  X2(N)  2nd scalar field data
 !!@n @param  X3(N)  3rd scalar field data
-!!
-!!@n @param  SOLVER_COMM      MPI communicator
-!!@n @param  my_rank          own process rank
 !
       module solver_SR
 !
       use m_precision
       use m_constants
+      use calypso_mpi
 !
       implicit none
 !
@@ -56,11 +54,9 @@
 !C*** SOLVER_SEND_RECV
 !C
       subroutine  SOLVER_SEND_RECV                                      &
-     &                ( N, NEIBPETOT, NEIBPE, STACK_IMPORT, NOD_IMPORT, &
-     &                                        STACK_EXPORT, NOD_EXPORT, &
-     &                  X, SOLVER_COMM,my_rank)
+     &         ( N, NEIBPETOT, NEIBPE, STACK_IMPORT, NOD_IMPORT,        &
+     &                                 STACK_EXPORT, NOD_EXPORT, X)
 
-      use calypso_mpi
       use m_solver_SR
 !
 !>       number of nodes
@@ -81,11 +77,6 @@
      &        :: NOD_EXPORT
 !>       communicated result vector
       real   (kind=kreal), dimension(N)  , intent(inout):: X
-!>       communicator for mpi
-      integer                            , intent(in)   ::SOLVER_COMM
-!>       Own process
-      integer                            , intent(in)   :: my_rank
-!>
 !
       integer (kind = kint) :: neib, istart, inum, iend, ierr, k
 !
@@ -104,7 +95,7 @@
         istart= STACK_EXPORT(neib-1) + 1
         inum  = STACK_EXPORT(neib  ) - STACK_EXPORT(neib-1)
         call MPI_ISEND (WS(istart), inum, MPI_DOUBLE_PRECISION,         &
-     &                  NEIBPE(neib), 0, SOLVER_COMM,                   &
+     &                  NEIBPE(neib), 0, CALYPSO_COMM,                  &
      &                  req1(neib), ierr)
       enddo
 
@@ -115,7 +106,7 @@
         istart= STACK_IMPORT(neib-1) + 1
         inum  = STACK_IMPORT(neib  ) - STACK_IMPORT(neib-1)
         call MPI_IRECV (WR(istart), inum, MPI_DOUBLE_PRECISION,         &
-     &                  NEIBPE(neib), 0, SOLVER_COMM,                   &
+     &                  NEIBPE(neib), 0, CALYPSO_COMM,                  &
      &                  req2(neib), ierr)
       enddo
 
@@ -138,9 +129,8 @@
       subroutine  SOLVER_SEND_RECVx3                                    &
      &                ( N, NEIBPETOT, NEIBPE, STACK_IMPORT, NOD_IMPORT, &
      &                                        STACK_EXPORT, NOD_EXPORT, &
-     &                  X1,  X2, X3, SOLVER_COMM,my_rank)
+     &                  X1,  X2, X3)
 
-      use calypso_mpi
       use m_solver_SR
 !
       integer(kind=kint )                , intent(in)   ::  N
@@ -155,8 +145,6 @@
       real   (kind=kreal), dimension(N)  , intent(inout):: X1
       real   (kind=kreal), dimension(N)  , intent(inout):: X2
       real   (kind=kreal), dimension(N)  , intent(inout):: X3
-      integer                            , intent(in)   ::SOLVER_COMM
-      integer                            , intent(in)   :: my_rank
 !C
 !
       integer (kind = kint) :: neib, istart, inum, iend, ierr, k
@@ -179,7 +167,7 @@
         istart= 3 *   STACK_EXPORT(neib-1) + 1
         inum  = 3 * ( STACK_EXPORT(neib  ) - STACK_EXPORT(neib-1) )
         call MPI_ISEND (WS(istart), inum, MPI_DOUBLE_PRECISION,         &
-     &                  NEIBPE(neib), 0, SOLVER_COMM,                   &
+     &                  NEIBPE(neib), 0, CALYPSO_COMM,                  &
      &                  req1(neib), ierr)
       enddo
 
@@ -190,7 +178,7 @@
         istart= 3 *   STACK_IMPORT(neib-1) + 1
         inum  = 3 * ( STACK_IMPORT(neib  ) - STACK_IMPORT(neib-1) )
         call MPI_IRECV (WR(istart), inum, MPI_DOUBLE_PRECISION,         &
-     &                  NEIBPE(neib), 0, SOLVER_COMM,                   &
+     &                  NEIBPE(neib), 0, CALYPSO_COMM,                  &
      &                  req2(neib), ierr)
       enddo
 
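The calypso_mpi module itself is not touched by this commit, so only a
hypothetical sketch of what it is assumed to provide can be given here
(the real module may declare or initialize the communicator
differently):

      module calypso_mpi
!       hypothetical sketch; the actual module is not shown in this diff
        implicit none
        include 'mpif.h'
!       communicator shared by all solver send/recv routines,
!       assumed here to default to the world communicator
        integer :: CALYPSO_COMM = MPI_COMM_WORLD
      end module calypso_mpi
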
diff --git a/src/Fortran_libraries/PARALLEL_src/COMM_src/solver_SR_3.f90 b/src/Fortran_libraries/PARALLEL_src/COMM_src/solver_SR_3.f90
index 90634cc..48aedca 100644
--- a/src/Fortran_libraries/PARALLEL_src/COMM_src/solver_SR_3.f90
+++ b/src/Fortran_libraries/PARALLEL_src/COMM_src/solver_SR_3.f90
@@ -10,13 +10,12 @@
 !!
 !!@verbatim
 !!      subroutine  SOLVER_SEND_RECV_3                                  &
-!!     &                (N, NEIBPETOT, NEIBPE, STACK_IMPORT, NOD_IMPORT,&
-!!     &                                       STACK_EXPORT, NOD_EXPORT,&
-!!     &                 X, SOLVER_COMM,my_rank)
+!!     &          (N, NEIBPETOT, NEIBPE, STACK_IMPORT, NOD_IMPORT,      &
+!!     &                                 STACK_EXPORT, NOD_EXPORT, X)
 !!      subroutine  solver_send_recv_3x3                                &
-!!     &                (N, NEIBPETOT, NEIBPE, STACK_IMPORT, NOD_IMPORT,&
-!!     &                                       STACK_EXPORT, NOD_EXPORT,&
-!!     &                 X1, X2, X3, SOLVER_COMM,my_rank)
+!!     &          (N, NEIBPETOT, NEIBPE, STACK_IMPORT, NOD_IMPORT,      &
+!!     &                                 STACK_EXPORT, NOD_EXPORT,      &
+!!     &           X1, X2, X3)
 !!@endverbatim
 !!
 !!@n @param  N     Number of data points
@@ -36,9 +35,6 @@
 !!@n @param  X1(3*N)  1st vector field data
 !!@n @param  X2(3*N)  2nd vector field data
 !!@n @param  X3(3*N)  3rd vector field data
-!!
-!!@n @param  SOLVER_COMM      MPI communicator
-!!@n @param  my_rank          own process rank
 !
       module solver_SR_3
 !
@@ -54,12 +50,11 @@
 ! ----------------------------------------------------------------------
 !
 !C
-!C*** SOLVER_SEND_RECV
+!C*** SOLVER_SEND_RECV_3
 !C
       subroutine  SOLVER_SEND_RECV_3                                    &
-     &                ( N, NEIBPETOT, NEIBPE, STACK_IMPORT, NOD_IMPORT, &
-     &                                        STACK_EXPORT, NOD_EXPORT, &
-     &                  X, SOLVER_COMM,my_rank)
+     &         ( N, NEIBPETOT, NEIBPE, STACK_IMPORT, NOD_IMPORT,        &
+     &                                 STACK_EXPORT, NOD_EXPORT, X)
 
       use calypso_mpi
       use m_solver_SR
@@ -82,10 +77,6 @@
      &        :: NOD_EXPORT
 !>       communicated result vector
       real   (kind=kreal), dimension(3*N), intent(inout):: X
-!<       communicator for mpi
-      integer                            , intent(in)   ::SOLVER_COMM
-!>       Own process
-      integer                            , intent(in)   :: my_rank
 !
       integer (kind = kint) :: neib, istart, inum, iend, ierr, k, ii
 !
@@ -108,8 +99,8 @@
         enddo
         istart= 3 * STACK_EXPORT(neib-1) + 1
         inum  = 3 * ( STACK_EXPORT(neib  ) - STACK_EXPORT(neib-1) )
-        call MPI_ISEND (WS(istart), inum, MPI_DOUBLE_PRECISION,         &
-     &                  NEIBPE(neib), 0, SOLVER_COMM, req1(neib), ierr)
+        call MPI_ISEND(WS(istart), inum, MPI_DOUBLE_PRECISION,          &
+     &                 NEIBPE(neib), 0, CALYPSO_COMM, req1(neib), ierr)
       enddo
 
 !C
@@ -117,8 +108,8 @@
       do neib= 1, NEIBPETOT
         istart= 3 * STACK_IMPORT(neib-1) + 1
         inum  = 3 * ( STACK_IMPORT(neib  ) - STACK_IMPORT(neib-1) )
-        call MPI_IRECV (WR(istart), inum, MPI_DOUBLE_PRECISION,         &
-     &                  NEIBPE(neib), 0, SOLVER_COMM, req2(neib), ierr)
+        call MPI_IRECV(WR(istart), inum, MPI_DOUBLE_PRECISION,          &
+     &                 NEIBPE(neib), 0, CALYPSO_COMM, req2(neib), ierr)
       enddo
 
       call MPI_WAITALL (NEIBPETOT, req2(1), sta2(1,1), ierr)
@@ -141,9 +132,9 @@
 !  ---------------------------------------------------------------------
 !
       subroutine  solver_send_recv_3x3                                  &
-     &                ( N, NEIBPETOT, NEIBPE, STACK_IMPORT, NOD_IMPORT, &
-     &                                        STACK_EXPORT, NOD_EXPORT, &
-     &                  X1, X2, X3, SOLVER_COMM,my_rank)
+     &         ( N, NEIBPETOT, NEIBPE, STACK_IMPORT, NOD_IMPORT,        &
+     &                                 STACK_EXPORT, NOD_EXPORT,        &
+     &           X1, X2, X3)
 
       use calypso_mpi
       use m_solver_SR
@@ -160,8 +151,6 @@
       real   (kind=kreal), dimension(3*N), intent(inout):: X1
       real   (kind=kreal), dimension(3*N), intent(inout):: X2
       real   (kind=kreal), dimension(3*N), intent(inout):: X3
-      integer                            , intent(in)   ::SOLVER_COMM
-      integer                            , intent(in)   :: my_rank
 !
       integer (kind = kint) :: neib, istart, inum, iend, ierr, k, ii
 !
@@ -189,8 +178,8 @@
         enddo
         istart= 9 * STACK_EXPORT(neib-1) + 1
         inum  = 9 * (STACK_EXPORT(neib  ) - STACK_EXPORT(neib-1))
-        call MPI_ISEND (WS(istart), inum, MPI_DOUBLE_PRECISION,         &
-     &                  NEIBPE(neib), 0, SOLVER_COMM, req1(neib), ierr)
+        call MPI_ISEND(WS(istart), inum, MPI_DOUBLE_PRECISION,          &
+     &                 NEIBPE(neib), 0, CALYPSO_COMM, req1(neib), ierr)
       enddo
 
 !C
@@ -198,8 +187,8 @@
       do neib= 1, NEIBPETOT
         istart= 9 * STACK_IMPORT(neib-1) + 1
         inum  = 9 * ( STACK_IMPORT(neib  ) - STACK_IMPORT(neib-1) )
-        call MPI_IRECV (WR(istart), inum, MPI_DOUBLE_PRECISION,         &
-     &                  NEIBPE(neib), 0, SOLVER_COMM, req2(neib), ierr)
+        call MPI_IRECV(WR(istart), inum, MPI_DOUBLE_PRECISION,          &
+     &                 NEIBPE(neib), 0, CALYPSO_COMM, req2(neib), ierr)
       enddo
 
       call MPI_WAITALL (NEIBPETOT, req2(1), sta2(1,1), ierr)
diff --git a/src/Fortran_libraries/PARALLEL_src/COMM_src/solver_SR_6.f90 b/src/Fortran_libraries/PARALLEL_src/COMM_src/solver_SR_6.f90
index faef9db..bc9cea1 100644
--- a/src/Fortran_libraries/PARALLEL_src/COMM_src/solver_SR_6.f90
+++ b/src/Fortran_libraries/PARALLEL_src/COMM_src/solver_SR_6.f90
@@ -10,9 +10,8 @@
 !!
 !!@verbatim
 !!      subroutine  SOLVER_SEND_RECV_6                                  &
-!!     &                (N, NEIBPETOT, NEIBPE, STACK_IMPORT, NOD_IMPORT,&
-!!     &                                       STACK_EXPORT, NOD_EXPORT,&
-!!     &                 X, SOLVER_COMM,my_rank)
+!!     &          ( N, NEIBPETOT, NEIBPE, STACK_IMPORT, NOD_IMPORT,     &
+!!     &                                  STACK_EXPORT, NOD_EXPORT, X)
 !!@endverbatim
 !!
 !!@n @param  N     Number of data points
@@ -29,9 +28,6 @@
 !!                    local node ID to copy in export buffer
 !!
 !!@n @param  X(6*N)   field data with 6 components
-!!
-!!@n @param  SOLVER_COMM      MPI communicator
-!!@n @param  my_rank          own process rank
 !
       module solver_SR_6
 !
@@ -47,9 +43,8 @@
 ! ----------------------------------------------------------------------
 !C
       subroutine  SOLVER_SEND_RECV_6                                    &
-     &                ( N, NEIBPETOT, NEIBPE, STACK_IMPORT, NOD_IMPORT, &
-     &                                        STACK_EXPORT, NOD_EXPORT, &
-     &                  X, SOLVER_COMM,my_rank)
+     &          ( N, NEIBPETOT, NEIBPE, STACK_IMPORT, NOD_IMPORT,       &
+     &                                  STACK_EXPORT, NOD_EXPORT, X)
 !
       use calypso_mpi
       use m_solver_SR
@@ -72,10 +67,6 @@
      &        :: NOD_EXPORT
 !>       communicated result vector
       real   (kind=kreal), dimension(6*N), intent(inout):: X
-!>       communicator for mpi
-      integer                            , intent(in)   ::SOLVER_COMM
-!>       Process ID
-      integer                            , intent(in)   :: my_rank
 !
       integer (kind = kint) :: neib, istart, inum, ierr, k, ii
 !
@@ -98,8 +89,8 @@
            WS(6*k-1)= X(ii-1)
            WS(6*k  )= X(ii  )
         enddo
-        call MPI_ISEND (WS(6*istart+1), 6*inum,MPI_DOUBLE_PRECISION,    &
-     &                  NEIBPE(neib), 0, SOLVER_COMM, req1(neib), ierr)
+        call MPI_ISEND(WS(6*istart+1), 6*inum,MPI_DOUBLE_PRECISION,     &
+     &                 NEIBPE(neib), 0, CALYPSO_COMM, req1(neib), ierr)
       enddo
 
 !C
@@ -107,8 +98,8 @@
       do neib= 1, NEIBPETOT
         istart= STACK_IMPORT(neib-1)
         inum  = STACK_IMPORT(neib  ) - istart
-        call MPI_IRECV (WR(6*istart+1), 6*inum, MPI_DOUBLE_PRECISION,   &
-     &                  NEIBPE(neib), 0, SOLVER_COMM, req2(neib), ierr)
+        call MPI_IRECV(WR(6*istart+1), 6*inum, MPI_DOUBLE_PRECISION,    &
+     &                 NEIBPE(neib), 0, CALYPSO_COMM, req2(neib), ierr)
       enddo
 
       call MPI_WAITALL (NEIBPETOT, req2(1), sta2(1,1), ierr)
diff --git a/src/Fortran_libraries/PARALLEL_src/COMM_src/solver_SR_N.f90 b/src/Fortran_libraries/PARALLEL_src/COMM_src/solver_SR_N.f90
index ff31260..744675c 100644
--- a/src/Fortran_libraries/PARALLEL_src/COMM_src/solver_SR_N.f90
+++ b/src/Fortran_libraries/PARALLEL_src/COMM_src/solver_SR_N.f90
@@ -12,11 +12,11 @@
 !!      subroutine  SOLVER_SEND_RECV_N                                  &
 !!     &            (N, NB, NEIBPETOT, NEIBPE, STACK_IMPORT, NOD_IMPORT,&
 !!     &                                       STACK_EXPORT, NOD_EXPORT,&
-!!     &             X, SOLVER_COMM,my_rank)
+!!     &             X)
 !!      subroutine  SOLVER_SEND_RECV_Nx3                                &
 !!     &            (N, NB, NEIBPETOT, NEIBPE, STACK_IMPORT, NOD_IMPORT,&
 !!     &                                       STACK_EXPORT, NOD_EXPORT,&
-!!     &             X1, X2, X3, SOLVER_COMM,my_rank)
+!!     &             X1, X2, X3)
 !!@endverbatim
 !!
 !!@n @param  N     Number of data points
@@ -37,10 +37,6 @@
 !!@n @param  X1(NB*N)  1st field data with NB components
 !!@n @param  X2(NB*N)  2nd field data with NB components
 !!@n @param  X3(NB*N)  3rd field data with NB components
-!!
-!!@n @param  SOLVER_COMM      MPI communicator
-!!@n @param  my_rank          own process rank
-!
 !
       module solver_SR_N
 !
@@ -55,9 +51,8 @@
 ! ----------------------------------------------------------------------
 !
       subroutine  SOLVER_SEND_RECV_N                                    &
-     &            ( N, NB, NEIBPETOT, NEIBPE, STACK_IMPORT, NOD_IMPORT, &
-     &                                        STACK_EXPORT, NOD_EXPORT, &
-     &              X, SOLVER_COMM, my_rank)
+     &          (N, NB, NEIBPETOT, NEIBPE, STACK_IMPORT, NOD_IMPORT,    &
+     &                                     STACK_EXPORT, NOD_EXPORT, X)
 !
       use calypso_mpi
       use m_solver_SR
@@ -80,10 +75,6 @@
      &        :: NOD_EXPORT(STACK_EXPORT(NEIBPETOT))
 !>       communicated result vector
       real   (kind=kreal), intent(inout):: X(NB*N)
-!>       communicator for mpi
-      integer                            , intent(in)   ::SOLVER_COMM
-!>       Own process
-      integer                            , intent(in)   :: my_rank
 !
       integer (kind = kint) :: neib, istart, inum, iend
       integer (kind = kint) :: ierr, k, ii, ix, nd
@@ -109,7 +100,7 @@
         istart= NB *  STACK_EXPORT(neib-1) + 1
         inum  = NB * (STACK_EXPORT(neib  ) - STACK_EXPORT(neib-1) )
         call MPI_ISEND(WS(istart), inum, MPI_DOUBLE_PRECISION,          &
-     &                  NEIBPE(neib), 0, SOLVER_COMM, req1(neib), ierr)
+     &                 NEIBPE(neib), 0, CALYPSO_COMM, req1(neib), ierr)
       end do
 
 !C
@@ -118,7 +109,7 @@
         istart= NB *  STACK_IMPORT(neib-1) + 1
         inum  = NB * (STACK_IMPORT(neib  ) - STACK_IMPORT(neib-1) )
         call MPI_IRECV(WR(istart), inum, MPI_DOUBLE_PRECISION,          &
-     &                 NEIBPE(neib), 0, SOLVER_COMM, req2(neib), ierr)
+     &                 NEIBPE(neib), 0, CALYPSO_COMM, req2(neib), ierr)
       enddo
 
       call MPI_WAITALL (NEIBPETOT, req2(1), sta2(1,1), ierr)
@@ -144,7 +135,7 @@
       subroutine  SOLVER_SEND_RECV_Nx3                                  &
      &            ( N, NB, NEIBPETOT, NEIBPE, STACK_IMPORT, NOD_IMPORT, &
      &                                        STACK_EXPORT, NOD_EXPORT, &
-     &              X1, X2, X3, SOLVER_COMM, my_rank)
+     &              X1, X2, X3)
 
       use calypso_mpi
 !
@@ -166,9 +157,6 @@
       real   (kind=kreal), intent(inout):: X2(NB*N)
       real   (kind=kreal), intent(inout):: X3(NB*N)
 !
-      integer                            , intent(in)   ::SOLVER_COMM
-      integer                            , intent(in)   :: my_rank
-!
       integer (kind = kint) :: neib, istart, inum, iend
       integer (kind = kint) :: ierr, k, ii, ix, nd, NB3
 !
@@ -196,7 +184,7 @@
         istart= 3*NB *  STACK_EXPORT(neib-1) + 1
         inum  = 3*NB * (STACK_EXPORT(neib  ) - STACK_EXPORT(neib-1) )
         call MPI_ISEND(WS(istart), inum, MPI_DOUBLE_PRECISION,          &
-     &                  NEIBPE(neib), 0, SOLVER_COMM, req1(neib), ierr)
+     &                 NEIBPE(neib), 0, CALYPSO_COMM, req1(neib), ierr)
       end do
 
 !C
@@ -205,7 +193,7 @@
         istart= 3*NB *  STACK_IMPORT(neib-1) + 1
         inum  = 3*NB * (STACK_IMPORT(neib  ) - STACK_IMPORT(neib-1) )
         call MPI_IRECV(WR(istart), inum, MPI_DOUBLE_PRECISION,          &
-     &                 NEIBPE(neib), 0, SOLVER_COMM, req2(neib), ierr)
+     &                 NEIBPE(neib), 0, CALYPSO_COMM, req2(neib), ierr)
       enddo
 
       call MPI_WAITALL (NEIBPETOT, req2(1), sta2(1,1), ierr)
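
The N-component variants follow the same pattern; a hypothetical
caller under the new interface (numnod, n_comp, and d_field are
illustrative names, not identifiers from this commit):

      call SOLVER_SEND_RECV_N(numnod, n_comp, num_neib, id_neib,        &
     &                        istack_import, item_import,               &
     &                        istack_export, item_export, d_field(1))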
diff --git a/src/Fortran_libraries/PARALLEL_src/COMM_src/solver_SR_int.f90 b/src/Fortran_libraries/PARALLEL_src/COMM_src/solver_SR_int.f90
index ed4f779..732f045 100644
--- a/src/Fortran_libraries/PARALLEL_src/COMM_src/solver_SR_int.f90
+++ b/src/Fortran_libraries/PARALLEL_src/COMM_src/solver_SR_int.f90
@@ -10,9 +10,8 @@
 !!
 !!@verbatim
 !!      subroutine  solver_send_recv_i                                  &
-!!     &                (N, NEIBPETOT, NEIBPE, STACK_IMPORT, NOD_IMPORT,&
-!!     &                                       STACK_EXPORT, NOD_EXPORT,&
-!!     &                 ix, SOLVER_COMM,my_rank)
+!!     &          (N, NEIBPETOT, NEIBPE, STACK_IMPORT, NOD_IMPORT,      &
+!!     &           STACK_EXPORT, NOD_EXPORT, ix)
 !!@endverbatim
 !!
 !!@n @param  N     Number of data points
@@ -29,9 +28,6 @@
 !!                    local node ID to copy in export buffer
 !!
 !!@n @param  ix(N)     integer data with NB components
-!!
-!!@n @param  SOLVER_COMM      MPI communicator
-!!@n @param  my_rank          own process rank
 !
       module solver_SR_int
 !
@@ -46,9 +42,8 @@
 ! ----------------------------------------------------------------------
 !
       subroutine  solver_send_recv_i                                    &
-     &                ( N, NEIBPETOT, NEIBPE, STACK_IMPORT, NOD_IMPORT, &
-     &                                        STACK_EXPORT, NOD_EXPORT, &
-     &                  ix, SOLVER_COMM,my_rank)
+     &          (N, NEIBPETOT, NEIBPE, STACK_IMPORT, NOD_IMPORT,        &
+     &           STACK_EXPORT, NOD_EXPORT, ix)
 !
       use calypso_mpi
       use m_solver_SR
@@ -71,10 +66,6 @@
      &        :: NOD_EXPORT
 !>       communicated result vector
       integer (kind=kint), dimension(N)  , intent(inout):: iX
-!>       communicator for mpi
-      integer                            , intent(in)   ::SOLVER_COMM
-!>       Own process
-      integer                            , intent(in)   :: my_rank
 !C
 !
       integer (kind = kint) :: neib, istart, inum, ierr, k
@@ -93,7 +84,7 @@
            iWS(k)= iX(NOD_EXPORT(k))
         enddo
         call MPI_ISEND (iWS(istart+1), inum, MPI_INTEGER,               &
-     &                  NEIBPE(neib), 0, SOLVER_COMM,                   &
+     &                  NEIBPE(neib), 0, CALYPSO_COMM,                  &
      &                  req1(neib), ierr)
       enddo
 
@@ -104,7 +95,7 @@
         istart= STACK_IMPORT(neib-1)
         inum  = STACK_IMPORT(neib  ) - istart
         call MPI_IRECV (iWR(istart+1), inum, MPI_INTEGER,               &
-     &                  NEIBPE(neib), 0, SOLVER_COMM,                   &
+     &                  NEIBPE(neib), 0, CALYPSO_COMM,                  &
      &                  req2(neib), ierr)
       enddo
 
diff --git a/src/Fortran_libraries/PARALLEL_src/SPH_SHELL_src/sum_sph_rms_data.f90 b/src/Fortran_libraries/PARALLEL_src/SPH_SHELL_src/sum_sph_rms_data.f90
index 967b6c6..2ab8cad 100644
--- a/src/Fortran_libraries/PARALLEL_src/SPH_SHELL_src/sum_sph_rms_data.f90
+++ b/src/Fortran_libraries/PARALLEL_src/SPH_SHELL_src/sum_sph_rms_data.f90
@@ -168,6 +168,7 @@
 !
       subroutine sum_sph_layerd_rms
 !
+      use calypso_mpi
       use m_parallel_var_dof
       use m_spheric_parameter
       use m_rms_4_sph_spectr


