[cig-commits] [commit] Hiro_latest: switch MPI communicator (a42a502)

cig_noreply at geodynamics.org
Mon Nov 18 16:20:41 PST 2013


Repository : ssh://geoshell/calypso

On branch  : Hiro_latest
Link       : https://github.com/geodynamics/calypso/compare/93e9f8f974c7a247c8f02e54ec18de063f86c8fb...3c548304673360ddedd7d68c8095b3fb74a2b9ce

>---------------------------------------------------------------

commit a42a5020e1abbd548b42fc7e2faee9b1f854b21f
Author: Hiroaki Matsui <h_kemono at mac.com>
Date:   Sat Sep 28 15:45:27 2013 -0700

    switch MPI communicator


>---------------------------------------------------------------

a42a5020e1abbd548b42fc7e2faee9b1f854b21f
 .../MHD_src/field_data/cal_max_indices.f90         |  41 +++---
 .../MHD_src/sph_MHD/const_wz_coriolis_rtp.f90      |   3 +-
 .../MHD_src/sph_MHD/global_field_4_dynamobench.f90 |   6 +-
 .../MHD_src/sph_MHD/set_comm_tbl_sph_coriolis.f90  |   9 +-
 .../MHD_src/sph_MHD/solver_sph_coriolis_sr.f90     |  33 ++---
 .../MHD_src/sph_MHD/sph_MHD_circle_transform.f90   |   3 +-
 .../MHD_src/sph_MHD/sph_transforms_4_MHD.f90       |   3 +-
 .../PARALLEL_src/COMM_src/calypso_mpi.f90          |  16 ++-
 .../PARALLEL_src/COMM_src/hdf5_file_IO.F90         |   8 +-
 .../COMM_src/m_array_for_send_recv.f90             | 118 ++++++++++++++++++
 .../PARALLEL_src/COMM_src/m_merged_ucd_data.f90    |  15 +--
 .../PARALLEL_src/COMM_src/m_parallel_var_dof.f90   |  11 +-
 .../PARALLEL_src/COMM_src/m_work_time.f90          |  42 ++++---
 .../PARALLEL_src/COMM_src/merged_ucd_data_IO.f90   |  39 +++---
 .../PARALLEL_src/COMM_src/merged_vtk_data_IO.f90   |  17 +--
 .../COMM_src/nodal_vector_send_recv.f90            |   8 +-
 .../SPH_SHELL_src/cal_rms_fields_by_sph.f90        |  28 +++--
 .../SPH_SHELL_src/pickup_gauss_coefficients.f90    |  20 +--
 .../SPH_SHELL_src/pickup_sph_coefs.f90             |  22 ++--
 .../SPH_SHELL_src/pickup_sph_rms_spectr.f90        |  33 +++--
 .../SPH_SHELL_src/select_fourier_transform.F90     |   4 +-
 .../SPH_SHELL_src/sum_b_trans_at_pole.f90          | 137 +++++++++++----------
 .../SPH_SHELL_src/sum_sph_rms_data.f90             |  27 ++--
 23 files changed, 415 insertions(+), 228 deletions(-)
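
Every hunk below applies the same substitution: the solver communicator SOLVER_COMM (formerly kept in m_parallel_var_dof) is replaced by CALYPSO_COMM from the calypso_mpi wrapper module, and the hard-coded MPI_DOUBLE_PRECISION / MPI_INTEGER datatypes are replaced by the kind-matched constants CALYPSO_REAL / CALYPSO_INTEGER. As an illustration only (the subroutine and variable names here are placeholders, not taken from the sources below), a reduction written against the wrapper looks like:

      subroutine example_sum_over_ranks(num, val_local, val_global)
!
      use m_precision
      use calypso_mpi
!
      integer(kind = kint), intent(in) :: num
      real(kind = kreal), intent(in) ::    val_local(num)
      real(kind = kreal), intent(inout) :: val_global(num)
      integer(kind = kint) :: ierr
!
!  CALYPSO_REAL matches kreal and CALYPSO_COMM is the duplicate of
!  MPI_COMM_WORLD created in calypso_MPI_init
      call MPI_allREDUCE(val_local(1), val_global(1), num,              &
     &    CALYPSO_REAL, MPI_SUM, CALYPSO_COMM, ierr)
!
      end subroutine example_sum_over_ranks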

diff --git a/src/Fortran_libraries/MHD_src/field_data/cal_max_indices.f90 b/src/Fortran_libraries/MHD_src/field_data/cal_max_indices.f90
index 1823fa6..aabf8f0 100644
--- a/src/Fortran_libraries/MHD_src/field_data/cal_max_indices.f90
+++ b/src/Fortran_libraries/MHD_src/field_data/cal_max_indices.f90
@@ -1,11 +1,15 @@
-!
-!      module cal_max_indices
-!
-!        programmed by H.Matsui and H.Okuda
-!                                    on July 2000 (ver 1.1)
-!        Modified by H. Matsui on Aug., 2007
-!
-!      subroutine s_cal_max_indices
+!>@file   cal_max_indices.f90
+!!@brief  module cal_max_indices
+!!
+!!@author H. Matsui
+!!@date Programmed by H.Matsui and H.Okuda in July 2000
+!!@n     Modified by H. Matsui on Aug., 2007
+!
+!>@brief  Find node positions of maximum values
+!!
+!!@verbatim
+!!      subroutine s_cal_max_indices
+!!@endverbatim
 !
       module cal_max_indices
 !
@@ -21,6 +25,7 @@
 !
       subroutine s_cal_max_indices
 !
+      use calypso_mpi
       use m_parallel_var_dof
       use m_cal_max_indices
       use m_geometry_parameter
@@ -50,17 +55,17 @@
        end do
 !
        do nd = 1, num_tot_nod_phys_vis
-        phys_max_local(nd) = d_nod(node_max(nd),nd)
-        phys_min_local(nd) = d_nod(node_min(nd),nd)
+         phys_max_local(nd) = d_nod(node_max(nd),nd)
+         phys_min_local(nd) = d_nod(node_min(nd),nd)
        end do
 !
         call MPI_allREDUCE (phys_max_local(1), phys_max(1),             &
-     &       num_tot_nod_phys_vis, MPI_DOUBLE_PRECISION, MPI_MAX,       &
-     &       SOLVER_COMM, ierr)
+     &      num_tot_nod_phys_vis, CALYPSO_REAL, MPI_MAX,                &
+     &      CALYPSO_COMM, ierr)
 !
         call MPI_allREDUCE (phys_min_local(1), phys_min(1),             &
-     &        num_tot_nod_phys_vis, MPI_DOUBLE_PRECISION, MPI_MIN,      &
-     &        SOLVER_COMM, ierr)
+     &      num_tot_nod_phys_vis, CALYPSO_REAL, MPI_MIN,                &
+     &      CALYPSO_COMM, ierr)
 !
         node_max_local = 0
         node_min_local = 0
@@ -77,12 +82,12 @@
        end do
 !
         call MPI_allREDUCE (node_max_local(1), node_max(1),             &
-     &        num_tot_nod_phys_vis, MPI_INTEGER, MPI_SUM,               &
-     &        SOLVER_COMM, ierr)
+     &      num_tot_nod_phys_vis, CALYPSO_INTEGER, MPI_SUM,             &
+     &      CALYPSO_COMM, ierr)
 !
         call MPI_allREDUCE (node_min_local(1), node_min(1),             &
-     &        num_tot_nod_phys_vis, MPI_INTEGER, MPI_SUM,               &
-     &        SOLVER_COMM, ierr)
+     &      num_tot_nod_phys_vis, CALYPSO_INTEGER, MPI_SUM,             &
+     &      CALYPSO_COMM, ierr)
 !
 !
       end subroutine s_cal_max_indices
diff --git a/src/Fortran_libraries/MHD_src/sph_MHD/const_wz_coriolis_rtp.f90 b/src/Fortran_libraries/MHD_src/sph_MHD/const_wz_coriolis_rtp.f90
index 1914e68..9fcc2bd 100644
--- a/src/Fortran_libraries/MHD_src/sph_MHD/const_wz_coriolis_rtp.f90
+++ b/src/Fortran_libraries/MHD_src/sph_MHD/const_wz_coriolis_rtp.f90
@@ -172,6 +172,7 @@
 !
       subroutine subtract_sphere_ave_coriolis
 !
+      use calypso_mpi
       use m_parallel_var_dof
       use m_sph_phys_address
       use m_work_4_sph_trans
@@ -190,7 +191,7 @@
       call clear_rj_degree0_scalar_smp(ipol%i_coriolis)
 !
       call MPI_Allreduce(sphere_ave_coriolis_l, sphere_ave_coriolis_g,  &
-     &    nidx_rj(1), MPI_DOUBLE_PRECISION, MPI_SUM, SOLVER_COMM, ierr)
+     &    nidx_rj(1), CALYPSO_REAL, MPI_SUM, CALYPSO_COMM, ierr)
 !
 !
 !$omp do private(mphi,l_rtp,kr,k_gl,inod)
diff --git a/src/Fortran_libraries/MHD_src/sph_MHD/global_field_4_dynamobench.f90 b/src/Fortran_libraries/MHD_src/sph_MHD/global_field_4_dynamobench.f90
index 6f549a5..f1ab5ac 100644
--- a/src/Fortran_libraries/MHD_src/sph_MHD/global_field_4_dynamobench.f90
+++ b/src/Fortran_libraries/MHD_src/sph_MHD/global_field_4_dynamobench.f90
@@ -87,6 +87,7 @@
 !
       subroutine pick_inner_core_rotation
 !
+      use calypso_mpi
       use m_parallel_var_dof
       use m_spheric_parameter
       use m_sph_spectr_data
@@ -107,7 +108,7 @@
       end do
 !
       call MPI_allREDUCE (rotate_ic_local, rotate_icore, ithree,        &
-     &    MPI_DOUBLE_PRECISION, MPI_SUM, SOLVER_COMM, ierr)
+     &    CALYPSO_REAL, MPI_SUM, CALYPSO_COMM, ierr)
 !
       end subroutine pick_inner_core_rotation
 !
@@ -115,6 +116,7 @@
 !
       subroutine pick_mag_torque_inner_core
 !
+      use calypso_mpi
       use m_parallel_var_dof
       use m_spheric_parameter
       use m_sph_spectr_data
@@ -136,7 +138,7 @@
       end do
 !
       call MPI_allREDUCE (m_torque_local, m_torque_icore, ithree,       &
-     &    MPI_DOUBLE_PRECISION, MPI_SUM, SOLVER_COMM, ierr)
+     &    CALYPSO_REAL, MPI_SUM, CALYPSO_COMM, ierr)
 !
       end subroutine pick_mag_torque_inner_core
 !
diff --git a/src/Fortran_libraries/MHD_src/sph_MHD/set_comm_tbl_sph_coriolis.f90 b/src/Fortran_libraries/MHD_src/sph_MHD/set_comm_tbl_sph_coriolis.f90
index be9c0dd..a417ca4 100644
--- a/src/Fortran_libraries/MHD_src/sph_MHD/set_comm_tbl_sph_coriolis.f90
+++ b/src/Fortran_libraries/MHD_src/sph_MHD/set_comm_tbl_sph_coriolis.f90
@@ -188,6 +188,7 @@
 !
       subroutine set_sph_range_coriolis
 !
+      use calypso_mpi
       use m_constants
       use m_spheric_parameter
       use m_comm_tbl_sph_coriolis
@@ -234,10 +235,10 @@
 !
 !
       do ip = 1, nprocs
-        call MPI_Bcast(jminmax_gl(1,ip), itwo, MPI_INTEGER, (ip-1),     &
-     &      SOLVER_COMM, ierr)
-        call MPI_Bcast(jminmax_coriolis(1,ip), itwo, MPI_INTEGER,       &
-     &      (ip-1), SOLVER_COMM, ierr)
+        call MPI_Bcast(jminmax_gl(1,ip), itwo, CALYPSO_INTEGER, (ip-1), &
+     &      CALYPSO_COMM, ierr)
+        call MPI_Bcast(jminmax_coriolis(1,ip), itwo, CALYPSO_INTEGER,   &
+     &      (ip-1), CALYPSO_COMM, ierr)
       end do
 !
       end subroutine set_sph_range_coriolis
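
In set_sph_range_coriolis above, the broadcast root changes on every pass of the loop, so each process publishes its own two-element range and, after the loop, every rank holds the full table. A minimal sketch of that pattern with a placeholder table name, using the nprocs counter that calypso_mpi now provides:

      subroutine example_share_ranges(range_table)
!
      use m_precision
      use calypso_mpi
!
      integer(kind = kint), intent(inout) :: range_table(2,nprocs)
      integer(kind = kint) :: ip, ierr
!
!  rank (ip-1) is the root of pass ip; all other ranks receive
      do ip = 1, nprocs
        call MPI_Bcast(range_table(1,ip), 2, CALYPSO_INTEGER, (ip-1),   &
     &      CALYPSO_COMM, ierr)
      end do
!
      end subroutine example_share_ranges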
diff --git a/src/Fortran_libraries/MHD_src/sph_MHD/solver_sph_coriolis_sr.f90 b/src/Fortran_libraries/MHD_src/sph_MHD/solver_sph_coriolis_sr.f90
index 4f1427c..0cfbd88 100644
--- a/src/Fortran_libraries/MHD_src/sph_MHD/solver_sph_coriolis_sr.f90
+++ b/src/Fortran_libraries/MHD_src/sph_MHD/solver_sph_coriolis_sr.f90
@@ -31,6 +31,7 @@
 !
       use m_precision
 !
+      use calypso_mpi
       use m_parallel_var_dof
       use m_comm_tbl_sph_coriolis
 !
@@ -68,16 +69,16 @@
       do i = 1, nneib_send_cor
         ist = nri*istack_send_cor(i-1) + 1
         num = nri*(istack_send_cor(i) - istack_send_cor(i-1))
-        call MPI_ISEND(send_sph_cor(ist), num, MPI_DOUBLE_PRECISION,    &
-     &      ip_send_cor(i), 0, SOLVER_COMM, req1_cor(i), ierr)
+        call MPI_ISEND(send_sph_cor(ist), num, CALYPSO_REAL,            &
+     &      ip_send_cor(i), 0, CALYPSO_COMM, req1_cor(i), ierr)
       end do
 !C
 !C-- RECEIVE
       do i= 1, nneib_recv_cor
         ist = nri*istack_recv_cor(i-1) + 1
         num = nri*(istack_recv_cor(i) - istack_recv_cor(i-1))
-        call MPI_IRECV(recv_sph_cor(ist), num, MPI_DOUBLE_PRECISION,    &
-     &      ip_recv_cor(i), 0, SOLVER_COMM, req2_cor(i), ierr)
+        call MPI_IRECV(recv_sph_cor(ist), num, CALYPSO_REAL,            &
+     &      ip_recv_cor(i), 0, CALYPSO_COMM, req2_cor(i), ierr)
       end do
 !
       call MPI_WAITALL (nneib_recv_cor, req2_cor, sta2_cor,  ierr)
@@ -132,16 +133,16 @@
       do i = 1, nneib_send_cor
         ist = 3*nri*istack_send_cor(i-1) + 1
         num = 3*nri*(istack_send_cor(i) - istack_send_cor(i-1))
-        call MPI_ISEND(send_sph_cor(ist), num, MPI_DOUBLE_PRECISION,    &
-     &      ip_send_cor(i), 0, SOLVER_COMM, req1_cor(i), ierr)
+        call MPI_ISEND(send_sph_cor(ist), num, CALYPSO_REAL,            &
+     &      ip_send_cor(i), 0, CALYPSO_COMM, req1_cor(i), ierr)
       end do
 !C
 !C-- RECEIVE
       do i= 1, nneib_recv_cor
         ist = 3*nri*istack_recv_cor(i-1) + 1
         num = 3*nri*(istack_recv_cor(i) - istack_recv_cor(i-1))
-        call MPI_IRECV(recv_sph_cor(ist), num, MPI_DOUBLE_PRECISION,    &
-     &      ip_recv_cor(i), 0, SOLVER_COMM, req2_cor(i), ierr)
+        call MPI_IRECV(recv_sph_cor(ist), num, CALYPSO_REAL,            &
+     &      ip_recv_cor(i), 0, CALYPSO_COMM, req2_cor(i), ierr)
       end do
 !
       call MPI_WAITALL (nneib_recv_cor, req2_cor, sta2_cor,  ierr)
@@ -202,16 +203,16 @@
       do i = 1, nneib_send_cor
         ist = 5*nri*istack_send_cor(i-1) + 1
         num = 5*nri*(istack_send_cor(i) - istack_send_cor(i-1))
-        call MPI_ISEND(send_sph_cor(ist), num, MPI_DOUBLE_PRECISION,    &
-     &      ip_send_cor(i), 0, SOLVER_COMM, req1_cor(i), ierr)
+        call MPI_ISEND(send_sph_cor(ist), num, CALYPSO_REAL,            &
+     &      ip_send_cor(i), 0, CALYPSO_COMM, req1_cor(i), ierr)
       end do
 !C
 !C-- RECEIVE
       do i= 1, nneib_recv_cor
         ist = 5*nri*istack_recv_cor(i-1) + 1
         num = 5*nri*(istack_recv_cor(i) - istack_recv_cor(i-1))
-        call MPI_IRECV(recv_sph_cor(ist), num, MPI_DOUBLE_PRECISION,    &
-     &      ip_recv_cor(i), 0, SOLVER_COMM, req2_cor(i), ierr)
+        call MPI_IRECV(recv_sph_cor(ist), num, CALYPSO_REAL,            &
+     &      ip_recv_cor(i), 0, CALYPSO_COMM, req2_cor(i), ierr)
       end do
 !
       call MPI_WAITALL (nneib_recv_cor, req2_cor, sta2_cor,  ierr)
@@ -268,16 +269,16 @@
       do i = 1, nneib_send_cor
         ist = istack_send_cor(i-1) + 1
         num = istack_send_cor(i) - istack_send_cor(i-1)
-        call MPI_ISEND(isend_sph_cor(ist), num, MPI_INTEGER,            &
-     &      ip_send_cor(i), 0, SOLVER_COMM, req1_cor(i),  ierr)
+        call MPI_ISEND(isend_sph_cor(ist), num, CALYPSO_INTEGER,        &
+     &      ip_send_cor(i), 0, CALYPSO_COMM, req1_cor(i),  ierr)
       end do
 !C
 !C-- RECEIVE
       do i= 1, nneib_recv_cor
         ist = istack_recv_cor(i-1) + 1
         num = istack_recv_cor(i) - istack_recv_cor(i-1)
-        call MPI_IRECV(irecv_sph_cor(ist), num, MPI_INTEGER,            &
-     &      ip_recv_cor(i), 0, SOLVER_COMM, req2_cor(i), ierr)
+        call MPI_IRECV(irecv_sph_cor(ist), num, CALYPSO_INTEGER,        &
+     &      ip_recv_cor(i), 0, CALYPSO_COMM, req2_cor(i), ierr)
       end do
 !
       call MPI_WAITALL (nneib_recv_cor, req2_cor, sta2_cor,  ierr)
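
All of the exchanges in solver_sph_coriolis_sr.f90 follow the same nonblocking pattern: post an MPI_ISEND per neighbour, post the matching MPI_IRECV per neighbour, and wait on the receive requests before unpacking (the send requests are completed later in each routine, outside the hunks shown). A condensed sketch under the simplifying assumption of a single, symmetric neighbour table; all names are placeholders, and calypso_mpi is assumed to expose the MPI symbols as the sources above imply:

      subroutine example_neighbor_exchange(nneib, ip_neib, istack,      &
     &          sendbuf, recvbuf, req_s, req_r, sta_r)
!
      use m_precision
      use calypso_mpi
!
      integer(kind = kint), intent(in) :: nneib
      integer(kind = kint), intent(in) :: ip_neib(nneib)
      integer(kind = kint), intent(in) :: istack(0:nneib)
      real(kind = kreal), intent(in) ::    sendbuf(istack(nneib))
      real(kind = kreal), intent(inout) :: recvbuf(istack(nneib))
      integer, intent(inout) :: req_s(nneib), req_r(nneib)
      integer, intent(inout) :: sta_r(MPI_STATUS_SIZE,nneib)
      integer(kind = kint) :: i, ist, num, ierr
!
      do i = 1, nneib
        ist = istack(i-1) + 1
        num = istack(i) - istack(i-1)
        call MPI_ISEND(sendbuf(ist), num, CALYPSO_REAL,                 &
     &      ip_neib(i), 0, CALYPSO_COMM, req_s(i), ierr)
        call MPI_IRECV(recvbuf(ist), num, CALYPSO_REAL,                 &
     &      ip_neib(i), 0, CALYPSO_COMM, req_r(i), ierr)
      end do
!  recvbuf may be unpacked only after the receives complete; req_s
!  must also be completed before sendbuf is reused
      call MPI_WAITALL(nneib, req_r, sta_r, ierr)
!
      end subroutine example_neighbor_exchange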
diff --git a/src/Fortran_libraries/MHD_src/sph_MHD/sph_MHD_circle_transform.f90 b/src/Fortran_libraries/MHD_src/sph_MHD/sph_MHD_circle_transform.f90
index 7e9daa9..f2cb08f 100644
--- a/src/Fortran_libraries/MHD_src/sph_MHD/sph_MHD_circle_transform.f90
+++ b/src/Fortran_libraries/MHD_src/sph_MHD/sph_MHD_circle_transform.f90
@@ -115,6 +115,7 @@
 !
       subroutine collect_spectr_for_circle
 !
+      use calypso_mpi
       use m_parallel_var_dof
       use m_sph_spectr_data
       use m_sph_phys_address
@@ -154,7 +155,7 @@
       num = d_circle%ntot_phys * (nidx_global_rj(2) + 1)
       if(my_rank .eq. 0) d_rj_circle =   zero
       call MPI_Reduce(d_rj_circ_lc(0,1), d_rj_circle(0,1), num,         &
-     &    MPI_DOUBLE_PRECISION, MPI_SUM, izero, SOLVER_COMM, ierr)
+     &    CALYPSO_REAL, MPI_SUM, izero, CALYPSO_COMM, ierr)
 !
       end subroutine collect_spectr_for_circle
 !
diff --git a/src/Fortran_libraries/MHD_src/sph_MHD/sph_transforms_4_MHD.f90 b/src/Fortran_libraries/MHD_src/sph_MHD/sph_transforms_4_MHD.f90
index 18dc363..991e1ad 100644
--- a/src/Fortran_libraries/MHD_src/sph_MHD/sph_transforms_4_MHD.f90
+++ b/src/Fortran_libraries/MHD_src/sph_MHD/sph_transforms_4_MHD.f90
@@ -157,6 +157,7 @@
 !
       subroutine select_legendre_transform
 !
+      use calypso_mpi
       use m_parallel_var_dof
       use m_machine_parameter
       use m_work_4_sph_trans
@@ -184,7 +185,7 @@
       etime(id_legendre_transfer) = MPI_WTIME() - stime
 !
       call MPI_allREDUCE (etime, etime_trans, ifour,                    &
-     &    MPI_DOUBLE_PRECISION, MPI_SUM, SOLVER_COMM, ierr)
+     &    CALYPSO_REAL, MPI_SUM, CALYPSO_COMM, ierr)
 !
       id_legendre_transfer = iflag_leg_orginal_loop
       etime_shortest =       etime_trans(iflag_leg_orginal_loop)
diff --git a/src/Fortran_libraries/PARALLEL_src/COMM_src/calypso_mpi.f90 b/src/Fortran_libraries/PARALLEL_src/COMM_src/calypso_mpi.f90
index 5ff9612..76c15d2 100644
--- a/src/Fortran_libraries/PARALLEL_src/COMM_src/calypso_mpi.f90
+++ b/src/Fortran_libraries/PARALLEL_src/COMM_src/calypso_mpi.f90
@@ -8,7 +8,7 @@
 !> @brief MPI wrapper for Calypso
 !!
 !!@verbatim
-!!      subroutine calypso_MPI_init(ierr)
+!!      subroutine calypso_MPI_init
 !!      subroutine calypso_MPI_finalize
 !!      subroutine calypso_MPI_abort(code, message)
 !!
@@ -35,6 +35,11 @@
       integer :: CALYPSO_INTEGER
 !>     real size for MPI
       integer :: CALYPSO_REAL
+!>     character size for MPI
+      integer :: CALYPSO_CHARACTER
+!
+!>      total number of processes
+      integer(kind=kint) :: nprocs
 !
 ! ----------------------------------------------------------------------
 !
@@ -42,15 +47,16 @@
 !
 ! ----------------------------------------------------------------------
 !
-      subroutine calypso_MPI_init(ierr)
-!
-      use m_machine_parameter
+      subroutine calypso_MPI_init
 !
-      integer(kind=kint), intent(inout) :: ierr
+      integer(kind=kint) :: ierr
 !
 !
       call  MPI_INIT(ierr)
       call  MPI_COMM_DUP (MPI_COMM_WORLD, CALYPSO_COMM, ierr)
+      call  MPI_COMM_SIZE(CALYPSO_COMM, nprocs, ierr)
+!
+      CALYPSO_CHARACTER = MPI_CHARACTER
 !
       if(kint .eq. 4) then
         CALYPSO_INTEGER = MPI_INTEGER
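
The hunk above makes calypso_MPI_init the single place where the parallel environment is fixed: MPI is initialized, MPI_COMM_WORLD is duplicated into CALYPSO_COMM, nprocs is queried from the duplicate, and the CALYPSO_* datatype constants are matched to the kind parameters. A condensed sketch of that flow; the branches for kinds other than the 4-byte integer case are not visible in the hunk and are an assumption here:

      subroutine example_MPI_init_sketch
!
      use m_precision
      use calypso_mpi
!
      integer(kind = kint) :: ierr
!
      call MPI_INIT(ierr)
!  work on a private duplicate of MPI_COMM_WORLD from here on
      call MPI_COMM_DUP(MPI_COMM_WORLD, CALYPSO_COMM, ierr)
      call MPI_COMM_SIZE(CALYPSO_COMM, nprocs, ierr)
!
      CALYPSO_CHARACTER = MPI_CHARACTER
!  choose MPI datatypes that match the Calypso kind parameters
      if(kint .eq. 4) then
        CALYPSO_INTEGER = MPI_INTEGER
      else
        CALYPSO_INTEGER = MPI_INTEGER8
      end if
      if(kreal .eq. 8) then
        CALYPSO_REAL = MPI_DOUBLE_PRECISION
      else
        CALYPSO_REAL = MPI_REAL
      end if
!
      end subroutine example_MPI_init_sketch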
diff --git a/src/Fortran_libraries/PARALLEL_src/COMM_src/hdf5_file_IO.F90 b/src/Fortran_libraries/PARALLEL_src/COMM_src/hdf5_file_IO.F90
index b6d4ad2..5e5820c 100644
--- a/src/Fortran_libraries/PARALLEL_src/COMM_src/hdf5_file_IO.F90
+++ b/src/Fortran_libraries/PARALLEL_src/COMM_src/hdf5_file_IO.F90
@@ -113,7 +113,7 @@
 !        write(*,*) 'start vals: ', local_node_elem_count(1), &
 !        local_node_elem_count(2)
       call mpi_scan(local_node_elem_count, start_node_elem,             &
-     &    2, MPI_INTEGER, MPI_SUM, SOLVER_COMM, mpi_err)
+     &    2, CALYPSO_INTEGER, MPI_SUM, CALYPSO_COMM, mpi_err)
       total_node_elem(1) = istack_internod_ucd_list(nprocs)
       total_node_elem(2) = istack_ele_ucd_list(nprocs)
 !
@@ -129,7 +129,7 @@
 ! Setup file access property list with parallel I/O access.
 !
       call h5pcreate_f(H5P_FILE_ACCESS_F, plist_id, hdferr)
-      call h5pset_fapl_mpio_f(plist_id, SOLVER_COMM, info, hdferr)
+      call h5pset_fapl_mpio_f(plist_id, CALYPSO_COMM, info, hdferr)
 !
 ! Create new file collectively.
 !
@@ -306,7 +306,7 @@
       local_node_count = istack_internod_ucd_list(my_rank+1)            &
      &                  - istack_internod_ucd_list(my_rank)
       call mpi_scan(local_node_count, start_node,                       &
-     &        1, MPI_INTEGER, MPI_SUM, SOLVER_COMM, mpi_err)
+     &        1, CALYPSO_INTEGER, MPI_SUM, CALYPSO_COMM, mpi_err)
       total_node = istack_internod_ucd_list(nprocs)
 !
 ! Remove our own counts from the offset
@@ -318,7 +318,7 @@
 ! Setup file access property list with parallel I/O access.
 !
       call h5pcreate_f(H5P_FILE_ACCESS_F, plist_id, hdferr)
-      call h5pset_fapl_mpio_f(plist_id, SOLVER_COMM, info, hdferr)
+      call h5pset_fapl_mpio_f(plist_id, CALYPSO_COMM, info, hdferr)
 !
 ! Create new file collectively.
 !
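
The HDF5 writer above derives each rank's file offset from an inclusive prefix sum: mpi_scan with MPI_SUM returns the combined count of ranks 0..my_rank, and the rank's own count is then subtracted ("Remove our own counts from the offset") to obtain the start position. A minimal sketch of that calculation with placeholder names:

      subroutine example_scan_offset(local_count, start_offset)
!
      use m_precision
      use calypso_mpi
!
      integer(kind = kint), intent(in) ::    local_count
      integer(kind = kint), intent(inout) :: start_offset
      integer(kind = kint) :: scan_count, ierr
!
!  inclusive prefix sum over ranks 0..my_rank
      call MPI_Scan(local_count, scan_count, 1,                         &
     &    CALYPSO_INTEGER, MPI_SUM, CALYPSO_COMM, ierr)
!  exclusive start offset for this rank
      start_offset = scan_count - local_count
!
      end subroutine example_scan_offset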
diff --git a/src/Fortran_libraries/PARALLEL_src/COMM_src/m_array_for_send_recv.f90 b/src/Fortran_libraries/PARALLEL_src/COMM_src/m_array_for_send_recv.f90
new file mode 100644
index 0000000..e7efa48
--- /dev/null
+++ b/src/Fortran_libraries/PARALLEL_src/COMM_src/m_array_for_send_recv.f90
@@ -0,0 +1,118 @@
+!>@file   m_array_for_send_recv.f90
+!!@brief      module m_array_for_send_recv
+!!
+!!@author H. Matsui and H. Okuda
+!!@date Programmed in 2000
+!!@n    Modified on Apr., 2008
+!!@n    Modified on Dec., 2012
+!!@n    Modified on Sep., 2013
+!
+!>@brief Work array for data communication of FEM data
+!!
+!!@verbatim
+!!      subroutine verify_vector_for_solver(NB, N)
+!!      subroutine allocate_vector_for_solver(NB, N)
+!!      subroutine deallocate_vector_for_solver
+!!
+!!      subroutine allocate_iccg_int_matrix(N)
+!!      subroutine deallocate_iccg_int_matrix
+!!@endverbatim
+!!
+!!@param  N    length of vector
+!!@param  NB   number of vectors to solve
+!
+      module   m_array_for_send_recv
+!
+      use m_precision
+      use calypso_mpi
+!
+      implicit  none
+!
+!>      Vector for solution vector
+      real(kind=kreal), allocatable :: x_vec(:)
+!>      Vector for right hand side vector
+      real(kind=kreal), allocatable :: b_vec(:)
+!>      Size of allocated vectors
+      integer(kind = kint) :: isize_solver_vect = -1
+!
+!>      Work area for integer data
+      integer(kind=kint), allocatable :: ix_vec(:)
+!
+      private :: isize_solver_vect
+!
+! ----------------------------------------------------------------------
+!
+      contains
+!
+! ----------------------------------------------------------------------
+!
+       subroutine verify_vector_for_solver(NB, N)
+!
+       integer(kind = kint), intent(in) :: NB, N
+       integer(kind = kint) :: ncomp
+!
+!
+       ncomp = NB*N
+       if (isize_solver_vect .lt. 0) then
+         call allocate_vector_for_solver(NB,N)
+       else
+         if (isize_solver_vect .lt. ncomp) then
+           call deallocate_vector_for_solver
+           call allocate_vector_for_solver(NB,N)
+         end if
+       end if
+!
+       end subroutine verify_vector_for_solver
+!
+!  ---------------------------------------------------------------------
+!
+       subroutine allocate_vector_for_solver(NB, N)
+!
+       integer(kind = kint), intent(in) :: NB, N
+!
+!
+       allocate(x_vec(NB*N))
+       allocate(b_vec(NB*N))
+       isize_solver_vect = NB*N
+!
+       if(N*NB .gt. 0) then
+         b_vec  = 0.0d00
+         x_vec  = 0.0d00
+       end if
+!
+       end subroutine allocate_vector_for_solver
+!
+!  ---------------------------------------------------------------------
+!
+       subroutine deallocate_vector_for_solver
+!
+!
+       deallocate(x_vec, b_vec)
+       isize_solver_vect = 0
+!
+       end subroutine deallocate_vector_for_solver
+!
+!  ---------------------------------------------------------------------
+!  ---------------------------------------------------------------------
+!
+       subroutine allocate_iccg_int_matrix(N)
+!
+       integer(kind = kint), intent(in) :: N
+!
+!
+       allocate(ix_vec(N))
+       ix_vec  = 0
+!
+       end subroutine allocate_iccg_int_matrix
+!
+!  ---------------------------------------------------------------------
+!
+       subroutine deallocate_iccg_int_matrix
+!
+       deallocate(ix_vec)
+!
+       end subroutine deallocate_iccg_int_matrix
+!
+!  ---------------------------------------------------------------------
+!
+      end module   m_array_for_send_recv
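
The new module above keeps one shared set of solver work vectors and grows them only when a larger request arrives. A typical call sequence might look like the sketch below; the surrounding routine, the rhs/field arrays, and the solver call are placeholders, not part of the module:

      subroutine example_use_solver_work(NB, N, rhs, field)
!
      use m_precision
      use m_array_for_send_recv
!
      integer(kind = kint), intent(in) :: NB, N
      real(kind = kreal), intent(in) ::    rhs(NB*N)
      real(kind = kreal), intent(inout) :: field(NB*N)
!
!  allocate or enlarge x_vec/b_vec so they hold at least NB*N entries
      call verify_vector_for_solver(NB, N)
!
      b_vec(1:NB*N) = rhs(1:NB*N)
!     ( call the linear solver here: it reads b_vec and fills x_vec )
      field(1:NB*N) = x_vec(1:NB*N)
!
      end subroutine example_use_solver_work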
diff --git a/src/Fortran_libraries/PARALLEL_src/COMM_src/m_merged_ucd_data.f90 b/src/Fortran_libraries/PARALLEL_src/COMM_src/m_merged_ucd_data.f90
index 2dbeeb8..cc0c51d 100644
--- a/src/Fortran_libraries/PARALLEL_src/COMM_src/m_merged_ucd_data.f90
+++ b/src/Fortran_libraries/PARALLEL_src/COMM_src/m_merged_ucd_data.f90
@@ -17,7 +17,6 @@
 !
       use m_precision
       use m_constants
-      use calypso_mpi
       use m_parallel_var_dof
 !
       implicit none
@@ -144,16 +143,18 @@
 !
       subroutine count_merged_ucd(nnod, internal_node, nele)
 !
+      use calypso_mpi
+!
       integer(kind = kint), intent(in) :: nnod, internal_node, nele
 !
       integer(kind = kint) :: ip
 !
-      call MPI_Allgather(nnod, ione, MPI_INTEGER,                       &
-     &    nnod_ucd_list, ione, MPI_INTEGER, SOLVER_COMM, ierr)
-      call MPI_Allgather(nele, ione, MPI_INTEGER,                       &
-     &    nele_ucd_list, ione, MPI_INTEGER, SOLVER_COMM, ierr)
-      call MPI_Allgather(internal_node, ione, MPI_INTEGER,              &
-     &    internod_ucd_list, ione, MPI_INTEGER, SOLVER_COMM, ierr)
+      call MPI_Allgather(nnod, ione, CALYPSO_INTEGER,                   &
+     &    nnod_ucd_list, ione, CALYPSO_INTEGER, CALYPSO_COMM, ierr)
+      call MPI_Allgather(nele, ione, CALYPSO_INTEGER,                   &
+     &    nele_ucd_list, ione, CALYPSO_INTEGER, CALYPSO_COMM, ierr)
+      call MPI_Allgather(internal_node, ione, CALYPSO_INTEGER,          &
+     &    internod_ucd_list, ione, CALYPSO_INTEGER, CALYPSO_COMM, ierr)
 !
       do ip = 1,  nprocs
         istack_nod_ucd_list(ip) = istack_nod_ucd_list(ip-1)             &
diff --git a/src/Fortran_libraries/PARALLEL_src/COMM_src/m_parallel_var_dof.f90 b/src/Fortran_libraries/PARALLEL_src/COMM_src/m_parallel_var_dof.f90
index 79eed0c..ca4108b 100644
--- a/src/Fortran_libraries/PARALLEL_src/COMM_src/m_parallel_var_dof.f90
+++ b/src/Fortran_libraries/PARALLEL_src/COMM_src/m_parallel_var_dof.f90
@@ -17,11 +17,6 @@
 !
       implicit  none
 !
-!>      MPI communicator for CALYPSO
-      integer(kind=kint) :: SOLVER_COMM
-!>      total number of processes
-      integer(kind=kint) :: nprocs
-! 
 !>      process ID (start from 0)
       integer(kind=kint) :: my_rank
 !>      error flag
@@ -40,10 +35,8 @@
       use m_machine_parameter
 !
 !
-      call  MPI_INIT(ierr)
-      call  MPI_COMM_DUP (MPI_COMM_WORLD, SOLVER_COMM, ierr)
-      call  MPI_COMM_SIZE(SOLVER_COMM, nprocs, ierr)
-      call  MPI_COMM_RANK(SOLVER_COMM, my_rank  , ierr)
+      call calypso_MPI_init
+      call  MPI_COMM_RANK(CALYPSO_COMM, my_rank  , ierr)
 !
       end subroutine parallel_cal_init
 !
diff --git a/src/Fortran_libraries/PARALLEL_src/COMM_src/m_work_time.f90 b/src/Fortran_libraries/PARALLEL_src/COMM_src/m_work_time.f90
index bf16d94..4ace94c 100644
--- a/src/Fortran_libraries/PARALLEL_src/COMM_src/m_work_time.f90
+++ b/src/Fortran_libraries/PARALLEL_src/COMM_src/m_work_time.f90
@@ -1,17 +1,23 @@
-!
-!      module m_work_time
-!.......................................................................
-!
-!      Written by H. Matsui on 2001
-!
-!      subroutine allocate_elapsed_times
-!      subroutine deallocate_elapsed_times
-!
-!      subroutine start_eleps_time(iflag_elps)
-!      subroutine end_eleps_time(iflag_elps)
-!      subroutine copy_COMM_TIME_to_eleps(iflag_elps)
-!
-!      subroutine output_elapsed_times
+!>@file  m_work_time.f90
+!!       module m_work_time
+!!
+!!@author H. Matsui
+!!@date   Programmed by H. Matsui in 2001
+!
+!> @brief routines to count elapsed time
+!!
+!!@verbatim
+!!      subroutine allocate_elapsed_times
+!!      subroutine deallocate_elapsed_times
+!!
+!!      subroutine start_eleps_time(iflag_elps)
+!!      subroutine end_eleps_time(iflag_elps)
+!!      subroutine copy_COMM_TIME_to_eleps(iflag_elps)
+!!
+!!      subroutine output_elapsed_times
+!!@endverbatim
+!!
+!!@param  iflag_elps   timer ID
 !
       module m_work_time
 !
@@ -78,6 +84,7 @@
 !
       subroutine start_eleps_time(iflag_elps)
 !
+      use calypso_mpi
       use m_parallel_var_dof
 !
       integer(kind = kint), intent(in) :: iflag_elps
@@ -91,6 +98,7 @@
 !
       subroutine end_eleps_time(iflag_elps)
 !
+      use calypso_mpi
       use m_parallel_var_dof
 !
       integer(kind = kint), intent(in) :: iflag_elps
@@ -127,11 +135,11 @@
 !
 !
       call MPI_allREDUCE(elapsed, elapsed_total, num_elapsed,           &
-     &    MPI_DOUBLE_PRECISION, MPI_SUM, SOLVER_COMM, ierr)
+     &    CALYPSO_REAL, MPI_SUM, CALYPSO_COMM, ierr)
       call MPI_allREDUCE(elapsed, elapsed_min, num_elapsed,             &
-     &    MPI_DOUBLE_PRECISION, MPI_MIN, SOLVER_COMM, ierr)
+     &    CALYPSO_REAL, MPI_MIN, CALYPSO_COMM, ierr)
       call MPI_allREDUCE(elapsed, elapsed_max, num_elapsed,             &
-     &    MPI_DOUBLE_PRECISION, MPI_MAX, SOLVER_COMM, ierr)
+     &    CALYPSO_REAL, MPI_MAX, CALYPSO_COMM, ierr)
 !
       if (my_rank.eq.0) then
 !
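
The timers in m_work_time are used as start/stop brackets around code regions; output_elapsed_times then reduces the per-rank results over CALYPSO_COMM to the sum, minimum and maximum shown above, and rank 0 prints them. A minimal usage sketch; the slot index is hypothetical and assumes allocate_elapsed_times has already been called with num_elapsed large enough:

      subroutine example_time_a_block
!
      use m_precision
      use m_work_time
!
!  hypothetical timer slot; real callers define their own indices
      integer(kind = kint), parameter :: id_timer_example = 1
!
      call start_eleps_time(id_timer_example)
!     ( the work to be timed goes here )
      call end_eleps_time(id_timer_example)
!
      end subroutine example_time_a_block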
diff --git a/src/Fortran_libraries/PARALLEL_src/COMM_src/merged_ucd_data_IO.f90 b/src/Fortran_libraries/PARALLEL_src/COMM_src/merged_ucd_data_IO.f90
index 9b53531..103feac 100644
--- a/src/Fortran_libraries/PARALLEL_src/COMM_src/merged_ucd_data_IO.f90
+++ b/src/Fortran_libraries/PARALLEL_src/COMM_src/merged_ucd_data_IO.f90
@@ -1,12 +1,17 @@
-!
-!      module merged_ucd_data_IO
-!
-!      Written by H. Matsui on Feb., 2007
-!
-!      subroutine write_merged_ucd_fields(id_ucd, nnod, num_field,      &
-!     &          ntot_comp, ncomp_field, field_name, d_nod)
-!      subroutine write_merged_ucd_mesh(id_ucd, nnod, nele, nnod_ele,   &
-!     &           xx, ie, ntot_comp)
+!>@file  merged_ucd_data_IO.f90
+!!       module merged_ucd_data_IO
+!!
+!!@author H. Matsui
+!!@date   Programmed by H. Matsui in Feb., 2007
+!
+!> @brief Output routine for merged UCD data segments
+!!
+!!@verbatim
+!!      subroutine write_merged_ucd_fields(id_ucd, nnod, num_field,     &
+!!     &          ntot_comp, ncomp_field, field_name, d_nod)
+!!      subroutine write_merged_ucd_mesh(id_ucd, nnod, nele, nnod_ele,  &
+!!     &           xx, ie, ntot_comp)
+!!@endverbatim
 !
       module merged_ucd_data_IO
 !
@@ -107,16 +112,16 @@
 !C-- SEND
         if(my_rank .eq. isend_rank ) then
         num = nele*nnod_ele
-        call MPI_ISEND(ie(1,1), num, MPI_INTEGER,                       &
-     &      izero, 0, SOLVER_COMM, req1, ierr)
+        call MPI_ISEND(ie(1,1), num, CALYPSO_INTEGER,                   &
+     &      izero, 0, CALYPSO_COMM, req1, ierr)
         end if
 !
 !C
 !C-- RECV
         if(my_rank .eq. 0) then
           num = nele_ucd_list(ip)*nnod_ele
-          call MPI_IRECV(ie_single_ucd(1), num, MPI_INTEGER,            &
-     &        (ip-1), 0, SOLVER_COMM, req2, ierr)
+          call MPI_IRECV(ie_single_ucd(1), num, CALYPSO_INTEGER,        &
+     &        (ip-1), 0, CALYPSO_COMM, req2, ierr)
 !
           call MPI_WAITALL (ione, req2, sta2, ierr)
 !
@@ -165,15 +170,15 @@
 !C-- SEND
         if(my_rank .eq. isend_rank) then
           num = nnod*ncomp_field
-          call MPI_ISEND(d_nod(1,1), num, MPI_DOUBLE_PRECISION,         &
-     &      izero, 0, SOLVER_COMM, req1, ierr)
+          call MPI_ISEND(d_nod(1,1), num, CALYPSO_REAL,                 &
+     &      izero, 0, CALYPSO_COMM, req1, ierr)
         end if
 !C
 !C-- RECV
         if(my_rank .eq. 0) then
           num = nnod_ucd_list(ip)*ncomp_field
-          call MPI_IRECV(d_single_ucd(1), num, MPI_DOUBLE_PRECISION,    &
-     &        (ip-1), 0, SOLVER_COMM, req2, ierr)
+          call MPI_IRECV(d_single_ucd(1), num, CALYPSO_REAL,            &
+     &        (ip-1), 0, CALYPSO_COMM, req2, ierr)
 !
           call MPI_WAITALL (ione, req2, sta2, ierr)
 !
diff --git a/src/Fortran_libraries/PARALLEL_src/COMM_src/merged_vtk_data_IO.f90 b/src/Fortran_libraries/PARALLEL_src/COMM_src/merged_vtk_data_IO.f90
index 0a88f10..bde4eab 100644
--- a/src/Fortran_libraries/PARALLEL_src/COMM_src/merged_vtk_data_IO.f90
+++ b/src/Fortran_libraries/PARALLEL_src/COMM_src/merged_vtk_data_IO.f90
@@ -17,6 +17,7 @@
       module merged_vtk_data_IO
 !
       use m_precision
+!
       use calypso_mpi
       use m_constants
       use m_parallel_var_dof
@@ -132,15 +133,15 @@
 !
         if(my_rank .eq. isend_rank) then
           num = nele*nnod_ele
-          call MPI_ISEND(ie(1,1), num, MPI_INTEGER,                     &
-     &        izero, 0, SOLVER_COMM, req1, ierr)
+          call MPI_ISEND(ie(1,1), num, CALYPSO_INTEGER,                 &
+     &        izero, 0, CALYPSO_COMM, req1, ierr)
         end if
 !C
 !C-- RECV
         if(my_rank .eq. 0) then
           num = (istack_numele(ip) - istack_numele(ip-1)) * nnod_ele
-          call MPI_IRECV(ie_single_ucd(1), num, MPI_INTEGER,            &
-     &        isend_rank, 0, SOLVER_COMM, req2, ierr)
+          call MPI_IRECV(ie_single_ucd(1), num, CALYPSO_INTEGER,        &
+     &        isend_rank, 0, CALYPSO_COMM, req2, ierr)
 !
           call MPI_WAITALL (ione, req2, sta2, ierr)
 !
@@ -189,15 +190,15 @@
 !C-- SEND
         if(my_rank .eq. isend_rank ) then
           num = numnod*ncomp_field
-          call MPI_ISEND(d_nod(1,1), num, MPI_DOUBLE_PRECISION,         &
-     &        izero, 0, SOLVER_COMM, req1, ierr)
+          call MPI_ISEND(d_nod(1,1), num, CALYPSO_REAL,                 &
+     &        izero, 0, CALYPSO_COMM, req1, ierr)
         end if
 !C
 !C-- RECV
         if(my_rank .eq. 0) then
           num = (istack_numnod(ip) - istack_numnod(ip-1)) * ncomp_field
-          call MPI_IRECV(d_single_ucd(1), num, MPI_DOUBLE_PRECISION,    &
-     &        isend_rank, 0, SOLVER_COMM, req2, ierr)
+          call MPI_IRECV(d_single_ucd(1), num, CALYPSO_REAL,            &
+     &        isend_rank, 0, CALYPSO_COMM, req2, ierr)
 !
           call MPI_WAITALL (ione, req2, sta2, ierr)
 !
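
Both merged-output writers (UCD and VTK) gather data to rank 0 one process at a time: the sending rank posts an MPI_ISEND addressed to rank 0, rank 0 posts the matching MPI_IRECV, waits on it, and writes the received block before moving to the next process. A condensed sketch of one round of that loop, with placeholder buffer names and assuming req1/req2/sta2 are declared as in the sources above:

      if(my_rank .eq. isend_rank) then
        call MPI_ISEND(local_data(1), num, CALYPSO_REAL,                &
     &      izero, 0, CALYPSO_COMM, req1, ierr)
      end if
!
      if(my_rank .eq. 0) then
        call MPI_IRECV(gathered_data(1), num, CALYPSO_REAL,             &
     &      isend_rank, 0, CALYPSO_COMM, req2, ierr)
        call MPI_WAITALL(ione, req2, sta2, ierr)
!       ( rank 0 writes the received block to the file here )
      end if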
diff --git a/src/Fortran_libraries/PARALLEL_src/COMM_src/nodal_vector_send_recv.f90 b/src/Fortran_libraries/PARALLEL_src/COMM_src/nodal_vector_send_recv.f90
index ebd5aed..ed0b0c7 100644
--- a/src/Fortran_libraries/PARALLEL_src/COMM_src/nodal_vector_send_recv.f90
+++ b/src/Fortran_libraries/PARALLEL_src/COMM_src/nodal_vector_send_recv.f90
@@ -76,6 +76,7 @@
 !
       subroutine nod_vector_send_recv(vec_nod)
 !
+      use m_array_for_send_recv
       use solver_SR_3
 !
       real(kind = kreal), intent(inout) :: vec_nod(numnod,3)
@@ -93,7 +94,8 @@
       START_TIME= MPI_WTIME()
       call SOLVER_SEND_RECV_3(numnod, num_neib, id_neib,                &
      &                        istack_import, item_import,               &
-     &                        istack_export, item_export, x_vec(1) )
+     &                        istack_export, item_export,               &
+     &                        x_vec(1) )
       END_TIME= MPI_WTIME()
       COMMtime = COMMtime + END_TIME - START_TIME
 !
@@ -111,6 +113,7 @@
 !
       subroutine nod_tensor_send_recv(tsr_nod)
 !
+      use m_array_for_send_recv
       use solver_SR_6
 !
       real(kind = kreal), intent(inout) :: tsr_nod(numnod,6)
@@ -131,8 +134,7 @@
       START_TIME= MPI_WTIME()
       call SOLVER_SEND_RECV_6(numnod, num_neib, id_neib,                &
      &                        istack_import, item_import,               &
-     &                        istack_export, item_export,               &
-     &                        x_vec(1))
+     &                        istack_export, item_export, x_vec(1) )
       END_TIME= MPI_WTIME()
       COMMtime = COMMtime + END_TIME - START_TIME
 !
diff --git a/src/Fortran_libraries/PARALLEL_src/SPH_SHELL_src/cal_rms_fields_by_sph.f90 b/src/Fortran_libraries/PARALLEL_src/SPH_SHELL_src/cal_rms_fields_by_sph.f90
index 63b47f0..749f466 100644
--- a/src/Fortran_libraries/PARALLEL_src/SPH_SHELL_src/cal_rms_fields_by_sph.f90
+++ b/src/Fortran_libraries/PARALLEL_src/SPH_SHELL_src/cal_rms_fields_by_sph.f90
@@ -1,13 +1,18 @@
-!cal_rms_fields_by_sph.f90
-!      module cal_rms_fields_by_sph
-!
-!     Written by H. Matsui on Feb., 2008
-!
-!      subroutine init_rms_4_sph_spectr
-!
-!      subroutine cal_rms_sph_spec_rms_whole
-!      subroutine cal_rms_sph_outer_core
-!      subroutine cal_rms_sph_inner_core
+!>@file   cal_rms_fields_by_sph.f90
+!!@brief      module cal_rms_fields_by_sph
+!!
+!!@author H. Matsui and H. Okuda
+!!@date Programmed in  Dec., 2012
+!
+!> @brief evaluate mean square data from spectr data
+!!
+!!@verbatim
+!!      subroutine init_rms_4_sph_spectr
+!!
+!!      subroutine cal_rms_sph_spec_rms_whole
+!!      subroutine cal_rms_sph_outer_core
+!!      subroutine cal_rms_sph_inner_core
+!!@endverbatim
 !
       module cal_rms_fields_by_sph
 !
@@ -106,6 +111,7 @@
 !
       subroutine cal_rms_sph_spec_local
 !
+      use calypso_mpi
       use m_parallel_var_dof
       use m_spheric_parameter
       use m_phys_constants
@@ -147,7 +153,7 @@
 !
       num = ntot_rms_rj * nidx_rj(1)
       call MPI_allREDUCE (ave_sph_lc(1,1), ave_sph(1,1), num,           &
-     &  MPI_DOUBLE_PRECISION, MPI_SUM, SOLVER_COMM, ierr)
+     &    CALYPSO_REAL, MPI_SUM, CALYPSO_COMM, ierr)
 !
       call sum_sph_layerd_rms
 !
diff --git a/src/Fortran_libraries/PARALLEL_src/SPH_SHELL_src/pickup_gauss_coefficients.f90 b/src/Fortran_libraries/PARALLEL_src/SPH_SHELL_src/pickup_gauss_coefficients.f90
index dc5c343..69680d7 100644
--- a/src/Fortran_libraries/PARALLEL_src/SPH_SHELL_src/pickup_gauss_coefficients.f90
+++ b/src/Fortran_libraries/PARALLEL_src/SPH_SHELL_src/pickup_gauss_coefficients.f90
@@ -1,10 +1,15 @@
-!pickup_gauss_coefficients.f90
-!      module pickup_gauss_coefficients
-!
-!        programmed by H.Matsui on Dec., 2012
-!
+!>@file   pickup_gauss_coefficients.f90
+!!@brief      module pickup_gauss_coefficients
+!!
+!!@author H. Matsui and H. Okuda
+!!@date Programmed in  Dec., 2012
+!
+!> @brief choose Gauss coefficients to output
+!!
+!!@verbatim
 !      subroutine init_gauss_coefs_4_monitor
 !      subroutine cal_gauss_coefficients
+!!@endverbatim
 !
       module pickup_gauss_coefficients
 !
@@ -62,6 +67,7 @@
 !
       subroutine cal_gauss_coefficients
 !
+      use calypso_mpi
       use m_parallel_var_dof
       use m_sph_spectr_data
       use m_sph_phys_address
@@ -93,8 +99,8 @@
 !$omp end parallel do
 !
       call MPI_allREDUCE(gauss_coef_lc(1), gauss_coef_gl(1),            &
-     &    num_pick_gauss_mode, MPI_DOUBLE_PRECISION, MPI_SUM,           &
-     &    SOLVER_COMM, ierr)
+     &    num_pick_gauss_mode, CALYPSO_REAL, MPI_SUM, CALYPSO_COMM,     &
+     &    ierr)
 !
       end subroutine cal_gauss_coefficients
 !
diff --git a/src/Fortran_libraries/PARALLEL_src/SPH_SHELL_src/pickup_sph_coefs.f90 b/src/Fortran_libraries/PARALLEL_src/SPH_SHELL_src/pickup_sph_coefs.f90
index d43d6d7..2b3820a 100644
--- a/src/Fortran_libraries/PARALLEL_src/SPH_SHELL_src/pickup_sph_coefs.f90
+++ b/src/Fortran_libraries/PARALLEL_src/SPH_SHELL_src/pickup_sph_coefs.f90
@@ -1,10 +1,15 @@
-!pickup_sph_coefs.f90
-!      module pickup_sph_coefs
-!
-!        programmed by H.Matsui on Dec., 2012
-!
-!      subroutine init_sph_spec_4_monitor
-!      subroutine pickup_sph_spec_4_monitor
+!>@file   pickup_sph_coefs.f90
+!!@brief      module pickup_sph_coefs
+!!
+!!@author H. Matsui and H. Okuda
+!!@date Programmed in  Dec., 2012
+!
+!> @brief choose spectr data to output
+!!
+!!@verbatim
+!!      subroutine init_sph_spec_4_monitor
+!!      subroutine pickup_sph_spec_4_monitor
+!!@endverbatim
 !
       module pickup_sph_coefs
 !
@@ -71,6 +76,7 @@
 !
       subroutine pickup_sph_spec_4_monitor
 !
+      use calypso_mpi
       use m_parallel_var_dof
 !
       integer(kind = kint) :: inum, knum, j, k, nd,icou, i_fld
@@ -120,7 +126,7 @@
 !
       num = ncomp_pick_sph_coef*num_pick_layer*num_pick_sph_mode
       call MPI_allREDUCE(d_rj_pick_sph_lc(1,1), d_rj_pick_sph_gl(1,1),  &
-     &    num, MPI_DOUBLE_PRECISION, MPI_SUM, SOLVER_COMM, ierr)
+     &    num, CALYPSO_REAL, MPI_SUM, CALYPSO_COMM, ierr)
 !
       end subroutine pickup_sph_spec_4_monitor
 !
diff --git a/src/Fortran_libraries/PARALLEL_src/SPH_SHELL_src/pickup_sph_rms_spectr.f90 b/src/Fortran_libraries/PARALLEL_src/SPH_SHELL_src/pickup_sph_rms_spectr.f90
index 9ccf0d9..a5e775a 100644
--- a/src/Fortran_libraries/PARALLEL_src/SPH_SHELL_src/pickup_sph_rms_spectr.f90
+++ b/src/Fortran_libraries/PARALLEL_src/SPH_SHELL_src/pickup_sph_rms_spectr.f90
@@ -1,12 +1,17 @@
-!pickup_sph_rms_spectr.f90
-!      module pickup_sph_rms_spectr
-!
-!        programmed by H.Matsui on Dec., 2012
-!
-!      subroutine init_sph_rms_4_monitor
-!
-!      subroutine pickup_sph_rms_4_monitor
-!      subroutine pickup_sph_rms_vol_monitor
+!>@file   pickup_sph_rms_spectr.f90
+!!@brief      module pickup_sph_rms_spectr
+!!
+!!@author H. Matsui and H. Okuda
+!!@date Programmed in  Dec., 2012
+!
+!> @brief choose mean square data to output
+!!
+!!@verbatim
+!!      subroutine init_sph_rms_4_monitor
+!!
+!!      subroutine pickup_sph_rms_4_monitor
+!!      subroutine pickup_sph_rms_vol_monitor
+!!@endverbatim
 !
       module pickup_sph_rms_spectr
 !
@@ -79,6 +84,7 @@
 !
       subroutine pickup_sph_rms_4_monitor
 !
+      use calypso_mpi
       use m_parallel_var_dof
 !
       integer(kind = kint) :: inum, knum, j, k, nd
@@ -111,8 +117,8 @@
 !
       num = ntot_rms_rj*num_pick_rms_layer*num_pick_sph_rms_mode
       call MPI_allREDUCE(d_rms_pick_sph_lc(1,1),                        &
-     &    d_rms_pick_sph_gl(1,1), num, MPI_DOUBLE_PRECISION, MPI_SUM,   &
-     &    SOLVER_COMM, ierr)
+     &    d_rms_pick_sph_gl(1,1), num, CALYPSO_REAL, MPI_SUM,           &
+     &    CALYPSO_COMM, ierr)
 !
       end subroutine pickup_sph_rms_4_monitor
 !
@@ -120,6 +126,7 @@
 !
       subroutine pickup_sph_rms_vol_monitor
 !
+      use calypso_mpi
       use m_parallel_var_dof
 !
       integer(kind = kint) :: inum, j, nd, num
@@ -146,8 +153,8 @@
 !
       num = ntot_rms_rj*num_pick_sph_rms_mode
       call MPI_allREDUCE(d_rms_pick_sph_lc(1,1),                        &
-     &    d_rms_pick_sph_gl(1,1), num, MPI_DOUBLE_PRECISION, MPI_SUM,   &
-     &    SOLVER_COMM, ierr)
+     &    d_rms_pick_sph_gl(1,1), num, CALYPSO_REAL, MPI_SUM,           &
+     &    CALYPSO_COMM, ierr)
 !
       end subroutine pickup_sph_rms_vol_monitor
 !
diff --git a/src/Fortran_libraries/PARALLEL_src/SPH_SHELL_src/select_fourier_transform.F90 b/src/Fortran_libraries/PARALLEL_src/SPH_SHELL_src/select_fourier_transform.F90
index 71eb752..2c353f3 100644
--- a/src/Fortran_libraries/PARALLEL_src/SPH_SHELL_src/select_fourier_transform.F90
+++ b/src/Fortran_libraries/PARALLEL_src/SPH_SHELL_src/select_fourier_transform.F90
@@ -94,6 +94,8 @@
 !
       subroutine test_fourier_trans_vector(ncomp, Nstacksmp, etime_fft)
 !
+      use calypso_mpi
+!
       integer(kind = kint), intent(in) :: ncomp
       integer(kind = kint), intent(in) :: Nstacksmp(0:np_smp)
       real(kind = kreal), intent(inout) :: etime_fft
@@ -114,7 +116,7 @@
       call finalize_FFT_select(np_smp)
 !
       call MPI_allREDUCE (etime, etime_fft, ione,                       &
-     &    MPI_DOUBLE_PRECISION, MPI_SUM, SOLVER_COMM, ierr)
+     &    CALYPSO_REAL, MPI_SUM, CALYPSO_COMM, ierr)
 !
       end subroutine test_fourier_trans_vector
 !
diff --git a/src/Fortran_libraries/PARALLEL_src/SPH_SHELL_src/sum_b_trans_at_pole.f90 b/src/Fortran_libraries/PARALLEL_src/SPH_SHELL_src/sum_b_trans_at_pole.f90
index 860da6b..2185e03 100644
--- a/src/Fortran_libraries/PARALLEL_src/SPH_SHELL_src/sum_b_trans_at_pole.f90
+++ b/src/Fortran_libraries/PARALLEL_src/SPH_SHELL_src/sum_b_trans_at_pole.f90
@@ -1,65 +1,74 @@
-!sum_b_trans_at_pole.f90
-!      module sum_b_trans_at_pole
-!
-!     Written by H. Matsui on July, 2007
-!
-!
-!      subroutine set_pole_flag_4_sph_trans(numnod, internal_node)
-!
-!      subroutine sum_b_trans_pole_scalar(nb)
-!      subroutine sum_b_trans_pole_vect(nb)
-!      subroutine sum_b_trans_center_scalar(nb)
-!      subroutine sum_b_trans_center_vect(nb)
-!
-!------------------------------------------------------------------
-!
-!      vr =  l*(l+1)*Y(l,0)* S(l,0) / r**2
-!      vt =  (dYdt(l,1s)*dSdr(l,1s)
-!           + dYdt(l,1c)*dSdr(l,1c))  / r
-!         + cos(theta) * (d2Ydtdp(l,1s)*T(l,1s) 
-!                       + d2Ydtdp(l,1c)*T(l,1c))  / r
-!      vp = cos(theta) * (d2Ydtdp(l,1s)*dSdr(l,1s)
-!                       + d2Ydtdp(l,1c)*dSdr(l,1c))  / r
-!           -(dYdt(l,1s)*T(l,1s)
-!           + dYdt(l,1c)*T(l,1c))  / r
-!
-!  if phi = 0
-!
-!      vr =  l*(l+1)*P(l,0)* S(l,0) / r**2
-!      vt =  dPdt(l,1)*dSdr(l,1c)  / r
-!         + cos(theta) * dPdt(l,1)*T(l,1s) / r
-!      vp = cos(theta) * dPdt(l,1)*dSdr(l,1s) / r
-!           - dPdt(l,1)*T(l,1c)  / r
-!
-! if z > 0 (North pole)
-!
-!      vx = vt
-!      vy = vp
-!      vz = vr
-!
-! if z < 0 (South pole)
-!
-!      vx = -vt
-!      vy =  vp
-!      vz = -vr
-!
-!------------------------------------------------------------------
-!------------------------------------------------------------------
-!
-! if r= 0 (Center)
-!
-!      vz =  2 * P(1,0) * S(1,0) / r_c**2
-!         =  2 * S(1,0) / r_c**2
-!      vx =  2 * dPdt(l,1) * S(1,1c) / r_c**2
-!         = - 2 * S(1,1c) / r_c**2
-!      vy =  2 * dPdt(l,1) * S(1,1s) / r_c**2
-!         = - 2 * S(1,1s) / r_c**2
-!------------------------------------------------------------------
+!>@file   sum_b_trans_at_pole.f90
+!!@brief      module sum_b_trans_at_pole
+!!
+!!@author H. Matsui
+!!@date Programmed in 2009
+!
+!> @brief  Sum field data at poles and center from spherical harmonics coefficients
+!!
+!!@verbatim
+!!      subroutine set_pole_flag_4_sph_trans(numnod, internal_node)
+!!
+!!      subroutine sum_b_trans_pole_scalar(nb)
+!!      subroutine sum_b_trans_pole_vect(nb)
+!!      subroutine sum_b_trans_center_scalar(nb)
+!!      subroutine sum_b_trans_center_vect(nb)
+!!
+!!------------------------------------------------------------------
+!!
+!!      vr =  l*(l+1)*Y(l,0)* S(l,0) / r**2
+!!      vt =  (dYdt(l,1s)*dSdr(l,1s)
+!!           + dYdt(l,1c)*dSdr(l,1c))  / r
+!!         + cos(theta) * (d2Ydtdp(l,1s)*T(l,1s) 
+!!                       + d2Ydtdp(l,1c)*T(l,1c))  / r
+!!      vp = cos(theta) * (d2Ydtdp(l,1s)*dSdr(l,1s)
+!!                       + d2Ydtdp(l,1c)*dSdr(l,1c))  / r
+!!           -(dYdt(l,1s)*T(l,1s)
+!!           + dYdt(l,1c)*T(l,1c))  / r
+!!
+!!  if phi = 0
+!!
+!!      vr =  l*(l+1)*P(l,0)* S(l,0) / r**2
+!!      vt =  dPdt(l,1)*dSdr(l,1c)  / r
+!!         + cos(theta) * dPdt(l,1)*T(l,1s) / r
+!!      vp = cos(theta) * dPdt(l,1)*dSdr(l,1s) / r
+!!           - dPdt(l,1)*T(l,1c)  / r
+!!
+!! if z > 0 (North pole)
+!!
+!!      vx = vt
+!!      vy = vp
+!!      vz = vr
+!!
+!! if z < 0 (South pole)
+!!
+!!      vx = -vt
+!!      vy =  vp
+!!      vz = -vr
+!!
+!!------------------------------------------------------------------
+!!------------------------------------------------------------------
+!!
+!! if r= 0 (Center)
+!!
+!!      vz =  2 * P(1,0) * S(1,0) / r_c**2
+!!         =  2 * S(1,0) / r_c**2
+!!      vx =  2 * dPdt(l,1) * S(1,1c) / r_c**2
+!!         = - 2 * S(1,1c) / r_c**2
+!!      vy =  2 * dPdt(l,1) * S(1,1s) / r_c**2
+!!         = - 2 * S(1,1s) / r_c**2
+!!------------------------------------------------------------------
+!!@endverbatim
+!!
+!! @param  numnod           number of nodes in the FEM mesh
+!! @param  internal_node    number of internal nodes in the FEM mesh
+!! @param  nb               number of fields
 !
       module sum_b_trans_at_pole
 !
       use m_precision
 !
+      use calypso_mpi
       use m_constants
       use m_machine_parameter
       use m_spheric_parameter
@@ -100,7 +109,7 @@
       if(i_debug .eq. iflag_full_msg) write(*,*) 'iflag_shell_local',   &
      &     my_rank, iflag_shell_local, internal_node, nnod_rtp
       call MPI_allreduce(iflag_shell_local, iflag_shell_mode, ione,     &
-     &    MPI_INTEGER, MPI_MAX, SOLVER_COMM, ierr)
+     &    CALYPSO_INTEGER, MPI_MAX, CALYPSO_COMM, ierr)
       if(i_debug .eq. iflag_full_msg) write(*,*) 'iflag_shell_mode',    &
      &     my_rank, iflag_shell_mode
 !
@@ -126,9 +135,9 @@
 !
       ncomp = nb*nidx_rj(1)
       call MPI_allreduce(v_np_local, v_n_pole, ncomp,                   &
-     &    MPI_DOUBLE_PRECISION, MPI_SUM, SOLVER_COMM, ierr)
+     &    CALYPSO_REAL, MPI_SUM, CALYPSO_COMM, ierr)
       call MPI_allreduce(v_sp_local, v_s_pole, ncomp,                   &
-     &    MPI_DOUBLE_PRECISION, MPI_SUM, SOLVER_COMM, ierr)
+     &    CALYPSO_REAL, MPI_SUM, CALYPSO_COMM, ierr)
 !
       end subroutine sum_b_trans_pole_scalar
 !
@@ -146,9 +155,9 @@
 !
       ncomp = n_vector*nb*nidx_rj(1)
       call MPI_allreduce(v_np_local, v_n_pole, ncomp,                   &
-     &    MPI_DOUBLE_PRECISION, MPI_SUM, SOLVER_COMM, ierr)
+     &    CALYPSO_REAL, MPI_SUM, CALYPSO_COMM, ierr)
       call MPI_allreduce(v_sp_local, v_s_pole, ncomp,                   &
-     &    MPI_DOUBLE_PRECISION, MPI_SUM, SOLVER_COMM, ierr)
+     &    CALYPSO_REAL, MPI_SUM, CALYPSO_COMM, ierr)
 !
       end subroutine sum_b_trans_pole_vect
 !
@@ -163,7 +172,7 @@
       v_center =   zero
 !
       call MPI_allreduce(v_ct_local, v_center, nb,                      &
-     &    MPI_DOUBLE_PRECISION, MPI_SUM, SOLVER_COMM, ierr)
+     &    CALYPSO_REAL, MPI_SUM, CALYPSO_COMM, ierr)
 !
       end subroutine sum_b_trans_center_scalar
 !
@@ -180,7 +189,7 @@
 !
       ncomp = n_vector*nb
       call MPI_allreduce(v_ct_local, v_center, ncomp,                   &
-     &    MPI_DOUBLE_PRECISION, MPI_SUM, SOLVER_COMM, ierr)
+     &    CALYPSO_REAL, MPI_SUM, CALYPSO_COMM, ierr)
 !
       end subroutine sum_b_trans_center_vect
 !
diff --git a/src/Fortran_libraries/PARALLEL_src/SPH_SHELL_src/sum_sph_rms_data.f90 b/src/Fortran_libraries/PARALLEL_src/SPH_SHELL_src/sum_sph_rms_data.f90
index 2ab8cad..8fa3137 100644
--- a/src/Fortran_libraries/PARALLEL_src/SPH_SHELL_src/sum_sph_rms_data.f90
+++ b/src/Fortran_libraries/PARALLEL_src/SPH_SHELL_src/sum_sph_rms_data.f90
@@ -1,11 +1,16 @@
-!
-!      module sum_sph_rms_data
-!
-!      subroutine deallocate_rms_sph_local_data
-!
-!      subroutine set_sum_table_4_sph_spectr
-!
-!      subroutine sum_sph_layerd_rms
+!>@file   sum_sph_rms_data.f90
+!!@brief      module sum_sph_rms_data
+!!
+!!@author H. Matsui
+!!@date Programmed in 2009
+!
+!> @brief  Evaluate mean square by spherical harmonics coefficients
+!!
+!!@verbatim
+!!      subroutine deallocate_rms_sph_local_data
+!!      subroutine set_sum_table_4_sph_spectr
+!!      subroutine sum_sph_layerd_rms
+!!@endverbatim
 !
       module sum_sph_rms_data
 !
@@ -189,11 +194,11 @@
 !
       num = ntot_rms_rj * nidx_rj(1) * (l_truncation + 1)
       call MPI_allREDUCE (rms_sph_l_local(1,0,1), rms_sph_l(1,0,1),     &
-     &    num, MPI_DOUBLE_PRECISION, MPI_SUM, SOLVER_COMM, ierr)
+     &    num, CALYPSO_REAL, MPI_SUM, CALYPSO_COMM, ierr)
       call MPI_allREDUCE (rms_sph_m_local(1,0,1), rms_sph_m(1,0,1),     &
-     &    num, MPI_DOUBLE_PRECISION, MPI_SUM, SOLVER_COMM, ierr)
+     &    num, CALYPSO_REAL, MPI_SUM, CALYPSO_COMM, ierr)
       call MPI_allREDUCE (rms_sph_lm_local(1,0,1), rms_sph_lm(1,0,1),   &
-     &    num, MPI_DOUBLE_PRECISION, MPI_SUM, SOLVER_COMM, ierr)
+     &    num, CALYPSO_REAL, MPI_SUM, CALYPSO_COMM, ierr)
 !
       if(my_rank .gt. 0) return
 !


