[cig-commits] r23005 - seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D

dkomati1 at geodynamics.org
Thu Mar 6 19:05:46 PST 2014


Author: dkomati1
Date: 2014-03-06 19:05:46 -0800 (Thu, 06 Mar 2014)
New Revision: 23005

Modified:
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/assemble_MPI_scalar.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/assemble_MPI_vector.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/compute_forces_acoustic_calling_routine.F90
   seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/compute_forces_viscoelastic_calling_routine.F90
Log:
removed the unused Mesh_pointer dummy argument from assemble_MPI_scalar_send_cuda() and assemble_MPI_vector_send_cuda(), and updated all call sites in src/specfem3D/compute_forces_acoustic_calling_routine.F90 and src/specfem3D/compute_forces_viscoelastic_calling_routine.F90 accordingly
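
In Fortran a dummy argument and its call sites must change in lockstep, which is why all four files are touched in a single revision. Below is a minimal, self-contained sketch of the failure mode this guards against (illustrative names only, not SPECFEM code): when such a routine lives in a module, its explicit interface lets the compiler reject any stale call that still passes the removed argument.

  module assemble_demo
    implicit none
  contains
    ! after the cleanup: the unused pointer argument is gone
    subroutine send_buffers(nproc, buf)
      integer, intent(in) :: nproc
      real,    intent(in) :: buf(:)
      print *, 'nproc =', nproc, '  sum(buf) =', sum(buf)
    end subroutine send_buffers
  end module assemble_demo

  program check_call_sites
    use assemble_demo
    implicit none
    real :: buf(4) = 1.0
    ! a stale call such as
    !   call send_buffers(mesh_pointer, 4, buf)
    ! no longer compiles against the explicit interface;
    ! the updated call site does:
    call send_buffers(4, buf)
  end program check_call_sites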


Modified: seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/assemble_MPI_scalar.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/assemble_MPI_scalar.f90	2014-03-07 02:56:42 UTC (rev 23004)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/assemble_MPI_scalar.f90	2014-03-07 03:05:46 UTC (rev 23005)
@@ -246,7 +246,7 @@
 !-------------------------------------------------------------------------------------------------
 
 
-  subroutine assemble_MPI_scalar_send_cuda(Mesh_pointer,NPROC, &
+  subroutine assemble_MPI_scalar_send_cuda(NPROC, &
                                           buffer_send_scalar,buffer_recv_scalar, &
                                           num_interfaces,max_nibool_interfaces, &
                                           nibool_interfaces, &
@@ -262,8 +262,6 @@
 
   implicit none
 
-  integer(kind=8) :: Mesh_pointer
-
   integer :: NPROC
   integer :: num_interfaces,max_nibool_interfaces
 

Modified: seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/assemble_MPI_vector.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/assemble_MPI_vector.f90	2014-03-07 02:56:42 UTC (rev 23004)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/assemble_MPI_vector.f90	2014-03-07 03:05:46 UTC (rev 23005)
@@ -170,7 +170,7 @@
 ! check: MP_CSS_INTERRUPT environment variable on IBM systems
 
 
-  subroutine assemble_MPI_vector_send_cuda(Mesh_pointer,NPROC, &
+  subroutine assemble_MPI_vector_send_cuda(NPROC, &
                                           buffer_send_vector,buffer_recv_vector, &
                                           num_interfaces,max_nibool_interfaces, &
                                           nibool_interfaces, &
@@ -183,8 +183,6 @@
 
   implicit none
 
-  integer(kind=8) :: Mesh_pointer
-
   integer :: NPROC
 
   integer :: num_interfaces,max_nibool_interfaces

Modified: seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/compute_forces_acoustic_calling_routine.F90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/compute_forces_acoustic_calling_routine.F90	2014-03-07 02:56:42 UTC (rev 23004)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/compute_forces_acoustic_calling_routine.F90	2014-03-07 03:05:46 UTC (rev 23005)
@@ -111,7 +111,7 @@
         ! wait for asynchronous copy to finish
         call sync_copy_from_device(Mesh_pointer,iphase,buffer_send_scalar_outer_core,IREGION_OUTER_CORE,1)
         ! sends mpi buffers
-        call assemble_MPI_scalar_send_cuda(Mesh_pointer,NPROCTOT_VAL, &
+        call assemble_MPI_scalar_send_cuda(NPROCTOT_VAL, &
                                            buffer_send_scalar_outer_core,buffer_recv_scalar_outer_core, &
                                            num_interfaces_outer_core,max_nibool_interfaces_oc, &
                                            nibool_interfaces_outer_core,&
@@ -198,7 +198,7 @@
         if( .not. GPU_ASYNC_COPY ) then
           ! for synchronuous transfers, sending over mpi can directly proceed
           ! outer core
-          call assemble_MPI_scalar_send_cuda(Mesh_pointer,NPROCTOT_VAL, &
+          call assemble_MPI_scalar_send_cuda(NPROCTOT_VAL, &
                                              buffer_send_scalar_outer_core,buffer_recv_scalar_outer_core, &
                                              num_interfaces_outer_core,max_nibool_interfaces_oc, &
                                              nibool_interfaces_outer_core,&
@@ -372,7 +372,7 @@
         ! wait for asynchronous copy to finish
         call sync_copy_from_device(Mesh_pointer,iphase,b_buffer_send_scalar_outer_core,IREGION_OUTER_CORE,3)
         ! sends mpi buffers
-        call assemble_MPI_scalar_send_cuda(Mesh_pointer,NPROCTOT_VAL, &
+        call assemble_MPI_scalar_send_cuda(NPROCTOT_VAL, &
                               b_buffer_send_scalar_outer_core,b_buffer_recv_scalar_outer_core, &
                               num_interfaces_outer_core,max_nibool_interfaces_oc, &
                               nibool_interfaces_outer_core,&
@@ -466,7 +466,7 @@
         if( .not. GPU_ASYNC_COPY ) then
           ! for synchronuous transfers, sending over mpi can directly proceed
           ! outer core
-          call assemble_MPI_scalar_send_cuda(Mesh_pointer,NPROCTOT_VAL, &
+          call assemble_MPI_scalar_send_cuda(NPROCTOT_VAL, &
                                 b_buffer_send_scalar_outer_core,b_buffer_recv_scalar_outer_core, &
                                 num_interfaces_outer_core,max_nibool_interfaces_oc, &
                                 nibool_interfaces_outer_core,&
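
All four hunks in this file follow the same control flow: with GPU_ASYNC_COPY enabled, the code blocks in sync_copy_from_device() until the asynchronous device-to-host copy of the boundary buffer has finished, and only then posts the MPI messages; with it disabled, the send proceeds directly. A skeleton of that flow, with stubs standing in for the real routines:

  program overlap_pattern
    implicit none
    logical, parameter :: GPU_ASYNC_COPY = .true.
    ! the device-to-host copy was launched earlier, overlapped
    ! with computation on the interior elements; block only now,
    ! just before the buffer is handed to MPI
    if (GPU_ASYNC_COPY) call wait_for_device_copy()
    call post_mpi_sends()
  contains
    subroutine wait_for_device_copy()
      print *, 'stub: sync_copy_from_device(...)'
    end subroutine wait_for_device_copy
    subroutine post_mpi_sends()
      print *, 'stub: assemble_MPI_scalar_send_cuda(...)'
    end subroutine post_mpi_sends
  end program overlap_pattern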

Modified: seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/compute_forces_viscoelastic_calling_routine.F90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/compute_forces_viscoelastic_calling_routine.F90	2014-03-07 02:56:42 UTC (rev 23004)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/src/specfem3D/compute_forces_viscoelastic_calling_routine.F90	2014-03-07 03:05:46 UTC (rev 23005)
@@ -159,7 +159,7 @@
         ! wait for asynchronous copy to finish
         call sync_copy_from_device(Mesh_pointer,iphase,buffer_send_vector_crust_mantle,IREGION_CRUST_MANTLE,1)
         ! sends mpi buffers
-        call assemble_MPI_vector_send_cuda(Mesh_pointer,NPROCTOT_VAL, &
+        call assemble_MPI_vector_send_cuda(NPROCTOT_VAL, &
                       buffer_send_vector_crust_mantle,buffer_recv_vector_crust_mantle, &
                       num_interfaces_crust_mantle,max_nibool_interfaces_cm, &
                       nibool_interfaces_crust_mantle,&
@@ -176,7 +176,7 @@
         ! wait for asynchronous copy to finish
         call sync_copy_from_device(Mesh_pointer,iphase,buffer_send_vector_inner_core,IREGION_INNER_CORE,1)
         ! sends mpi buffers
-        call assemble_MPI_vector_send_cuda(Mesh_pointer,NPROCTOT_VAL, &
+        call assemble_MPI_vector_send_cuda(NPROCTOT_VAL, &
                       buffer_send_vector_inner_core,buffer_recv_vector_inner_core, &
                       num_interfaces_inner_core,max_nibool_interfaces_ic, &
                       nibool_interfaces_inner_core,&
@@ -321,14 +321,14 @@
         if( .not. GPU_ASYNC_COPY ) then
           ! for synchronuous transfers, sending over mpi can directly proceed
           ! crust mantle
-          call assemble_MPI_vector_send_cuda(Mesh_pointer,NPROCTOT_VAL, &
+          call assemble_MPI_vector_send_cuda(NPROCTOT_VAL, &
                         buffer_send_vector_crust_mantle,buffer_recv_vector_crust_mantle, &
                         num_interfaces_crust_mantle,max_nibool_interfaces_cm, &
                         nibool_interfaces_crust_mantle,&
                         my_neighbours_crust_mantle, &
                         request_send_vector_cm,request_recv_vector_cm)
           ! inner core
-          call assemble_MPI_vector_send_cuda(Mesh_pointer,NPROCTOT_VAL, &
+          call assemble_MPI_vector_send_cuda(NPROCTOT_VAL, &
                         buffer_send_vector_inner_core,buffer_recv_vector_inner_core, &
                         num_interfaces_inner_core,max_nibool_interfaces_ic, &
                         nibool_interfaces_inner_core,&
@@ -609,7 +609,7 @@
         ! wait for asynchronous copy to finish
         call sync_copy_from_device(Mesh_pointer,iphase,b_buffer_send_vector_cm,IREGION_CRUST_MANTLE,3)
         ! sends mpi buffers
-        call assemble_MPI_vector_send_cuda(Mesh_pointer,NPROCTOT_VAL, &
+        call assemble_MPI_vector_send_cuda(NPROCTOT_VAL, &
                       b_buffer_send_vector_cm,b_buffer_recv_vector_cm, &
                       num_interfaces_crust_mantle,max_nibool_interfaces_cm, &
                       nibool_interfaces_crust_mantle,&
@@ -627,7 +627,7 @@
         call sync_copy_from_device(Mesh_pointer,iphase,b_buffer_send_vector_inner_core,IREGION_INNER_CORE,3)
 
         ! sends mpi buffers
-        call assemble_MPI_vector_send_cuda(Mesh_pointer,NPROCTOT_VAL, &
+        call assemble_MPI_vector_send_cuda(NPROCTOT_VAL, &
                       b_buffer_send_vector_inner_core,b_buffer_recv_vector_inner_core, &
                       num_interfaces_inner_core,max_nibool_interfaces_ic, &
                       nibool_interfaces_inner_core,&
@@ -757,14 +757,14 @@
         if( .not. GPU_ASYNC_COPY ) then
           ! for synchronuous transfers, sending over mpi can directly proceed
           ! crust mantle
-          call assemble_MPI_vector_send_cuda(Mesh_pointer,NPROCTOT_VAL, &
+          call assemble_MPI_vector_send_cuda(NPROCTOT_VAL, &
                       b_buffer_send_vector_cm,b_buffer_recv_vector_cm, &
                       num_interfaces_crust_mantle,max_nibool_interfaces_cm, &
                       nibool_interfaces_crust_mantle,&
                       my_neighbours_crust_mantle, &
                       b_request_send_vector_cm,b_request_recv_vector_cm)
           ! inner core
-          call assemble_MPI_vector_send_cuda(Mesh_pointer,NPROCTOT_VAL, &
+          call assemble_MPI_vector_send_cuda(NPROCTOT_VAL, &
                       b_buffer_send_vector_inner_core,b_buffer_recv_vector_inner_core, &
                       num_interfaces_inner_core,max_nibool_interfaces_ic, &
                       nibool_interfaces_inner_core,&
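
The sends posted here are only half of the exchange; the matching completion step (a sketch of the pattern, not a copy of the actual receive/assembly routine) waits on the returned requests before the received contributions are summed into the wavefield:

  ! sketch: complete the non-blocking traffic started by the
  ! send routine; req_send/req_recv are the request arrays it
  ! returned (e.g. request_send_vector_cm, request_recv_vector_cm)
  subroutine finish_exchange(num_interfaces, req_send, req_recv)
    use mpi
    implicit none
    integer, intent(in)    :: num_interfaces
    integer, intent(inout) :: req_send(num_interfaces), req_recv(num_interfaces)
    integer :: ier
    ! receives first, so assembly of neighbour contributions can start
    call MPI_WAITALL(num_interfaces, req_recv, MPI_STATUSES_IGNORE, ier)
    call MPI_WAITALL(num_interfaces, req_send, MPI_STATUSES_IGNORE, ier)
  end subroutine finish_exchange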


