[cig-commits] [commit] master: tidy up (953380e)

cig_noreply at geodynamics.org cig_noreply at geodynamics.org
Fri Oct 17 05:30:10 PDT 2014


Repository : https://github.com/geodynamics/axisem

On branch  : master
Link       : https://github.com/geodynamics/axisem/compare/607f803cf074063627513d235f9ed0837fc1dd44...b6457db24acdde4a4e1c08935ae1b22adf87f5bf

>---------------------------------------------------------------

commit 953380eedb4c3fbd4597d9160ad7948e3939bd3d
Author: martinvandriel <vandriel at erdw.ethz.ch>
Date:   Thu Oct 16 20:34:17 2014 +0200

    tidy up


>---------------------------------------------------------------

953380eedb4c3fbd4597d9160ad7948e3939bd3d
 SOLVER/def_grid.f90  | 68 ++++++++++++++++++++---------------------------
 SOLVER/meshes_io.F90 | 75 ----------------------------------------------------
 2 files changed, 29 insertions(+), 114 deletions(-)

diff --git a/SOLVER/def_grid.f90 b/SOLVER/def_grid.f90
index 7aa538f..796920b 100644
--- a/SOLVER/def_grid.f90
+++ b/SOLVER/def_grid.f90
@@ -18,7 +18,7 @@
 !    You should have received a copy of the GNU General Public License
 !    along with AxiSEM.  If not, see <http://www.gnu.org/licenses/>.
 !
-
+!=========================================================================================
 module def_grid
 
   use global_parameters
@@ -36,7 +36,7 @@ module def_grid
 
 contains
 
-!-----------------------------------------------------------------------------
+!-----------------------------------------------------------------------------------------
 !> This routine defines the arrays related to the chosen spectral-element 
 !! discretization. In particular, it computes the reference coordinates of 
 !! the collocation points,the global coordinates of these points mapped in 
@@ -209,9 +209,9 @@ subroutine init_grid
 
 
 end subroutine init_grid
-!=============================================================================
+!-----------------------------------------------------------------------------------------
 
-!-----------------------------------------------------------------------------
+!-----------------------------------------------------------------------------------------
 !> These memory-pricey arrays are not needed in the time loop and therefore 
 !! dynamically allocated and dropped at this point. 
 !! Any inevitable larger arrays are defined in data_matr or data_mesh.
@@ -260,9 +260,9 @@ subroutine deallocate_preloop_arrays
   if (lpr) write(6,*)'  Done deallocating mesh arrays.'; call flush(6)
 
 end subroutine deallocate_preloop_arrays
-!=============================================================================
+!-----------------------------------------------------------------------------------------
 
-!-----------------------------------------------------------------------------
+!-----------------------------------------------------------------------------------------
 !> Computes the UNASSEMBLED global mass matrix, i.e. spanning solid AND 
 !! fluid domains.
 !! (as opposed to routine def_mass_matrix_k which only computes those terms 
@@ -321,9 +321,9 @@ subroutine massmatrix(masstmp,nel,domain)
   end do
 
 end subroutine massmatrix
-!=============================================================================
+!-----------------------------------------------------------------------------------------
 
-!-----------------------------------------------------------------------------
+!-----------------------------------------------------------------------------------------
 !! Same as routine massmatrix above but in real(kind=dp).
 subroutine massmatrix_dble(masstmp,nel,domain)
 
@@ -374,9 +374,9 @@ subroutine massmatrix_dble(masstmp,nel,domain)
   end do
 
 end subroutine massmatrix_dble
-!=============================================================================
+!-----------------------------------------------------------------------------------------
 
-!-----------------------------------------------------------------------------
+!-----------------------------------------------------------------------------------------
 !> A wrapper for a multitude of tests related to various aspects of the mesh 
 !! (that is, prior to adding elastic properties) such as volume, surfaces, 
 !! valence & global numbering, solid-fluid boundary indexing, axial arrays, 
@@ -386,9 +386,6 @@ end subroutine massmatrix_dble
 !! Yet again, maybe not...
 subroutine mesh_tests
 
-  !use commun, only: mpi_asynch_messaging_test_solid
-  !use commun, only: mpi_asynch_messaging_test_fluid
-  
   ! Checking coordinate conformity 
   if (lpr) write(6,*)'  dumping element information...'
   call dump_coarsing_element_info
@@ -413,20 +410,12 @@ subroutine mesh_tests
   if (lpr) write(6,*)'  Checking out solid-fluid boundaries...'
   call check_solid_fluid_boundaries
 
-  ! Check message passing <><><><><><><><><><><><><><>><><><><><><><><><><><>
-  !if (nproc>1) then
-  !   if (lpr) write(6,*)'  Checking message-passing for solid...'
-  !   call mpi_asynch_messaging_test_solid
-  !   if (lpr) write(6,*)'  Checking message-passing for fluid...'
-  !   call mpi_asynch_messaging_test_fluid
-  !endif
-
   if (lpr) write(6,'(/,a,/)')'  >>> FINISHED mesh tests.'
 
 end subroutine mesh_tests
-!=============================================================================
+!-----------------------------------------------------------------------------------------
 
-!-----------------------------------------------------------------------------
+!-----------------------------------------------------------------------------------------
 !> Coarsing elements are all those that have any role in the non-spheroidal
 !! coarsening levels (i.e., contain some non-spheroidal edge), i.e. include
 !! "two entire depth levels" around the coarsening level.
@@ -516,9 +505,9 @@ subroutine dump_coarsing_element_info
 16 format('   ',a8,'has ',i6,a12,' elements')
 
 end subroutine dump_coarsing_element_info
-!=============================================================================
+!-----------------------------------------------------------------------------------------
 
-!-----------------------------------------------------------------------------
+!-----------------------------------------------------------------------------------------
 !> Check whether s, z, theta, and r conform. Just here as a debugging relic,
 !! but lingering well since not exactly pricey...
 subroutine check_physical_coordinates
@@ -588,9 +577,9 @@ subroutine check_physical_coordinates
   enddo
 
 end subroutine check_physical_coordinates
-!=============================================================================
+!-----------------------------------------------------------------------------------------
 
-!-----------------------------------------------------------------------------
+!-----------------------------------------------------------------------------------------
 !> Checks the various arrays related to the axis globally and in solid/fluid 
 !! subdomains; and runs test fields through the actual routines 
 !! used to mask those fields that vanish on the axis during the time loop.
@@ -999,9 +988,9 @@ subroutine check_axial_stuff
   deallocate(tmpflufield)
 
 end subroutine check_axial_stuff
-!=============================================================================
+!-----------------------------------------------------------------------------------------
 
-!-----------------------------------------------------------------------------
+!-----------------------------------------------------------------------------------------
 !> Compute surface area of all radii in the spherical part of the domain 
 !! numerically and compare to analytical values.
 !! Constitutes an accuracy test of the GLL and GLJ(0,1) integration
@@ -1189,15 +1178,15 @@ subroutine compute_spherical_surfaces
   spher_radii(2:num_spher_radii) = radii2(1:irad,2) ! take the ones below 
 
   ! sort spher_radii by inverse bubble sort
-  call BSORT2(spher_radii,num_spher_radii)
+  call bsort2(spher_radii, num_spher_radii)
 
   deallocate(radii2)
   deallocate(radsurf)
 
 end subroutine compute_spherical_surfaces
-!=============================================================================
+!-----------------------------------------------------------------------------------------
 
-!-----------------------------------------------------------------------------
+!-----------------------------------------------------------------------------------------
 !> A straight computation of the spherical volume of the sphere and its 
 !! solid and fluid sub-shells. 
 !! Accuracy for realkind=4 is typically O(1E-8) and for realkind=8 O(1E-12).
@@ -1354,9 +1343,9 @@ subroutine compute_volume
   deallocate(mass_fluid)
 
 end subroutine compute_volume
-!=============================================================================
+!-----------------------------------------------------------------------------------------
 
-!-----------------------------------------------------------------------------
+!-----------------------------------------------------------------------------------------
 !> S/F boundary tests
 !! define field on fluid side, copy to solid, check difference
 !! This routine does not "check" as in exiting (except for counting boundary 
@@ -1443,9 +1432,9 @@ subroutine check_solid_fluid_boundaries
   deallocate(tmpflufield)
 
 end subroutine check_solid_fluid_boundaries
-!=============================================================================
+!-----------------------------------------------------------------------------------------
 
-!-----------------------------------------------------------------------------
+!-----------------------------------------------------------------------------------------
 !> This routine returns the smallest grid-spacing
 !! between two neighbouring points in the meridional plane.
 subroutine compute_hmin_meri(hmin)
@@ -1481,9 +1470,9 @@ subroutine compute_hmin_meri(hmin)
   deallocate(dis2)
 
 end subroutine compute_hmin_meri
-!=============================================================================
+!-----------------------------------------------------------------------------------------
 
-!-----------------------------------------------------------------------------
+!-----------------------------------------------------------------------------------------
 !> Inverse bubble sort routine adapted from Ratzer's F90,C and Algorithms:
 !! http://www.cs.mcgill.ca/~ratzer/progs15_3.html
 subroutine bsort2(list,n)
@@ -1515,6 +1504,7 @@ subroutine bsort2(list,n)
   end do
 
 end subroutine bsort2
-!=============================================================================
+!-----------------------------------------------------------------------------------------
 
 end module def_grid
+!=========================================================================================
diff --git a/SOLVER/meshes_io.F90 b/SOLVER/meshes_io.F90
index cf84483..bf3040d 100644
--- a/SOLVER/meshes_io.F90
+++ b/SOLVER/meshes_io.F90
@@ -40,8 +40,6 @@ module meshes_io
   public :: dump_wavefields_mesh_1d
   public :: dump_glob_grid_midpoint
   public :: dump_xdmf_grid
-  !public :: dump_solid_grid
-  !public :: dump_fluid_grid
   public :: prepare_mesh_memoryvar_vtk
   public :: build_kwf_grid
   public :: dump_kwf_midpoint_xdmf
@@ -1101,77 +1099,6 @@ end subroutine
 !-----------------------------------------------------------------------------------------
 
 !-----------------------------------------------------------------------------------------
-!> Dumps the mesh (s,z) [m] in ASCII format as needed to visualize snapshots 
-!! in the solid region only.
-!! Convention for order in the file: First the fluid, then the solid domain.
-!subroutine dump_solid_grid(ibeg,iend,jbeg,jend)
-!
-!  
-!  integer, intent(in) :: ibeg,iend,jbeg,jend 
-!  integer             :: iel, ipol,jpol
-!
-!  open(unit=2500+mynum,file=datapath(1:lfdata)//'/solid_grid_'&
-!                            //appmynum//'.dat')
-!  do iel=1,nel_solid
-!     do jpol=jbeg,jend
-!        do ipol=ibeg,iend
-!           write(2500+mynum,*)scoord(ipol,jpol,ielsolid(iel)), &
-!                              zcoord(ipol,jpol,ielsolid(iel))
-!        enddo
-!     enddo
-!  enddo
-!  close(2500+mynum)
-!
-!end subroutine dump_solid_grid
-!-----------------------------------------------------------------------------------------
-
-!-----------------------------------------------------------------------------------------
-!> Dumps the mesh (s,z) [m] in ASCII format as needed to visualize snapshots 
-!! in the fluid region only, and additionally the constant factors preceding 
-!! the displacement in the fluid, namely rho^{-1} and (rho s)^{-1}.
-!! When reading the fluid wavefield, one therefore needs to multiply all 
-!! components with inv_rho_fluid and the phi component with one/scoord!
-!! Convention for order in the file: First the fluid, then the solid domain.
-!subroutine dump_fluid_grid(ibeg,iend,jbeg,jend)
-!
-!  use data_pointwise, only : inv_rho_fluid
-!  
-!  
-!  integer, intent(in) :: ibeg,iend,jbeg,jend
-!  integer             :: iel, ipol,jpol
-!  
-!  ! When reading the fluid wavefield, one needs to multiply all components 
-!  ! with inv_rho_fluid and the phi component with one/scoord!!
-!
-!  open(unit=2500+mynum,file=datapath(1:lfdata)//&
-!                            '/fluid_grid_'//appmynum//'.dat')
-!  open(unit=2600+mynum,file=datapath(1:lfdata)//&
-!                            '/inv_rho_scoord_fluid_flusnaps_'&
-!                            //appmynum//'.dat', STATUS="REPLACE")
-!  do iel=1,nel_fluid
-!     do jpol=jbeg,jend
-!        do ipol=ibeg,iend
-!           write(2500+mynum,*)scoord(ipol,jpol,ielfluid(iel)), &
-!                              zcoord(ipol,jpol,ielfluid(iel))
-!           if ( axis_fluid(iel) .and. ipol==0 ) then
-!              ! Axis s=0! write 1 instead of 1/s and then multiply 
-!              ! with the correct factor dsdchi, obtained by L'Hospital's rule 
-!              ! (see routine fluid_snapshot below).
-!              write(2600+mynum,*)inv_rho_fluid(ipol,jpol,iel),one
-!           else  
-!              write(2600+mynum,*)inv_rho_fluid(ipol,jpol,iel), &
-!                                 one/scoord(ipol,jpol,ielfluid(iel))
-!           endif
-!        enddo
-!     enddo
-!  enddo
-!  close(2500+mynum)
-!  close(2600+mynum)
-!
-!end subroutine dump_fluid_grid
-!-----------------------------------------------------------------------------------------
-
-!-----------------------------------------------------------------------------------------
 !> Dumps the mesh (s,z) [m] and related constant fields in binary format as 
 !! needed to compute waveform kernels from the strain and velocity fields. 
 !! The distinction between different dumping methods is honored here, 
@@ -1183,8 +1110,6 @@ end subroutine
 !! The latter choice is more memory- and CPU-efficient, but requires 
 !! significant post-processing AND dumping the entire SEM mesh. 
 !! See compute_strain in time_evol_wave.f90 for more info.
-!! 
-!! CURRENTLY HARDCODED TO dump_type=='fullfields'
 subroutine dump_wavefields_mesh_1d
 
   use data_mesh



More information about the CIG-COMMITS mailing list