[cig-commits] r18342 - in seismo/3D/FAULT_SOURCE/branches/new_fault_db: . CUBIT decompose_mesh_SCOTCH
percygalvez at geodynamics.org
Wed May 11 10:38:08 PDT 2011
Author: percygalvez
Date: 2011-05-11 10:38:08 -0700 (Wed, 11 May 2011)
New Revision: 18342
Added:
seismo/3D/FAULT_SOURCE/branches/new_fault_db/DATABASES_MPI/
Modified:
seismo/3D/FAULT_SOURCE/branches/new_fault_db/CUBIT/crack_fault.py
seismo/3D/FAULT_SOURCE/branches/new_fault_db/decompose_mesh_SCOTCH/decompose_mesh_SCOTCH.f90
seismo/3D/FAULT_SOURCE/branches/new_fault_db/decompose_mesh_SCOTCH/fault_scotch.f90
Log:
new database 11.05.2011
Modified: seismo/3D/FAULT_SOURCE/branches/new_fault_db/CUBIT/crack_fault.py
===================================================================
--- seismo/3D/FAULT_SOURCE/branches/new_fault_db/CUBIT/crack_fault.py 2011-05-11 06:12:33 UTC (rev 18341)
+++ seismo/3D/FAULT_SOURCE/branches/new_fault_db/CUBIT/crack_fault.py 2011-05-11 17:38:08 UTC (rev 18342)
@@ -37,9 +37,9 @@
x.append(0*km) #x8
y.append(0.0) #y5
-y.append(0.5*km) #y6
+y.append(0.45) #y6
y.append(0.0) #y7
-y.append(-0.5*km) #y8
+y.append(-0.45) #y8
z=[z_surf]*4
@@ -128,7 +128,7 @@
for f in faces:
if dic_quads_fault_u.has_key(f):
nodes=cubit.get_connectivity('Face',f)
-# print 'nodes :',nodes[0],nodes[1],nodes[2],nodes[3]
+ print 'h,fault nodes side up :',h,nodes[0],nodes[1],nodes[2],nodes[3]
txt='%10i %10i %10i %10i %10i\n' % (h,nodes[0],\
nodes[1],nodes[2],nodes[3])
fault_file.write(txt)
@@ -139,7 +139,7 @@
for f in faces:
if dic_quads_fault_d.has_key(f):
nodes=cubit.get_connectivity('Face',f)
-# print 'nodes :',nodes[0],nodes[1],nodes[2],nodes[3]
+ print 'h,fault nodes side down :',h,nodes[0],nodes[1],nodes[2],nodes[3]
txt='%10i %10i %10i %10i %10i\n' % (h,nodes[0],\
nodes[1],nodes[2],nodes[3])
fault_file.write(txt)
Modified: seismo/3D/FAULT_SOURCE/branches/new_fault_db/decompose_mesh_SCOTCH/decompose_mesh_SCOTCH.f90
===================================================================
--- seismo/3D/FAULT_SOURCE/branches/new_fault_db/decompose_mesh_SCOTCH/decompose_mesh_SCOTCH.f90 2011-05-11 06:12:33 UTC (rev 18341)
+++ seismo/3D/FAULT_SOURCE/branches/new_fault_db/decompose_mesh_SCOTCH/decompose_mesh_SCOTCH.f90 2011-05-11 17:38:08 UTC (rev 18342)
@@ -390,8 +390,7 @@
call read_fault_files(localpath_name)
call close_faults(nodes_coords,elmnts,nspec,nnodes,esize)
- !TEST
- stop
+
end subroutine read_mesh_files
!----------------------------------------------------------------------------------------------
@@ -531,7 +530,7 @@
! move all fault elements to the same partition (proc=0)
call fault_collect_elements(nspec,nnodes,elmnts, &
sup_neighbour,esize,nsize,nparts,part)
-
+
! re-partitioning puts moho-surface coupled elements into same partition
call moho_surface_repartitioning (nspec, nnodes, elmnts, &
sup_neighbour, nsize, nparts, part, &
@@ -561,7 +560,6 @@
! count_def_mat, mat_prop(3,:), mat(1,:), nparts)
-
end subroutine scotch_partitioning
@@ -649,7 +647,7 @@
nspec2D_moho,ibelm_moho,nodes_ibelm_moho)
close(15)
-
+
! write fault database
write(prname, "(i6.6,'_Database_fault')") ipart
open(unit=16,file=outputpath_name(1:len_trim(outputpath_name))//'/proc'//prname,&
@@ -661,10 +659,10 @@
stop
endif
call write_fault_database(16, ipart, nspec, &
- glob2loc_elmnts, glob2loc_nodes_nparts, glob2loc_nodes_parts, part)
+ glob2loc_elmnts, glob2loc_nodes_nparts, glob2loc_nodes_parts, &
+ glob2loc_nodes, part)
close(16)
-
-
+
end do
print*, 'partitions: '
print*, ' num = ',nparts
Modified: seismo/3D/FAULT_SOURCE/branches/new_fault_db/decompose_mesh_SCOTCH/fault_scotch.f90
===================================================================
--- seismo/3D/FAULT_SOURCE/branches/new_fault_db/decompose_mesh_SCOTCH/fault_scotch.f90 2011-05-11 06:12:33 UTC (rev 18341)
+++ seismo/3D/FAULT_SOURCE/branches/new_fault_db/decompose_mesh_SCOTCH/fault_scotch.f90 2011-05-11 17:38:08 UTC (rev 18342)
@@ -14,8 +14,8 @@
integer, parameter :: long = SELECTED_INT_KIND(18)
integer, parameter :: SIZE_REAL = 4 ! single precision
integer, parameter :: SIZE_DOUBLE = 8 ! double precision
- integer, parameter :: CUSTOM_REAL = SIZE_REAL
- double precision, parameter :: FAULT_GAP_TOLERANCE = 1e-2_CUSTOM_REAL
+ integer, parameter :: CUSTOM_REAL = SIZE_DOUBLE
+ double precision, parameter :: FAULT_GAP_TOLERANCE = 1e0_CUSTOM_REAL
public :: read_fault_files, fault_collect_elements, close_faults, write_fault_database
@@ -158,41 +158,28 @@
do i = 1,size(ispec2)
do k2=1,esize
-
iglob2 = elmnts(k2,ispec2(i))
found_it = .false.
xyz_2 = nodes_coords(:,iglob2)
- ! TEST
- print*,'iblob2:',iglob2
- print*,'xzy2 :',xyz_2
+
do j = 1,size(ispec1)
do k1=1,esize
-
iglob1 = elmnts(k1,ispec1(j))
xyz_1 = nodes_coords(:,iglob1)
- ! TEST
- print*,'iblob1:',iglob1
- print*,'xzy1 :',xyz_1
xyz = xyz_2-xyz_1
dist = xyz(1)*xyz(1) + xyz(2)*xyz(2) + xyz(3)*xyz(3)
dist = sqrt(dist)
- if (dist <= FAULT_GAP_TOLERANCE) then
+ if ((0.0_CUSTOM_REAL < dist) .and. (dist <= FAULT_GAP_TOLERANCE)) then
xyz = (xyz_1 + xyz_2)*0.5_CUSTOM_REAL
- ! TEST
- write(6,*) 'iblob2:',iglob2
- write(6,*) 'iblob1:',iglob1
- write(6,*) 'xzy1 :',xyz_1
- write(6,*) 'xzy2 :',xyz_2
- write(6,*) 'xzy :',xyz
nodes_coords(:,iglob2) = xyz
nodes_coords(:,iglob1) = xyz
found_it = .true.
- cycle
+ exit
endif
enddo
- if (found_it) cycle
+ if (found_it) exit
enddo
enddo
@@ -254,7 +241,7 @@
integer :: max_neighbour
!SHILDING
- integer :: i,j, ipart,nproc_null
+ integer :: i,j, ipart,nproc_null,nproc_null_final
integer :: el, el_1, el_2, k1, k2
logical :: is_repartitioned
integer, dimension(:), allocatable :: elem_proc_null
@@ -265,30 +252,31 @@
print*, 'Elements proc = 0 redistributed in [{nproc}- nproc0] :'
print*, nproc_null
- allocate(elem_proc_null(nproc_null))
+ if ( nproc_null /= 0 ) then
+
+ allocate(elem_proc_null(nproc_null))
+ ! Filling up proc = 0 elements
+ nproc_null = 0
+ do i = 1,nelmnts
+ if ( part(i) == 0 ) then
+ nproc_null = nproc_null + 1
+ elem_proc_null(nproc_null) = i
+ end if
+ end do
+ ! Redistributing proc-0 elements on the rest of processors
+ !jpa: why do this? does it always help balancing ?
+ !pgb: Yes, bulk elements in processor 0 are taken out and redistributed.
+ !pgb: leaving more space for fault elements.
+ !jpa: But if the number of fault elements is much smaller than nproc_null
+ ! we will end up with a very UNbalanced proc 0 !
+ ipart=0
+ do i = 1, nproc_null
+ if ( ipart == nproc ) ipart = 0
+ ipart = ipart +1
+ part(elem_proc_null(i)) = ipart
+ end do
- ! Filling up proc = 0 elements
- nproc_null = 0
- do i = 1,nelmnts
- if ( part(i) == 0 ) then
- nproc_null = nproc_null + 1
- elem_proc_null(nproc_null) = i
- end if
- end do
-
- ! Redistributing proc-0 elements on the rest of processors
- !jpa: why do this? does it always help balancing ?
- !pgb: Yes, bulk elements in processor 0 are taken out and redistributed.
- !pgb: leaving more space for fault elements.
- !jpa: But if the number of fault elements is much smaller than nproc_null
- ! we will end up with a very UNbalanced proc 0 !
- ipart=0
- do i = 1, nproc_null
- if ( ipart == nproc ) ipart = 0
- ipart = ipart +1
- part(elem_proc_null(i)) = ipart
- end do
-
+ endif
call mesh2dual_ncommonnodes_fault(nelmnts, nnodes, nsize, sup_neighbour, &
elmnts, xadj, adjncy, nnodes_elmnts, &
nodes_elmnts, max_neighbour, 4, esize)
@@ -307,7 +295,7 @@
! 1 2
! Allocating elements with double shield layer
- print *, "Fault shield double-layer"
+ print *, "Fault shield double-layer :"
do el = 0, nelmnts-1
if ( is_on_fault(el+1) ) then
part(el) = 0
@@ -322,6 +310,9 @@
endif
enddo
+ nproc_null_final = count( part == 0 )
+ print *, nproc_null_final
+
end subroutine fault_repartition
! ---------------------------------------------------------------------------------------------------
@@ -333,17 +324,17 @@
! next lines: #id_(element containing the face) #id_node1_face .. #id_node4_face
! first for all faces on side 1, then side 2
- subroutine write_fault_database(IIN_database, iproc, nelmnts, &
- glob2loc_elmnts,glob2loc_nodes_nparts,glob2loc_nodes_parts,part)
+ subroutine write_fault_database(IIN_database, iproc, nelmnts, glob2loc_elmnts, &
+ glob2loc_nodes_nparts, glob2loc_nodes_parts, glob2loc_nodes, part)
integer, intent(in) :: IIN_database
integer, intent(in) :: iproc
integer(long), intent(in) :: nelmnts
integer, dimension(1:nelmnts), intent(in) :: part
integer, dimension(0:nelmnts-1), intent(in) :: glob2loc_elmnts
- integer, dimension(:), pointer :: glob2loc_nodes_nparts
- integer, dimension(:), pointer :: glob2loc_nodes_parts
- integer, dimension(:), pointer :: glob2loc_nodes
+ integer, dimension(:), intent(in) :: glob2loc_nodes_nparts
+ integer, dimension(:), intent(in) :: glob2loc_nodes_parts
+ integer, dimension(:), intent(in) :: glob2loc_nodes
integer :: i,j,k,iflt,e
integer :: nspec_fault_1,nspec_fault_2
More information about the CIG-COMMITS mailing list