[cig-commits] r16283 - in seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS: . Cluster Cluster/IDRIS Cluster/MareNostrum_Barcelona Cluster/lsf Cluster/lsf/remap_database Cluster/pbs Cluster/sge

danielpeter at geodynamics.org
Sat Feb 20 11:47:37 PST 2010


Author: danielpeter
Date: 2010-02-20 11:47:34 -0800 (Sat, 20 Feb 2010)
New Revision: 16283

Added:
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/IDRIS/
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/IDRIS/script_IDRIS_MPI.ll
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/IDRIS/script_IDRIS_OpenMP.ll
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/IDRIS/script_IDRIS_serial.ll
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/Makefile_things_to_add_to_use_ParaVer_and_PAPI.txt
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/README_important_paths_on_MareNostrum.txt
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/README_mpitrace_explained.xml
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/bash_profile_MareNostrum
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/generate_trace_marenostrum.sh
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/mpitrace.xml
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/script_marenostrum_parallel_merge_trace.ll
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/script_marenostrum_parallel_run_with_trace_mx.ll
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/script_marenostrum_parallel_run_without_trace_mx.ll
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/go_mesher
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/go_solver
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/cleanbase.pl
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/cleanbase_all_nodes.bash
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/cleanbase_lsf_multi.pl
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/go_mesher_solver_lsf_globe.bash
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/go_mesher_solver_lsf_globe.daniel.kernel
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/go_mesher_solver_lsf_globe.kernel
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/remap_database/
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/remap_database/compile
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/remap_database/exit_mpi.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/remap_database/old_machines
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/remap_database/remap.bash
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/remap_database/remap_database.f90
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/remap_database/run_lsf
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/pbs/
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/pbs/go_mesher_pbs.bash
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/pbs/go_solver_pbs.bash
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/pbs/run_pbs.bash
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/pbs/valgrind_go_solver_pbs.bash
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/run3d.csh
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/sge/
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/sge/go_mesher_sge.bash
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/sge/go_solver_sge.bash
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/sge/run_sge.bash
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/test_cache_size.f90
Removed:
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/MareNostrum_Barcelona/
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/cleanbase.pl
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/cleanbase_all_nodes.bash
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/cleanbase_lsf_multi.pl
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/estimate_best_values_runs/
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/go_mesher
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/go_mesher_solver_lsf_globe.bash
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/go_mesher_solver_lsf_globe.kernel
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/go_solver
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/remap_database/
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/remap_lsf_machines.pl
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/run3d.csh
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/run_lsf.bash
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/run_lsf.kernel
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/script_IDRIS_MPI.ll
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/script_IDRIS_OpenMP.ll
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/script_IDRIS_serial.ll
   seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/test_cache_size.f90
Log:
files and scripts related to clusters in UTILS/ have been collected in the subdirectory
SPECFEM3D_GLOBE/UTILS/Cluster.

new files have been added for the different schedulers in:
SPECFEM3D_GLOBE/UTILS/Cluster/pbs/
SPECFEM3D_GLOBE/UTILS/Cluster/lsf/
SPECFEM3D_GLOBE/UTILS/Cluster/sge/ 
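
each of these is submitted with the standard client command of the corresponding
scheduler; a minimal sketch (queue names, core counts and relative paths here are
hypothetical, the scripts are normally run from the run directory):

  bsub -q normal -n 150 < lsf/go_mesher_solver_lsf_globe.bash   # LSF
  qsub pbs/go_mesher_pbs.bash                                   # PBS
  qsub sge/go_mesher_sge.bash                                   # SGE
  llsubmit IDRIS/script_IDRIS_MPI.ll                            # LoadLeveler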



Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/IDRIS/script_IDRIS_MPI.ll
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/IDRIS/script_IDRIS_MPI.ll	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/IDRIS/script_IDRIS_MPI.ll	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,80 @@
+
+# Choice of shell
+# @ shell = /bin/ksh
+
+# Number of MPI processes requested
+# @ total_tasks = 24
+
+# Maximum CPU time per MPI process in hh:mm:ss
+# @ cpu_limit = 04:59:59
+
+# Maximum memory used per process in data and in stack
+# @ data_limit = 2.0Gb
+
+# @ stack_limit = 1.0Gb,1.0Gb
+
+# Arbitrary name of the LoadLeveler job
+# @ job_name = run_SPECFEM3D_acoustic_MPI
+
+#----------------------------------------------------
+
+# Job type
+# @ job_type = parallel
+
+# Standard output file of the job
+# @ output = $(job_name).$(jobid)
+
+# Error output file of the job
+# @ error =  $(job_name).$(jobid)
+
+# @ queue
+
+#----------------------------------------------------
+
+# Echo all commands
+set -x
+
+##### name of the directory from which the code is launched
+##### and name of the subdirectory in which the database will be stored;
+##### the LOADL_STEP_INITDIR variable is automatically set by
+##### LoadLeveler to the directory in which the llsubmit command is typed
+export repertoire_code=$( basename $LOADL_STEP_INITDIR )
+export repertoire_database=DATABASES_MPI_DIMITRI
+
+# empty the subdirectories in the starting directory
+rm -r -f $LOADL_STEP_INITDIR/OUTPUT_FILES $LOADL_STEP_INITDIR/$repertoire_database
+
+# copy the source code from the starting directory to the temporary directory
+rm -r -f $TMPDIR/$repertoire_code
+cp -r -p $LOADL_STEP_INITDIR $TMPDIR
+
+# create the new subdirectories in the temporary directory
+mkdir $TMPDIR/$repertoire_code/OUTPUT_FILES $TMPDIR/$repertoire_code/$repertoire_database
+
+# go to the temporary directory
+cd $TMPDIR/$repertoire_code
+
+# compile the mesher and run it with MPI
+make clean
+make meshfem3D
+./xmeshfem3D
+
+# compile the solver and run it with MPI
+make clean
+make specfem3D
+./xspecfem3D
+
+# move the seismograms to the work directory
+mv $TMPDIR/$repertoire_code/$repertoire_database/*.semd $TMPDIR/$repertoire_code
+
+# delete the database that was created, because it is very large
+rm -r -f $TMPDIR/$repertoire_code/$repertoire_database
+
+# retrieve the job ID
+export myjobid=$( echo $LOADL_STEP_ID | cut -d'.' -f4 )
+
+# move all the results to the workdir, appending the job ID to the name;
+# first go to home so that the current tmpdir directory can be deleted
+cd $HOME
+mv $TMPDIR/$repertoire_code $WORKDIR/${repertoire_code}_${myjobid}
+
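
A typical way to use this script is to submit it with llsubmit from the directory
that contains the code, since LoadLeveler sets LOADL_STEP_INITDIR to the submission
directory; a sketch (the directory name is hypothetical):

  cd ~/my_code_directory        # becomes $LOADL_STEP_INITDIR
  llsubmit script_IDRIS_MPI.ll
  llq -u $USER                  # check the status of the job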

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/IDRIS/script_IDRIS_OpenMP.ll
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/IDRIS/script_IDRIS_OpenMP.ll	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/IDRIS/script_IDRIS_OpenMP.ll	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,59 @@
+# Arbitrary name of the LoadLeveler job
+# @ job_name = Maxwell_3D_FDTD_PML_OpenMP
+
+# Job type
+# @ job_type = serial
+
+# Choice of command interpreter
+# @ shell = /bin/ksh
+
+# Standard output file of the job
+# @ output = $(job_name).$(jobid)
+
+# Error output file of the job
+# @ error =  $(job_name).$(jobid)
+
+# Max CPU time in seconds (for 1 hour of elapsed time, count OMP_NUM_THREADS hours)
+# @ cpu_limit = 43200
+
+# Max data memory used
+# @ data_limit = 11.5Gb
+
+# Max stack memory used
+# @ stack_limit = 1.2Gb,1.2Gb
+
+# Number of processors to assign to the OpenMP threads
+# (here 4, see the OMP_NUM_THREADS variable below).
+# @ resources = ConsumableCpus(4)
+
+# @ queue
+
+# Echo all commands
+set -x
+
+# Temporary work directory
+################cd $TMPDIR
+
+# The LOADL_STEP_INITDIR variable is automatically set by
+# LoadLeveler to the directory in which the llsubmit command is typed
+################cp $LOADL_STEP_INITDIR/source.f .
+
+# Work directory
+cd $HOME/code_3D/with_PML_OpenMP_4tasks
+
+# Compile and link a free-format OpenMP program
+rm -f xonde3D
+xlf_r -qsmp=omp -O4 -qfree=f90 -qsuffix=f=f90 -o xonde3D onde3d_mathieu_maxwell_PML_12oct2005.f90
+
+# Max STACK memory (default 4 MB) used by the private
+# variables of each thread (here 64 MB)
+export XLSMPOPTS=stack=65536000
+
+# Environment variable giving the number of OpenMP threads
+# (use a value identical to the one set above
+# in the ConsumableCpus directive)
+export OMP_NUM_THREADS=4
+
+# Run the OpenMP program
+./xonde3D
+
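
Note that cpu_limit above is a total CPU-time budget summed over the OpenMP
threads; a quick sanity check of the elapsed time it allows (a sketch using the
values from the directives above):

  # 43200 CPU seconds shared among 4 threads:
  echo $(( 43200 / 4 ))   # 10800 s of elapsed time, i.e. 3 hours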

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/IDRIS/script_IDRIS_serial.ll
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/IDRIS/script_IDRIS_serial.ll	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/IDRIS/script_IDRIS_serial.ll	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,73 @@
+
+# Choice of shell
+# @ shell = /bin/ksh
+
+# Max CPU time per process in hh:mm:ss
+# @ cpu_limit = 00:59:59
+
+# Max memory used per process in data and in stack
+# @ data_limit = 14.5Gb
+
+# @ stack_limit = 1.0Gb,1.0Gb
+
+# Arbitrary name of the LoadLeveler job
+# @ job_name = run_CPML_serial
+
+#----------------------------------------------------
+
+# Standard output file of the job
+# @ output = $(job_name).$(jobid)
+
+# Error output file of the job
+# @ error =  $(job_name).$(jobid)
+
+# @ queue
+
+#----------------------------------------------------
+
+# Echo all commands
+set -x
+
+##### name of the directory from which the code is launched
+##### and name of the subdirectory in which the database will be stored;
+##### the LOADL_STEP_INITDIR variable is automatically set by
+##### LoadLeveler to the directory in which the llsubmit command is typed
+export repertoire_code=$( basename $LOADL_STEP_INITDIR )
+export repertoire_database=DATABASES_MPI_DIMITRI
+
+# empty the directories in the home directory
+rm -r -f $LOADL_STEP_INITDIR/OUTPUT_FILES $LOADL_STEP_INITDIR/$repertoire_database
+
+# copy the source code from home to the temporary directory
+rm -r -f $TMPDIR/$repertoire_code
+cp -r -p $LOADL_STEP_INITDIR $TMPDIR
+
+# create the new temporary directories
+mkdir $TMPDIR/$repertoire_code/OUTPUT_FILES $TMPDIR/$repertoire_code/$repertoire_database
+
+cd $TMPDIR/$repertoire_code
+
+# compile the mesher and run it
+make clean
+make meshfem3D
+./xmeshfem3D
+
+# compile the solver and run it
+make clean
+make specfem3D
+./xspecfem3D
+
+# move the seismograms to the work directory
+mv $TMPDIR/$repertoire_code/$repertoire_database/*.semd $TMPDIR/$repertoire_code
+
+# delete the database that was created
+rm -r -f $TMPDIR/$repertoire_code/$repertoire_database
+
+# retrieve the job ID
+export myjobid=$( echo $LOADL_STEP_ID | cut -d'.' -f4 )
+
+# move all the results to the workdir, appending the job ID to the name;
+# first go to home so that the current tmpdir directory can be deleted
+cd $HOME
+mv $TMPDIR/$repertoire_code $WORKDIR/${repertoire_code}_${myjobid}
+


Property changes on: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/IDRIS/script_IDRIS_serial.ll
___________________________________________________________________
Name: svn:executable
   + *

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/Makefile_things_to_add_to_use_ParaVer_and_PAPI.txt
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/Makefile_things_to_add_to_use_ParaVer_and_PAPI.txt	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/Makefile_things_to_add_to_use_ParaVer_and_PAPI.txt	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,9 @@
+
+May 2009: there is no longer any need to add tracing libraries to the Makefile
+in order to generate ParaVer traces. Just type this on MareNostrum:
+
+./configure FC=xlf_r
+
+and then edit the Makefile to use
+-qtune=ppc970 -qarch=ppc64v instead of -qtune=auto -qarch=auto
+
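
The same flag substitution can also be done non-interactively; a hypothetical sed
one-liner, assuming both flags appear on a single line of the Makefile:

  sed -i -e 's/-qtune=auto -qarch=auto/-qtune=ppc970 -qarch=ppc64v/' Makefile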

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/README_important_paths_on_MareNostrum.txt
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/README_important_paths_on_MareNostrum.txt	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/README_important_paths_on_MareNostrum.txt	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,16 @@
+
+Correct path of ParaVer (May 2009):
+/gpfs/apps/CEPBATOOLS/mpitrace-mx/64
+
+Correct path of the ParaVer examples (May 2009):
+/gpfs/apps/CEPBATOOLS/mpitrace-mx/64/share/example
+
+Storage space available (do NOT use the home directory, which is limited to 20 GB):
+
+# added in May 2009: directories with a lot of free space
+## DK DK it is best to work in "projects", NOT "scratch",
+## DK DK because it is bigger and more or less permanent
+
+alias cdprojects='pushd /gpfs/projects/bsc42/bsc42023'
+alias cdscratch='pushd /gpfs/scratch/bsc42/bsc42023'
+

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/README_mpitrace_explained.xml
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/README_mpitrace_explained.xml	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/README_mpitrace_explained.xml	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,172 @@
+<?xml version='1.0'?>
+
+<!-- Here comes the MPItrace configuration.
+     As a general rule, "enabled" means that the feature is enabled :)  If
+     it's not enabled, then the value can be set to some default.
+-->
+
+<!-- Should tracing be activated? What is the tracing mode (detail/bursts)? Where is the library located? Which kind of trace? Version of the XML parser? -->
+<trace enabled="yes"
+ home="/gpfs/apps/CEPBATOOLS/mpitrace-mx/64"
+ initial-mode="detail"
+ type="paraver"
+ xml-parser-id="Id: xml-parse.c,v 1.20 2008/05/19 13:55:11 harald Exp $"
+>
+
+  <!-- Configuration of some MPI-dependent values -->
+  <mpi enabled="yes">
+    <!-- Gather counters in the MPI routines? -->
+    <counters enabled="yes" />
+  </mpi>
+
+  <!-- Emit information of the callstack -->
+  <callers enabled="yes">
+    <!-- At MPI calls, select depth level -->
+    <mpi enabled="yes">1-3</mpi>
+    <!-- At sampling points, select depth level -->
+    <sampling enabled="yes">1-5</sampling>
+  </callers>
+
+  <!-- Configuration of some OpenMP-dependent values -->
+  <openmp enabled="no">
+    <!-- If the library instruments OpenMP, shall we gather info about locks?
+         Obtaining such information can make the final trace quite large.
+    -->
+    <locks enabled="no" />
+    <!-- Gather counters in the OpenMP routines? -->
+    <counters enabled="yes" />
+  </openmp>
+
+  <!-- Configuration of User Functions -->
+  <user-functions enabled="yes" list="/home/bsc41/bsc41273/user-functions.dat">
+    <!-- Set a depth limit for the UF routines -->
+    <max-depth enabled="no">3</max-depth>
+    <!-- Gather counters on the UF routines? -->
+    <counters enabled="yes" />
+  </user-functions>
+
+  <!-- Configure which software/hardware counters must be collected -->
+  <counters enabled="yes">
+    <!-- Configure the CPU hardware counters. You can define here as many sets
+         as you want. You can also define whether MPI/OpenMP calls must report
+         such counters.
+         The starting-set property defines which set is chosen by every task.
+         Possible values are:
+           - cyclic : The sets are distributed in a cyclic fashion among all
+           tasks. So Task 0 takes set 1, Task 1 takes set 2,...
+           - block  : The sets are distributed in block fashion among all tasks.
+           Task [0..i-1] takes set 1, Task [i..2*i-1] takes set 2, ...
+           - Number : All the tasks will start with the given set 
+           (from 1..N).
+    -->
+    <cpu enabled="yes" starting-set-distribution="1">
+      <!-- In this example, we configure two sets of counters. The first will
+           be changed into the second after 5 calls to some collective
+           operation on MPI_COMM_WORLD. Once the second is activated, it will
+           turn back into the first after 5 seconds (approx., depending on the
+           granularity of the MPI calls).
+           If you want a set to keep counting forever, just don't set
+           changeat-globalops or changeat-time.
+
+           Each set has its own properties:
+           domain -> in which domain PAPI must obtain the information (see
+                       the PAPI info)
+           changeat-globalops=num -> choose the next set after num
+                       MPI_COMM_WORLD operations
+           changeat-time=numTime -> choose the next set after the given time
+                       (for example 5s, 15m, ...)
+      -->
+      <set enabled="yes" domain="all" changeat-globalops="5">
+        PAPI_TOT_INS,PAPI_TOT_CYC,PAPI_L1_DCM
+        <!-- Sample the application at a given frequency based on a selected HWC -->
+        <sampling enabled="yes" frequency="100000000">PAPI_TOT_CYC</sampling>
+      </set>
+      <set enabled="yes" domain="user" changeat-globalops="5">
+        PAPI_TOT_INS,PAPI_FP_INS,PAPI_TOT_CYC
+      </set>
+    </cpu>
+
+    <!-- Do we want to gather information from the network counters?
+         Nowadays we can gather information about MX/GM cards.
+     -->
+    <network enabled="yes" />
+
+    <!-- Obtain resource usage information -->
+    <resource-usage enabled="yes" />
+  </counters>
+
+  <!-- Define the characteristics of the tracing storage. If not defined
+       or not set, the traces will be written to the current directory
+       with a default output name.
+  -->
+  <storage enabled="no">
+    <!-- The intermediate files will take the name of the application -->
+    <trace-prefix enabled="yes">TRACE</trace-prefix>
+    <!-- Stop the tracing when the intermediate file reaches this size in MB -->
+    <size enabled="no">5</size>
+    <!-- Where must we store the MPIT files while the app runs?
+         Set make-dir to yes if you want to make this directory before running -->
+    <temporal-directory enabled="yes" make-dir="no">/scratch</temporal-directory>
+    <!-- Where must we store the MPIT files once the app ends?
+         Set make-dir to yes if you want to make this directory before running -->
+    <final-directory enabled="yes" make-dir="no">/gpfs/scratch/bsc41/bsc41273</final-directory>
+    <!-- Must the MPITs be gathered once the app ends into a single node? -->
+    <gather-mpits enabled="no" />
+  </storage>
+
+  <!-- Buffer configuration -->
+  <buffer enabled="yes">
+    <!-- How many events can we handle before any flush -->
+    <size enabled="yes">150000</size>
+    <!-- Use the event buffer in a circular manner? You can use this option to
+         trace the last set of events. Requires the MPI global routines
+         operating on the MPI_COMM_WORLD communicator to be merged.
+    -->
+    <circular enabled="no" />
+  </buffer>
+
+  <!-- Control tracing -->
+  <trace-control enabled="yes">
+    <!-- We can start the application with a "latent tracing" and wake it up
+         once a control file is created. Use the property 'frequency' to
+         choose at which frequency this check must be done. If not supplied,
+         it will be checked every 100 global operations on MPI_COMM_WORLD.
+    -->
+    <file enabled="no" frequency="5m">/gpfs/scratch/bsc41/bsc41273/control</file>
+    <!-- 
+    -->
+    <global-ops enabled="no"></global-ops>
+    <!-- A signal can be used to terminate the tracing. The only available
+         values are USR1/USR2. Some MPI implementations handle one of those, so
+         check first which one is available to you. -->
+    <remote-control enabled="no" method="signal">USR1</remote-control>
+  </trace-control>
+
+  <!-- Other options -->
+  <others enabled="yes">
+    <!-- Want to force a minimum amount of tracing time? Here we force 10
+         minutes. -->
+    <minimum-time enabled="no">10m</minimum-time>
+  </others>
+
+  <!-- Bursts library enabled? This requires a special library! -->
+  <bursts enabled="no">
+    <!-- Specify the threshold. This is mandatory! In this example, the
+         threshold is limited to 500 microseconds.
+     -->
+    <threshold enabled="yes">500u</threshold>
+    <!-- Report MPI statistics? -->
+    <mpi-statistics enabled="yes" />
+  </bursts>
+
+  <!-- Cell BE specific tracing options -->
+  <cell enabled="no">
+    <!-- Limit the size of the intermediate files for every SPU -->
+    <spu-file-size enabled="yes">5</spu-file-size>
+    <!-- What is the buffer size per SPU? -->
+    <spu-buffer-size enabled="yes">64</spu-buffer-size>
+    <!-- Which DMA tag to use for the tracing flushes? -->
+    <spu-dma-channel enabled="yes">2</spu-dma-channel>
+  </cell>
+
+</trace>

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/bash_profile_MareNostrum
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/bash_profile_MareNostrum	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/bash_profile_MareNostrum	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,73 @@
+
+alias l1='ssh login1'
+alias l2='ssh login2'
+alias l3='ssh login3'
+alias l4='ssh login4'
+alias l5='ssh login5'
+alias l6='ssh login6'
+alias l7='ssh login7'
+alias l8='ssh login8'
+
+alias vi='/usr/bin/vim -i NONE'
+
+alias gosis='pushd /gpfs/scratch/hpce07/hpce07084/seismograms_BSC'
+
+alias gotrace='pushd /gpfs/scratch/hpce07/hpce07084/traces_BSC'
+alias gotraces='pushd /gpfs/scratch/hpce07/hpce07084/traces_BSC'
+
+# added in June 2007: directories with ParaVer and LoadLeveler examples
+alias goparaver='pushd /gpfs/apps/CEPBATOOLS/tracing-example'
+alias goloadlever='pushd /home/usertest/usertest'
+
+# allow existing files to be overwritten
+unset noclobber
+ 
+alias rm='/bin/rm -f'
+
+export PATH=/gpfs/apps/CEPBATOOLS/bin:/usr/local/bin:/gpfs/home/hpce07/hpce07084/bin:.:$PATH
+export PARAVER_HOME=/gpfs/apps/CEPBATOOLS/
+umask 077
+
+# access to MareNostrum
+alias sshmarenostrum='ssh hpce07084 at mn1.bsc.es'
+
+# miscellaneous definitions
+#####alias .='echo $cwd'
+alias ..='set dot=$cwd;cd ..'
+alias ls='/bin/ls '
+alias ll='/bin/ls -lagF '
+alias lm='/bin/ls -lagF | less'
+alias po='popd'
+alias pu='pushd'
+alias k='kill -9 '
+alias h='history'
+alias cls='clear'
+alias m='less'
+alias more='less'
+alias bc='bc -l'
+
+# du in megabytes with only one level of subdirectories
+##alias du='du --max-depth=1 --block-size=MB --human-readable'
+alias du='du --max-depth=1 --human-readable'
+
+# set prompt: ``username at hostname:/directory $ ''
+#PS1="[\u@\h:\w] "
+PS1="\u@\h: "
+
+# Check for bash (and that we haven't already been sourced).
+[ -z "$BASH_VERSION" -o -n "$BASH_COMPLETION" ] && return
+
+# Check for recent enough version of bash.
+bash=${BASH_VERSION%.*}; bmajor=${bash%.*}; bminor=${bash#*.}
+
+# Check for interactive shell.
+if [ -n "$PS1" ]; then
+  if [ $bmajor -eq 2 -a $bminor '>' 04 ] || [ $bmajor -gt 2 ]; then
+    if [ -r /etc/bash_completion ]; then
+      # Source completion code (with variable location)
+      . ${BASH_COMPLETION:=/etc/bash_completion}
+    fi
+  fi
+fi
+unset bash bminor bmajor
+

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/generate_trace_marenostrum.sh
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/generate_trace_marenostrum.sh	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/generate_trace_marenostrum.sh	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+export MPITRACE_HOME=/gpfs/apps/CEPBATOOLS/mpitrace-mx/64
+export MPTRACE_CONFIG_FILE=mpitrace.xml
+
+export LD_PRELOAD=${MPITRACE_HOME}/lib/libmpitrace.so
+
+## Run the desired program
+"$@"
+
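
This wrapper is meant to be prefixed to the executable on the srun command line,
as done in the run script below; for example:

  srun ./generate_trace_marenostrum.sh ./xspecfem3D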

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/mpitrace.xml
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/mpitrace.xml	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/mpitrace.xml	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,78 @@
+<?xml version='1.0'?>
+
+<trace enabled="yes"
+ home="/gpfs/apps/CEPBATOOLS/mpitrace-mx/64"
+ initial-mode="detail"
+ type="paraver"
+ xml-parser-id="Id: xml-parse.c,v 1.20 2008/05/19 13:55:11 harald Exp $"
+>
+  <mpi enabled="yes">
+    <counters enabled="yes" />
+  </mpi>
+
+  <openmp enabled="no">
+    <locks enabled="no" />
+    <counters enabled="yes" />
+  </openmp>
+
+  <callers enabled="yes">
+    <mpi enabled="yes">1-3</mpi>
+    <sampling enabled="no">1-5</sampling>
+  </callers>
+
+  <user-functions enabled="no" list="/home/bsc41/bsc41273/user-functions.dat">
+    <max-depth enabled="no">3</max-depth>
+    <counters enabled="yes" />
+  </user-functions>
+
+  <counters enabled="yes">
+    <cpu enabled="yes" starting-set-distribution="1">
+      <set enabled="yes" domain="all" changeat-globalops="0">
+        PAPI_TOT_INS,PAPI_TOT_CYC,PAPI_L1_DCM
+        <sampling enabled="no" frequency="100000000">PAPI_TOT_CYC</sampling>
+      </set>
+      <set enabled="yes" domain="user" changeat-globalops="0">
+        PAPI_TOT_INS,PAPI_FP_INS,PAPI_TOT_CYC
+      </set>
+    </cpu>
+
+    <network enabled="no" />
+
+    <resource-usage enabled="yes" />
+  </counters>
+
+  <storage enabled="no">
+    <trace-prefix enabled="yes">TRACE</trace-prefix>
+    <size enabled="no">5</size>
+    <temporal-directory enabled="yes" make-dir="no">/scratch</temporal-directory>
+    <final-directory enabled="yes" make-dir="no">/gpfs/scratch/bsc41/bsc41273</final-directory>
+    <gather-mpits enabled="no" />
+  </storage>
+
+  <buffer enabled="yes">
+    <size enabled="yes">250000</size>
+    <circular enabled="no" />
+  </buffer>
+
+  <trace-control enabled="no">
+    <file enabled="no" frequency="5m">/gpfs/scratch/bsc41/bsc41273/control</file>
+    <global-ops enabled="no"></global-ops>
+    <remote-control enabled="no" method="signal">USR1</remote-control>
+  </trace-control>
+
+  <others enabled="no">
+    <minimum-time enabled="no">10m</minimum-time>
+  </others>
+
+  <bursts enabled="no">
+    <threshold enabled="yes">500u</threshold>
+    <mpi-statistics enabled="yes" />
+  </bursts>
+
+  <cell enabled="no">
+    <spu-file-size enabled="yes">5</spu-file-size>
+    <spu-buffer-size enabled="yes">64</spu-buffer-size>
+    <spu-dma-channel enabled="yes">2</spu-dma-channel>
+  </cell>
+
+</trace>

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/script_marenostrum_parallel_merge_trace.ll
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/script_marenostrum_parallel_merge_trace.ll	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/script_marenostrum_parallel_merge_trace.ll	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,36 @@
+#! /bin/ksh
+
+### DK DK submit this with "mnsubmit name_of_script.ll"
+
+#@ job_name = Specfem3D_MPI
+
+#@ initialdir = .
+
+#####
+# One node of MareNostrum has two dual-core processors
+# therefore the maximum number of tasks per node is four.
+#####
+
+#####################################################################
+## parallel merging step
+#####################################################################
+#@ output = Specfem3D_merge_%j.out
+#@ error = Specfem3D_merge_%j.err
+#@ total_tasks = 32
+#@ tasks_per_node = 4
+############## Wall clock limit hhh:mm:ss
+#@ wall_clock_limit = 02:00:00
+#@ queue
+#@ features = mx 
+#####################################################################
+
+# DK DK this below OK in May 2009
+MPITRACE_HOME=/gpfs/apps/CEPBATOOLS/mpitrace-mx/64
+
+#environment
+MP_EUILIB=mx
+OBJECT_MODE=64
+MP_RSH=ssh
+
+  srun ${MPITRACE_HOME}/bin/mpimpi2prv -f TRACE.mpits -maxmem 1024 -syn -o trace.prv
+


Property changes on: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/script_marenostrum_parallel_merge_trace.ll
___________________________________________________________________
Name: svn:executable
   + *

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/script_marenostrum_parallel_run_with_trace_mx.ll
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/script_marenostrum_parallel_run_with_trace_mx.ll	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/script_marenostrum_parallel_run_with_trace_mx.ll	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,36 @@
+#! /bin/ksh
+
+### DK DK submit this with "mnsubmit name_of_script.ll"
+
+#@ job_name = Specfem3D_MPI
+
+#@ initialdir = .
+
+#####
+# One node of MareNostrum has two dual-core processors;
+# therefore the maximum number of tasks per node is four.
+#####
+
+#####################################################################
+## Running the job with tracing step
+#####################################################################
+#@ total_tasks = 64
+#@ tasks_per_node = 4
+############## Wall clock limit hhh:mm:ss
+#@ wall_clock_limit = 02:00:00
+#@ output = Specfem3D_run_%j.out
+#@ error = Specfem3D_run_%j.err
+#@ queue
+#@ features = mx
+
+#environment
+MP_EUILIB=mx
+OBJECT_MODE=64
+MP_RSH=ssh
+
+    srun ./generate_trace_marenostrum.sh ./xspecfem3D
+
+# then merge the trace at the end
+    sleep 5
+    mnsubmit script_marenostrum_parallel_merge_trace.ll
+


Property changes on: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/script_marenostrum_parallel_run_with_trace_mx.ll
___________________________________________________________________
Name: svn:executable
   + *

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/script_marenostrum_parallel_run_without_trace_mx.ll
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/script_marenostrum_parallel_run_without_trace_mx.ll	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/script_marenostrum_parallel_run_without_trace_mx.ll	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,33 @@
+#! /bin/ksh
+
+### DK DK submit this with "mnsubmit name_of_script.ll"
+
+#@ job_name = Specfem3D_MPI
+
+#@ initialdir = .
+
+#####
+# One node of MareNostrum has two dual-core processors;
+# therefore the maximum number of tasks per node is four.
+#####
+
+#####################################################################
+## Running the job without tracing
+#####################################################################
+#@ total_tasks = 64
+#@ tasks_per_node = 4
+############## Wall clock limit hhh:mm:ss
+#@ wall_clock_limit = 02:00:00
+#@ output = Specfem3D_run_%j.out
+#@ error = Specfem3D_run_%j.err
+#@ queue
+#@ features = mx
+
+#environment
+MP_EUILIB=mx
+OBJECT_MODE=64
+MP_RSH=ssh
+
+    srun ./xgenerate_databases
+#    srun ./xspecfem3D
+


Property changes on: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/MareNostrum_Barcelona/script_marenostrum_parallel_run_without_trace_mx.ll
___________________________________________________________________
Name: svn:executable
   + *

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/go_mesher
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/go_mesher	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/go_mesher	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,61 @@
+#!/bin/csh
+
+# script to run the mesher
+
+# read DATA/Par_file to get information about the run
+
+# name of the file that contains the list of machines
+set machine_file = "mymachines"
+
+set my_local_path = `grep LOCAL_PATH DATA/Par_file | cut -d '=' -f 2 `
+
+# compute total number of processors needed
+set NCHUNKS = `grep NCHUNKS DATA/Par_file | cut -d '=' -f 2 `
+set NPROC_XI = `grep NPROC_XI DATA/Par_file | cut -d '=' -f 2 `
+set NPROC_ETA = `grep NPROC_ETA DATA/Par_file | cut -d '=' -f 2 `
+
+# total number of processors is the product of the values read
+@ numprocessors = $NCHUNKS * $NPROC_XI * $NPROC_ETA
+
+rm -r -f OUTPUT_FILES
+mkdir OUTPUT_FILES
+
+# suppress old processor list files
+rm -f PI*
+
+if ( ! -d $my_local_path ) mkdir $my_local_path
+
+if ( -f $machine_file ) then
+  echo " "
+  echo using machine file \"$machine_file\"
+  echo " "
+  echo list of active machines:
+  echo " "
+  cat $machine_file
+  echo " "
+# this is only used on a cluster when specifying the list of machines in MACHINE_FILE;
+# it assumes the machines are named n001, n002, etc.
+# if the names are different, change the  tr -d 'n'  below
+  grep -v '#' $machine_file | tr -d ' ' | tr -d 'n' > OUTPUT_FILES/filtered_machines.txt
+endif
+
+echo NCHUNKS = $NCHUNKS
+echo NPROC_XI = $NPROC_XI
+echo NPROC_ETA = $NPROC_ETA
+echo " "
+echo starting MPI mesher on $numprocessors processors
+echo " "
+echo starting run in current directory $PWD
+echo " "
+echo mesh files will be saved in directory $my_local_path
+echo " "
+
+#### use this on Beowulf
+mpirun -nolocal -machinefile $machine_file -np $numprocessors $PWD/xmeshfem3D
+
+#### use this on SGI
+#  mpirun -np $numprocessors xmeshfem3D
+
+#### use this on Compaq Dec Alpha
+# dmpirun -np $numprocessors xmeshfem3D
+


Property changes on: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/go_mesher
___________________________________________________________________
Name: svn:executable
   + *

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/go_solver
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/go_solver	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/go_solver	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,52 @@
+#!/bin/csh
+
+# script to run the solver
+
+# read DATA/Par_file to get information about the run
+
+# name of the file that contains the list of machines
+set machine_file = "mymachines"
+
+set my_local_path = `grep LOCAL_PATH DATA/Par_file | cut -d '=' -f 2 `
+
+# compute total number of processors needed
+set NCHUNKS = `grep NCHUNKS DATA/Par_file | cut -d '=' -f 2 `
+set NPROC_XI = `grep NPROC_XI DATA/Par_file | cut -d '=' -f 2 `
+set NPROC_ETA = `grep NPROC_ETA DATA/Par_file | cut -d '=' -f 2 `
+
+# total number of processors is the product of the values read
+@ numprocessors = $NCHUNKS * $NPROC_XI * $NPROC_ETA
+
+# suppress old processor list files
+rm -f PI*
+
+if ( -f $machine_file ) then
+  echo " "
+  echo using machine file \"$machine_file\"
+  echo " "
+  echo list of active machines:
+  echo " "
+  cat $machine_file
+  echo " "
+endif
+
+echo NCHUNKS = $NCHUNKS
+echo NPROC_XI = $NPROC_XI
+echo NPROC_ETA = $NPROC_ETA
+echo " "
+echo starting MPI solver on $numprocessors processors
+echo " "
+echo starting run in current directory $PWD
+echo " "
+echo mesh files will be read from directory $my_local_path
+echo " "
+
+#### use this on Beowulf
+mpirun -nolocal -machinefile $machine_file -np $numprocessors $PWD/xspecfem3D 
+
+#### use this on SGI
+#  mpirun -np $numprocessors xspecfem3D
+
+#### use this on Compaq Dec Alpha
+# dmpirun -np $numprocessors xspecfem3D
+


Property changes on: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/go_solver
___________________________________________________________________
Name: svn:executable
   + *

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/cleanbase.pl
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/cleanbase.pl	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/cleanbase.pl	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,14 @@
+#!/usr/bin/perl -w
+
+# completely cleans your scratch disk and regenerates the DATABASES_MPI/ directory on the scratch disk
+#   uses 'shmux' to access all the nodes simultaneously
+
+# Qinya Liu, Caltech, May 2007
+
+
+if (@ARGV != 1) {die("cleanbase.pl machinefile\n");}
+$mymachine = $ARGV[0];
+if (not -f $mymachine) {die("check if $mymachine is a file or not\n");}
+
+`shmux -M50 -Sall -c "rm -rf /scratch/$ENV{USER}/*; mkdir /scratch/$ENV{USER}/DATABASES_MPI;" - < $mymachine `;
+
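
Typical usage, assuming a machine file named "mymachines" that lists one node
name per line:

  ./cleanbase.pl mymachines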


Property changes on: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/cleanbase.pl
___________________________________________________________________
Name: svn:executable
   + *

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/cleanbase_all_nodes.bash
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/cleanbase_all_nodes.bash	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/cleanbase_all_nodes.bash	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+# remove all the old mesh files in the local /scratch of each blade
+# on "pangu" at Caltech
+
+# Dimitri Komatitsch, University of Pau, November 2007
+
+if [ -z $USER ]; then
+	echo "cannot run this script because no USER env is set"
+	exit 2
+fi
+
+BASEMPIDIR=/scratch/$USER
+
+echo cleaning local scratch space $BASEMPIDIR on each node of the cluster
+
+grep compute- /opt/lsfhpc/conf/lsf.cluster.lsfhpc | expand | cut -f 1 -d ' ' > ___________bubu
+
+shmux -M 50 -S all -c "rm -r -f $BASEMPIDIR" - < ___________bubu >/dev/null
+
+rm ___________bubu
+


Property changes on: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/cleanbase_all_nodes.bash
___________________________________________________________________
Name: svn:executable
   + *

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/cleanbase_lsf_multi.pl
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/cleanbase_lsf_multi.pl	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/cleanbase_lsf_multi.pl	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,27 @@
+#!/usr/bin/perl -w
+
+# this script cleans ONLY the scratch directory given by Par_file
+#   assumes that machine file lists the nodes line by line
+#   requires 'shmux'
+# Qinya Liu, Caltech, May 2007
+
+if (@ARGV != 2) {die("cleanbase_lsf_multi.pl machinefile Par_file\n");}
+
+$machine = $ARGV[0];
+$par_file = $ARGV[1];
+
+open(FILE3,"<$par_file") or die ("Fatal Error opening file $par_file\n");
+while (<FILE3>) {
+   if ($_ =~ /^LOCAL_PATH/) {
+	chop;	
+	@vals = split("=", $_);
+	$mpidir = $vals[1];
+	$mpidir =~ s/^\s+//;
+	$mpidir =~ s/\s+$//;
+	close(FILE3);
+	last;
+   }
+}
+
+`shmux -M50 -Sall -c "rm -rf $mpidir" - < $machine`;
+


Property changes on: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/cleanbase_lsf_multi.pl
___________________________________________________________________
Name: svn:executable
   + *

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/go_mesher_solver_lsf_globe.bash
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/go_mesher_solver_lsf_globe.bash	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/go_mesher_solver_lsf_globe.bash	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,51 @@
+#!/bin/bash
+#BSUB -o OUTPUT_FILES/%J.o
+#BSUB -a mpich_gm
+#BSUB -J go_mesher_solver_lsf
+
+if [ -z $USER ]; then
+	echo "could not run go_mesher_solver_...bash as no USER env is set"
+	exit 2
+fi
+
+BASEMPIDIR=/scratch/$USER/DATABASES_MPI
+
+# script to run the mesher and the solver
+
+# read DATA/Par_file to get information about the run
+
+# compute total number of nodes needed
+NPROC_XI=`grep NPROC_XI DATA/Par_file | cut -d = -f 2 `
+NPROC_ETA=`grep NPROC_ETA DATA/Par_file | cut -d = -f 2`
+NCHUNKS=`grep NCHUNKS DATA/Par_file | cut -d = -f 2 `
+
+# total number of nodes is the product of the values read
+numnodes=$(( $NCHUNKS * $NPROC_XI * $NPROC_ETA ))
+
+rm -r -f OUTPUT_FILES
+mkdir OUTPUT_FILES
+
+# obtain lsf job information
+echo "$LSB_MCPU_HOSTS" > OUTPUT_FILES/lsf_machines
+echo "$LSB_JOBID" > OUTPUT_FILES/jobid
+
+remap_lsf_machines.pl OUTPUT_FILES/lsf_machines >OUTPUT_FILES/machines
+
+# now clean up and make the directory (seismograms are now written by the master, so there is no need to collect them on the nodes any more); this avoids crashes
+shmux -M 50 -S all -c "rm -r -f /scratch/$USER; mkdir -p /scratch/$USER; mkdir -p $BASEMPIDIR" - < OUTPUT_FILES/machines >/dev/null
+
+echo starting MPI mesher on $numnodes processors
+echo " "
+echo starting run in current directory $PWD
+echo " "
+
+sleep 20
+mpirun.lsf --gm-no-shmem --gm-copy-env $PWD/xmeshfem3D
+
+sleep 20
+mpirun.lsf --gm-no-shmem --gm-copy-env $PWD/xspecfem3D
+
+# cleanup after the run
+sleep 10
+shmux -M 50 -S all -c "rm -r -f /scratch/$USER" - < OUTPUT_FILES/machines >/dev/null
+
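
This script is submitted through LSF rather than run directly; a sketch modeled
on the bsub line used in remap_database/run_lsf (queue name and core count are
hypothetical):

  bsub -q normal -n 150 < go_mesher_solver_lsf_globe.bash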


Property changes on: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/go_mesher_solver_lsf_globe.bash
___________________________________________________________________
Name: svn:executable
   + *

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/go_mesher_solver_lsf_globe.daniel.kernel
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/go_mesher_solver_lsf_globe.daniel.kernel	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/go_mesher_solver_lsf_globe.daniel.kernel	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,47 @@
+#!/bin/bash -v
+#BSUB -o OUTPUT_FILES/%J.o
+#BSUB -a mpich_gm
+#BSUB -J go_mesher_solver_lsf
+
+# this is the launch script to run a kernel simulation
+# on CITerra with the LSF scheduler
+# assumes 'remap_lsf_machines.pl' is already in the $PATH
+#  Qinya Liu, Caltech, May 2007
+
+BASEMPIDIR=/scratch/$USER/DATABASES_MPI
+BASESCRATCHDIR=/scratch/$USER
+
+echo "$LSB_MCPU_HOSTS" > OUTPUT_FILES/lsf_machines
+echo "$LSB_JOBID" > OUTPUT_FILES/jobid
+
+remap_lsf_machines.pl OUTPUT_FILES/lsf_machines >OUTPUT_FILES/machines
+
+# clean old files that may be in the local /scratch directory
+# and create a directory for this job
+shmux -M 50 -S all -c "rm -r -f /scratch/$USER; mkdir -p /scratch/$USER; mkdir -p $BASEMPIDIR" - < OUTPUT_FILES/machines >/dev/null
+
+current_pwd=$PWD
+
+mpirun.lsf --gm-no-shmem --gm-copy-env $current_pwd/xmeshfem3D
+change_simulation_type.pl -F
+mpirun.lsf --gm-no-shmem --gm-copy-env $current_pwd/xspecfem3D
+
+#### this is the part that needs to be revised for each kernel simulation #####
+cd $current_pwd/SEM
+## no need to collect the seismograms from the nodes anymore in v4.0
+mv ../OUTPUT_FILES/*.ascii .
+rename .semd.ascii .for *.semd.ascii
+
+mv ../OUTPUT_FILES/*.sac .
+rename .semd.sac .for.sac *.semd.sac
+
+# P waves
+#/home/lqy/bin/utils_bin/xcut_velocity 460 525 3 T60*.for
+#rename .for.ad .adj *.for.ad
+
+cd $current_pwd
+##########################################################################
+
+change_simulation_type.pl -b
+mpirun.lsf --gm-no-shmem --gm-copy-env $current_pwd/xspecfem3D
+


Property changes on: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/go_mesher_solver_lsf_globe.daniel.kernel
___________________________________________________________________
Name: svn:executable
   + *

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/go_mesher_solver_lsf_globe.kernel
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/go_mesher_solver_lsf_globe.kernel	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/go_mesher_solver_lsf_globe.kernel	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,42 @@
+#!/bin/bash -v
+#BSUB -o OUTPUT_FILES/%J.o
+#BSUB -a mpich_gm
+#BSUB -J go_mesher_solver_lsf
+
+# this is the launch script to run a kernel simulation
+# on CITerra with the LSF scheduler
+# assumes 'remap_lsf_machines.pl' is already in the $PATH
+#  Qinya Liu, Caltech, May 2007
+
+BASEMPIDIR=/scratch/$USER/DATABASES_MPI
+BASESCRATCHDIR=/scratch/$USER
+
+echo "$LSB_MCPU_HOSTS" > OUTPUT_FILES/lsf_machines
+echo "$LSB_JOBID" > OUTPUT_FILES/jobid
+
+remap_lsf_machines.pl OUTPUT_FILES/lsf_machines >OUTPUT_FILES/machines
+
+# clean old files that may be in the local /scratch directory
+# and create a directory for this job
+shmux -M 50 -S all -c "rm -r -f /scratch/$USER; mkdir -p /scratch/$USER; mkdir -p $BASEMPIDIR" - < OUTPUT_FILES/machines >/dev/null
+
+current_pwd=$PWD
+
+mpirun.lsf --gm-no-shmem --gm-copy-env $current_pwd/xmeshfem3D
+change_simulation_type.pl -F
+mpirun.lsf --gm-no-shmem --gm-copy-env $current_pwd/xspecfem3D
+
+#### this is the part that needs to be revised for each kernel simulation #####
+cd $current_pwd/SEM
+## no need to collect the seismograms from the nodes anymore in v4.0
+mv ../OUTPUT_FILES/*.ascii .
+rename .semd.ascii .for *.semd.ascii
+# P waves
+/home/lqy/bin/utils_bin/xcut_velocity 460 525 3 T60*.for
+rename .for.ad .adj *.for.ad
+cd $current_pwd
+##########################################################################
+
+change_simulation_type.pl -b
+mpirun.lsf --gm-no-shmem --gm-copy-env $current_pwd/xspecfem3D
+


Property changes on: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/go_mesher_solver_lsf_globe.kernel
___________________________________________________________________
Name: svn:executable
   + *

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/remap_database/compile
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/remap_database/compile	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/remap_database/compile	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,2 @@
+\rm *.o remap_database
+mpif90 -o remap_database -O3 remap_database.f90 exit_mpi.f90


Property changes on: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/remap_database/compile
___________________________________________________________________
Name: svn:executable
   + *

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/remap_database/exit_mpi.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/remap_database/exit_mpi.f90	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/remap_database/exit_mpi.f90	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,57 @@
+!=====================================================================
+!
+!          S p e c f e m 3 D  G l o b e  V e r s i o n  3 . 5
+!          --------------------------------------------------
+!
+!                 Dimitri Komatitsch and Jeroen Tromp
+!    Seismological Laboratory - California Institute of Technology
+!        (c) California Institute of Technology July 2004
+!
+! This program is free software; you can redistribute it and/or modify
+! it under the terms of the GNU General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or
+! (at your option) any later version.
+!
+! This program is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+! GNU General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License along
+! with this program; if not, write to the Free Software Foundation, Inc.,
+! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+!
+!=====================================================================
+
+! end the simulation and exit MPI
+
+! version with rank number printed in the error message
+  subroutine exit_MPI(myrank,error_msg)
+
+  implicit none
+
+! standard include of the MPI library
+  include 'mpif.h'
+
+
+  integer myrank
+  character(len=*) error_msg
+
+  integer ier
+  character(len=80) outputname
+
+! write error message to screen
+  write(*,*) error_msg(1:len(error_msg))
+  write(*,*) 'Error detected, aborting MPI... proc ',myrank
+
+! stop all the MPI processes, and exit
+! on some machines, MPI_FINALIZE needs to be called before MPI_ABORT
+  call MPI_FINALIZE(ier)
+  call MPI_ABORT(MPI_COMM_WORLD,30,ier)
+  stop 'error, program ended in exit_MPI'
+
+  end subroutine exit_MPI
+
+!
+!----
+!

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/remap_database/old_machines
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/remap_database/old_machines	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/remap_database/old_machines	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,150 @@
+compute-35-7
+2
+compute-45-29
+2
+compute-47-14
+2
+compute-41-26
+2
+compute-26-28
+2
+compute-45-1
+2
+compute-17-22
+2
+compute-37-21
+2
+compute-45-18
+2
+compute-26-36
+2
+compute-13-22
+2
+compute-27-23
+2
+compute-12-8
+2
+compute-32-38
+2
+compute-11-31
+2
+compute-47-38
+2
+compute-26-35
+2
+compute-16-33
+2
+compute-31-15
+2
+compute-14-7
+2
+compute-27-3
+2
+compute-35-10
+2
+compute-25-14
+2
+compute-41-5
+2
+compute-11-14
+2
+compute-37-32
+2
+compute-15-31
+2
+compute-15-9
+2
+compute-33-1
+2
+compute-45-27
+2
+compute-35-38
+2
+compute-26-21
+2
+compute-43-11
+2
+compute-44-4
+2
+compute-35-3
+2
+compute-41-2
+2
+compute-17-12
+2
+compute-27-38
+2
+compute-27-9
+2
+compute-36-26
+2
+compute-11-32
+2
+compute-17-39
+2
+compute-36-19
+2
+compute-46-5
+2
+compute-31-23
+2
+compute-31-19
+2
+compute-32-37
+2
+compute-46-10
+2
+compute-35-35
+2
+compute-11-8
+2
+compute-37-14
+2
+compute-36-28
+2
+compute-15-21
+2
+compute-32-40
+2
+compute-26-25
+2
+compute-46-25
+2
+compute-41-21
+2
+compute-32-19
+2
+compute-25-24
+2
+compute-26-9
+2
+compute-15-7
+2
+compute-16-34
+2
+compute-44-3
+2
+compute-47-9
+2
+compute-24-1
+2
+compute-25-29
+2
+compute-45-37
+2
+compute-27-35
+2
+compute-33-22
+2
+compute-27-1
+2
+compute-31-6
+2
+compute-11-18
+2
+compute-25-38
+2
+compute-27-19
+2
+compute-45-7
+2

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/remap_database/remap.bash
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/remap_database/remap.bash	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/remap_database/remap.bash	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,16 @@
+#!/bin/bash -v
+#BSUB -o OUTPUT_FILES/%J.o
+#BSUB -a mpich_gm
+#BSUB -J go_mesher_solver_lsf
+
+BASEMPIDIR=/scratch/$USER/DATABASES_MPI
+
+echo "$LSB_MCPU_HOSTS" > OUTPUT_FILES/lsf_machines
+echo "$LSB_JOBID" > OUTPUT_FILES/jobid
+
+remap_lsf_machines.pl OUTPUT_FILES/lsf_machines >OUTPUT_FILES/machines
+
+shmux -M50 -Sall -c "mkdir -p $BASEMPIDIR" - < OUTPUT_FILES/machines >/dev/null
+
+# you can use . to replace the jobids if no jobid sub-directory was made on the scratch disk
+mpirun.lsf --gm-no-shmem --gm-copy-env $PWD/remap_database old_machines 150 old_jobid new_jobid

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/remap_database/remap_database.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/remap_database/remap_database.f90	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/remap_database/remap_database.f90	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,80 @@
+program remap_databases
+
+implicit none
+
+! standard include of the MPI library
+  include 'mpif.h'
+
+  integer, parameter :: MAX_PROCS = 1000
+  integer ier,sizeprocs,myrank,ios,i
+  character(len=150) old_machine_file,junk,junk2,junk3 
+  character(len=150) slice_to_old_machine(MAX_PROCS),mymachine, &
+  old_local_data_base, new_local_data_base,scp_outfile, command_string
+
+  integer num_slices, num_slices2,num, old_jobid, new_jobid
+  logical use_jobid
+
+! initialize the MPI communicator and start the NPROCTOT MPI processes.
+  call MPI_INIT(ier)
+
+! run the main program
+  call MPI_COMM_SIZE(MPI_COMM_WORLD,sizeprocs,ier)
+  call MPI_COMM_RANK(MPI_COMM_WORLD,myrank,ier)
+
+  call getarg(1,old_machine_file)
+  call getarg(2,junk)
+  if (trim(old_machine_file) == '' .or. trim(junk) == '') call exit_mpi(myrank,'Usage: remap old-mach num-slice [old-jobid new-jobid]')
+  read(junk,*) num_slices
+ 
+  call getarg(3,junk2)
+  if (trim(junk2) == '') then
+     use_jobid=.false.
+  else
+     call getarg(4,junk3)
+     if (trim(junk3) == '') call exit_mpi(myrank,'Usage: remap old-mach num-slice [old-jobid new-jobid]')
+     read(junk2,*) old_jobid
+     read(junk3,*) new_jobid
+     use_jobid=.true.
+  endif
+  if (num_slices /= sizeprocs) call exit_mpi(myrank,'number of slices does not match')
+
+  num_slices2 = num_slices
+  open(11,file=trim(old_machine_file),status='old',iostat=ios)
+  if (ios /= 0) stop 'Error opening old machine file'
+  do while (1 == 1)
+    read(11,'(a)',iostat=ios) junk2
+    if (ios /= 0) exit
+    read(11,*,iostat=ios) num   ! list-directed read; '(i)' is not a standard format
+    if (ios /= 0) exit
+    do i = 1, num
+      slice_to_old_machine(num_slices2-i+1) = junk2
+    enddo
+    num_slices2 = num_slices2 - num
+  enddo
+  if (num_slices2 /= 0) stop 'Error counting number of slices'
+  close(11)
+
+  mymachine = slice_to_old_machine(myrank+1)
+
+  if (use_jobid) then
+    write(old_local_data_base,'(a,i0)') '/scratch/lqy/DATABASES_MPI.',old_jobid 
+    write(new_local_data_base,'(a,i0)') '/scratch/lqy/DATABASES_MPI.',new_jobid
+  else
+    old_local_data_base = '/scratch/lqy/DATABASES_MPI'
+    new_local_data_base = '/scratch/lqy/DATABASES_MPI'
+  endif
+
+  write(scp_outfile,'(a,i4.4)') 'OUTPUT_FILES/scp_out.',myrank
+
+  write(command_string,'(a,i4.4,a)') 'scp lqy@'//trim(mymachine)//':'//trim(old_local_data_base)//'/*', &
+             myrank, '*  '//trim(new_local_data_base)
+
+!  call system('echo '//trim(command_string)//' > '//trim(scp_outfile))
+  
+  call system(trim(command_string)) !//' >> '//trim(scp_outfile))
+           
+! stop all the MPI processes, and exit
+  call MPI_FINALIZE(ier)
+
+end program remap_databases
+          

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/remap_database/run_lsf
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/remap_database/run_lsf	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/remap_database/run_lsf	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,2 @@
+\rm OUTPUT_FILES/*
+bsub -q normal -n 150 -K < remap.bash 


Property changes on: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/lsf/remap_database/run_lsf
___________________________________________________________________
Name: svn:executable
   + *

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/pbs/go_mesher_pbs.bash
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/pbs/go_mesher_pbs.bash	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/pbs/go_mesher_pbs.bash	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,60 @@
+#!/bin/bash
+#PBS -S /bin/bash
+
+## job name and output file
+#PBS -N go_mesher
+#PBS -j oe
+#PBS -o OUTPUT_FILES/job.o
+
+###########################################################
+# USER PARAMETERS
+
+## 150 CPUs ( 18*8 + 6 ), walltime 1 hour
+#PBS -l nodes=18:ppn=8+1:ppn=6,walltime=1:00:00
+
+###########################################################
+
+cd $PBS_O_WORKDIR
+
+BASEMPIDIR=`grep LOCAL_PATH DATA/Par_file | cut -d = -f 2 `
+
+# script to run the mesher and the solver
+# read DATA/Par_file to get information about the run
+# compute total number of nodes needed
+NPROC_XI=`grep NPROC_XI DATA/Par_file | cut -d = -f 2 `
+NPROC_ETA=`grep NPROC_ETA DATA/Par_file | cut -d = -f 2`
+NCHUNKS=`grep NCHUNKS DATA/Par_file | cut -d = -f 2 `
+
+# total number of nodes is the product of the values read
+numnodes=$(( $NCHUNKS * $NPROC_XI * $NPROC_ETA ))
+
+# cleans OUTPUT_FILES
+rm -r -f OUTPUT_FILES
+mkdir OUTPUT_FILES
+
+# back up the files used for this simulation
+cp DATA/Par_file OUTPUT_FILES/
+
+rm -rf OUTPUT_FILES/src
+mkdir OUTPUT_FILES/src
+cp -p *.f90 OUTPUT_FILES/src/
+cp -p *.c OUTPUT_FILES/src/
+cp -p *.in OUTPUT_FILES/src/
+cp -p *.h OUTPUT_FILES/src/
+
+# obtain job information
+cat $PBS_NODEFILE > OUTPUT_FILES/compute_nodes
+echo "$PBS_JOBID" > OUTPUT_FILES/jobid
+
+
+echo starting MPI mesher on $numnodes processors
+echo " "
+
+sleep 2 
+mpiexec -np $numnodes $PWD/xmeshfem3D
+
+# back up the important files addressing.txt and list*.txt
+cp OUTPUT_FILES/*.txt $BASEMPIDIR/
+
+
+echo "done"


Property changes on: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/pbs/go_mesher_pbs.bash
___________________________________________________________________
Name: svn:executable
   + *
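
As a sketch of what the grep/cut pipeline in the script above extracts,
assuming DATA/Par_file contains "name = value" lines (the values here are
hypothetical):

   # DATA/Par_file excerpt (hypothetical values):
   #   NCHUNKS   = 6
   #   NPROC_XI  = 5
   #   NPROC_ETA = 5
   NCHUNKS=`grep NCHUNKS DATA/Par_file | cut -d = -f 2`   # yields " 6"
   numnodes=$(( $NCHUNKS * 5 * 5 ))                       # 6 * 5 * 5 = 150 processes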

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/pbs/go_solver_pbs.bash
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/pbs/go_solver_pbs.bash	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/pbs/go_solver_pbs.bash	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,62 @@
+#!/bin/bash
+#PBS -S /bin/bash
+
+## job name and output file
+#PBS -N go_solver
+#PBS -j oe
+#PBS -o OUTPUT_FILES/job.o
+
+###########################################################
+# USER PARAMETERS
+
+## 150 CPUs ( 18*8 + 6 ), walltime 5 hours
+#PBS -l nodes=18:ppn=8+1:ppn=6,walltime=5:00:00
+
+###########################################################
+
+cd $PBS_O_WORKDIR
+
+BASEMPIDIR=`grep LOCAL_PATH DATA/Par_file | cut -d = -f 2 `
+
+# script to run the mesher and the solver
+# read DATA/Par_file to get information about the run
+# compute total number of nodes needed
+NPROC_XI=`grep NPROC_XI DATA/Par_file | cut -d = -f 2 `
+NPROC_ETA=`grep NPROC_ETA DATA/Par_file | cut -d = -f 2`
+NCHUNKS=`grep NCHUNKS DATA/Par_file | cut -d = -f 2 `
+
+# total number of nodes is the product of the values read
+numnodes=$(( $NCHUNKS * $NPROC_XI * $NPROC_ETA ))
+
+mkdir -p OUTPUT_FILES
+
+
+# back up the files used for this simulation
+cp DATA/Par_file OUTPUT_FILES/
+cp DATA/STATIONS OUTPUT_FILES/
+cp DATA/CMTSOLUTION OUTPUT_FILES/
+
+rm -rf OUTPUT_FILES/src
+mkdir OUTPUT_FILES/src
+cp -p *.f90 OUTPUT_FILES/src/
+cp -p *.c OUTPUT_FILES/src/
+cp -p *.in OUTPUT_FILES/src/
+cp -p *.h OUTPUT_FILES/src/
+
+
+# obtain job information
+cat $PBS_NODEFILE > OUTPUT_FILES/compute_nodes
+echo "$PBS_JOBID" > OUTPUT_FILES/jobid
+
+
+echo starting run in current directory $PWD
+echo " "
+
+cp $BASEMPIDIR/addr*.txt OUTPUT_FILES/
+cp $BASEMPIDIR/list*.txt OUTPUT_FILES/
+
+sleep 2 
+mpiexec -np $numnodes $PWD/xspecfem3D
+
+echo "finished successfully"
+


Property changes on: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/pbs/go_solver_pbs.bash
___________________________________________________________________
Name: svn:executable
   + *
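
Since the solver reads the databases written by the mesher, the two jobs above
can be chained; a sketch using standard Torque/PBS job dependencies (check the
local scheduler's exact syntax):

   # submit the mesher, then hold the solver until the mesher exits successfully
   MESHER_ID=`qsub go_mesher_pbs.bash`
   qsub -W depend=afterok:$MESHER_ID go_solver_pbs.bash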

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/pbs/run_pbs.bash
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/pbs/run_pbs.bash	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/pbs/run_pbs.bash	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,24 @@
+#!/bin/bash
+# use the normal queue unless otherwise directed
+
+#module load openmpi/intel-11.1
+
+rm -f OUTPUT_FILES/*
+
+d=`date`
+echo "Starting compilation $d"
+make clean
+make xmeshfem3D
+make xcreate_header_file
+./xcreate_header_file
+make xspecfem3D
+d=`date`
+echo "Finished compilation $d"
+
+echo "Submitting job"
+
+# mesher
+qsub < go_mesher_pbs.bash
+
+# solver
+#qsub < go_solver_pbs.bash


Property changes on: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/pbs/run_pbs.bash
___________________________________________________________________
Name: svn:executable
   + *

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/pbs/valgrind_go_solver_pbs.bash
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/pbs/valgrind_go_solver_pbs.bash	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/pbs/valgrind_go_solver_pbs.bash	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,89 @@
+#!/bin/bash
+#
+# Valgrind, a suite of tools for debugging and profiling
+# http://valgrind.org/
+#
+
+# bash script
+#PBS -S /bin/bash
+
+# job name 
+#PBS -N go_solver
+
+# joins output and error information
+#PBS -j oe
+
+# job output file
+#PBS -o OUTPUT_FILES/job.o
+
+###########################################################
+# USER PARAMETERS
+
+# Queue
+#PBS -q tromp
+
+# 150 CPUs ( 18*8+6 ), walltime 15 hours
+#PBS -l nodes=18:ppn=8+1:ppn=6,walltime=15:00:00
+
+# valgrind mpi library 
+PRELOAD_LIB=/my_valgrind_path/valgrind/lib/valgrind/libmpiwrap-x86-linux.so
+
+###########################################################
+
+cd $PBS_O_WORKDIR
+
+BASEMPIDIR=`grep LOCAL_PATH DATA/Par_file | cut -d = -f 2 `
+
+# script to run the mesher and the solver
+# read DATA/Par_file to get information about the run
+# compute total number of nodes needed
+NPROC_XI=`grep NPROC_XI DATA/Par_file | cut -d = -f 2 `
+NPROC_ETA=`grep NPROC_ETA DATA/Par_file | cut -d = -f 2`
+NCHUNKS=`grep NCHUNKS DATA/Par_file | cut -d = -f 2 `
+
+# total number of nodes is the product of the values read
+numnodes=$(( $NCHUNKS * $NPROC_XI * $NPROC_ETA ))
+
+mkdir -p OUTPUT_FILES
+
+# back up the files used for this simulation
+cp DATA/Par_file OUTPUT_FILES/
+cp DATA/STATIONS OUTPUT_FILES/
+cp DATA/CMTSOLUTION OUTPUT_FILES/
+
+rm -rf OUTPUT_FILES/src
+mkdir OUTPUT_FILES/src
+cp -p *.f90 OUTPUT_FILES/src/
+cp -p *.c OUTPUT_FILES/src/
+cp -p *.in OUTPUT_FILES/src/
+cp -p *.h OUTPUT_FILES/src/
+
+
+# obtain job information
+cat $PBS_NODEFILE > OUTPUT_FILES/compute_nodes
+echo "$PBS_JOBID" > OUTPUT_FILES/jobid
+
+echo starting run in current directory $PWD
+echo " "
+
+cp $BASEMPIDIR/addr*.txt OUTPUT_FILES/
+cp $BASEMPIDIR/list*.txt OUTPUT_FILES/
+
+sleep 2
+
+# memory leaks
+LD_PRELOAD=$PRELOAD_LIB mpiexec -np $numnodes valgrind --leak-check=full $PWD/xspecfem3D
+
+cp OUTPUT_FILES/job.o OUTPUT_FILES/job.memory-leaks.o
+
+sleep 2
+
+# cache misses
+LD_PRELOAD=$PRELOAD_LIB mpiexec -np $numnodes valgrind --tool=cachegrind $PWD/xspecfem3D
+
+cp OUTPUT_FILES/job.o OUTPUT_FILES/job.cache-misses.o
+cp cachegrind.out.* OUTPUT_FILES/
+
+
+echo "finished successfully"
+


Property changes on: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/pbs/valgrind_go_solver_pbs.bash
___________________________________________________________________
Name: svn:executable
   + *
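
The PRELOAD_LIB path above is a placeholder; a sketch of one way to locate the
actual Valgrind MPI wrapper on a given system (the install prefix and the
architecture suffix vary):

   find /usr /opt -name 'libmpiwrap*.so' 2>/dev/null
   # then set, for example:
   # PRELOAD_LIB=/usr/lib/valgrind/libmpiwrap-amd64-linux.so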

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/run3d.csh
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/run3d.csh	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/run3d.csh	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,29 @@
+#!/bin/csh
+
+# compile and run the mesher and then the solver, then collect the seismograms
+
+set OUTDIR="SEM"
+
+sleep 1
+make clean
+sleep 1
+make meshfem3D
+sleep 10
+go_mesher
+
+sleep 5
+make clean
+sleep 1
+make specfem3D
+sleep 10
+go_solver
+
+sleep 5
+if (! -d $OUTDIR) then
+   mkdir $OUTDIR
+endif 
+cd $OUTDIR
+cp ../DATA/CMTSOLUTION .
+cp ../DATA/STATIONS .
+cp ../DATA/Par_file .
+


Property changes on: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/run3d.csh
___________________________________________________________________
Name: svn:executable
   + *

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/sge/go_mesher_sge.bash
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/sge/go_mesher_sge.bash	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/sge/go_mesher_sge.bash	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,66 @@
+#!/bin/sh 
+# sun grid engine cluster @ oxford
+
+# use current working directory
+#$ -cwd
+
+# merge error output into standard output stream
+#$ -j yes
+#$ -o OUTPUT_FILES/job.o
+
+###########################################################
+# USER PARAMETERS
+
+# parallel environment with 180 CPUs in total; request 150 CPUs:
+#$ -pe make 150
+
+###########################################################
+
+
+BASEMPIDIR=`grep LOCAL_PATH DATA/Par_file | cut -d = -f 2 `
+
+# script to run the mesher and the solver
+# read DATA/Par_file to get information about the run
+# compute total number of nodes needed
+NPROC_XI=`grep NPROC_XI DATA/Par_file | cut -d = -f 2 `
+NPROC_ETA=`grep NPROC_ETA DATA/Par_file | cut -d = -f 2`
+NCHUNKS=`grep NCHUNKS DATA/Par_file | cut -d = -f 2 `
+
+# total number of nodes is the product of the values read
+numnodes=$(( $NCHUNKS * $NPROC_XI * $NPROC_ETA ))
+
+mkdir -p OUTPUT_FILES
+
+# back up the files used for this simulation
+cp DATA/Par_file OUTPUT_FILES/
+cp DATA/STATIONS OUTPUT_FILES/
+cp DATA/CMTSOLUTION OUTPUT_FILES/
+
+rm -rf OUTPUT_FILES/src
+mkdir OUTPUT_FILES/src
+cp -p *.f90 OUTPUT_FILES/src/
+cp -p *.c OUTPUT_FILES/src/
+cp -p *.in OUTPUT_FILES/src/
+cp -p *.h OUTPUT_FILES/src/
+
+
+# obtain job information
+cat $PE_HOSTFILE > OUTPUT_FILES/compute_nodes
+echo "$JOB_ID" > OUTPUT_FILES/jobid
+
+
+echo starting run in current directory $PWD
+echo " "
+
+cp $BASEMPIDIR/addr*.txt OUTPUT_FILES/
+cp $BASEMPIDIR/list*.txt OUTPUT_FILES/
+
+# run in parallel with sge as resource manager
+#set echo
+
+/opt/SUNWhpc/bin/mprun -x sge ./xmeshfem3D
+
+# or using mpiexec 
+#/opt/mpich/bin/mpiexec -np $numnodes ./xmeshfem3D
+
+


Property changes on: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/sge/go_mesher_sge.bash
___________________________________________________________________
Name: svn:executable
   + *

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/sge/go_solver_sge.bash
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/sge/go_solver_sge.bash	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/sge/go_solver_sge.bash	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,64 @@
+#!/bin/sh 
+# sun grid engine cluster @ oxford
+
+# use current working directory
+#$ -cwd
+
+# merge error output into standard output stream
+#$ -j yes
+#$ -o OUTPUT_FILES/job.o
+
+###########################################################
+# USER PARAMETERS
+
+# parallel environment with 180 CPUs in total; request 150 CPUs:
+#$ -pe make 150
+
+###########################################################
+
+
+BASEMPIDIR=`grep LOCAL_PATH DATA/Par_file | cut -d = -f 2 `
+
+# script to run the mesher and the solver
+# read DATA/Par_file to get information about the run
+# compute total number of nodes needed
+NPROC_XI=`grep NPROC_XI DATA/Par_file | cut -d = -f 2 `
+NPROC_ETA=`grep NPROC_ETA DATA/Par_file | cut -d = -f 2`
+NCHUNKS=`grep NCHUNKS DATA/Par_file | cut -d = -f 2 `
+
+# total number of nodes is the product of the values read
+numnodes=$(( $NCHUNKS * $NPROC_XI * $NPROC_ETA ))
+
+mkdir -p OUTPUT_FILES
+
+# back up the files used for this simulation
+cp DATA/Par_file OUTPUT_FILES/
+cp DATA/STATIONS OUTPUT_FILES/
+cp DATA/CMTSOLUTION OUTPUT_FILES/
+
+rm -rf OUTPUT_FILES/src
+mkdir OUTPUT_FILES/src
+cp -p *.f90 OUTPUT_FILES/src/
+cp -p *.c OUTPUT_FILES/src/
+cp -p *.in OUTPUT_FILES/src/
+cp -p *.h OUTPUT_FILES/src/
+
+
+# obtain job information
+cat $PE_HOSTFILE > OUTPUT_FILES/compute_nodes
+echo "$JOB_ID" > OUTPUT_FILES/jobid
+
+
+echo starting run in current directory $PWD
+echo " "
+
+cp $BASEMPIDIR/addr*.txt OUTPUT_FILES/
+cp $BASEMPIDIR/list*.txt OUTPUT_FILES/
+
+# run in parallel with sge as resource manager
+#set echo
+
+/opt/SUNWhpc/bin/mprun -x sge ./xspecfem3D
+
+# or using mpiexec 
+#/opt/mpich/bin/mpiexec -np $numnodes ./xspecfem3D
\ No newline at end of file


Property changes on: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/sge/go_solver_sge.bash
___________________________________________________________________
Name: svn:executable
   + *

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/sge/run_sge.bash
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/sge/run_sge.bash	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/sge/run_sge.bash	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,26 @@
+#!/bin/bash
+# use the default queue unless otherwise directed
+# specific queues:
+#     pe.q with 130 cpus,
+#     all.q with 49 cpus
+# select one by adding it to the qsub command below, e.g. -q pe.q
+
+rm -f OUTPUT_FILES/*
+
+d=`date`
+echo "Starting compilation $d"
+make clean
+make xmeshfem3D
+make xcreate_header_file
+./xcreate_header_file
+make xspecfem3D
+d=`date`
+echo "Finished compilation $d"
+
+echo "Submitting job"
+
+# mesher
+qsub go_mesher_sge.bash
+
+# solver
+#qsub go_solver_sge.bash


Property changes on: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/sge/run_sge.bash
___________________________________________________________________
Name: svn:executable
   + *
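
A sketch of submitting to one of the specific queues mentioned above and
chaining the solver after the mesher under SGE (queue name and job names are
illustrative; -q selects the queue, -hold_jid holds on a prior job):

   qsub -q pe.q -N mesher go_mesher_sge.bash
   qsub -q pe.q -hold_jid mesher go_solver_sge.bash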

Added: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/test_cache_size.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/test_cache_size.f90	                        (rev 0)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/test_cache_size.f90	2010-02-20 19:47:34 UTC (rev 16283)
@@ -0,0 +1,128 @@
+!=====================================================================
+!
+!          S p e c f e m 3 D  G l o b e  V e r s i o n  4 . 0
+!          --------------------------------------------------
+!
+!          Main authors: Dimitri Komatitsch and Jeroen Tromp
+!    Seismological Laboratory, California Institute of Technology, USA
+!                    and University of Pau, France
+! (c) California Institute of Technology and University of Pau, October 2007
+!
+! This program is free software; you can redistribute it and/or modify
+! it under the terms of the GNU General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or
+! (at your option) any later version.
+!
+! This program is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+! GNU General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License along
+! with this program; if not, write to the Free Software Foundation, Inc.,
+! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+!
+!=====================================================================
+
+  program test_cache_size
+
+! test processor cache size as in the book of Deville et al. (2002), Chapter 8
+
+! Dimitri Komatitsch, University of Pau, France, May 2007
+
+  implicit none
+
+! timer to count elapsed time
+  character(len=8) datein
+  character(len=10) timein
+  character(len=5)  :: zone
+  integer, dimension(8) :: time_values_in,time_values_out
+  double precision :: time_start,time_end
+
+  integer, parameter :: NELEM_ARRAY_MAX = 10000000
+
+  real, dimension(NELEM_ARRAY_MAX) :: a
+
+  real :: c
+
+  double precision :: megaflops
+
+  integer :: NELEM_ARRAY,l,chiffre,puissance,NLOOP
+
+!***********************************************************************
+
+! count elapsed wall-clock time
+  datein = ''
+  timein = ''
+  zone = ''
+
+  NELEM_ARRAY = 1000
+
+  c = 1.0
+
+  do puissance = 0,6
+
+  do chiffre = 1,9
+
+  NELEM_ARRAY = chiffre * 10**puissance
+
+  a(:) = 0.0
+
+  call c_add(a,c,NELEM_ARRAY)
+
+  if(NELEM_ARRAY <= 1000) then
+    NLOOP = 10000000
+  else if(NELEM_ARRAY <= 10000) then
+    NLOOP = 600000
+  else if(NELEM_ARRAY <= 100000) then
+    NLOOP = 60000
+  else
+    NLOOP = 5000
+  endif
+
+  call date_and_time(datein,timein,zone,time_values_in)
+  do l = 1,NLOOP
+    call c_add(a,c,NELEM_ARRAY)
+  enddo
+  call date_and_time(datein,timein,zone,time_values_out)
+
+! time_values_in(3): day of the month
+! time_values_in(5): hour of the day
+! time_values_in(6): minutes of the hour
+! time_values_in(7): seconds of the minute
+! time_values_in(8): milliseconds of the second
+
+! this fails if we cross the end of the hour
+  time_start = 60000.d0*time_values_in(6) + 1000.d0*time_values_in(7) + time_values_in(8)
+  time_end   = 60000.d0*time_values_out(6) + 1000.d0*time_values_out(7) + time_values_out(8)
+
+  megaflops = dble(NELEM_ARRAY) * dble(NLOOP) / (time_end - time_start) / 1.d3
+
+  print *,NELEM_ARRAY,' elements -> megaflops = ',megaflops
+
+  enddo
+
+  enddo
+
+  end program test_cache_size
+
+!---
+
+  subroutine c_add(a,c,NELEM_ARRAY)
+
+  implicit none
+
+  integer :: NELEM_ARRAY
+
+  integer :: i
+
+  real, dimension(NELEM_ARRAY) :: a
+
+  real :: c
+
+  do i = 1,NELEM_ARRAY
+   a(i) = a(i) + c
+  enddo
+
+  end subroutine c_add
+


Property changes on: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/Cluster/test_cache_size.f90
___________________________________________________________________
Name: svn:executable
   + *
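
A minimal build-and-run sketch for the benchmark above (compiler choice and
flags are assumptions):

   gfortran -O3 -o xtest_cache_size test_cache_size.f90
   ./xtest_cache_size
   # the printed megaflop rate typically drops each time the array size
   # exceeds another level of the processor cache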

Deleted: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/cleanbase.pl
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/cleanbase.pl	2010-02-20 19:26:42 UTC (rev 16282)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/cleanbase.pl	2010-02-20 19:47:34 UTC (rev 16283)
@@ -1,14 +0,0 @@
-#!/usr/bin/perl -w
-
-# completely cleans your scratch disk, and regenerate DATABASES_MPI/ directory in the scratch disk
-#   uses 'shmux' to have simultaneous access to all nodes
-
-# Qinya Liu, Caltech, May 2007
-
-
-if (@ARGV != 1) {die("cleanbase.pl machinefile\n");}
-$mymachine = $ARGV[0];
-if (not -f $mymachine) {die("check if $mymachine is a file or not\n");}
-
-`shmux -M50 -Sall -c "rm -rf /scratch/$ENV{USER}/*; mkdir /scratch/$ENV{USER}/DATABASES_MPI;" - < $mymachine `;
-

Deleted: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/cleanbase_all_nodes.bash
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/cleanbase_all_nodes.bash	2010-02-20 19:26:42 UTC (rev 16282)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/cleanbase_all_nodes.bash	2010-02-20 19:47:34 UTC (rev 16283)
@@ -1,22 +0,0 @@
-#!/bin/bash
-
-# remove all the old mesh files in the local /scratch of each blade
-# on "pangu" at Caltech
-
-# Dimitri Komatitsch, University of Pau, November 2007
-
-if [ -z $USER ]; then
-	echo "cannot run this script because no USER env is set"
-	exit 2
-fi
-
-BASEMPIDIR=/scratch/$USER
-
-echo cleaning local scratch space $BASEMPIDIR on each node of the cluster
-
-grep compute- /opt/lsfhpc/conf/lsf.cluster.lsfhpc | expand | cut -f 1 -d ' ' > ___________bubu
-
-shmux -M 50 -S all -c "rm -r -f $BASEMPIDIR" - < ___________bubu >/dev/null
-
-rm ___________bubu
-

Deleted: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/cleanbase_lsf_multi.pl
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/cleanbase_lsf_multi.pl	2010-02-20 19:26:42 UTC (rev 16282)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/cleanbase_lsf_multi.pl	2010-02-20 19:47:34 UTC (rev 16283)
@@ -1,27 +0,0 @@
-#!/usr/bin/perl -w
-
-# this script cleans ONLY the scratch directory given by Par_file
-#   assumes that machine file lists the nodes line by line
-#   requires 'shmux'
-# Qinya Liu, Caltech, May 2007
-
-if (@ARGV != 2) {die("cleanbase_lsf_multi.pl machinefile Par_file\n");}
-
-$machine = $ARGV[0];
-$par_file = $ARGV[1];
-
-open(FILE3,"<$par_file") or die ("Fatal Error openning file $par_file\n");
-while (<FILE3>) {
-   if ($_ =~ /^LOCAL_PATH/) {
-	chop;	
-	@vals = split("=", $_);
-	$mpidir = $vals[1];
-	$mpidir =~ s/^\s+//;
-	$mpidir =~ s/\s+$//;
-	close(FILE3);
-	last;
-   }
-}
-
-`shmux -M50 -Sall -c "rm -rf $mpidir" - < $machine`;
-

Deleted: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/go_mesher
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/go_mesher	2010-02-20 19:26:42 UTC (rev 16282)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/go_mesher	2010-02-20 19:47:34 UTC (rev 16283)
@@ -1,61 +0,0 @@
-#!/bin/csh
-
-# script to run the mesher
-
-# read DATA/Par_file to get information about the run
-
-# name of the file that contains the list of machines
-set machine_file = "mymachines"
-
-set my_local_path = `grep LOCAL_PATH DATA/Par_file | cut -d '=' -f 2 `
-
-# compute total number of processors needed
-set NCHUNKS = `grep NCHUNKS DATA/Par_file | cut -d '=' -f 2 `
-set NPROC_XI = `grep NPROC_XI DATA/Par_file | cut -d '=' -f 2 `
-set NPROC_ETA = `grep NPROC_ETA DATA/Par_file | cut -d '=' -f 2 `
-
-# total number of processors is the product of the values read
-@ numprocessors = $NCHUNKS * $NPROC_XI * $NPROC_ETA
-
-rm -r -f OUTPUT_FILES
-mkdir OUTPUT_FILES
-
-# suppress old processor list files
-rm -f PI*
-
-if ( ! -d $my_local_path ) mkdir $my_local_path
-
-if ( -f $machine_file ) then
-  echo " "
-  echo using machine file \"$machine_file\"
-  echo " "
-  echo list of active machines:
-  echo " "
-  cat $machine_file
-  echo " "
-# this only used on a cluster if specifying list of machines in MACHINE_FILE
-# assumes the name of the machines is n001, n002 etc...
-# if name is different then change the  tr -d 'n' below
-  grep -v '#' $machine_file | tr -d ' ' | tr -d 'n' > OUTPUT_FILES/filtered_machines.txt
-endif
-
-echo NCHUNKS = $NCHUNKS
-echo NPROC_XI = $NPROC_XI
-echo NPROC_ETA = $NPROC_ETA
-echo " "
-echo starting MPI mesher on $numprocessors processors
-echo " "
-echo starting run in current directory $PWD
-echo " "
-echo mesh files will be saved in directory $my_local_path
-echo " "
-
-#### use this on Beowulf
-mpirun -nolocal -machinefile $machine_file -np $numprocessors $PWD/xmeshfem3D
-
-#### use this on SGI
-#  mpirun -np $numprocessors xmeshfem3D
-
-#### use this on Compaq Dec Alpha
-# dmpirun -np $numprocessors xmeshfem3D
-

Deleted: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/go_mesher_solver_lsf_globe.bash
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/go_mesher_solver_lsf_globe.bash	2010-02-20 19:26:42 UTC (rev 16282)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/go_mesher_solver_lsf_globe.bash	2010-02-20 19:47:34 UTC (rev 16283)
@@ -1,51 +0,0 @@
-#!/bin/bash
-#BSUB -o OUTPUT_FILES/%J.o
-#BSUB -a mpich_gm
-#BSUB -J go_mesher_solver_lsf
-
-if [ -z $USER ]; then
-	echo "could not run go_mesher_solver_...bash as no USER env is set"
-	exit 2
-fi
-
-BASEMPIDIR=/scratch/$USER/DATABASES_MPI
-
-# script to run the mesher and the solver
-
-# read DATA/Par_file to get information about the run
-
-# compute total number of nodes needed
-NPROC_XI=`grep NPROC_XI DATA/Par_file | cut -d = -f 2 `
-NPROC_ETA=`grep NPROC_ETA DATA/Par_file | cut -d = -f 2`
-NCHUNKS=`grep NCHUNKS DATA/Par_file | cut -d = -f 2 `
-
-# total number of nodes is the product of the values read
-numnodes=$(( $NCHUNKS * $NPROC_XI * $NPROC_ETA ))
-
-rm -r -f OUTPUT_FILES
-mkdir OUTPUT_FILES
-
-# obtain lsf job information
-echo "$LSB_MCPU_HOSTS" > OUTPUT_FILES/lsf_machines
-echo "$LSB_JOBID" > OUTPUT_FILES/jobid
-
-remap_lsf_machines.pl OUTPUT_FILES/lsf_machines >OUTPUT_FILES/machines
-
-# now cleanup and make the dir (seismos are now written by the master, no more need to collect them on the nodes), this for avoiding crashes
-shmux -M 50 -S all -c "rm -r -f /scratch/$USER; mkdir -p /scratch/$USER; mkdir -p $BASEMPIDIR" - < OUTPUT_FILES/machines >/dev/null
-
-echo starting MPI mesher on $numnodes processors
-echo " "
-echo starting run in current directory $PWD
-echo " "
-
-sleep 20
-mpirun.lsf --gm-no-shmem --gm-copy-env $PWD/xmeshfem3D
-
-sleep 20
-mpirun.lsf --gm-no-shmem --gm-copy-env $PWD/xspecfem3D
-
-# cleanup after the run
-sleep 10
-shmux -M 50 -S all -c "rm -r -f /scratch/$USER" - < OUTPUT_FILES/machines >/dev/null
-

Deleted: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/go_mesher_solver_lsf_globe.kernel
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/go_mesher_solver_lsf_globe.kernel	2010-02-20 19:26:42 UTC (rev 16282)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/go_mesher_solver_lsf_globe.kernel	2010-02-20 19:47:34 UTC (rev 16283)
@@ -1,42 +0,0 @@
-#!/bin/bash -v
-#BSUB -o OUTPUT_FILES/%J.o
-#BSUB -a mpich_gm
-#BSUB -J go_mesher_solver_lsf
-
-# this is the launch script to run a kernel simulation
-# on CITerra with the LSF scheduler
-# assumes 'remap_lsf_machines.pl' is already in the $PATH
-#  Qinya Liu, Caltech, May 2007
-
-BASEMPIDIR=/scratch/$USER/DATABASES_MPI
-BASESCRATCHDIR=/scratch/$USER
-
-echo "$LSB_MCPU_HOSTS" > OUTPUT_FILES/lsf_machines
-echo "$LSB_JOBID" > OUTPUT_FILES/jobid
-
-remap_lsf_machines.pl OUTPUT_FILES/lsf_machines >OUTPUT_FILES/machines
-
-# clean old files that may be in the local /scratch directory
-# and create a directory for this job
-shmux -M 50 -S all -c "rm -r -f /scratch/$USER; mkdir -p /scratch/$USER; mkdir -p $BASEMPIDIR" - < OUTPUT_FILES/machines >/dev/null
-
-current_pwd=$PWD
-
-mpirun.lsf --gm-no-shmem --gm-copy-env $current_pwd/xmeshfem3D
-change_simulation_type.pl -F
-mpirun.lsf --gm-no-shmem --gm-copy-env $current_pwd/xspecfem3D
-
-#### this is the part needs to be revised for each kernel simulation #####
-cd $current_pwd/SEM
-## no need to collect the seismograms from the nodes anymore in v4.0
-mv ../OUTPUT_FILES/*.ascii .
-rename .semd.ascii .for *.semd.ascii
-# P waves
-/home/lqy/bin/utils_bin/xcut_velocity 460 525 3 T60*.for
-rename .for.ad .adj *.for.ad
-cd $current_pwd
-##########################################################################
-
-change_simulation_type.pl -b
-mpirun.lsf --gm-no-shmem --gm-copy-env $current_pwd/xspecfem3D
-

Deleted: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/go_solver
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/go_solver	2010-02-20 19:26:42 UTC (rev 16282)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/go_solver	2010-02-20 19:47:34 UTC (rev 16283)
@@ -1,52 +0,0 @@
-#!/bin/csh
-
-# script to run the solver
-
-# read DATA/Par_file to get information about the run
-
-# name of the file that contains the list of machines
-set machine_file = "mymachines"
-
-set my_local_path = `grep LOCAL_PATH DATA/Par_file | cut -d '=' -f 2 `
-
-# compute total number of processors needed
-set NCHUNKS = `grep NCHUNKS DATA/Par_file | cut -d '=' -f 2 `
-set NPROC_XI = `grep NPROC_XI DATA/Par_file | cut -d '=' -f 2 `
-set NPROC_ETA = `grep NPROC_ETA DATA/Par_file | cut -d '=' -f 2 `
-
-# total number of processors is the product of the values read
-@ numprocessors = $NCHUNKS * $NPROC_XI * $NPROC_ETA
-
-# suppress old processor list files
-rm -f PI*
-
-if ( -f $machine_file ) then
-  echo " "
-  echo using machine file \"$machine_file\"
-  echo " "
-  echo list of active machines:
-  echo " "
-  cat $machine_file
-  echo " "
-endif
-
-echo NCHUNKS = $NCHUNKS
-echo NPROC_XI = $NPROC_XI
-echo NPROC_ETA = $NPROC_ETA
-echo " "
-echo starting MPI solver on $numprocessors processors
-echo " "
-echo starting run in current directory $PWD
-echo " "
-echo mesh files will be read from directory $my_local_path
-echo " "
-
-#### use this on Beowulf
-mpirun -nolocal -machinefile $machine_file -np $numprocessors $PWD/xspecfem3D 
-
-#### use this on SGI
-#  mpirun -np $numprocessors xspecfem3D
-
-#### use this on Compaq Dec Alpha
-# dmpirun -np $numprocessors xspecfem3D
-

Deleted: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/remap_lsf_machines.pl
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/remap_lsf_machines.pl	2010-02-20 19:26:42 UTC (rev 16282)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/remap_lsf_machines.pl	2010-02-20 19:47:34 UTC (rev 16283)
@@ -1,23 +0,0 @@
-#!/usr/bin/perl -w
-
-# this program remaps the LSF output machine file to
-# the standard one-node-per-column machine file
-
-if (@ARGV != 1) {die("remap_lsf_machines.pl machinefile\n");}
-
-$machine = $ARGV[0];
-
-open(FILE,"$machine") or die("Error opening file $machine\n");
-(@junk) = <FILE>;
-close(FILE);
-
-for($i=0;$i<@junk;$i++) {
-  @node_array = split(" ",$junk[$i]);
-  foreach $node (@node_array) {
-	next if ( $node =~ /^[0-9]/ );
-  	push(@nodes, $node);
-  }
-}
-foreach $node (@nodes) {
-    print "$node\n";
-}

Deleted: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/run3d.csh
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/run3d.csh	2010-02-20 19:26:42 UTC (rev 16282)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/run3d.csh	2010-02-20 19:47:34 UTC (rev 16283)
@@ -1,29 +0,0 @@
-#!/bin/csh
-
-# compile and run the mesher and then the solver, then collect the seismograms
-
-set OUTDIR="SEM"
-
-sleep 1
-make clean
-sleep 1
-make meshfem3D
-sleep 10
-go_mesher
-
-sleep 5
-make clean
-sleep 1
-make specfem3D
-sleep 10
-go_solver
-
-sleep 5
-if (! -d $OUTDIR) then
-   mkdir $OUTDIR
-endif 
-cd $OUTDIR
-cp ../DATA/CMTSOLUTION .
-cp ../DATA/STATIONS .
-cp ../DATA/Par_file .
-

Deleted: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/run_lsf.bash
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/run_lsf.bash	2010-02-20 19:26:42 UTC (rev 16282)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/run_lsf.bash	2010-02-20 19:47:34 UTC (rev 16283)
@@ -1,38 +0,0 @@
-#!/bin/bash
-
-# this is the launch script to run a regular forward simulation
-# on CITerra at Caltech with the LSF scheduler
-# Qinya Liu, Caltech, May 2007
-
-# use the normal queue unless otherwise directed
-queue="-q normal"
-if [ $# -eq 1 ]; then
-	echo "Setting the queue to $1"
-	queue="-q $1"
-fi
-
-d=`date`
-echo "Starting compilation $d"
-make clean
-make xmeshfem3D
-make xcreate_header_file
-xcreate_header_file
-make xspecfem3D
-d=`date`
-echo "Finished compilation $d"
-
-# compute total number of nodes needed
-NPROC_XI=`grep NPROC_XI DATA/Par_file | cut -d = -f 2`
-NPROC_ETA=`grep NPROC_ETA DATA/Par_file | cut -d = -f 2`
-NCHUNKS=`grep NCHUNKS DATA/Par_file | cut -d = -f 2`
-
-# total number of nodes is the product of the values read
-numnodes=$(( $NCHUNKS * $NPROC_XI * $NPROC_ETA ))
-
-#rm -r -f OUTPUT_FILES/*
-
-echo "Submitting job"
-
-# time below is given in hh:mm
-bsub $queue -n $numnodes -W 48:00 -C 0 < go_mesher_solver_lsf_globe.bash
-

Deleted: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/run_lsf.kernel
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/run_lsf.kernel	2010-02-20 19:26:42 UTC (rev 16282)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/run_lsf.kernel	2010-02-20 19:47:34 UTC (rev 16283)
@@ -1,42 +0,0 @@
-#!/bin/bash
-
-# this is the launch script to run a kernel simulation 
-# on CITerra with the LSF scheduler
-#  Qinya Liu, Caltech, May 2007
-
-# use the normal queue unless otherwise directed
-queue="-q normal"
-if [ $# -eq 1 ]; then
-	echo "Setting the queue to $1"
-	queue="-q $1"
-fi
-
-rm -f OUTPUT_FILES/*
-
-d=`date`
-echo "Starting compilation $d"
-# you have to make sure you set this first before compiling the mesher & solver
-change_simulation_type.pl -b
-make clean
-make meshfem3D
-make create_header_file
-xcreate_header_file
-make specfem3D
-d=`date`
-echo "Finished compilation $d"
-
-# compute total number of nodes needed
-NPROC_XI=`grep NPROC_XI DATA/Par_file | cut -d = -f 2`
-NPROC_ETA=`grep NPROC_ETA DATA/Par_file | cut -d = -f 2`
-NCHUNKS=`grep NCHUNKS DATA/Par_file | cut -d = -f 2`
-
-# total number of nodes is the product of the values read
-numnodes=$(( $NCHUNKS * $NPROC_XI * $NPROC_ETA ))
-
-#rm -r -f OUTPUT_FILES/*
-
-echo "Submitting job"
-
-# time below is given in hh:mm
-bsub $queue -n $numnodes -W 48:00 -C 0 < go_mesher_solver_lsf_globe.kernel
-

Deleted: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/script_IDRIS_MPI.ll
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/script_IDRIS_MPI.ll	2010-02-20 19:26:42 UTC (rev 16282)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/script_IDRIS_MPI.ll	2010-02-20 19:47:34 UTC (rev 16283)
@@ -1,80 +0,0 @@
-
-# Choice of shell
-# @ shell = /bin/ksh
-
-# Number of MPI processes requested
-# @ total_tasks = 24
-
-# Maximum CPU time per MPI process in hh:mm:ss
-# @ cpu_limit = 04:59:59
-
-# Maximum memory used per process for data and for stack
-# @ data_limit = 2.0Gb
-
-# @ stack_limit = 1.0Gb,1.0Gb
-
-# Arbitrary name of the LoadLeveler job
-# @ job_name = run_SPECFEM3D_acoustic_MPI
-
-#----------------------------------------------------
-
-# Job type
-# @ job_type = parallel
-
-# Standard output file of the job
-# @ output = $(job_name).$(jobid)
-
-# Error output file of the job
-# @ error =  $(job_name).$(jobid)
-
-# @ queue
-
-#----------------------------------------------------
-
-# To echo the commands
-set -x
-
-##### name of the directory from which the code is launched
-##### and name of the subdirectory where the database will be stored
-##### the LOADL_STEP_INITDIR variable is automatically set by
-##### LoadLeveler to the directory in which the llsubmit command is typed
-export repertoire_code=$( basename $LOADL_STEP_INITDIR )
-export repertoire_database=DATABASES_MPI_DIMITRI
-
-# empty the subdirectories in the starting directory
-rm -r -f $LOADL_STEP_INITDIR/OUTPUT_FILES $LOADL_STEP_INITDIR/$repertoire_database
-
-# copy the source code from the starting directory to the temporary directory
-rm -r -f $TMPDIR/$repertoire_code
-cp -r -p $LOADL_STEP_INITDIR $TMPDIR
-
-# create the new subdirectories in the temporary directory
-mkdir $TMPDIR/$repertoire_code/OUTPUT_FILES $TMPDIR/$repertoire_code/$repertoire_database
-
-# go to the temporary directory
-cd $TMPDIR/$repertoire_code
-
-# compile the mesher and run it with MPI
-make clean
-make meshfem3D
-./xmeshfem3D
-
-# compile the solver and run it with MPI
-make clean
-make specfem3D
-./xspecfem3D
-
-# move the seismograms to the work directory
-mv $TMPDIR/$repertoire_code/$repertoire_database/*.semd $TMPDIR/$repertoire_code
-
-# delete the database that was created because it is very large
-rm -r -f $TMPDIR/$repertoire_code/$repertoire_database
-
-# retrieve the job ID
-export myjobid=$( echo $LOADL_STEP_ID | cut -d'.' -f4 )
-
-# move all the results to the workdir, appending the job ID to the name
-# first go back to home so that the current tmpdir directory can be deleted
-cd $HOME
-mv $TMPDIR/$repertoire_code $WORKDIR/${repertoire_code}_${myjobid}
-

Deleted: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/script_IDRIS_OpenMP.ll
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/script_IDRIS_OpenMP.ll	2010-02-20 19:26:42 UTC (rev 16282)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/script_IDRIS_OpenMP.ll	2010-02-20 19:47:34 UTC (rev 16283)
@@ -1,59 +0,0 @@
-# Arbitrary name of the LoadLeveler job
-# @ job_name = Maxwell_3D_FDTD_PML_OpenMP
-
-# Job type
-# @ job_type = serial
-
-# Choice of command interpreter
-# @ shell = /bin/ksh
-
-# Standard output file of the job
-# @ output = $(job_name).$(jobid)
-
-# Error output file of the job
-# @ error =  $(job_name).$(jobid)
-
-# Max CPU time in seconds (for 1 hour elapsed, count on OMP_NUM_THREADS hours)
-# @ cpu_limit = 43200
-
-# Max data memory used
-# @ data_limit = 11.5Gb
-
-# Max stack memory used
-# @ stack_limit = 1.2Gb,1.2Gb
-
-# Number of processors to assign to the OpenMP
-# threads (here 4; see the OMP_NUM_THREADS variable below).
-# @ resources = ConsumableCpus(4)
-
-# @ queue
-
-# To echo the commands
-set -x
-
-# Temporary work directory
-################cd $TMPDIR
-
-# the LOADL_STEP_INITDIR variable is automatically set by
-# LoadLeveler to the directory in which the llsubmit command is typed
-################cp $LOADL_STEP_INITDIR/source.f .
-
-# Temporary work directory
-cd $HOME/code_3D/with_PML_OpenMP_4tasks
-
-# Compile and link a free-format OpenMP program
-rm -f xonde3D
-xlf_r -qsmp=omp -O4 -qfree=f90 -qsuffix=f=f90 -o xonde3D onde3d_mathieu_maxwell_PML_12oct2005.f90
-
-# Max STACK memory (default 4 MB) used (here 64 MB) by
-# the private variables of each thread
-export XLSMPOPTS=stack=65536000
-
-# Environment variable giving the number of OpenMP threads
-# (use the same value as the one set above
-# in the threads_per_task directive)
-export OMP_NUM_THREADS=4
-
-# Run the OpenMP program
-./xonde3D
-

Deleted: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/script_IDRIS_serial.ll
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/script_IDRIS_serial.ll	2010-02-20 19:26:42 UTC (rev 16282)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/script_IDRIS_serial.ll	2010-02-20 19:47:34 UTC (rev 16283)
@@ -1,73 +0,0 @@
-
-# Choice of shell
-# @ shell = /bin/ksh
-
-# Max CPU time per MPI process in hh:mm:ss
-# @ cpu_limit = 00:59:59
-
-# Max memory used per process for data and for stack
-# @ data_limit = 14.5Gb
-
-# @ stack_limit = 1.0Gb,1.0Gb
-
-# Arbitrary name of the LoadLeveler job
-# @ job_name = run_CPML_serial
-
-#----------------------------------------------------
-
-# Standard output file of the job
-# @ output = $(job_name).$(jobid)
-
-# Error output file of the job
-# @ error =  $(job_name).$(jobid)
-
-# @ queue
-
-#----------------------------------------------------
-
-# To echo the commands
-set -x
-
-##### name of the directory from which the code is launched
-##### and name of the subdirectory where the database will be stored
-##### the LOADL_STEP_INITDIR variable is automatically set by
-##### LoadLeveler to the directory in which the llsubmit command is typed
-export repertoire_code=$( basename $LOADL_STEP_INITDIR )
-export repertoire_database=DATABASES_MPI_DIMITRI
-
-# empty the directories in home
-rm -r -f $LOADL_STEP_INITDIR/OUTPUT_FILES $LOADL_STEP_INITDIR/$repertoire_database
-
-# copy the source code from home to the temporary directory
-rm -r -f $TMPDIR/$repertoire_code
-cp -r -p $LOADL_STEP_INITDIR $TMPDIR
-
-# create the new temporary directories
-mkdir $TMPDIR/$repertoire_code/OUTPUT_FILES $TMPDIR/$repertoire_code/$repertoire_database
-
-cd $TMPDIR/$repertoire_code
-
-# compile the mesher and run it with MPI
-make clean
-make meshfem3D
-./xmeshfem3D
-
-# compile the solver and run it with MPI
-make clean
-make specfem3D
-./xspecfem3D
-
-# move the seismograms to the work directory
-mv $TMPDIR/$repertoire_code/$repertoire_database/*.semd $TMPDIR/$repertoire_code
-
-# delete the database that was created
-rm -r -f $TMPDIR/$repertoire_code/$repertoire_database
-
-# retrieve the job ID
-export myjobid=$( echo $LOADL_STEP_ID | cut -d'.' -f4 )
-
-# move all the results to the workdir, appending the job ID to the name
-# first go back to home so that the current tmpdir directory can be deleted
-cd $HOME
-mv $TMPDIR/$repertoire_code $WORKDIR/${repertoire_code}_${myjobid}
-

Deleted: seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/test_cache_size.f90
===================================================================
--- seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/test_cache_size.f90	2010-02-20 19:26:42 UTC (rev 16282)
+++ seismo/3D/SPECFEM3D_GLOBE/trunk/UTILS/test_cache_size.f90	2010-02-20 19:47:34 UTC (rev 16283)
@@ -1,128 +0,0 @@
-!=====================================================================
-!
-!          S p e c f e m 3 D  G l o b e  V e r s i o n  4 . 0
-!          --------------------------------------------------
-!
-!          Main authors: Dimitri Komatitsch and Jeroen Tromp
-!    Seismological Laboratory, California Institute of Technology, USA
-!                    and University of Pau, France
-! (c) California Institute of Technology and University of Pau, October 2007
-!
-! This program is free software; you can redistribute it and/or modify
-! it under the terms of the GNU General Public License as published by
-! the Free Software Foundation; either version 2 of the License, or
-! (at your option) any later version.
-!
-! This program is distributed in the hope that it will be useful,
-! but WITHOUT ANY WARRANTY; without even the implied warranty of
-! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-! GNU General Public License for more details.
-!
-! You should have received a copy of the GNU General Public License along
-! with this program; if not, write to the Free Software Foundation, Inc.,
-! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-!
-!=====================================================================
-
-  program test_cache_size
-
-! test processor cache size as in the book of Deville et al. (2002), Chapter 8
-
-! Dimitri Komatitsch, University of Pau, France, May 2007
-
-  implicit none
-
-! timer to count elapsed time
-  character(len=8) datein
-  character(len=10) timein
-  character(len=5)  :: zone
-  integer, dimension(8) :: time_values_in,time_values_out
-  double precision :: time_start,time_end
-
-  integer, parameter :: NELEM_ARRAY_MAX = 10000000
-
-  real, dimension(NELEM_ARRAY_MAX) :: a
-
-  real :: c
-
-  double precision :: megaflops
-
-  integer :: NELEM_ARRAY,l,chiffre,puissance,NLOOP
-
-!***********************************************************************
-
-! count elapsed wall-clock time
-  datein = ''
-  timein = ''
-  zone = ''
-
-  NELEM_ARRAY = 1000
-
-  c = 1.0
-
-  do puissance = 0,6
-
-  do chiffre = 1,9
-
-  NELEM_ARRAY = chiffre * 10**puissance
-
-  a(:) = 0.0
-
-  call c_add(a,c,NELEM_ARRAY)
-
-  if(NELEM_ARRAY <= 1000) then
-    NLOOP = 10000000
-  else if(NELEM_ARRAY <= 10000) then
-    NLOOP = 600000
-  else if(NELEM_ARRAY <= 100000) then
-    NLOOP = 60000
-  else
-    NLOOP = 5000
-  endif
-
-  call date_and_time(datein,timein,zone,time_values_in)
-  do l = 1,NLOOP
-    call c_add(a,c,NELEM_ARRAY)
-  enddo
-  call date_and_time(datein,timein,zone,time_values_out)
-
-! time_values_in(3): day of the month
-! time_values_in(5): hour of the day
-! time_values_in(6): minutes of the hour
-! time_values_in(7): seconds of the minute
-! time_values_in(8): milliseconds of the second
-
-! this fails if we cross the end of the hour
-  time_start = 60000.d0*time_values_in(6) + 1000.d0*time_values_in(7) + time_values_in(8)
-  time_end   = 60000.d0*time_values_out(6) + 1000.d0*time_values_out(7) + time_values_out(8)
-
-  megaflops = dble(NELEM_ARRAY) * dble(NLOOP) / (time_end - time_start) / 1.d3
-
-  print *,NELEM_ARRAY,' elements -> megaflops = ',megaflops
-
-  enddo
-
-  enddo
-
-  end program test_cache_size
-
-!---
-
-  subroutine c_add(a,c,NELEM_ARRAY)
-
-  implicit none
-
-  integer :: NELEM_ARRAY
-
-  integer :: i
-
-  real, dimension(NELEM_ARRAY) :: a
-
-  real :: c
-
-  do i = 1,NELEM_ARRAY
-   a(i) = a(i) + c
-  enddo
-
-  end subroutine c_add
-


