[cig-commits] r18568 - mc/3D/CitcomCU/trunk/src

becker at geodynamics.org becker at geodynamics.org
Thu Jun 9 11:50:27 PDT 2011


Author: becker
Date: 2011-06-09 11:50:26 -0700 (Thu, 09 Jun 2011)
New Revision: 18568

Modified:
   mc/3D/CitcomCU/trunk/src/Advection_diffusion.c
   mc/3D/CitcomCU/trunk/src/Citcom.c
   mc/3D/CitcomCU/trunk/src/Composition_adv.c
   mc/3D/CitcomCU/trunk/src/Drive_solvers.c
   mc/3D/CitcomCU/trunk/src/Makefile.gzdir_ani
   mc/3D/CitcomCU/trunk/src/Output_gzdir.c
   mc/3D/CitcomCU/trunk/src/Parallel_related.c
   mc/3D/CitcomCU/trunk/src/global_defs.h
Log:
Checking in this version, getting rid of (hopefully) all debug
statements. It turns out the bug I was hunting was in our MPI install
(thanks for the suggestion, Eh!). This version adds flavor-dependent
plasticity as an experimental feature; it seems to work.
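
For reference, the debug messages are not deleted outright but gated: each
commented-out fprintf becomes an if(step_debug && (E->parallel.me == 0))
check, so the tracing can be switched back on per file by flipping one flag
instead of uncommenting many lines. A minimal, self-contained sketch of that
pattern (the trace() helper and my_rank are illustrative, not from the
source; in CitcomCU the check is written inline against E->parallel.me):

#include <stdio.h>

static int step_debug = 0;      /* flip to 1 to re-enable the trace output */

static void trace(int my_rank, const char *msg)
{
	if (step_debug && (my_rank == 0))  /* rank 0 only, and only when enabled */
		fprintf(stderr, "%s\n", msg);
}

int main(void)
{
	int my_rank = 0;                    /* would come from MPI_Comm_rank()   */
	trace(my_rank, "PGp: std advect");  /* silent unless step_debug is set   */
	return 0;
}
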




Modified: mc/3D/CitcomCU/trunk/src/Advection_diffusion.c
===================================================================
--- mc/3D/CitcomCU/trunk/src/Advection_diffusion.c	2011-06-09 18:03:19 UTC (rev 18567)
+++ mc/3D/CitcomCU/trunk/src/Advection_diffusion.c	2011-06-09 18:50:26 UTC (rev 18568)
@@ -132,6 +132,7 @@
 	//static int loops_since_new_eta = 0;
 	static int been_here = 0;
 	static int on_off = 0;
+	static int step_debug = 0;
 
 	DTdot = (float *)malloc((E->lmesh.nno + 1) * sizeof(float));
 	Tdot1 = (float *)malloc((E->lmesh.nno + 1) * sizeof(float));
@@ -143,7 +144,7 @@
 
 	if(on_off == 0)
 	{
-	  //if(E->parallel.me == 0)fprintf(stderr,"PGp: std advect\n");
+	  if(step_debug && (E->parallel.me == 0))fprintf(stderr,"PGp: std advect\n");
 		E->advection.timesteps++;
 		std_timestep(E);
 		E->advection.total_timesteps++;
@@ -151,12 +152,12 @@
 
 	if(on_off == 1)
 	{
-	  //if(E->parallel.me == 0)fprintf(stderr,"PGp: RK\n");
+	  if(step_debug && (E->parallel.me == 0))fprintf(stderr,"PGp: RK\n");
 		Runge_Kutta(E, E->C, E->V, on_off);
 	}
 	else if(on_off == 0)
 	{
-	  //if(E->parallel.me == 0)fprintf(stderr,"PGp: main\n");
+	  if(step_debug && (E->parallel.me == 0))fprintf(stderr,"PGp: main\n");
 		for(i = 1; i <= E->lmesh.nno; i++)
 		{
 			T1[i] = E->T[i];
@@ -181,12 +182,12 @@
 
 			if(E->advection.ADVECTION)
 			{
-			  //if(E->parallel.me == 0)fprintf(stderr,"PGp: advect predict\n");
+			  if(step_debug && (E->parallel.me == 0))fprintf(stderr,"PGp: advect predict\n");
 				predictor(E, E->T, E->Tdot);
 
 				for(psc_pass = 0; psc_pass < E->advection.temp_iterations; psc_pass++)
 				{
-				  //if(E->parallel.me == 0)fprintf(stderr,"PGp: advect correct\n");
+				  if(step_debug && (E->parallel.me == 0))fprintf(stderr,"PGp: advect correct\n");
 					pg_solver(E, E->T, E->Tdot, DTdot, E->V, E->convection.heat_sources, 1.0, 1, E->TB, E->node);
 					corrector(E, E->T, E->Tdot, DTdot);
 				}
@@ -205,7 +206,7 @@
 				E->advection.dt_reduced *= 0.5;
 				E->advection.last_sub_iterations++;
 			}
-			//if(E->parallel.me == 0)fprintf(stderr,"PGp: main iter %i\n",E->advection.last_sub_iterations);
+			if(step_debug && (E->parallel.me == 0))fprintf(stderr,"PGp: main iter %i\n",E->advection.last_sub_iterations);
 		} while(iredo == 1 && E->advection.last_sub_iterations <= 5);
 
 
@@ -214,12 +215,12 @@
 		temperatures_conform_bcs(E);
 		E->advection.last_sub_iterations = count;
 		
-		//if(E->parallel.me == 0)fprintf(stderr,"PGp: Euler %i\n",on_off);
+		if(step_debug && (E->parallel.me == 0))fprintf(stderr,"PGp: Euler %i\n",on_off);
 		Euler(E, E->C, E->V, on_off);
-		//if(E->parallel.me == 0)fprintf(stderr,"PGp: Euler done\n");
+		if(step_debug && (E->parallel.me == 0))fprintf(stderr,"PGp: Euler done\n");
 		E->monitor.elapsed_time += E->advection.timestep;
 	}							/* end for on_off==0  */
-	//if(E->parallel.me == 0)fprintf(stderr,"PGp: thermal\n");
+	if(step_debug && (E->parallel.me == 0))fprintf(stderr,"PGp: thermal\n");
 	thermal_buoyancy(E);
 	if(E->monitor.solution_cycles < E->advection.max_timesteps)
 		E->control.keep_going = 1;

Modified: mc/3D/CitcomCU/trunk/src/Citcom.c
===================================================================
--- mc/3D/CitcomCU/trunk/src/Citcom.c	2011-06-09 18:03:19 UTC (rev 18567)
+++ mc/3D/CitcomCU/trunk/src/Citcom.c	2011-06-09 18:50:26 UTC (rev 18568)
@@ -55,9 +55,9 @@
 {
 	struct All_variables E;
 	double time, initial_time, start_time;
+	int debug_steps = 0;
 
 	srand48((long int)-1);
-/*	parallel_process_initialization(&E,argc,argv); */
 
 	E.parallel.me = 0;
 	E.parallel.nproc = 1;
@@ -94,11 +94,11 @@
 		fprintf(E.fp, "Initialization overhead = %f\n", initial_time);
 		initial_time = CPU_time0();
 	}
-	//if(E.parallel.me == 0)fprintf(stderr,"solving stokes\n");
+	if(debug_steps && (E.parallel.me==0))fprintf(stderr,"solving stokes\n");
 	general_stokes_solver(&E);
-	//if(E.parallel.me == 0)fprintf(stderr,"processing temp\n");
+	if(debug_steps && (E.parallel.me==0))fprintf(stderr,"processing temp\n");
 	process_temp_field(&E, E.monitor.solution_cycles);
-	//if(E.parallel.me == 0)fprintf(stderr,"processing vel\n");
+	if(debug_steps && (E.parallel.me==0))fprintf(stderr,"processing vel\n");
 	process_new_velocity(&E, E.monitor.solution_cycles);
 
 	if(E.control.stokes)
@@ -113,33 +113,33 @@
 	  
 	  
 		process_heating(&E);
-		//if(E.parallel.me==0)fprintf(stderr,"process heating done\n");
+		if(debug_steps && (E.parallel.me==0))fprintf(stderr,"process heating done\n");
 		
 		E.monitor.solution_cycles++;
 		if(E.monitor.solution_cycles > E.control.print_convergence)
 			E.control.print_convergence = 1;
 		 /**/ report(&E, "Update buoyancy for further `timesteps'");
 		(E.next_buoyancy_field) (&E);
-		if(E.parallel.me==0)fprintf(stderr,"buoyancy field done\n");
+		if(debug_steps && (E.parallel.me==0))fprintf(stderr,"buoyancy field done\n");
 
 		 /**/ report(&E, "Process results of buoyancy update");
 		process_temp_field(&E, E.monitor.solution_cycles);
-  		if(E.parallel.me==0)fprintf(stderr,"temp field done\n");
+  		if(debug_steps && (E.parallel.me==0))fprintf(stderr,"temp field done\n");
 
 		if(E.monitor.solution_cycles == E.control.freeze_surface_at_step){ /* for testing purposes */
 		  freeze_surface(&E);
 		}
 
 		general_stokes_solver(&E);
-		//if(E.parallel.me==0)fprintf(stderr,"stokes solver done\n");
+		if(debug_steps && (E.parallel.me==0))fprintf(stderr,"stokes solver done\n");
 
 		if(E.control.composition){
 		  (E.next_buoyancy_field) (&E);	/* correct with R-G */
-		  //if(E.parallel.me==0)fprintf(stderr,"next buoyancy composition done\n");
+		  if(debug_steps && (E.parallel.me==0))fprintf(stderr,"next buoyancy composition done\n");
 		}
 		 /**/ report(&E, "Process results of velocity solver");
 		process_new_velocity(&E, E.monitor.solution_cycles);
-		//if(E.parallel.me==0)fprintf(stderr,"process new velocity done\n");
+		if(debug_steps && (E.parallel.me==0))fprintf(stderr,"process new velocity done\n");
 
 
 		if(E.monitor.T_interior > E.monitor.T_interior_max)
@@ -150,7 +150,9 @@
 		}
 		if(E.parallel.me == 0)
 		{
-			fprintf(E.fp, "CPU total = %g & CPU = %g for step %d time = %.4e dt = %.4e  maxT = %.4e sub_iteration%d markers=%d\n", CPU_time0() - start_time, CPU_time0() - time, E.monitor.solution_cycles, E.monitor.elapsed_time, E.advection.timestep, E.monitor.T_interior, E.advection.last_sub_iterations, E.advection.markers_g);
+			fprintf(E.fp, "CPU total = %g & CPU = %g for step %d time = %.4e dt = %.4e  maxT = %.4e sub_iteration%d markers=%d\n", 
+				CPU_time0() - start_time, CPU_time0() - time, E.monitor.solution_cycles, 
+				E.monitor.elapsed_time, E.advection.timestep, E.monitor.T_interior, E.advection.last_sub_iterations, E.advection.markers_g);
 			time = CPU_time0();
 		}
 

Modified: mc/3D/CitcomCU/trunk/src/Composition_adv.c
===================================================================
--- mc/3D/CitcomCU/trunk/src/Composition_adv.c	2011-06-09 18:03:19 UTC (rev 18567)
+++ mc/3D/CitcomCU/trunk/src/Composition_adv.c	2011-06-09 18:50:26 UTC (rev 18568)
@@ -139,6 +139,7 @@
 		}
 	}
 	transfer_markers_processors(E, on_off);
+
 	/*   predicted compositional field at t+dt  */
 	element_markers(E, on_off);
 	/* update nodal values */
@@ -168,15 +169,19 @@
 	{
 		markers = E->advection.markers / 10;
 		asize = (markers + 1) * E->mesh.nsd * 2;
+		if( E->parallel.no_neighbors >= MAX_NEIGHBORS)
+		  myerror("error, number of neighbors out of bounds",E);
 		for(neighbor = 1; neighbor <= E->parallel.no_neighbors; neighbor++)
 		{
 			E->parallel.traces_transfer_index[neighbor] = (int *)malloc((markers + 1) * sizeof(int));
+
 			E->RVV[neighbor] = (float *)malloc(asize * sizeof(int));
 			E->RXX[neighbor] = (double *)malloc(asize * sizeof(double));
 			E->RINS[neighbor] = (int *)malloc((markers + 1) * (2 + E->tracers_add_flavors) * sizeof(int));
 			E->PVV[neighbor] = (float *)malloc(asize  * sizeof(int));
 			E->PXX[neighbor] = (double *)malloc(asize * sizeof(double));
 			E->PINS[neighbor] = (int *)malloc((markers + 1) * (2 + E->tracers_add_flavors) * sizeof(int));
+
 		}
 		E->traces_leave_index = (int *)malloc((markers + 1) * sizeof(int));
 		been++;
@@ -249,7 +254,11 @@
 	{
 		no_transferred += E->parallel.traces_transfer_number[neighbor];
 	}
+
+	/* this is the bad routine!!! */
 	prepare_transfer_arrays(E);
+	
+
 	exchange_number_rec_markers(E);
 
 	no_received = 0;
@@ -271,6 +280,7 @@
 
 	unify_markers_array(E, no_transferred, no_received);
 
+
 	return;
 }
 
@@ -412,17 +422,26 @@
 void prepare_transfer_arrays(struct All_variables *E)
 {
   int j, part, neighbor, k1, k2, k3,k;
-	//if(E->parallel.me==0)fprintf(stderr,"ta 1 ok\n");
+  static int asize, markers,bsize,been_here = 0;
+  if(!been_here){
+    markers = E->advection.markers / 10;
+    asize = (markers + 1) * E->mesh.nsd * 2;
+    bsize = (markers + 1) * (2 + E->tracers_add_flavors);
+    been_here = 1;
+  }
+  parallel_process_sync();
+
+  //if(E->parallel.me==0)fprintf(stderr,"ta 1 ok\n");
 	for(neighbor = 1; neighbor <= E->parallel.no_neighbors; neighbor++)
 	{
 		k1 = k2 = k3 = 0;
-		//if((E->parallel.me==0) && (E->monitor.solution_cycles>199))
-		//fprintf(stderr,"ta %i %i %i - %i %i - %i %i %i \n",neighbor, E->parallel.no_neighbors,E->parallel.traces_transfer_number[neighbor],E->parallel.traces_transfer_number[neighbor]*6,E->parallel.traces_transfer_number[neighbor]*2,E->advection.markers / 10 ,(E->advection.markers / 10 + 1) * E->mesh.nsd * 2  ,(E->advection.markers / 10 + 1) *2);
 		for(j = 0; j < E->parallel.traces_transfer_number[neighbor]; j++)
 		{
 			part = E->parallel.traces_transfer_index[neighbor][j];
-			if((part > E->advection.markers)||(part<1)){fprintf(stderr,"pta: out of bounds %i %i\n",part,E->advection.markers);}
-			//if((E->parallel.me==0) && (E->monitor.solution_cycles>199))fprintf(stderr,"%i %i %i\n",neighbor,j,part);
+			//if((part > E->advection.markers)||(part<1)){fprintf(stderr,"pta: out of bounds %i %i\n",part,E->advection.markers);myerror("pta out of bounds",E);}
+			//if(E->monitor.solution_cycles>199)fprintf(stderr,"%i --- %i/%i %i/%i %i\n",E->parallel.me,neighbor,E->parallel.no_neighbors,j,E->parallel.traces_transfer_number[neighbor],part);
+			
+			//if(k1+6 >= asize){fprintf(stderr,"k1 %i asize %i out of bounds\n",k1,asize);myerror("exit",E);};
 			E->PVV[neighbor][k1++] = E->VO[1][part];
 			E->PVV[neighbor][k1++] = E->VO[2][part];
 			E->PVV[neighbor][k1++] = E->VO[3][part];
@@ -430,6 +449,8 @@
 			E->PVV[neighbor][k1++] = E->Vpred[2][part];
 			E->PVV[neighbor][k1++] = E->Vpred[3][part];
 
+
+			//if(k2+6 >= asize){fprintf(stderr,"k2 %i asize %i out of bounds\n",k2,asize);myerror("exit",E);};
 			E->PXX[neighbor][k2++] = E->XMC[1][part];
 			E->PXX[neighbor][k2++] = E->XMC[2][part];
 			E->PXX[neighbor][k2++] = E->XMC[3][part];
@@ -437,13 +458,14 @@
 			E->PXX[neighbor][k2++] = E->XMCpred[2][part];
 			E->PXX[neighbor][k2++] = E->XMCpred[3][part];
 
+
+			//if(k3+2+E->tracers_add_flavors >= bsize){fprintf(stderr,"k3 %i bsize %i out of bounds\n",k3,bsize);myerror("exit",E);};
 			E->PINS[neighbor][k3++] = E->C12[part];
 			E->PINS[neighbor][k3++] = E->CElement[part];
 			for(k=0;k < E->tracers_add_flavors;k++)
 			  E->PINS[neighbor][k3++] = E->tflavors[part][k];
 
 		}
-		//if((E->parallel.me==0) && (E->monitor.solution_cycles>199))fprintf(stderr,"ta inner loop ok\n");
 	}
 	//if(E->parallel.me==0)fprintf(stderr,"ta 2 ok\n");
 	return;
@@ -544,9 +566,9 @@
 		}
 		E->CE[el] = temp3;
 	}
-	//if(E->parallel.me==0)fprintf(stderr,"gcfm: exchange\n");
+
 	exchange_node_f20(E, C, E->mesh.levmax);
-	//if(E->parallel.me==0)fprintf(stderr,"gcfm: exchange done\n");
+
 	for(node = 1; node <= nno; node++)
 	{
 		C[node] = C[node] * E->Mass[node];

Modified: mc/3D/CitcomCU/trunk/src/Drive_solvers.c
===================================================================
--- mc/3D/CitcomCU/trunk/src/Drive_solvers.c	2011-06-09 18:03:19 UTC (rev 18567)
+++ mc/3D/CitcomCU/trunk/src/Drive_solvers.c	2011-06-09 18:50:26 UTC (rev 18568)
@@ -54,7 +54,8 @@
   static int damp=0,visits = 0;
   
   const int neq = E->lmesh.neq;
-  
+  const int step_debug = 0;
+
   int iterate; 
   
   iterate = need_to_iterate(E);
@@ -107,11 +108,12 @@
   E->monitor.visc_iter_count = 0;
   
   do{
+    if(step_debug && (E->parallel.me==0))fprintf(stderr,"dealing with viscosity\n");
     if(E->viscosity.update_allowed)
       get_system_viscosity(E, 1, E->EVI[E->mesh.levmax], E->VI[E->mesh.levmax]);
     
     construct_stiffness_B_matrix(E);
-    //if(E->parallel.me==0)fprintf(stderr,"calling solver\n");
+    if(step_debug && (E->parallel.me==0))fprintf(stderr,"calling solver\n");
 	
     solve_constrained_flow_iterative(E);
     

Modified: mc/3D/CitcomCU/trunk/src/Makefile.gzdir_ani
===================================================================
--- mc/3D/CitcomCU/trunk/src/Makefile.gzdir_ani	2011-06-09 18:03:19 UTC (rev 18567)
+++ mc/3D/CitcomCU/trunk/src/Makefile.gzdir_ani	2011-06-09 18:50:26 UTC (rev 18568)
@@ -43,10 +43,6 @@
 #	Operating System Variables
 ###################################################################
 
-#CC=/usr/local/mpich/bin/mpicc
-#CC=/opt/mpich/bin/mpicc
-#CC=/usr/lib/cmplrs/cc/gemc_cc
-#CC=$(HOME)/usr/local/mpich/bin/mpicc
 CC=mpicc
 F77=mpif77
 CPP=
@@ -60,69 +56,9 @@
 FOBJFLAG=-c
 
 
-###################################
-# Choose your machine from here.
-###################################
-
-####################################
-# Dec Alpha, OSF 1
-AXPFLAGS= -unsigned  -non_shared  \
-	-math_library=fast -prefix=all -reentrancy=none -assume=noaccuracy_sensitive \
-	-unsigned_char -extern=strict_refdef -trapuv #  -D_INTRINSICS 
-AXPLDFLAGS= -unsigned -assume=noaccuracy_sensitive -non_shared -D_INTRINSICS 
-AXPOPTIM= -O -Ublas -float -Olimit 1000 # -cord  -feedback citcom.feedback
-####################################
-
-####################################
-# CRAY Unicos systems
-CRAYFLAGS=
-CRAYLDFLAGS=
-CRAYOPTIM=
-####################################
-
-####################################
-#IBM AIX systems
-AIXFLAGS= -D__aix__
-AIXLDFLAGS=
-AIXOPTIM= -O2 -qarch=pwr -qopt=pwr -Ublas
-####################################
-
-####################################
-# SUNOS systems
-SUNFLAGS= -D__sunos__ -Dconst=""
-SUNLDFLAGS=
-SUNOPTIM=-O -fsingle
-####################################
-
-####################################
-# Solaris systems
-SOLARISFLAGS= -D__solaris -Dconst="" -I/opt/mpi/include
-SOLARISLDFLAGS=-fast -lsocket -lnsl -lthread
-SOLARISOPTIM=-fast -xO4 -dalign -xtarget=ultra -xarch=v8plus -fsingle
-####################################
-
-####################################
-#HP running HPUX
-HPUXFLAGS=-Dconst=""
-HPUXLDFLAGS=
-HPUXOPTIM=+O3 +Onolimit +Odataprefetch
-#HPUXOPTIM=+O4 +Onolimit +Odataprefetch +Ofastaccess
-#HPUXOPTIM3=+O3 +Onolimit +Odataprefetch
-####################################
-
-####################################
-# SGI with IRIX 
-SGIFLAGS=
-SGILDFLAGS=
-SGIOPTIM=-O -fsingle
-####################################
-
-#LinuxFLAGS=-I/usr/local/mpi/include
-#LinuxFLAGS=-I/opt/mpi/include
-#LinuxFLAGS=-I$(HOME)/usr/include
 LinuxFLAGS=
 LinuxLDFLAGS=
-#LinuxOPTIM=-g
+#LinuxOPTIM=-g -O0 -Werror $(DEFINES)
 #LinuxOPTIM=-O3 -mtune=core2 -Werror $(DEFINES)
 LinuxOPTIM=-O2 -Werror $(DEFINES)
 
@@ -205,7 +141,7 @@
 default: citcom.mpi 
 
 citcom.mpi: $(OBJFILES) $(HEADER) Makefile
-	$(F77) $(OPTIM) $(FLAGS) $(LDFLAGS) -o citcom.mpi \
+	$(F77) $(OPTIM) $(FLAGS) $(LDFLAGS) -o citcom.mpi -nofor_main \
 	$(OBJFILES)  $(FFTLIB)  $(LIB)
 
 clean:

Modified: mc/3D/CitcomCU/trunk/src/Output_gzdir.c
===================================================================
--- mc/3D/CitcomCU/trunk/src/Output_gzdir.c	2011-06-09 18:03:19 UTC (rev 18567)
+++ mc/3D/CitcomCU/trunk/src/Output_gzdir.c	2011-06-09 18:50:26 UTC (rev 18568)
@@ -667,9 +667,8 @@
       }
     }
 
-  if(E->parallel.me==0)fprintf(stderr,"vel output done\n");
-  parallel_process_sync();
-
+  //if(E->parallel.me==0)fprintf(stderr,"vel output done\n");
+  
   return;
 }
 

Modified: mc/3D/CitcomCU/trunk/src/Parallel_related.c
===================================================================
--- mc/3D/CitcomCU/trunk/src/Parallel_related.c	2011-06-09 18:03:19 UTC (rev 18567)
+++ mc/3D/CitcomCU/trunk/src/Parallel_related.c	2011-06-09 18:50:26 UTC (rev 18568)
@@ -959,8 +959,8 @@
 
 	static int been_here = 0;
 
-	MPI_Status status[100];
-	MPI_Request request[100];
+	MPI_Status status[CU_MPI_MSG_LIM];
+	MPI_Request request[CU_MPI_MSG_LIM];
 
 	if(been_here == 0)
 	{
@@ -1010,8 +1010,8 @@
 	int target_proc, kk, k, idb;
 	int rioff;
 
-	MPI_Status status[100];
-	MPI_Request request[100];
+	MPI_Status status[CU_MPI_MSG_LIM];
+	MPI_Request request[CU_MPI_MSG_LIM];
 
 	rioff = 2 + E->tracers_add_flavors;
 
@@ -1071,8 +1071,8 @@
 	static int sizeofk;
 	const int levmax = E->mesh.levmax;
 
-	MPI_Status status[100];
-	MPI_Request request[100];
+	MPI_Status status[CU_MPI_MSG_LIM];
+	MPI_Request request[CU_MPI_MSG_LIM];
 
 	if(been_here == 0)
 	{
@@ -1160,8 +1160,8 @@
 	static int sizeofk;
 	const int levmax = E->mesh.levmax;
 
-	MPI_Status status[100];
-	MPI_Request request[100];
+	MPI_Status status[CU_MPI_MSG_LIM];
+	MPI_Request request[CU_MPI_MSG_LIM];
 
 	if(been_here == 0)
 	{
@@ -1247,8 +1247,8 @@
   static int sizeofk;
   const int levmax = E->mesh.levmax;
   
-  MPI_Status status[100];
-  MPI_Request request[100];
+  MPI_Status status[CU_MPI_MSG_LIM];
+  MPI_Request request[CU_MPI_MSG_LIM];
   
   if(been_here == 0){
       sizeofk = 0;

Modified: mc/3D/CitcomCU/trunk/src/global_defs.h
===================================================================
--- mc/3D/CitcomCU/trunk/src/global_defs.h	2011-06-09 18:03:19 UTC (rev 18567)
+++ mc/3D/CitcomCU/trunk/src/global_defs.h	2011-06-09 18:50:26 UTC (rev 18568)
@@ -92,6 +92,11 @@
 #define MAX_F    10
 #define MAX_S    30
 
+#define MAX_NEIGHBORS 27
+
+//#define CU_MPI_MSG_LIM 100
+#define CU_MPI_MSG_LIM 1000
+
 /* Macros */
 
 #define max(A,B) (((A) > (B)) ? (A) : (B))
@@ -494,11 +499,11 @@
 
 	int me_sph;
 	int no_neighbors;
-	int neighbors[27];
+	int neighbors[MAX_NEIGHBORS];
 	int *neighbors_rev;
-	int traces_receive_number[27];
-	int traces_transfer_number[27];
-	int *traces_transfer_index[27];
+	int traces_receive_number[MAX_NEIGHBORS];
+	int traces_transfer_number[MAX_NEIGHBORS];
+	int *traces_transfer_index[MAX_NEIGHBORS];
 };
 
 struct MESH_DATA
@@ -933,9 +938,10 @@
 	int *Node_eqn[MAX_LEVELS];
 	int *Node_k_id[MAX_LEVELS];
 
-	float *RVV[27], *PVV[27];
-	double *RXX[27], *PXX[27];
-	int *RINS[27], *PINS[27];
+  
+	float *RVV[MAX_NEIGHBORS], *PVV[MAX_NEIGHBORS];
+	double *RXX[MAX_NEIGHBORS], *PXX[MAX_NEIGHBORS];
+	int *RINS[MAX_NEIGHBORS], *PINS[MAX_NEIGHBORS];
 
 	float *VO[4];
 	float *Vpred[4];
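
A note on the new limits in global_defs.h: MAX_NEIGHBORS = 27 presumably
corresponds to the 3x3x3 neighborhood of a 3D Cartesian processor
decomposition (the neighbor arrays are indexed from 1, so 27 slots cover up
to 26 neighbors), and CU_MPI_MSG_LIM replaces the previously hard-coded
MPI_Status/MPI_Request array size of 100 with 1000. A minimal sketch of the
kind of bounds check these macros enable (the variable values and messages
below are illustrative; only the two macros come from the source):

#include <stdio.h>
#include <stdlib.h>

#define MAX_NEIGHBORS  27     /* neighbor arrays indexed 1..no_neighbors     */
#define CU_MPI_MSG_LIM 1000   /* size of the MPI_Status/MPI_Request arrays   */

int main(void)
{
	int no_neighbors = 26;    /* would come from the parallel decomposition  */
	int msg_count    = 52;    /* would be the number of posted MPI messages  */

	if (no_neighbors >= MAX_NEIGHBORS) {
		fprintf(stderr, "number of neighbors out of bounds\n");
		return EXIT_FAILURE;
	}
	if (msg_count >= CU_MPI_MSG_LIM) {
		fprintf(stderr, "MPI message count exceeds CU_MPI_MSG_LIM\n");
		return EXIT_FAILURE;
	}
	printf("within limits: %d neighbors, %d messages\n", no_neighbors, msg_count);
	return 0;
}
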


