[cig-commits] r11904 - mc/3D/CitcomCU/trunk/src

tan2 at geodynamics.org
Fri May 2 16:39:49 PDT 2008


Author: tan2
Date: 2008-05-02 16:39:49 -0700 (Fri, 02 May 2008)
New Revision: 11904

Modified:
   mc/3D/CitcomCU/trunk/src/Parallel_related.c
   mc/3D/CitcomCU/trunk/src/global_defs.h
Log:
Removed the parallel.mst* arrays. This lifts the restriction on the number of processors the code can use (the arrays' fixed [100][100] dimensions capped it at 100).

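The mst arrays were per-processor-pair lookup tables used only to generate MPI message tags, and their static [100][100] dimensions bounded the run at 100 processors. A constant tag per message kind (the literals 1, 2, 3 in the diff below) is sufficient here because MPI matches a receive to a send by communicator, peer rank, and tag, so distinct per-pair tags are unnecessary as long as each pair of processors exchanges at most one message of a given kind per communication phase. A minimal sketch of the resulting fixed-tag exchange pattern follows; it is illustrative only, not the CitcomCU source, and the buffer layout and names such as exchange, neighbors, and nn are assumptions:

#include <stdlib.h>
#include <mpi.h>

#define TAG_DATA 1  /* one fixed tag per message kind, as in tags 1/2/3 below */

/* Exchange `count` doubles with each of `nn` neighbor ranks using a
 * constant tag.  MPI pairs each Irecv with the matching Isend by
 * (communicator, peer rank, tag), so no per-pair tag table is needed.
 * Hypothetical helper, not taken from CitcomCU. */
void exchange(double *sendbuf, double *recvbuf, int count,
              const int *neighbors, int nn)
{
    MPI_Request *req = malloc(2 * nn * sizeof(MPI_Request));
    int idb = 0, k;

    for (k = 0; k < nn; k++)          /* post all non-blocking sends */
        MPI_Isend(sendbuf + k * count, count, MPI_DOUBLE,
                  neighbors[k], TAG_DATA, MPI_COMM_WORLD, &req[idb++]);

    for (k = 0; k < nn; k++)          /* post the matching receives */
        MPI_Irecv(recvbuf + k * count, count, MPI_DOUBLE,
                  neighbors[k], TAG_DATA, MPI_COMM_WORLD, &req[idb++]);

    MPI_Waitall(idb, req, MPI_STATUSES_IGNORE);
    free(req);
}

With the tag tables gone, the remaining per-processor bookkeeping scales with the number of neighbors rather than a compile-time constant, which is what removes the processor-count cap.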

Modified: mc/3D/CitcomCU/trunk/src/Parallel_related.c
===================================================================
--- mc/3D/CitcomCU/trunk/src/Parallel_related.c	2008-05-02 23:36:19 UTC (rev 11903)
+++ mc/3D/CitcomCU/trunk/src/Parallel_related.c	2008-05-02 23:39:49 UTC (rev 11904)
@@ -103,22 +103,6 @@
 	E->parallel.nprocxy = E->parallel.nprocx * E->parallel.nprocy;
 	E->parallel.nproczy = E->parallel.nprocz * E->parallel.nprocy;
 
-	k = 0;
-	for(j = 0; j < E->parallel.nproc; j++)
-		for(i = 0; i <= j; i++)
-		{
-			E->parallel.mst[j][i][1] = k++;
-			E->parallel.mst[j][i][2] = k++;
-		}
-	for(j = 0; j < E->parallel.nproc; j++)
-		for(i = 0; i <= E->parallel.nproc; i++)
-			if(i > j)
-			{
-				E->parallel.mst[j][i][1] = E->parallel.mst[i][j][2];
-				E->parallel.mst[j][i][2] = E->parallel.mst[i][j][1];
-			}
-
-
 	/* for overlapping domain, good for e by e assemble */
 
 	/* z direction first */
@@ -995,14 +979,14 @@
 		idb++;
 		S[k][0] = E->parallel.traces_transfer_number[k];
 		S[k][1] = E->parallel.me;
-		MPI_Isend(S[k], 2, MPI_INT, target_proc, E->parallel.mst1[E->parallel.me][target_proc], MPI_COMM_WORLD, &request[idb - 1]);
+		MPI_Isend(S[k], 2, MPI_INT, target_proc, 1, MPI_COMM_WORLD, &request[idb - 1]);
 	}							/* for k */
 
 	for(k = 1; k <= E->parallel.no_neighbors; k++)
 	{
 		target_proc = E->parallel.neighbors[k];
 		idb++;
-		MPI_Irecv(R[k], 2, MPI_INT, target_proc, E->parallel.mst1[E->parallel.me][target_proc], MPI_COMM_WORLD, &request[idb - 1]);
+		MPI_Irecv(R[k], 2, MPI_INT, target_proc, 1, MPI_COMM_WORLD, &request[idb - 1]);
 	}							/* for k */
 
 	MPI_Waitall(idb, request, status);
@@ -1040,12 +1024,12 @@
 			target_proc = E->parallel.neighbors[k];
 			idb++;
 			kk = E->parallel.traces_transfer_number[k] * 2 + 1;
-			MPI_Isend(E->PINS[k], kk, MPI_INT, target_proc, E->parallel.mst1[E->parallel.me][target_proc], MPI_COMM_WORLD, &request[idb - 1]);
+			MPI_Isend(E->PINS[k], kk, MPI_INT, target_proc, 1, MPI_COMM_WORLD, &request[idb - 1]);
 			idb++;
 			kk = E->parallel.traces_transfer_number[k] * 2 * E->mesh.nsd + 1;
-			MPI_Isend(E->PVV[k], kk, MPI_FLOAT, target_proc, E->parallel.mst2[E->parallel.me][target_proc], MPI_COMM_WORLD, &request[idb - 1]);
+			MPI_Isend(E->PVV[k], kk, MPI_FLOAT, target_proc, 2, MPI_COMM_WORLD, &request[idb - 1]);
 			idb++;
-			MPI_Isend(E->PXX[k], kk, MPI_DOUBLE, target_proc, E->parallel.mst3[E->parallel.me][target_proc], MPI_COMM_WORLD, &request[idb - 1]);
+			MPI_Isend(E->PXX[k], kk, MPI_DOUBLE, target_proc, 3, MPI_COMM_WORLD, &request[idb - 1]);
 		}
 	}							/* for k */
 
@@ -1056,12 +1040,12 @@
 			target_proc = E->parallel.neighbors[k];
 			idb++;
 			kk = E->parallel.traces_receive_number[k] * 2 + 1;
-			MPI_Irecv(E->RINS[k], kk, MPI_INT, target_proc, E->parallel.mst1[E->parallel.me][target_proc], MPI_COMM_WORLD, &request[idb - 1]);
+			MPI_Irecv(E->RINS[k], kk, MPI_INT, target_proc, 1, MPI_COMM_WORLD, &request[idb - 1]);
 			idb++;
 			kk = E->parallel.traces_receive_number[k] * 2 * E->mesh.nsd + 1;
-			MPI_Irecv(E->RVV[k], kk, MPI_FLOAT, target_proc, E->parallel.mst2[E->parallel.me][target_proc], MPI_COMM_WORLD, &request[idb - 1]);
+			MPI_Irecv(E->RVV[k], kk, MPI_FLOAT, target_proc, 2, MPI_COMM_WORLD, &request[idb - 1]);
 			idb++;
-			MPI_Irecv(E->RXX[k], kk, MPI_DOUBLE, target_proc, E->parallel.mst3[E->parallel.me][target_proc], MPI_COMM_WORLD, &request[idb - 1]);
+			MPI_Irecv(E->RXX[k], kk, MPI_DOUBLE, target_proc, 3, MPI_COMM_WORLD, &request[idb - 1]);
 		}
 	}							/* for k */
 
@@ -1122,7 +1106,7 @@
 				if(target_proc != E->parallel.me)
 				{
 					idb++;
-					MPI_Isend(S[k], E->parallel.NUM_NEQ[lev].pass[i][k], MPI_DOUBLE, target_proc, E->parallel.mst[E->parallel.me][target_proc][k], MPI_COMM_WORLD, &request[idb - 1]);
+					MPI_Isend(S[k], E->parallel.NUM_NEQ[lev].pass[i][k], MPI_DOUBLE, target_proc, 1, MPI_COMM_WORLD, &request[idb - 1]);
 				}
 			}					/* for k */
 
@@ -1134,7 +1118,7 @@
 				if(target_proc != E->parallel.me)
 				{
 					idb++;
-					MPI_Irecv(R[k], E->parallel.NUM_NEQ[lev].pass[i][k], MPI_DOUBLE, target_proc, E->parallel.mst[E->parallel.me][target_proc][k], MPI_COMM_WORLD, &request[idb - 1]);
+					MPI_Irecv(R[k], E->parallel.NUM_NEQ[lev].pass[i][k], MPI_DOUBLE, target_proc, 1, MPI_COMM_WORLD, &request[idb - 1]);
 				}
 			}					/* for k */
 
@@ -1210,7 +1194,7 @@
 				if(target_proc != E->parallel.me)
 				{
 					idb++;
-					MPI_Isend(S[k], E->parallel.NUM_NODE[lev].pass[i][k], MPI_FLOAT, target_proc, E->parallel.mst[E->parallel.me][target_proc][k], MPI_COMM_WORLD, &request[idb - 1]);
+					MPI_Isend(S[k], E->parallel.NUM_NODE[lev].pass[i][k], MPI_FLOAT, target_proc, 1, MPI_COMM_WORLD, &request[idb - 1]);
 				}
 			}					/* for k */
 
@@ -1222,7 +1206,7 @@
 				if(target_proc != E->parallel.me)
 				{
 					idb++;
-					MPI_Irecv(R[k], E->parallel.NUM_NODE[lev].pass[i][k], MPI_FLOAT, target_proc, E->parallel.mst[E->parallel.me][target_proc][k], MPI_COMM_WORLD, &request[idb - 1]);
+					MPI_Irecv(R[k], E->parallel.NUM_NODE[lev].pass[i][k], MPI_FLOAT, target_proc, 1, MPI_COMM_WORLD, &request[idb - 1]);
 				}
 			}					/* for k */
 

Modified: mc/3D/CitcomCU/trunk/src/global_defs.h
===================================================================
--- mc/3D/CitcomCU/trunk/src/global_defs.h	2008-05-02 23:36:19 UTC (rev 11903)
+++ mc/3D/CitcomCU/trunk/src/global_defs.h	2008-05-02 23:39:49 UTC (rev 11904)
@@ -467,11 +467,6 @@
 	int me_loc[4];
 	int num_b;
 
-	int mst[100][100][3];
-	int mst1[100][100];
-	int mst2[100][100];
-	int mst3[100][100];
-
 	int *IDD[MAX_LEVELS];
 	int *ELE_ORDER[MAX_LEVELS];
 	int *NODE_ORDER[MAX_LEVELS];


