[cig-commits] [commit] rajesh-petsc-schur: Changed the shape of E->parallel.TNUM_PASS as part of caps_per_proc_removal (24bbe09)

cig_noreply at geodynamics.org
Wed Sep 24 15:23:07 PDT 2014


Repository : https://github.com/geodynamics/citcoms

On branch  : rajesh-petsc-schur
Link       : https://github.com/geodynamics/citcoms/compare/980de677d7efb78fdd5c41fd4b35a9283a2d251f...b86b2dd00f2bd13fb282a2eb9169871be4b1e923

>---------------------------------------------------------------

commit 24bbe090b49e493fd85c32abf0420b9edd303f8f
Author: Rajesh Kommu <rajesh.kommu at gmail.com>
Date:   Wed Sep 24 09:26:47 2014 -0700

    Changed the shape of E->parallel.TNUM_PASS as part of caps_per_proc_removal


>---------------------------------------------------------------

24bbe090b49e493fd85c32abf0420b9edd303f8f
 lib/Full_parallel_related.c     | 52 ++++++++++++++++++++---------------------
 lib/Full_tracer_advection.c     |  4 ++--
 lib/Regional_parallel_related.c | 24 +++++++++----------
 lib/Tracer_setup.c              |  2 +-
 lib/global_defs.h               |  2 +-
 5 files changed, 42 insertions(+), 42 deletions(-)
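For context on the caps_per_proc_removal series: the diff below drops the per-cap
dimension (indexed by the CPPR macro) from E->parallel.TNUM_PASS, leaving a
one-dimensional array indexed by multigrid level only; the series appears to
assume a single cap per processor, so the cap index carries no information.
Below is a minimal sketch of the before/after shape and access pattern --
illustration only, not repository code; the struct names, constant values, and
loop body are assumptions for this example:

    /* Sketch of the shape change (hypothetical names/values). */
    #define MAX_LEVELS 12
    #define NCS        1     /* caps per processor, assumed 1 here */

    struct parallel_before { int TNUM_PASS[MAX_LEVELS][NCS]; };
    struct parallel_after  { int TNUM_PASS[MAX_LEVELS];      };

    /* Call sites drop the [CPPR] cap index, as in the hunks below: */
    void count_passes(struct parallel_after *p, int lev)
    {
        for (int k = 1; k <= p->TNUM_PASS[lev]; k++) {
            /* exchange data with neighbor processor k ... */
        }
    }
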

diff --git a/lib/Full_parallel_related.c b/lib/Full_parallel_related.c
index 511c088..ca2445d 100644
--- a/lib/Full_parallel_related.c
+++ b/lib/Full_parallel_related.c
@@ -653,7 +653,7 @@ void full_parallel_communication_routs_v(E)
 	}
 
 
-      E->parallel.TNUM_PASS[lev][CPPR] = npass;
+      E->parallel.TNUM_PASS[lev] = npass;
 
   }   /* end for lev  */
 
@@ -703,8 +703,8 @@ void full_parallel_communication_routs_v(E)
   if(E->control.verbose) {
     for(lev=E->mesh.gridmax;lev>=E->mesh.gridmin;lev--) {
       fprintf(E->fp_out,"output_communication route surface for lev=%d \n",lev);
-	fprintf(E->fp_out,"  me= %d cap=%d pass  %d \n",E->parallel.me,E->sphere.capid[CPPR],E->parallel.TNUM_PASS[lev][CPPR]);
-	for (k=1;k<=E->parallel.TNUM_PASS[lev][CPPR];k++)   {
+	fprintf(E->fp_out,"  me= %d cap=%d pass  %d \n",E->parallel.me,E->sphere.capid[CPPR],E->parallel.TNUM_PASS[lev]);
+	for (k=1;k<=E->parallel.TNUM_PASS[lev];k++)   {
 	  fprintf(E->fp_out,"proc %d and pass  %d to proc %d with %d eqn and %d node\n",E->parallel.me,k,E->parallel.PROCESSOR[lev][CPPR].pass[k],E->parallel.NUM_NEQ[lev][CPPR].pass[k],E->parallel.NUM_NODE[lev][CPPR].pass[k]);
 	  fprintf(E->fp_out,"Eqn:\n");  
 	  for (ii=1;ii<=E->parallel.NUM_NEQ[lev][CPPR].pass[k];ii++)  
@@ -759,7 +759,7 @@ void full_parallel_communication_routs_s(E)
 
       j = E->sphere.capid[CPPR];
 
-      for (kkk=1;kkk<=E->parallel.TNUM_PASS[lev][CPPR];kkk++) {
+      for (kkk=1;kkk<=E->parallel.TNUM_PASS[lev];kkk++) {
         if (kkk<=4) {  /* first 4 communications are for XZ and YZ planes */
           ii = kkk;
           E->parallel.NUM_sNODE[lev][CPPR].pass[kkk] =
@@ -788,8 +788,8 @@ void full_parallel_communication_routs_s(E)
   if(E->control.verbose) {
     for(lev=E->mesh.gridmax;lev>=E->mesh.gridmin;lev--) {
       fprintf(E->fp_out,"output_communication route surface for lev=%d \n",lev);
-	fprintf(E->fp_out,"  me= %d cap=%d pass  %d \n",E->parallel.me,E->sphere.capid[CPPR],E->parallel.TNUM_PASS[lev][CPPR]);
-	for (k=1;k<=E->parallel.TNUM_PASS[lev][CPPR];k++) {
+	fprintf(E->fp_out,"  me= %d cap=%d pass  %d \n",E->parallel.me,E->sphere.capid[CPPR],E->parallel.TNUM_PASS[lev]);
+	for (k=1;k<=E->parallel.TNUM_PASS[lev];k++) {
 	  fprintf(E->fp_out,"proc %d and pass  %d to proc %d with %d node\n",E->parallel.me,k,E->parallel.PROCESSOR[lev][CPPR].pass[k],E->parallel.NUM_sNODE[lev][CPPR].pass[k]);
 	  fprintf(E->fp_out,"Node:\n");
 	  for (ii=1;ii<=E->parallel.NUM_sNODE[lev][CPPR].pass[k];ii++)
@@ -865,7 +865,7 @@ void full_exchange_id_d(E, U, lev)
  MPI_Status status1;
  MPI_Request request[100];
 
-   for (k=1;k<=E->parallel.TNUM_PASS[lev][CPPR];k++)  {
+   for (k=1;k<=E->parallel.TNUM_PASS[lev];k++)  {
      sizeofk = (1+E->parallel.NUM_NEQ[lev][CPPR].pass[k])*sizeof(double);
      S[k]=(double *)malloc( sizeofk );
      R[k]=(double *)malloc( sizeofk );
@@ -880,7 +880,7 @@ void full_exchange_id_d(E, U, lev)
  SV=(double *)malloc( sizeofk );
 
   idb=0;
-    for (k=1;k<=E->parallel.TNUM_PASS[lev][CPPR];k++)     {
+    for (k=1;k<=E->parallel.TNUM_PASS[lev];k++)     {
 
       for (j=1;j<=E->parallel.NUM_NEQ[lev][CPPR].pass[k];j++) {
         S[k][j-1] = U[ E->parallel.EXCHANGE_ID[lev][CPPR][j].pass[k] ];
@@ -895,7 +895,7 @@ void full_exchange_id_d(E, U, lev)
       }
     }           /* for k */
 
-    for (k=1;k<=E->parallel.TNUM_PASS[lev][CPPR];k++)   {
+    for (k=1;k<=E->parallel.TNUM_PASS[lev];k++)   {
 
       if (E->parallel.PROCESSOR[lev][CPPR].pass[k] != E->parallel.me &&
 	  E->parallel.PROCESSOR[lev][CPPR].pass[k] != -1) {
@@ -912,7 +912,7 @@ void full_exchange_id_d(E, U, lev)
 
   MPI_Waitall(idb,request,status);
 
-    for (k=1;k<=E->parallel.TNUM_PASS[lev][CPPR];k++)   {
+    for (k=1;k<=E->parallel.TNUM_PASS[lev];k++)   {
 
       if (E->parallel.PROCESSOR[lev][CPPR].pass[k] != E->parallel.me &&
 	  E->parallel.PROCESSOR[lev][CPPR].pass[k] != -1) {
@@ -941,7 +941,7 @@ void full_exchange_id_d(E, U, lev)
         U[ E->parallel.EXCHANGE_ID[lev][CPPR][j].pass[kk] ] += RV[jj++];
   }
 
-   for (k=1;k<=E->parallel.TNUM_PASS[lev][CPPR];k++)  {
+   for (k=1;k<=E->parallel.TNUM_PASS[lev];k++)  {
      free((void*) S[k]);
      free((void*) R[k]);
    }
@@ -968,7 +968,7 @@ static void exchange_node_d(E, U, lev)
  MPI_Request request[100];
 
  kk=0;
-   for (k=1;k<=E->parallel.TNUM_PASS[lev][CPPR];k++)  {
+   for (k=1;k<=E->parallel.TNUM_PASS[lev];k++)  {
      ++kk;
      sizeofk = (1+E->parallel.NUM_NODE[lev][CPPR].pass[k])*sizeof(double);
      S[kk]=(double *)malloc( sizeofk );
@@ -985,7 +985,7 @@ static void exchange_node_d(E, U, lev)
  SV=(double *)malloc( idb );
 
   idb=0;
-    for (k=1;k<=E->parallel.TNUM_PASS[lev][CPPR];k++)     {
+    for (k=1;k<=E->parallel.TNUM_PASS[lev];k++)     {
       kk=k;
 
       for (j=1;j<=E->parallel.NUM_NODE[lev][CPPR].pass[k];j++)
@@ -1000,7 +1000,7 @@ static void exchange_node_d(E, U, lev)
          }
       }           /* for k */
 
-    for (k=1;k<=E->parallel.TNUM_PASS[lev][CPPR];k++)   {
+    for (k=1;k<=E->parallel.TNUM_PASS[lev];k++)   {
       kk=k;
 
       if (E->parallel.PROCESSOR[lev][CPPR].pass[k]!=E->parallel.me)  {
@@ -1020,7 +1020,7 @@ static void exchange_node_d(E, U, lev)
 
   MPI_Waitall(idb,request,status);
 
-    for (k=1;k<=E->parallel.TNUM_PASS[lev][CPPR];k++)   {
+    for (k=1;k<=E->parallel.TNUM_PASS[lev];k++)   {
       kk=k;
 
       if (E->parallel.PROCESSOR[lev][CPPR].pass[k]!=E->parallel.me)
@@ -1050,7 +1050,7 @@ static void exchange_node_d(E, U, lev)
     }
 
   kk = 0;
-   for (k=1;k<=E->parallel.TNUM_PASS[lev][CPPR];k++)  {
+   for (k=1;k<=E->parallel.TNUM_PASS[lev];k++)  {
      kk++;
      free((void*) S[kk]);
      free((void*) R[kk]);
@@ -1079,7 +1079,7 @@ static void exchange_node_f(E, U, lev)
  MPI_Request request[100];
 
  kk=0;
-   for (k=1;k<=E->parallel.TNUM_PASS[lev][CPPR];k++)  {
+   for (k=1;k<=E->parallel.TNUM_PASS[lev];k++)  {
      ++kk;
      sizeofk = (1+E->parallel.NUM_NODE[lev][CPPR].pass[k])*sizeof(float);
      S[kk]=(float *)malloc( sizeofk );
@@ -1096,7 +1096,7 @@ static void exchange_node_f(E, U, lev)
  SV=(float *)malloc( idb );
 
   idb=0;
-    for (k=1;k<=E->parallel.TNUM_PASS[lev][CPPR];k++)     {
+    for (k=1;k<=E->parallel.TNUM_PASS[lev];k++)     {
       kk=k;
 
       for (j=1;j<=E->parallel.NUM_NODE[lev][CPPR].pass[k];j++)
@@ -1111,7 +1111,7 @@ static void exchange_node_f(E, U, lev)
          }
       }           /* for k */
 
-    for (k=1;k<=E->parallel.TNUM_PASS[lev][CPPR];k++)   {
+    for (k=1;k<=E->parallel.TNUM_PASS[lev];k++)   {
       kk=k;
 
       if (E->parallel.PROCESSOR[lev][CPPR].pass[k]!=E->parallel.me)  {
@@ -1131,7 +1131,7 @@ static void exchange_node_f(E, U, lev)
 
   MPI_Waitall(idb,request,status);
 
-    for (k=1;k<=E->parallel.TNUM_PASS[lev][CPPR];k++)   {
+    for (k=1;k<=E->parallel.TNUM_PASS[lev];k++)   {
       kk=k;
 
       if (E->parallel.PROCESSOR[lev][CPPR].pass[k]!=E->parallel.me)
@@ -1161,7 +1161,7 @@ static void exchange_node_f(E, U, lev)
     }
 
   kk = 0;
-   for (k=1;k<=E->parallel.TNUM_PASS[lev][CPPR];k++)  {
+   for (k=1;k<=E->parallel.TNUM_PASS[lev];k++)  {
      kk++;
      free((void*) S[kk]);
      free((void*) R[kk]);
@@ -1183,7 +1183,7 @@ void full_exchange_snode_f(struct All_variables *E, float *U1, float *U2, int le
  MPI_Request request[100];
 
    kk=0;
-     for (k=1;k<=E->parallel.TNUM_PASS[E->mesh.levmax][CPPR];k++)  {
+     for (k=1;k<=E->parallel.TNUM_PASS[E->mesh.levmax];k++)  {
        ++kk;
        sizeofk = (1+2*E->parallel.NUM_sNODE[E->mesh.levmax][CPPR].pass[k])*sizeof(float);
        S[kk]=(float *)malloc( sizeofk );
@@ -1192,7 +1192,7 @@ void full_exchange_snode_f(struct All_variables *E, float *U1, float *U2, int le
 
   idb=0;
   /* sending */
-    for (k=1;k<=E->parallel.TNUM_PASS[lev][CPPR];k++)     {
+    for (k=1;k<=E->parallel.TNUM_PASS[lev];k++)     {
       kk=k;
 
       /* pack */
@@ -1212,7 +1212,7 @@ void full_exchange_snode_f(struct All_variables *E, float *U1, float *U2, int le
       }           /* for k */
 
   /* receiving */
-    for (k=1;k<=E->parallel.TNUM_PASS[lev][CPPR];k++)   {
+    for (k=1;k<=E->parallel.TNUM_PASS[lev];k++)   {
       kk=k;
 
       if (E->parallel.PROCESSOR[lev][CPPR].pass[k]!=E->parallel.me)  {
@@ -1236,7 +1236,7 @@ void full_exchange_snode_f(struct All_variables *E, float *U1, float *U2, int le
 
   MPI_Waitall(idb,request,status);
 
-    for (k=1;k<=E->parallel.TNUM_PASS[lev][CPPR];k++)   {
+    for (k=1;k<=E->parallel.TNUM_PASS[lev];k++)   {
       kk=k;
 
       /* unpack */
@@ -1251,7 +1251,7 @@ void full_exchange_snode_f(struct All_variables *E, float *U1, float *U2, int le
       }
 
   kk=0;
-    for (k=1;k<=E->parallel.TNUM_PASS[E->mesh.levmax][CPPR];k++)  {
+    for (k=1;k<=E->parallel.TNUM_PASS[E->mesh.levmax];k++)  {
       ++kk;
       free((void*) S[kk]);
       free((void*) R[kk]);
diff --git a/lib/Full_tracer_advection.c b/lib/Full_tracer_advection.c
index 3400db0..f5a41d4 100644
--- a/lib/Full_tracer_advection.c
+++ b/lib/Full_tracer_advection.c
@@ -295,7 +295,7 @@ void full_lost_souls(struct All_variables *E)
 
     int number_of_caps=12;
     int lev=E->mesh.levmax;
-    int num_ngb = E->parallel.TNUM_PASS[lev][CPPR];
+    int num_ngb = E->parallel.TNUM_PASS[lev];
 
     /* Note, if for some reason, the number of neighbors exceeds */
     /* 50, which is unlikely, the MPI arrays must be increased.  */
@@ -805,7 +805,7 @@ static void full_put_lost_tracers(struct All_variables *E,
 
         /* check neighboring caps */
 
-        for (pp=1;pp<=E->parallel.TNUM_PASS[lev][CPPR];pp++) {
+        for (pp=1;pp<=E->parallel.TNUM_PASS[lev];pp++) {
             ithatcap=pp;
             icheck=full_icheck_cap(E,ithatcap,x,y,z,rad);
             if (icheck==1) goto foundit;
diff --git a/lib/Regional_parallel_related.c b/lib/Regional_parallel_related.c
index af1bcd8..82a87d1 100644
--- a/lib/Regional_parallel_related.c
+++ b/lib/Regional_parallel_related.c
@@ -567,7 +567,7 @@ void regional_parallel_communication_routs_v(E)
             }     /* end for j */
 
 
-      E->parallel.TNUM_PASS[lev][CPPR] = kkk;
+      E->parallel.TNUM_PASS[lev] = kkk;
 
 
 
@@ -576,8 +576,8 @@ void regional_parallel_communication_routs_v(E)
   if(E->control.verbose) {
     for(lev=E->mesh.gridmax;lev>=E->mesh.gridmin;lev--) {
       fprintf(E->fp_out,"output_communication route surface for lev=%d \n",lev);
-    fprintf(E->fp_out,"  me= %d cap=%d pass  %d \n",E->parallel.me,E->sphere.capid[CPPR],E->parallel.TNUM_PASS[lev][CPPR]);
-    for (k=1;k<=E->parallel.TNUM_PASS[lev][CPPR];k++)   {
+    fprintf(E->fp_out,"  me= %d cap=%d pass  %d \n",E->parallel.me,E->sphere.capid[CPPR],E->parallel.TNUM_PASS[lev]);
+    for (k=1;k<=E->parallel.TNUM_PASS[lev];k++)   {
       fprintf(E->fp_out,"proc %d and pass  %d to proc %d with %d eqn and %d node\n",E->parallel.me,k,E->parallel.PROCESSOR[lev][CPPR].pass[k],E->parallel.NUM_NEQ[lev][CPPR].pass[k],E->parallel.NUM_NODE[lev][CPPR].pass[k]);
 /*    fprintf(E->fp_out,"Eqn:\n");  */
 /*    for (ii=1;ii<=E->parallel.NUM_NEQ[lev][m].pass[k];ii++)  */
@@ -709,13 +709,13 @@ void regional_exchange_id_d(E, U, lev)
 
  MPI_Status status;
 
- for (k=1;k<=E->parallel.TNUM_PASS[lev][CPPR];k++)  {
+ for (k=1;k<=E->parallel.TNUM_PASS[lev];k++)  {
    sizeofk = (1+E->parallel.NUM_NEQ[lev][CPPR].pass[k])*sizeof(double);
    S[k]=(double *)malloc( sizeofk );
    R[k]=(double *)malloc( sizeofk );
  }
 
-   for (k=1;k<=E->parallel.TNUM_PASS[lev][CPPR];k++)  {
+   for (k=1;k<=E->parallel.TNUM_PASS[lev];k++)  {
 
      for (j=1;j<=E->parallel.NUM_NEQ[lev][CPPR].pass[k];j++)
        S[k][j-1] = U[ E->parallel.EXCHANGE_ID[lev][CPPR][j].pass[k] ];
@@ -731,7 +731,7 @@ void regional_exchange_id_d(E, U, lev)
 
    }           /* for k */
 
- for (k=1;k<=E->parallel.TNUM_PASS[lev][CPPR];k++)  {
+ for (k=1;k<=E->parallel.TNUM_PASS[lev];k++)  {
    free((void*) S[k]);
    free((void*) R[k]);
  }
@@ -752,13 +752,13 @@ static void exchange_node_d(E, U, lev)
 
  MPI_Status status;
 
- for (k=1;k<=E->parallel.TNUM_PASS[lev][CPPR];k++)  {
+ for (k=1;k<=E->parallel.TNUM_PASS[lev];k++)  {
    sizeofk = (1+E->parallel.NUM_NODE[lev][CPPR].pass[k])*sizeof(double);
    S[k]=(double *)malloc( sizeofk );
    R[k]=(double *)malloc( sizeofk );
  }   /* end for k */
 
-   for (k=1;k<=E->parallel.TNUM_PASS[lev][CPPR];k++)   {
+   for (k=1;k<=E->parallel.TNUM_PASS[lev];k++)   {
 
      for (j=1;j<=E->parallel.NUM_NODE[lev][CPPR].pass[k];j++)
        S[k][j-1] = U[ E->parallel.EXCHANGE_NODE[lev][CPPR][j].pass[k] ];
@@ -773,7 +773,7 @@ static void exchange_node_d(E, U, lev)
        U[ E->parallel.EXCHANGE_NODE[lev][CPPR][j].pass[k] ] += R[k][j-1];
    }
 
- for (k=1;k<=E->parallel.TNUM_PASS[lev][CPPR];k++)  {
+ for (k=1;k<=E->parallel.TNUM_PASS[lev];k++)  {
    free((void*) S[k]);
    free((void*) R[k]);
  }
@@ -794,14 +794,14 @@ static void exchange_node_f(E, U, lev)
 
  MPI_Status status;
 
- for (k=1;k<=E->parallel.TNUM_PASS[lev][CPPR];k++)  {
+ for (k=1;k<=E->parallel.TNUM_PASS[lev];k++)  {
    sizeofk = (1+E->parallel.NUM_NODE[lev][CPPR].pass[k])*sizeof(float);
    S[k]=(float *)malloc( sizeofk );
    R[k]=(float *)malloc( sizeofk );
  }   /* end for k */
 
 
- for (k=1;k<=E->parallel.TNUM_PASS[lev][CPPR];k++)   {
+ for (k=1;k<=E->parallel.TNUM_PASS[lev];k++)   {
 
    for (j=1;j<=E->parallel.NUM_NODE[lev][CPPR].pass[k];j++)
      S[k][j-1] = U[ E->parallel.EXCHANGE_NODE[lev][CPPR][j].pass[k] ];
@@ -816,7 +816,7 @@ static void exchange_node_f(E, U, lev)
      U[ E->parallel.EXCHANGE_NODE[lev][CPPR][j].pass[k] ] += R[k][j-1];
  }
 
- for (k=1;k<=E->parallel.TNUM_PASS[lev][CPPR];k++)  {
+ for (k=1;k<=E->parallel.TNUM_PASS[lev];k++)  {
    free((void*) S[k]);
    free((void*) R[k]);
  }
diff --git a/lib/Tracer_setup.c b/lib/Tracer_setup.c
index 0c91c18..20891c0 100644
--- a/lib/Tracer_setup.c
+++ b/lib/Tracer_setup.c
@@ -1289,7 +1289,7 @@ void get_neighboring_caps(struct All_variables *E)
         }
 
         idb = 0;
-        num_ngb = E->parallel.TNUM_PASS[lev][CPPR];
+        num_ngb = E->parallel.TNUM_PASS[lev];
         for (kk=1; kk<=num_ngb; kk++) {
             neighbor_proc = E->parallel.PROCESSOR[lev][CPPR].pass[kk];
 
diff --git a/lib/global_defs.h b/lib/global_defs.h
index ed874b6..8beeea8 100644
--- a/lib/global_defs.h
+++ b/lib/global_defs.h
@@ -235,7 +235,7 @@ struct Parallel {
     int Skip_neq[MAX_LEVELS];
     int *Skip_id[MAX_LEVELS];
 
-    int TNUM_PASS[MAX_LEVELS][NCS];
+    int TNUM_PASS[MAX_LEVELS];
     struct BOUND *NODE[MAX_LEVELS][NCS];
     struct BOUND NUM_NNO[MAX_LEVELS][NCS];
     struct BOUND NUM_PASS[MAX_LEVELS][NCS];


