[cig-commits] [commit] rajesh-petsc, rajesh-petsc-schur: Added the petsc_citcoms.h and Petsc_citcoms.c files, modified lib/Makefile.am, fixed some minor bugs (e0055e4)

cig_noreply at geodynamics.org
Wed Nov 5 19:02:18 PST 2014


Repository : https://github.com/geodynamics/citcoms

On branches: rajesh-petsc,rajesh-petsc-schur
Link       : https://github.com/geodynamics/citcoms/compare/464e1b32299b15819f93efd98d969cddb84dfe51...f97ae655a50bdbd6dac1923a3471ee4dae178fbd

>---------------------------------------------------------------

commit e0055e476ee36ab3f178843d458d0e3a9e96a363
Author: Rajesh Kommu <rajesh.kommu at gmail.com>
Date:   Wed Aug 20 12:28:05 2014 -0700

    Added the petsc_citcoms.h and Petsc_citcoms.c files, modified lib/Makefile.am, fixed some minor bugs


>---------------------------------------------------------------

e0055e476ee36ab3f178843d458d0e3a9e96a363
 lib/Instructions.c  |   6 +-
 lib/Makefile.am     |   2 +
 lib/Parallel_util.c |   4 +-
 lib/Petsc_citcoms.c | 391 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 lib/global_defs.h   |   7 +
 lib/petsc_citcoms.h |  94 +++++++++++++
 6 files changed, 499 insertions(+), 5 deletions(-)

diff --git a/lib/Instructions.c b/lib/Instructions.c
index 99b60a4..2879584 100644
--- a/lib/Instructions.c
+++ b/lib/Instructions.c
@@ -787,9 +787,9 @@ void read_initial_settings(struct All_variables *E)
   (E->problem_settings)(E);
 
   /* PETSc related flags */
-  input_boolean("use_petsc",&E->data.use_petsc,"on",m);
-  input_boolean("petsc_linear",&E->data.petsc_linear,"on",m);
-  input_boolean("petsc_nonlinear",&E->data.petsc_nonlinear,"off",m);
+  input_boolean("use_petsc",&E->control.use_petsc,"on",m);
+  input_boolean("petsc_linear",&E->control.petsc_linear,"on",m);
+  input_boolean("petsc_nonlinear",&E->control.petsc_nonlinear,"off",m);
 
   check_settings_consistency(E);
   return;
diff --git a/lib/Makefile.am b/lib/Makefile.am
index 59c8613..4931bdd 100644
--- a/lib/Makefile.am
+++ b/lib/Makefile.am
@@ -114,6 +114,8 @@ sources = \
 	Parallel_util.c \
 	Parsing.c \
 	parsing.h \
+	Petsc_citcoms.c \
+	petsc_citcoms.h \
 	Phase_change.c \
 	phase_change.h \
 	Problem_related.c \
diff --git a/lib/Parallel_util.c b/lib/Parallel_util.c
index 61ab507..182ad4b 100644
--- a/lib/Parallel_util.c
+++ b/lib/Parallel_util.c
@@ -36,7 +36,7 @@
 
 PetscErrorCode parallel_process_finalize()
 {
-  return Petsc_Finalize();
+  return PetscFinalize();
 }
 
 /* ============================================ */
@@ -45,7 +45,7 @@ PetscErrorCode parallel_process_finalize()
 void parallel_process_termination()
 {
 
-  Petsc_Finalize();
+  PetscFinalize();
   exit(8);
 }
 
diff --git a/lib/Petsc_citcoms.c b/lib/Petsc_citcoms.c
new file mode 100644
index 0000000..8ad1e2f
--- /dev/null
+++ b/lib/Petsc_citcoms.c
@@ -0,0 +1,391 @@
+#include "global_defs.h"
+#include "element_definitions.h"
+#include "petsc_citcoms.h"
+
+
+/* return ||V||^2 */
+double global_v_norm2_PETSc( struct All_variables *E,  Vec v )
+{
+    int i, m, d;
+    int eqn1, eqn2, eqn3;
+    double prod = 0.0, temp = 0.0;
+    PetscErrorCode ierr;
+
+    PetscScalar *V;
+    ierr = VecGetArray( v, &V ); CHKERRQ( ierr );
+    for (m=1; m<=E->sphere.caps_per_proc; m++)
+        for (i=1; i<=E->lmesh.nno; i++) {
+            eqn1 = E->id[m][i].doff[1];
+            eqn2 = E->id[m][i].doff[2];
+            eqn3 = E->id[m][i].doff[3];
+            /* L2 norm  */
+            temp += (V[eqn1] * V[eqn1] +
+                     V[eqn2] * V[eqn2] +
+                     V[eqn3] * V[eqn3]) * E->NMass[m][i];
+        }
+    ierr = VecRestoreArray( v, &V ); CHKERRQ( ierr );
+
+    MPI_Allreduce(&temp, &prod, 1, MPI_DOUBLE, MPI_SUM, E->parallel.world);
+
+    return (prod/E->mesh.volume);
+}
+
+
+/* return ||P||^2 */
+double global_p_norm2_PETSc( struct All_variables *E,  Vec p )
+{
+    int i, m;
+    double prod = 0.0, temp = 0.0;
+    PetscErrorCode ierr;
+
+    PetscScalar *P;
+    ierr = VecGetArray( p, &P ); CHKERRQ( ierr );
+
+    for (m=1; m<=E->sphere.caps_per_proc; m++)
+        for (i=1; i<=E->lmesh.npno; i++) {
+            /* L2 norm */
+            temp += P[i] * P[i] * E->eco[m][i].area;
+        }
+    ierr = VecRestoreArray( p, &P ); CHKERRQ( ierr );
+
+    MPI_Allreduce(&temp, &prod, 1, MPI_DOUBLE, MPI_SUM, E->parallel.world);
+
+    return (prod/E->mesh.volume);
+}
+
+/* return ||A||^2, where A_i is \int{div(u) d\Omega_i} */
+double global_div_norm2_PETSc( struct All_variables *E,  Vec a )
+{
+    int i, m;
+    double prod = 0.0, temp = 0.0;
+    PetscErrorCode ierr;
+
+    PetscScalar *A;
+    ierr = VecGetArray( a, &A ); CHKERRQ( ierr );
+
+
+    for (m=1; m<=E->sphere.caps_per_proc; m++)
+        for (i=1; i<=E->lmesh.npno; i++) {
+            /* L2 norm of div(u) */
+            temp += A[i] * A[i] / E->eco[m][i].area;
+
+            /* L1 norm */
+            /*temp += fabs(A[i]);*/
+        }
+    ierr = VecRestoreArray( a, &A ); CHKERRQ( ierr );
+
+    MPI_Allreduce(&temp, &prod, 1, MPI_DOUBLE, MPI_SUM, E->parallel.world);
+
+    return (prod/E->mesh.volume);
+}
+
+/* =====================================================
+   Assemble grad(rho_ref*ez)*V element by element.
+   Note that the storage is not zeroed before assembling.
+   =====================================================  */
+
+PetscErrorCode assemble_c_u_PETSc( struct All_variables *E,
+                         Vec U, Vec result, int level )
+//                  double **U, double **result, int level)
+{
+    int e,j1,j2,j3,p,a,b,m;
+
+    const int nel = E->lmesh.NEL[level];
+    const int ends = enodes[E->mesh.nsd];
+    const int dims = E->mesh.nsd;
+    const int npno = E->lmesh.NPNO[level];
+
+    PetscErrorCode ierr;
+    PetscScalar *U_temp, *result_temp;
+
+    ierr = VecGetArray( U, &U_temp ); CHKERRQ( ierr );
+    ierr = VecGetArray( result, &result_temp ); CHKERRQ( ierr );
+
+  for( m = 1; m <= E->sphere.caps_per_proc; m++ ) {
+    for(a=1;a<=ends;a++) {
+      p = (a-1)*dims;
+      for(e=1;e<=nel;e++) {
+        b = E->IEN[level][m][e].node[a];
+        j1= E->ID[level][m][b].doff[1];
+        j2= E->ID[level][m][b].doff[2];
+        j3= E->ID[level][m][b].doff[3];
+
+        result_temp[e]  += E->elt_c[level][m][e].c[p  ][0] * U_temp[j1]
+                         + E->elt_c[level][m][e].c[p+1][0] * U_temp[j2]
+                         + E->elt_c[level][m][e].c[p+2][0] * U_temp[j3];
+      }
+    }
+  }
+  ierr = VecRestoreArray( U, &U_temp ); CHKERRQ( ierr );
+  ierr = VecRestoreArray( result, &result_temp ); CHKERRQ( ierr );
+
+  PetscFunctionReturn(0);
+}
+
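+/* Zero the entries of the residual that correspond to equations with
+   prescribed boundary values (the E->zero_resid list at this level). */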
+void strip_bcs_from_residual_PETSc( 
+    struct All_variables *E, Vec Res, int level )
+{
+  int i, m, low, high;
+  VecGetOwnershipRange( Res, &low, &high );
+ 
+  for( m = 1; m <= E->sphere.caps_per_proc; m++ ) {
+    if( E->num_zero_resid[level][m] ) {
+      for( i = 1; i <= E->num_zero_resid[level][m]; i++ ) {
+	      VecSetValue( Res, E->zero_resid[level][m][i]+low, 0.0, INSERT_VALUES );
+      }
+    }
+  }
+  VecAssemblyBegin( Res );
+  VecAssemblyEnd( Res );
+}
+
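+/* Compute an initial velocity update: form F = F - grad(P) - K*V,
+   solve K*u1 = F with the attached KSP (E->ksp), and add u1 to V. */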
+PetscErrorCode initial_vel_residual_PETSc( struct All_variables *E,
+                                 Vec V, Vec P, Vec F,
+                                 double acc )
+{
+    int neq = E->lmesh.neq;
+    int lev = E->mesh.levmax;
+    int npnp = E->lmesh.npno;
+    int i, m, valid;
+    PetscErrorCode ierr;
+
+    Vec u1;
+    ierr = VecCreateMPI( PETSC_COMM_WORLD, neq+1, PETSC_DECIDE, &u1 ); 
+    CHKERRQ( ierr );
+
+    /* F = F - grad(P) - K*V */
+    // u1 = grad(P) i.e. G*P
+    ierr = MatMult( E->G, P, u1 ); CHKERRQ( ierr );
+    // F = F - u1
+    ierr = VecAXPY( F, -1.0, u1 ); CHKERRQ( ierr ); 
+    // u1 = del2(V) i.e. K*V
+    ierr = MatMult( E->K, V, u1 ); CHKERRQ( ierr );
+    // F = F - u1
+    ierr = VecAXPY( F, -1.0, u1 ); CHKERRQ( ierr ); 
+
+    strip_bcs_from_residual_PETSc(E, F, lev);
+
+    /* solve K*u1 = F for u1 */
+    //ierr = KSPSetTolerances( ... );
+    ierr = KSPSolve( E->ksp, F, u1 ); CHKERRQ( ierr );
+
+    strip_bcs_from_residual_PETSc(E, u1, lev);
+
+    /* V = V + u1 */
+    ierr = VecAXPY( V, 1.0, u1 ); CHKERRQ( ierr );
+
+    /* release the temporary work vector */
+    ierr = VecDestroy( &u1 ); CHKERRQ( ierr );
+
+    PetscFunctionReturn(0);
+}
+
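+/* PCShell apply callback: copy the PETSc Vec x into CitcomS cap-wise
+   storage, run multi_grid() cycles until the residual drops below
+   ctx->acc or max_vel_iterations is reached, then copy the solution
+   back into the PETSc Vec y. */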
+PetscErrorCode PC_Apply_MultiGrid( PC pc, Vec x, Vec y )
+{
+
+  PetscErrorCode ierr;
+  struct MultiGrid_PC *ctx;
+  ierr = PCShellGetContext( pc, (void **)&ctx ); CHKERRQ( ierr );
+  int count, valid;
+  double residual;
+  int m, i;
+
+
+  int low, high;
+  VecGetOwnershipRange( x, &low, &high );
+
+  VecAssemblyBegin(x);
+  VecAssemblyEnd(x);
+  for( m=1; m<=ctx->E->sphere.caps_per_proc; ++m ) {
+    for( i=0; i<ctx->nno; ++i ) {
+      PetscInt ix[] = {i+low};
+      ierr = VecGetValues( x, 1, ix, &ctx->RR[m][i] );
+      CHKERRQ( ierr );
+    }
+  }
+
+  /* initialize the space for the solution */
+  for( m = 1; m <= ctx->caps_per_proc; m++ ) {
+    for( i = 0; i < ctx->nno; i++ ) {
+      ctx->V[m][i] = 0.0;
+    }
+  }
+
+  count = 0;
+
+  do {
+    residual = multi_grid( ctx->E, ctx->V, ctx->RR, ctx->acc, ctx->level );
+    valid = (residual < ctx->acc) ? 1 : 0;
+    count++;
+  } while ( (!valid) && (count < ctx->max_vel_iterations) );
+  ctx->status = residual;
+
+  VecGetOwnershipRange( y, &low, &high );
+  for( m = 1; m <= ctx->caps_per_proc; m++ ) {
+    for( i = 0; i < ctx->nno; i++ ) {
+      ierr = VecSetValue( y, i+low, ctx->V[m][i], INSERT_VALUES ); 
+      CHKERRQ( ierr );
+    }
+  }
+  VecAssemblyBegin(y);
+  VecAssemblyEnd(y);
+
+  PetscFunctionReturn(0);
+}
+
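+/* MatShell multiply callback for the velocity stiffness operator:
+   KU = K*U, evaluated matrix-free through CitcomS assemble_del2_u(). */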
+PetscErrorCode MatShellMult_del2_u( Mat K, Vec U, Vec KU )
+{
+  // K is (neq+1) x (neq+1)
+  // U is (neq+1)
+  // KU is (neq+1)
+  int i, j, neq, nel;
+  PetscErrorCode ierr;
+  struct MatMultShell_del2_u *ctx;
+  MatShellGetContext( K, (void **)&ctx );
+  neq = ctx->neq;
+  nel = ctx->nel;
+  int low, high;
+
+  VecAssemblyBegin(U);
+  VecAssemblyEnd(U);
+  VecGetOwnershipRange( U, &low, &high );
+  for( i = 1; i <= ctx->E->sphere.caps_per_proc; i++ ) {
+    for( j = 0; j <= neq; j++ ) {
+      PetscInt ix[] = {j+low};
+      ierr = VecGetValues( U, 1, ix, &ctx->u[i][j] );
+      CHKERRQ( ierr );
+    }
+  }
+
+  // actual CitcomS operation
+  assemble_del2_u( ctx->E, ctx->u, ctx->Ku, ctx->level, 1 );
+  
+  VecGetOwnershipRange( KU, &low, &high );
+  for( i=1; i <= ctx->E->sphere.caps_per_proc; ++i ) {
+    for( j = 0; j <= neq; j++ ) {
+      ierr = VecSetValue( KU, j+low, ctx->Ku[i][j], INSERT_VALUES );
+      CHKERRQ( ierr );
+    }
+  }
+  VecAssemblyBegin( KU );
+  VecAssemblyEnd( KU );
+
+  PetscFunctionReturn(0);
+}
+
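+/* MatShell multiply callback for the discrete gradient operator:
+   GP = G*P, evaluated matrix-free through CitcomS assemble_grad_p(). */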
+PetscErrorCode MatShellMult_grad_p( Mat G, Vec P, Vec GP )
+{
+  // G is (neq+1) x (nel)
+  // P is (nel) x 1
+  // GP is (neq+1) x 1 of which first neq entries (0:neq-1) are actual values
+  int i, j, neq, nel;
+  PetscErrorCode ierr;
+  struct MatMultShell_grad_p *ctx;
+  MatShellGetContext( G, (void **)&ctx );
+  neq = ctx->neq;
+  nel = ctx->nel;
+
+  int low, high;
+  VecGetOwnershipRange( P, &low, &high );
+
+  VecAssemblyBegin( P );
+  VecAssemblyEnd( P );
+  for( i = 1; i <= ctx->E->sphere.caps_per_proc; i++ ) {
+    for( j = 0; j < nel; j++ ) {
+      PetscInt ix[] = {j+low};
+      ierr = VecGetValues( P, 1, ix, &ctx->p[i][j+1] );
+      CHKERRQ( ierr );
+    }
+  }
+
+  // actual CitcomS operation
+  assemble_grad_p( ctx->E, ctx->p, ctx->Gp, ctx->level );
+
+  VecGetOwnershipRange( GP, &low, &high );
+  for( i = 1; i <= ctx->E->sphere.caps_per_proc; i++ ) {
+    for( j = 0; j < neq; j++ ) {
+      ierr = VecSetValue( GP, j+low, ctx->Gp[i][j], INSERT_VALUES );
+      CHKERRQ( ierr );
+    }
+  }
+  VecAssemblyBegin( GP );
+  VecAssemblyEnd( GP );
+
+  PetscFunctionReturn(0);
+}
+
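+/* MatShell multiply callback for the discrete divergence operator:
+   DU = D*U, evaluated matrix-free through CitcomS assemble_div_u(). */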
+PetscErrorCode MatShellMult_div_u( Mat D, Vec U, Vec DU )
+{
+  // D is nel x (neq+1)
+  // U is (neq+1) x 1, of which first neq values (0:neq-1) are actual values
+  // DU is nel x 1
+  int i, j, neq, nel;
+  PetscErrorCode ierr;
+  struct MatMultShell_div_u *ctx;
+  MatShellGetContext( D, (void **)&ctx );
+  neq = ctx->neq;
+  nel = ctx->nel;
+
+  int low, high;
+  VecGetOwnershipRange( U, &low, &high );
+
+  VecAssemblyBegin( U );
+  VecAssemblyEnd( U );
+  for( i = 1; i <= ctx->E->sphere.caps_per_proc; i++ ) {
+    for( j = 0; j < neq; j++ ) {
+      PetscInt ix[] = {j+low};
+      ierr = VecGetValues( U, 1, ix, &ctx->u[i][j] );
+      CHKERRQ( ierr );
+    }
+  }
+
+  // actual CitcomS operation
+  assemble_div_u( ctx->E, ctx->u, ctx->Du, ctx->level );
+
+  VecGetOwnershipRange( DU, &low, &high );
+  for( i = 1; i <= ctx->E->sphere.caps_per_proc; i++ ) {
+    for( j = 0; j < nel; j++ ) {
+      ierr = VecSetValue( DU, j+low, ctx->Du[i][j+1], INSERT_VALUES );
+      CHKERRQ( ierr );
+    }
+  }
+  VecAssemblyBegin( DU );
+  VecAssemblyEnd( DU );
+
+  PetscFunctionReturn(0);
+}
+
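+/* MatShell multiply callback for the density-weighted (compressible)
+   divergence: DU = DC*U, evaluated through CitcomS assemble_div_rho_u(). */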
+PetscErrorCode MatShellMult_div_rho_u( Mat DC, Vec U, Vec DU )
+{
+  // DC is nel x (neq+1)
+  // U is (neq+1) x 1, of which first neq values (0:neq-1) are actual values
+  // DU is nel x 1
+  int i, j, neq, nel;
+  PetscErrorCode ierr;
+  struct MatMultShell_div_u *ctx;
+  MatShellGetContext( DC, (void **)&ctx );
+  neq = ctx->neq;
+  nel = ctx->nel;
+
+  int low, high;
+  VecGetOwnershipRange( U, &low, &high );
+
+  VecAssemblyBegin( U );
+  VecAssemblyEnd( U );
+  for( i = 1; i <= ctx->E->sphere.caps_per_proc; i++ ) {
+    for( j = 0; j < neq; j++ ) {
+      PetscInt ix[] = {j+low};
+      ierr = VecGetValues( U, 1, ix, &ctx->u[i][j] );
+      CHKERRQ( ierr );
+    }
+  }
+
+  // actual CitcomS operation
+  assemble_div_rho_u( ctx->E, ctx->u, ctx->Du, ctx->level );
+
+  VecGetOwnershipRange( DU, &low, &high );
+  for( i = 1; i <= ctx->E->sphere.caps_per_proc; i++ ) {
+    for( j = 0; j < nel; j++ ) {
+      ierr = VecSetValue( DU, j+low, ctx->Du[i][j+1], INSERT_VALUES );
+      CHKERRQ( ierr );
+    }
+  }
+  VecAssemblyBegin( DU );
+  VecAssemblyEnd( DU );
+
+  PetscFunctionReturn(0);
+}
+
diff --git a/lib/global_defs.h b/lib/global_defs.h
index c603ce4..be70358 100644
--- a/lib/global_defs.h
+++ b/lib/global_defs.h
@@ -45,6 +45,7 @@ to functions across the whole filespace of CITCOM.
 #include <stdlib.h>
 #include <mpi.h>
 #include <petscksp.h>
+#include <petscsnes.h>
 
 
 
@@ -716,6 +717,12 @@ struct All_variables {
 #include "viscosity_descriptions.h"
 #include "advection.h"
 
+    /* PETSc related data structures */
+    Mat   K, G, D, DC;
+    KSP   ksp;
+    PC    pc;
+    SNES  snes;
+
     FILE *fp;
     FILE *fptime;
     FILE *fp_out;
diff --git a/lib/petsc_citcoms.h b/lib/petsc_citcoms.h
new file mode 100644
index 0000000..3c6e95c
--- /dev/null
+++ b/lib/petsc_citcoms.h
@@ -0,0 +1,94 @@
+// petsc_citcoms.h
+#ifndef __CitcomS__PETSc__h__
+#define __CitcomS__PETSc__h__
+
+#include <petscksp.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+  
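+// Context for the shell preconditioner; PC_Apply_MultiGrid retrieves it
+// with PCShellGetContext().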
+struct MultiGrid_PC
+{
+  PC self;
+  struct All_variables *E;
+  // multigrid stuff
+  double acc;
+  int smooth_up;
+  int smooth_down;
+  int smooth_coarse;
+  int smooth_fine;
+  int max_vel_iterations;
+  int cycle_type;
+  double status;
+  int level;
+  int levmax;
+  PetscBool mg_monitor;
+  int nno;
+  int caps_per_proc;
+  double *V[NCS];
+  double *RR[NCS];
+};
+
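+// Contexts for the matrix-free (MatShell) operators implemented in
+// Petsc_citcoms.c; each holds the CitcomS state and per-cap scratch
+// arrays used by the corresponding assemble_*() routine.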
+struct MatMultShell_del2_u
+{
+  struct All_variables *E;
+  int level;
+  int neq;
+  int nel;
+  double *u[NCS];
+  double *Ku[NCS];
+};
+
+struct MatMultShell_grad_p
+{
+  struct All_variables *E;
+  int level;
+  int neq;
+  int nel;
+  double *p[NCS];
+  double *Gp[NCS];
+};
+
+struct MatMultShell_div_u
+{
+  struct All_variables *E;
+  int level;
+  int neq;
+  int nel;
+  double *u[NCS];
+  double *Du[NCS];
+};
+
+struct MatMultShell_div_rho_u
+{
+  struct All_variables *E;
+  int level;
+  int neq;
+  int npno;
+  double *u[NCS];
+  double *Du[NCS];
+};
+
+void strip_bcs_from_residual_PETSc( 
+    struct All_variables *E, Vec Res, int level );
+PetscErrorCode initial_vel_residual_PETSc( struct All_variables *E,
+                                 Vec V, Vec P, Vec F,
+                                 double acc );
+double global_v_norm2_PETSc( struct All_variables *E, Vec v );
+double global_p_norm2_PETSc( struct All_variables *E, Vec p );
+double global_div_norm2_PETSc( struct All_variables *E,  Vec a );
+PetscErrorCode assemble_c_u_PETSc( struct All_variables *E, Vec U, Vec res, int level );
+
+PetscErrorCode PC_Apply_MultiGrid( PC pc, Vec x, Vec y );
+
+PetscErrorCode MatShellMult_del2_u( Mat K, Vec U, Vec KU );
+PetscErrorCode MatShellMult_grad_p( Mat G, Vec P, Vec GP );
+PetscErrorCode MatShellMult_div_u( Mat D, Vec U, Vec DU );
+PetscErrorCode MatShellMult_div_rho_u( Mat D, Vec U, Vec DU );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // __CitcomS__PETSc__h__
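
For orientation, the following is a minimal sketch of how the shell operators and the
multigrid preconditioner declared above could be hooked into PETSc. The function name
setup_petsc_shell_example and its context arguments are illustrative assumptions rather
than part of this commit, and the KSPSetOperators call is left as a comment because its
signature differs between PETSc releases.

#include <petscksp.h>
#include "global_defs.h"
#include "petsc_citcoms.h"

PetscErrorCode setup_petsc_shell_example(struct All_variables *E,
                                         struct MatMultShell_del2_u *Kctx,
                                         struct MultiGrid_PC *MGctx)
{
  PetscErrorCode ierr;
  PetscInt n = E->lmesh.neq + 1;   /* local size used by MatShellMult_del2_u */
  PC pc;

  /* matrix-free velocity stiffness operator K */
  ierr = MatCreateShell(PETSC_COMM_WORLD, n, n, PETSC_DETERMINE, PETSC_DETERMINE,
                        (void *)Kctx, &E->K); CHKERRQ(ierr);
  ierr = MatShellSetOperation(E->K, MATOP_MULT,
                              (void (*)(void))MatShellMult_del2_u); CHKERRQ(ierr);

  /* KSP whose preconditioner is the CitcomS multigrid cycle */
  ierr = KSPCreate(PETSC_COMM_WORLD, &E->ksp); CHKERRQ(ierr);
  /* ierr = KSPSetOperators(E->ksp, E->K, E->K, ...);  -- signature varies by PETSc version */
  ierr = KSPGetPC(E->ksp, &pc); CHKERRQ(ierr);
  ierr = PCSetType(pc, PCSHELL); CHKERRQ(ierr);
  ierr = PCShellSetContext(pc, (void *)MGctx); CHKERRQ(ierr);
  ierr = PCShellSetApply(pc, PC_Apply_MultiGrid); CHKERRQ(ierr);
  ierr = KSPSetFromOptions(E->ksp); CHKERRQ(ierr);

  PetscFunctionReturn(0);
}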


