[cig-commits] r16794 - in short/3D/PyLith/trunk: libsrc/faults libsrc/problems modulesrc/faults

brad at geodynamics.org
Wed May 26 17:25:33 PDT 2010


Author: brad
Date: 2010-05-26 17:25:32 -0700 (Wed, 26 May 2010)
New Revision: 16794

Modified:
   short/3D/PyLith/trunk/libsrc/faults/FaultCohesiveDyn.cc
   short/3D/PyLith/trunk/libsrc/faults/FaultCohesiveDyn.hh
   short/3D/PyLith/trunk/libsrc/faults/FaultCohesiveLagrange.cc
   short/3D/PyLith/trunk/libsrc/faults/FaultCohesiveLagrange.hh
   short/3D/PyLith/trunk/libsrc/problems/Formulation.cc
   short/3D/PyLith/trunk/modulesrc/faults/FaultCohesiveDyn.i
   short/3D/PyLith/trunk/modulesrc/faults/FaultCohesiveLagrange.i
Log:
Fixed assembly of fault calculations across processors. The assembly must account for cases where fault vertices are shared by more than one processor and where vertices on the fault reside on a different processor than the corresponding cohesive cells.
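
To make the fix concrete, below is a toy C++ sketch (not PyLith code; the two-"processor" loop, the areas, and the contribution value are all hypothetical) of why contributions at a shared fault vertex must be weighted before assembly: each processor computes the full physical value, so a plain sum across processors double counts it, while weighting each copy by areaLocal / areaTotal makes the assembled sum equal the true value.

    #include <cassert>
    #include <iostream>

    int main() {
      const double contribution = 5.0; // full physical value at the shared vertex

      // Local integration area adjacent to the vertex on each "processor"
      // (made-up numbers); their sum is the assembled vertex area.
      const double areaLocal[2] = {1.5, 2.5};
      const double areaTotal = areaLocal[0] + areaLocal[1];

      double unweighted = 0.0;
      double weighted = 0.0;
      for (int p = 0; p < 2; ++p) {
        unweighted += contribution;                 // double counts the value
        const double wt = areaLocal[p] / areaTotal; // assembly weight
        weighted += wt * contribution;              // assembles to the true value
      }

      std::cout << "unweighted: " << unweighted        // 10 (wrong)
                << ", weighted: " << weighted << "\n"; //  5 (correct)
      assert(weighted == contribution);
      return 0;
    }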

Modified: short/3D/PyLith/trunk/libsrc/faults/FaultCohesiveDyn.cc
===================================================================
--- short/3D/PyLith/trunk/libsrc/faults/FaultCohesiveDyn.cc	2010-05-27 00:16:20 UTC (rev 16793)
+++ short/3D/PyLith/trunk/libsrc/faults/FaultCohesiveDyn.cc	2010-05-27 00:25:32 UTC (rev 16794)
@@ -148,6 +148,9 @@
   assert(0 != fields);
   assert(0 != _fields);
 
+  // Initial fault tractions have been assembled, so they do not need
+  // assembling across processors.
+
   FaultCohesiveLagrange::integrateResidual(residual, t, fields);
 
   // No contribution if no initial tractions are specified.
@@ -169,6 +172,10 @@
   const ALE::Obj<RealSection>& slipSection = _fields->get("slip").section();
   assert(!slipSection.isNull());
 
+  const ALE::Obj<RealSection>& assemblyWtSection = 
+    _fields->get("assembly weight").section();
+  assert(!assemblyWtSection.isNull());
+
   const int numVertices = _cohesiveVertices.size();
   for (int iVertex=0; iVertex < numVertices; ++iVertex) {
     const int v_fault = _cohesiveVertices[iVertex].fault;
@@ -186,9 +193,19 @@
     const double* slipVertex = slipSection->restrictPoint(v_fault);
     assert(0 != slipVertex);
     
+    // Get assembly weight for fault vertex.
+    assert(1 == assemblyWtSection->getFiberDimension(v_fault));
+    const double* assemblyWtVertex = 
+      assemblyWtSection->restrictPoint(v_fault);
+    assert(0 != assemblyWtVertex);
+
     // only apply initial tractions if there is no opening
     if (0.0 == slipVertex[spaceDim-1]) {
       residualVertex = forcesInitialVertex;
+
+      // Avoid double sum across processors
+      residualVertex *= *assemblyWtVertex;
+
       assert(residualVertex.size() == 
 	     residualSection->getFiberDimension(v_positive));
       residualSection->updateAddPoint(v_positive, &residualVertex[0]);
@@ -651,6 +668,10 @@
   const ALE::Obj<RealSection>& areaSection = _fields->get("area").section();
   assert(!areaSection.isNull());
 
+  const ALE::Obj<RealSection>& assemblyWtSection = 
+    _fields->get("assembly weight").section();
+  assert(!assemblyWtSection.isNull());
+
   double_array orientationVertex(orientationSize);
   const ALE::Obj<RealSection>& orientationSection =
       _fields->get("orientation").section();
@@ -669,6 +690,10 @@
       fields->get("dispIncr(t->t+dt)").section();
   assert(!dispTIncrSection.isNull());
 
+  const ALE::Obj<RealSection>& dispTIncrAdjSection = fields->get(
+    "dispIncr adjust").section();
+  assert(!dispTIncrAdjSection.isNull());
+
   double_array jacobianVertexN(spaceDim);
   double_array jacobianVertexP(spaceDim);
   const ALE::Obj<RealSection>& jacobianSection = jacobian.section();
@@ -742,6 +767,12 @@
     assert(0 != areaVertex);
     assert(1 == areaSection->getFiberDimension(v_fault));
     
+    // Get assembly weight for fault vertex.
+    assert(1 == assemblyWtSection->getFiberDimension(v_fault));
+    const double* assemblyWtVertex = 
+      assemblyWtSection->restrictPoint(v_fault);
+    assert(0 != assemblyWtVertex);
+
     // Get fault orientation
     orientationSection->restrictPoint(v_fault, &orientationVertex[0],
 				      orientationVertex.size());
@@ -918,15 +949,20 @@
     _logger->eventBegin(updateEvent);
 #endif
 
+    // Avoid double sum across processors
+    dispTIncrVertexN *= *assemblyWtVertex;
+    dispTIncrVertexP *= *assemblyWtVertex;
+    lagrangeTIncrVertex *= *assemblyWtVertex;
+
     // Adjust displacements to account for Lagrange multiplier values
    // (assumed to be zero in preliminary solve).
     assert(dispTIncrVertexN.size() == 
-	   dispTIncrSection->getFiberDimension(v_negative));
-    dispTIncrSection->updateAddPoint(v_negative, &dispTIncrVertexN[0]);
+	   dispTIncrAdjSection->getFiberDimension(v_negative));
+    dispTIncrAdjSection->updateAddPoint(v_negative, &dispTIncrVertexN[0]);
 
     assert(dispTIncrVertexP.size() == 
-	   dispTIncrSection->getFiberDimension(v_positive));
-    dispTIncrSection->updateAddPoint(v_positive, &dispTIncrVertexP[0]);
+	   dispTIncrAdjSection->getFiberDimension(v_positive));
+    dispTIncrAdjSection->updateAddPoint(v_positive, &dispTIncrVertexP[0]);
 
     // Set Lagrange multiplier value. Value from preliminary solve is
     // bogus due to artificial diagonal entry of 1.0.

Modified: short/3D/PyLith/trunk/libsrc/faults/FaultCohesiveDyn.hh
===================================================================
--- short/3D/PyLith/trunk/libsrc/faults/FaultCohesiveDyn.hh	2010-05-27 00:16:20 UTC (rev 16793)
+++ short/3D/PyLith/trunk/libsrc/faults/FaultCohesiveDyn.hh	2010-05-27 00:25:32 UTC (rev 16794)
@@ -77,10 +77,10 @@
 		  const double normalDir[3]);
 
   /** Integrate contributions to residual term (r) for operator that
-   * require assembly processors.
+   * do not require assembly across processors.
    *
-   * Initial tractions (if specified) contribute to the residual like
-   * Neumann boundary conditions.
+   * Initial tractions (if specified) are already assembled and
+   * contribute to the residual like Neumann boundary conditions.
    *
    * @param residual Field containing values for residual
    * @param t Current time

Modified: short/3D/PyLith/trunk/libsrc/faults/FaultCohesiveLagrange.cc
===================================================================
--- short/3D/PyLith/trunk/libsrc/faults/FaultCohesiveLagrange.cc	2010-05-27 00:16:20 UTC (rev 16793)
+++ short/3D/PyLith/trunk/libsrc/faults/FaultCohesiveLagrange.cc	2010-05-27 00:25:32 UTC (rev 16794)
@@ -212,6 +212,10 @@
   const ALE::Obj<RealSection>& slipSection = slip.section();
   assert(!slipSection.isNull());
 
+  const ALE::Obj<RealSection>& assemblyWtSection = 
+    _fields->get("assembly weight").section();
+  assert(!assemblyWtSection.isNull());
+
   const ALE::Obj<RealSection>& residualSection = residual.section();
   assert(!residualSection.isNull());
 
@@ -246,6 +250,12 @@
     // Get slip at fault vertex.
     slipSection->restrictPoint(v_fault, &slipVertex[0], slipVertex.size());
 
+    // Get assembly weight for fault vertex.
+    assert(1 == assemblyWtSection->getFiberDimension(v_fault));
+    const double* assemblyWtVertex = 
+      assemblyWtSection->restrictPoint(v_fault);
+    assert(0 != assemblyWtVertex);
+
     // Get orientations at fault vertex.
     orientationSection->restrictPoint(v_fault, &orientationVertex[0],
 				      orientationVertex.size());
@@ -300,6 +310,12 @@
     _logger->eventBegin(updateEvent);
 #endif
 
+    // Weight assembly contributions to avoid double counting across
+    // processors
+    residualVertexN *= *assemblyWtVertex;
+    residualVertexP *= *assemblyWtVertex;
+    residualVertexL *= *assemblyWtVertex;
+
     assert(residualVertexN.size() == 
 	   residualSection->getFiberDimension(v_negative));
     residualSection->updateAddPoint(v_negative, &residualVertexN[0]);
@@ -367,6 +383,10 @@
   const ALE::Obj<RealSection>& solutionSection = fields->solution().section();
   assert(!solutionSection.isNull());
 
+  const ALE::Obj<RealSection>& assemblyWtSection = 
+    _fields->get("assembly weight").section();
+  assert(!assemblyWtSection.isNull());
+
   const ALE::Obj<RealSection>& orientationSection =
       _fields->get("orientation").section();
   assert(!orientationSection.isNull());
@@ -400,6 +420,12 @@
     // Get orientations at fault cell's vertices.
     orientationSection->restrictPoint(v_fault, &orientationVertex[0], orientationVertex.size());
 
+    // Get assembly weight for fault vertex.
+    assert(1 == assemblyWtSection->getFiberDimension(v_fault));
+    const double* assemblyWtVertex = 
+      assemblyWtSection->restrictPoint(v_fault);
+    assert(0 != assemblyWtVertex);
+
     // Set global order indices
     indicesL = indicesRel + globalOrder->getIndex(v_lagrange);
     indicesN = indicesRel + globalOrder->getIndex(v_negative);
@@ -413,6 +439,7 @@
     // Values associated with [C]
     // Values at positive vertex, entry L,P in Jacobian
     jacobianVertex = orientationVertex;
+    jacobianVertex *= *assemblyWtVertex; // avoid double sum across processors
     MatSetValues(jacobianMatrix,
                  indicesL.size(), &indicesL[0],
                  indicesP.size(), &indicesP[0],
@@ -429,9 +456,14 @@
 
     // Values associated with [C]^T
     // Transpose orientation matrix
+    jacobianVertex = 0.0;
     for (int iDim=0; iDim < spaceDim; ++iDim)
       for (int jDim=0; jDim < spaceDim; ++jDim)
-        jacobianVertex[iDim*spaceDim+jDim] = orientationVertex[jDim*spaceDim+iDim];
+        jacobianVertex[iDim*spaceDim+jDim] = 
+	  orientationVertex[jDim*spaceDim+iDim];
+
+    jacobianVertex *= *assemblyWtVertex; // avoid double sum across processors
+
     // Values at positive vertex, entry P,L in Jacobian
     MatSetValues(jacobianMatrix,
                  indicesP.size(), &indicesP[0],
@@ -472,7 +504,7 @@
 
 // ----------------------------------------------------------------------
 // Compute Jacobian matrix (A) associated with operator that do not
-// require assembly across cells, vertices, or processors.
+// require assembly across processors.
 void
 pylith::faults::FaultCohesiveLagrange::integrateJacobian(
 	                  topology::Field<topology::Mesh>* jacobian,
@@ -1028,6 +1060,10 @@
   const ALE::Obj<RealSection>& slipSection = _fields->get("slip").section();
   assert(!slipSection.isNull());
 
+  const ALE::Obj<RealSection>& assemblyWtSection = 
+    _fields->get("assembly weight").section();
+  assert(!assemblyWtSection.isNull());
+
   double_array jacobianVertexN(spaceDim);
   double_array jacobianVertexP(spaceDim);
   const ALE::Obj<RealSection>& jacobianSection = jacobian.section();
@@ -1037,17 +1073,24 @@
   double_array residualVertexP(spaceDim);
   const ALE::Obj<RealSection>& residualSection =
       fields->get("residual").section();
+  assert(!residualSection.isNull());
 
   double_array dispTVertexN(spaceDim);
   double_array dispTVertexP(spaceDim);
   const ALE::Obj<RealSection>& dispTSection = fields->get("disp(t)").section();
+  assert(!dispTSection.isNull());
 
   double_array dispTIncrVertexN(spaceDim);
   double_array dispTIncrVertexP(spaceDim);
-  double_array lagrangeIncrVertex(spaceDim);
+  double_array lagrangeTIncrVertex(spaceDim);
   const ALE::Obj<RealSection>& dispTIncrSection = fields->get(
     "dispIncr(t->t+dt)").section();
+  assert(!dispTIncrSection.isNull());
 
+  const ALE::Obj<RealSection>& dispTIncrAdjSection = fields->get(
+    "dispIncr adjust").section();
+  assert(!dispTIncrAdjSection.isNull());
+
   adjustSolnLumped_fn_type adjustSolnLumpedFn;
   switch (spaceDim) { // switch
   case 1:
@@ -1092,6 +1135,12 @@
     // Get slip at fault cell's vertices.
     slipSection->restrictPoint(v_fault, &slipVertex[0], slipVertex.size());
 
+    // Get assembly weight for fault vertex.
+    assert(1 == assemblyWtSection->getFiberDimension(v_fault));
+    const double* assemblyWtVertex = 
+      assemblyWtSection->restrictPoint(v_fault);
+    assert(0 != assemblyWtVertex);
+
     // Get residual at cohesive cell's vertices.
     residualSection->restrictPoint(v_negative, &residualVertexN[0],
 			   residualVertexN.size());
@@ -1116,7 +1165,7 @@
 #endif
 
     CALL_MEMBER_FN(*this, 
-		   adjustSolnLumpedFn)(&lagrangeIncrVertex, 
+		   adjustSolnLumpedFn)(&lagrangeTIncrVertex, 
 				       &dispTIncrVertexN, &dispTIncrVertexP,
 				       slipVertex, orientationVertex, 
 				       dispTVertexN, dispTVertexP,
@@ -1128,21 +1177,26 @@
     _logger->eventBegin(updateEvent);
 #endif
 
+    // Avoid double sum across processors
+    dispTIncrVertexN *= *assemblyWtVertex;
+    dispTIncrVertexP *= *assemblyWtVertex;
+    lagrangeTIncrVertex *= *assemblyWtVertex;
+
     // Adjust displacements to account for Lagrange multiplier values
-    // (assumed to be zero in perliminary solve).
+    // (assumed to be zero in preliminary solve).
     assert(dispTIncrVertexN.size() == 
-	   dispTIncrSection->getFiberDimension(v_negative));
-    dispTIncrSection->updateAddPoint(v_negative, &dispTIncrVertexN[0]);
+	   dispTIncrAdjSection->getFiberDimension(v_negative));
+    dispTIncrAdjSection->updateAddPoint(v_negative, &dispTIncrVertexN[0]);
 
     assert(dispTIncrVertexP.size() == 
-	   dispTIncrSection->getFiberDimension(v_positive));
-    dispTIncrSection->updateAddPoint(v_positive, &dispTIncrVertexP[0]);
+	   dispTIncrAdjSection->getFiberDimension(v_positive));
+    dispTIncrAdjSection->updateAddPoint(v_positive, &dispTIncrVertexP[0]);
 
     // Set Lagrange multiplier value. Value from preliminary solve is
     // bogus due to artificial diagonal entry of 1.0.
-    assert(lagrangeIncrVertex.size() == 
+    assert(lagrangeTIncrVertex.size() == 
 	   dispTIncrSection->getFiberDimension(v_lagrange));
-    dispTIncrSection->updatePoint(v_lagrange, &lagrangeIncrVertex[0]);
+    dispTIncrSection->updatePoint(v_lagrange, &lagrangeTIncrVertex[0]);
 
 #if defined(DETAILED_EVENT_LOGGING)
     _logger->eventEnd(updateEvent);
@@ -1581,7 +1635,6 @@
 
   // Allocate area field.
   _fields->add("area", "area");
-
   topology::Field<topology::SubMesh>& area = _fields->get("area");
   const topology::Field<topology::SubMesh>& slip = _fields->get("slip");
   area.newSection(slip, 1);
@@ -1591,6 +1644,14 @@
   assert(!areaSection.isNull());
   topology::Mesh::UpdateAddVisitor areaVisitor(*areaSection, &areaCell[0]);
 
+  // Allocate assembly weight field
+  _fields->add("assembly weight", "assembly_weight");
+  topology::Field<topology::SubMesh>& assemblyWt = 
+    _fields->get("assembly weight");
+  assemblyWt.cloneSection(area);
+  const ALE::Obj<RealSection>& assemblyWtSection = assemblyWt.section();
+  assert(!assemblyWtSection.isNull());
+
   double_array coordinatesCell(numBasis * spaceDim);
   const ALE::Obj<RealSection>& coordinates = faultSieveMesh->getRealSection(
     "coordinates");
@@ -1635,10 +1696,34 @@
 
     PetscLogFlops( numQuadPts*(1+numBasis*2) );
   } // for
+  // Use area / areaTotal as assembly weight
+  // Copy area to assembly weight.
+  assemblyWt.copy(area);
 
   // Assemble area information
   area.complete();
 
+  // Compute assembly weight as area / areaTotal.
+  const int numVertices = _cohesiveVertices.size();
+  for (int iVertex=0; iVertex < numVertices; ++iVertex) {
+    const int v_fault = _cohesiveVertices[iVertex].fault;
+
+    assert(1 == assemblyWtSection->getFiberDimension(v_fault));
+    const double* areaLocal = assemblyWtSection->restrictPoint(v_fault);
+    assert(0 != areaLocal);
+
+    assert(1 == areaSection->getFiberDimension(v_fault));
+    const double* areaTotal = areaSection->restrictPoint(v_fault);
+    assert(0 != areaTotal);
+
+    const double wt = (*areaLocal) / (*areaTotal);
+
+    assert(1 == assemblyWtSection->getFiberDimension(v_fault));
+    assemblyWtSection->updatePoint(v_fault, &wt);
+  } // for
+
+  assemblyWtSection->view("ASSEMBLY WEIGHT");
+
 #if 0 // DEBUGGING
   area.view("AREA");
   //_faultMesh->getSendOverlap()->view("Send fault overlap");
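
As a summary of the weight computation in the hunk above, here is a minimal serial sketch, assuming a plain C array stands in for the ALE sections and a simple sum stands in for area.complete(): each rank keeps its local integration area, the areas are assembled into the total, and the weight is areaLocal / areaTotal, so the weights at a shared fault vertex sum to 1 and weighted contributions assemble to exactly the full value.

    #include <cassert>
    #include <cmath>

    int main() {
      // Local area at one shared fault vertex on each of three hypothetical ranks.
      const int numRanks = 3;
      const double areaLocal[numRanks] = {0.5, 1.0, 0.5};

      // Stand-in for area.complete(): assemble the total area at the vertex.
      double areaTotal = 0.0;
      for (int p = 0; p < numRanks; ++p)
        areaTotal += areaLocal[p];

      // Weight on each rank, mirroring the loop over _cohesiveVertices above.
      double wtSum = 0.0;
      for (int p = 0; p < numRanks; ++p)
        wtSum += areaLocal[p] / areaTotal;

      assert(std::fabs(wtSum - 1.0) < 1.0e-12);
      return 0;
    }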
@@ -1765,7 +1850,7 @@
 // Adjust solution in lumped formulation to match slip for 1-D.
 void
 pylith::faults::FaultCohesiveLagrange::_adjustSolnLumped1D(
-					  double_array* lagrangeIncr,
+					  double_array* lagrangeTIncr,
 					  double_array* dispTIncrN,
 					  double_array* dispTIncrP,
 					  const double_array& slip,
@@ -1777,7 +1862,7 @@
 					  const double_array& jacobianN,
 					  const double_array& jacobianP)
 { // _adjustSoln1D
-  assert(0 != lagrangeIncr);
+  assert(0 != lagrangeTIncr);
   assert(0 != dispTIncrN);
   assert(0 != dispTIncrP);
 
@@ -1803,7 +1888,7 @@
   (*dispTIncrP)[0] = -1.0 / jacobianP[0] * dlp;
   
   // Update Lagrange multiplier.
-  (*lagrangeIncr)[0] = dlp;
+  (*lagrangeTIncr)[0] = dlp;
 
   PetscLogFlops(17);
 } // _adjustSoln1D
@@ -1812,7 +1897,7 @@
 // Adjust solution in lumped formulation to match slip for 2-D.
 void
 pylith::faults::FaultCohesiveLagrange::_adjustSolnLumped2D(
-					  double_array* lagrangeIncr,
+					  double_array* lagrangeTIncr,
 					  double_array* dispTIncrN,
 					  double_array* dispTIncrP,
 					  const double_array& slip,
@@ -1824,7 +1909,7 @@
 					  const double_array& jacobianN,
 					  const double_array& jacobianP)
 { // _adjustSoln2D
-  assert(0 != lagrangeIncr);
+  assert(0 != lagrangeTIncr);
   assert(0 != dispTIncrN);
   assert(0 != dispTIncrP);
 
@@ -1874,8 +1959,8 @@
   (*dispTIncrP)[1] = -dly / jacobianP[0];
   
   // Update Lagrange multiplier.
-  (*lagrangeIncr)[0] = dlp;
-  (*lagrangeIncr)[1] = dlq;
+  (*lagrangeTIncr)[0] = dlp;
+  (*lagrangeTIncr)[1] = dlq;
 
     PetscLogFlops(41);
 } // _adjustSoln2D
@@ -1884,7 +1969,7 @@
 // Adjust solution in lumped formulation to match slip for 3-D.
 void
 pylith::faults::FaultCohesiveLagrange::_adjustSolnLumped3D(
-					  double_array* lagrangeIncr,
+					  double_array* lagrangeTIncr,
 					  double_array* dispTIncrN,
 					  double_array* dispTIncrP,
 					  const double_array& slip,
@@ -1896,7 +1981,7 @@
 					  const double_array& jacobianN,
 					  const double_array& jacobianP)
 { // _adjustSoln3D
-  assert(0 != lagrangeIncr);
+  assert(0 != lagrangeTIncr);
   assert(0 != dispTIncrN);
   assert(0 != dispTIncrP);
 
@@ -1964,9 +2049,9 @@
   (*dispTIncrP)[2] = -dlz / jacobianP[2];
 
   // Update Lagrange multiplier.
-  (*lagrangeIncr)[0] = dlp;
-  (*lagrangeIncr)[1] = dlq;
-  (*lagrangeIncr)[2] = dlr;
+  (*lagrangeTIncr)[0] = dlp;
+  (*lagrangeTIncr)[1] = dlq;
+  (*lagrangeTIncr)[2] = dlr;
 
   PetscLogFlops(72);
 } // _adjustSoln3D

Modified: short/3D/PyLith/trunk/libsrc/faults/FaultCohesiveLagrange.hh
===================================================================
--- short/3D/PyLith/trunk/libsrc/faults/FaultCohesiveLagrange.hh	2010-05-27 00:16:20 UTC (rev 16793)
+++ short/3D/PyLith/trunk/libsrc/faults/FaultCohesiveLagrange.hh	2010-05-27 00:25:32 UTC (rev 16794)
@@ -125,8 +125,8 @@
    */
   virtual
   void integrateResidual(const topology::Field<topology::Mesh>& residual,
-			 const double t,
-			 topology::SolutionFields* const fields);
+				  const double t,
+				  topology::SolutionFields* const fields);
 
   /** Integrate contributions to Jacobian matrix (A) associated with
    * operator that require assembly across processors.
@@ -137,8 +137,8 @@
    */
   virtual
   void integrateJacobian(topology::Jacobian* jacobian,
-			 const double t,
-			 topology::SolutionFields* const fields);
+				  const double t,
+				  topology::SolutionFields* const fields);
 
   /** Integrate contributions to Jacobian matrix (A) associated with
   * operator that require assembly across processors.
@@ -149,8 +149,8 @@
    */
   virtual
   void integrateJacobian(topology::Field<topology::Mesh>* jacobian,
-			 const double t,
-			 topology::SolutionFields* const fields);
+				  const double t,
+				  topology::SolutionFields* const fields);
 
   /** Compute custom fault precoditioner using Schur complement.
    *

Modified: short/3D/PyLith/trunk/libsrc/problems/Formulation.cc
===================================================================
--- short/3D/PyLith/trunk/libsrc/problems/Formulation.cc	2010-05-27 00:16:20 UTC (rev 16793)
+++ short/3D/PyLith/trunk/libsrc/problems/Formulation.cc	2010-05-27 00:25:32 UTC (rev 16794)
@@ -458,6 +458,16 @@
 void
 pylith::problems::Formulation::adjustSolnLumped(void)
 { // adjustSolnLumped
+  topology::Field<topology::Mesh>& solution = _fields->solution();
+
+  if (!_fields->hasField("dispIncr adjust")) {
+    _fields->add("dispIncr adjust", "dispIncr_adjust");
+    topology::Field<topology::Mesh>& adjust = _fields->get("dispIncr adjust");
+    adjust.cloneSection(solution);
+  } // if
+  topology::Field<topology::Mesh>& adjust = _fields->get("dispIncr adjust");
+  adjust.zero();
+
   int numIntegrators = _meshIntegrators.size();
   for (int i=0; i < numIntegrators; ++i)
     _meshIntegrators[i]->adjustSolnLumped(_fields, *_jacobianLumped);
@@ -465,6 +475,9 @@
   numIntegrators = _submeshIntegrators.size();
   for (int i=0; i < numIntegrators; ++i)
     _submeshIntegrators[i]->adjustSolnLumped(_fields, *_jacobianLumped);
+
+  adjust.complete();
+  solution += adjust;
 } // adjustSolnLumped
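
The pattern introduced here (accumulate adjustments into a zeroed scratch field, assemble it once with complete(), then add it to the solution) can be sketched in plain C++; this is a serial toy in which std::vector stands in for the fields and the complete() step has nothing to assemble:

    #include <cstddef>
    #include <vector>

    int main() {
      std::vector<double> solution(3, 1.0); // stands in for dispIncr(t->t+dt)

      // adjust.zero(): a fresh scratch field on every call.
      std::vector<double> adjust(solution.size(), 0.0);

      // Each integrator adds its contribution (updateAddPoint analog).
      adjust[1] += 0.5;
      adjust[2] += -0.25;

      // adjust.complete() would sum duplicated points across processors here;
      // with a single process there is nothing to assemble.

      // solution += adjust;
      for (std::size_t i = 0; i < solution.size(); ++i)
        solution[i] += adjust[i];
      return 0;
    }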
 
 

Modified: short/3D/PyLith/trunk/modulesrc/faults/FaultCohesiveDyn.i
===================================================================
--- short/3D/PyLith/trunk/modulesrc/faults/FaultCohesiveDyn.i	2010-05-27 00:16:20 UTC (rev 16793)
+++ short/3D/PyLith/trunk/modulesrc/faults/FaultCohesiveDyn.i	2010-05-27 00:25:32 UTC (rev 16794)
@@ -62,6 +62,21 @@
 		      const double upDir[3],
 		      const double normalDir[3]);
       
+      /** Integrate contributions to residual term (r) for operator that
+       * do not require assembly across processors.
+       *
+       * Initial tractions (if specified) are already assembled and
+       * contribute to the residual like Neumann boundary conditions.
+       *
+       * @param residual Field containing values for residual
+       * @param t Current time
+       * @param fields Solution fields
+       */
+      virtual
+      void integrateResidual(const pylith::topology::Field<pylith::topology::Mesh>& residual,
+				      const double t,
+				      pylith::topology::SolutionFields* const fields);
+
       /** Update state variables as needed.
        *
        * @param t Current time

Modified: short/3D/PyLith/trunk/modulesrc/faults/FaultCohesiveLagrange.i
===================================================================
--- short/3D/PyLith/trunk/modulesrc/faults/FaultCohesiveLagrange.i	2010-05-27 00:16:20 UTC (rev 16793)
+++ short/3D/PyLith/trunk/modulesrc/faults/FaultCohesiveLagrange.i	2010-05-27 00:25:32 UTC (rev 16794)
@@ -58,41 +58,39 @@
       void splitField(pylith::topology::Field<pylith::topology::Mesh>* field);
 
       /** Integrate contributions to residual term (r) for operator that
-       * do not require assembly across cells, vertices, or processors.
+       * do not require assembly across processors.
        *
        * @param residual Field containing values for residual
        * @param t Current time
        * @param fields Solution fields
        */
       virtual
-      void integrateResidualAssembled(const pylith::topology::Field<pylith::topology::Mesh>& residual,
-				      const double t,
-				      pylith::topology::SolutionFields* const fields);
+      void integrateResidual(const pylith::topology::Field<pylith::topology::Mesh>& residual,
+			     const double t,
+			     pylith::topology::SolutionFields* const fields);
 
       /** Integrate contributions to Jacobian matrix (A) associated with
-       * operator that do not require assembly across cells, vertices, or
-       * processors.
+       * operator that do not require assembly across processors.
        *
        * @param jacobian Sparse matrix
        * @param t Current time
        * @param fields Solution fields
        * @param mesh Finite-element mesh
        */
-      void integrateJacobianAssembled(pylith::topology::Jacobian* jacobian,
-				      const double t,
-				      pylith::topology::SolutionFields* const fields);
+      void integrateJacobian(pylith::topology::Jacobian* jacobian,
+			     const double t,
+			     pylith::topology::SolutionFields* const fields);
       
       /** Integrate contributions to Jacobian matrix (A) associated with
-       * operator that do not require assembly across cells, vertices, or
-       * processors.
+       * operator that do not require assembly across processors.
        *
        * @param jacobian Diagonal Jacobian matrix as a field.
        * @param t Current time
        * @param fields Solution fields
        */
-      void integrateJacobianAssembled(pylith::topology::Field<pylith::topology::Mesh>* jacobian,
-				      const double t,
-				      pylith::topology::SolutionFields* const fields);
+      void integrateJacobian(pylith::topology::Field<pylith::topology::Mesh>* jacobian,
+			     const double t,
+			     pylith::topology::SolutionFields* const fields);
 
       /** Adjust solution from solver with lumped Jacobian to match Lagrange
        *  multiplier constraints.


