[cig-commits] r4908 - in long/3D/Gale/trunk/src/StgFEM: . Discretisation/src

walter at geodynamics.org walter at geodynamics.org
Wed Oct 11 13:50:40 PDT 2006


Author: walter
Date: 2006-10-11 13:50:39 -0700 (Wed, 11 Oct 2006)
New Revision: 4908

Modified:
   long/3D/Gale/trunk/src/StgFEM/
   long/3D/Gale/trunk/src/StgFEM/Discretisation/src/FeEquationNumber.c
   long/3D/Gale/trunk/src/StgFEM/Discretisation/src/FeEquationNumber.h
Log:
 r794 at earth:  boo | 2006-10-11 13:49:38 -0700
  r768 at earth (orig r647):  LukeHodkinson | 2006-10-04 18:26:01 -0700
  Added a routine for calculating equation numbers based
  on the new mesh topology structures.
  
 



Property changes on: long/3D/Gale/trunk/src/StgFEM
___________________________________________________________________
Name: svk:merge
   - 38867592-cf10-0410-9e16-a142ea72ac34:/cig:793
db209038-57f2-0310-97fa-b160e0ae9d04:/trunk:646
   + 38867592-cf10-0410-9e16-a142ea72ac34:/cig:794
db209038-57f2-0310-97fa-b160e0ae9d04:/trunk:647

Modified: long/3D/Gale/trunk/src/StgFEM/Discretisation/src/FeEquationNumber.c
===================================================================
--- long/3D/Gale/trunk/src/StgFEM/Discretisation/src/FeEquationNumber.c	2006-10-11 20:50:37 UTC (rev 4907)
+++ long/3D/Gale/trunk/src/StgFEM/Discretisation/src/FeEquationNumber.c	2006-10-11 20:50:39 UTC (rev 4908)
@@ -62,7 +62,7 @@
 const Type FeEquationNumber_Type = "FeEquationNumber";
 
 /** struct to store sub-totals: what is the equation number up to at a given
-	node? These can then be exchanged between processors */
+    node? These can then be exchanged between processors */
 typedef struct CritPointInfo {
 	Node_GlobalIndex	index;
 	Dof_EquationNumber	eqNum;
@@ -70,7 +70,7 @@
 
 
 /** An element of linked list of critical point info. Several of the functions
-	use this to keep track of key points */
+    use this to keep track of key points */
 typedef struct AddListEntry {
 	CritPointInfo*		critPointInfo;
 	struct AddListEntry*	next;
@@ -83,8 +83,8 @@
 } PrintValuesFlag;
 
 /**	MPI datatype handle for efficiently exchanging CritPointInfo structures.
-	\see FeEquationNumber_Create_CritPointInfo_MPI_Datatype() for where this
-	handle is defined. */
+   \see FeEquationNumber_Create_CritPointInfo_MPI_Datatype() for where this
+   handle is defined. */
 MPI_Datatype MPI_critPointInfoType;
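 
 As the comment above says, this handle lets CritPointInfo structs travel through MPI calls directly; the real construction is in FeEquationNumber_Create_CritPointInfo_MPI_Datatype() further down this diff. A minimal, self-contained sketch of the same pattern (illustrative names only, MPI-1 style calls as used in this file):
 
    /* critpoint_mpi_sketch.c: hypothetical standalone illustration, not library code. */
    #include <mpi.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { unsigned index; int eqNum; } PointInfo;

    int main( int argc, char** argv ) {
        MPI_Datatype pointInfoType;
        int          blockLengths[2] = { 1, 1 };
        MPI_Aint     displacements[2];
        MPI_Datatype types[2] = { MPI_UNSIGNED, MPI_INT };
        PointInfo    mine;
        PointInfo*   all;
        int          rank, nProcs, p;

        MPI_Init( &argc, &argv );
        MPI_Comm_rank( MPI_COMM_WORLD, &rank );
        MPI_Comm_size( MPI_COMM_WORLD, &nProcs );

        /* Describe the struct layout to MPI, then commit the new datatype. */
        displacements[0] = 0;
        MPI_Type_extent( MPI_UNSIGNED, &displacements[1] );
        MPI_Type_struct( 2, blockLengths, displacements, types, &pointInfoType );
        MPI_Type_commit( &pointInfoType );

        /* Each rank contributes one record; afterwards every rank holds all of them. */
        mine.index = (unsigned)rank;
        mine.eqNum = rank * 10;
        all = malloc( nProcs * sizeof(PointInfo) );
        MPI_Allgather( &mine, 1, pointInfoType, all, 1, pointInfoType, MPI_COMM_WORLD );

        for( p = 0; p < nProcs; p++ )
            printf( "rank %d sees (index=%u, eqNum=%d)\n", rank, all[p].index, all[p].eqNum );

        free( all );
        MPI_Type_free( &pointInfoType );
        MPI_Finalize();
        return 0;
    }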
 
 /*###### Private Function Declarations ######*/
@@ -92,18 +92,18 @@
 static void _FeEquationNumber_BuildRemappedNodeInfoTable( void* feEquationNumber );
 
 static void _FeEquationNumber_CalculateDomainKnownCriticalPoints( FeEquationNumber* self, Node_DomainIndex nodeDomainCount,
-	CritPointInfo* mySetEnds, Index* const mySetEndsTotal,
-	Node_GlobalIndex* myWantedCriticalPoints, Index* const myWantedCriticalPointsTotal );
+								  CritPointInfo* mySetEnds, Index* const mySetEndsTotal,
+								  Node_GlobalIndex* myWantedCriticalPoints, Index* const myWantedCriticalPointsTotal );
 	
 static void _FeEquationNumber_HandleNode( FeEquationNumber* self, const Node_DomainIndex dNode_I,
-	Dof_EquationNumber* const currEqNum );
+					  Dof_EquationNumber* const currEqNum );
 	
 static void _FeEquationNumber_PostProcessLinkedDofs( FeEquationNumber* self );
 
 static void _FeEquationNumber_CalculateCritPointsIHave( FeEquationNumber* self,
-	Node_GlobalIndex** const myWantedCriticalPointsPtr, Node_GlobalIndex myWantedCriticalPointsTotal,
-	CritPointInfo** const critPointsIHave, Index* const critPointsIHaveTotal,
-	CritPointInfo* const critPointsToSend, Index* const critPointsToSendTotal );
+							Node_GlobalIndex** const myWantedCriticalPointsPtr, Node_GlobalIndex myWantedCriticalPointsTotal,
+							CritPointInfo** const critPointsIHave, Index* const critPointsIHaveTotal,
+							CritPointInfo* const critPointsToSend, Index* const critPointsToSendTotal );
 	
 static void _FeEquationNumber_ShareCriticalPoints(
 	FeEquationNumber* self,
@@ -123,12 +123,12 @@
 	PrintValuesFlag printValuesFlag );
 
 static void _FeEquationNumber_DoPartialTotals( FeEquationNumber* self,
-	CritPointInfo* const critPointsIHave, Index critPointsIHaveTotal,
-	CritPointInfo* const critPointsToSend, Index critPointsToSendTotal );
+					       CritPointInfo* const critPointsIHave, Index critPointsIHaveTotal,
+					       CritPointInfo* const critPointsToSend, Index critPointsToSendTotal );
 
 static void _FeEquationNumber_AddAllPartialTotals( FeEquationNumber* self, CritPointInfo* mySubTotals,
-	Index myCritPointInfoTotal, CritPointInfo* allSubTotals, Index* procCritPointInfoTotals,
-	Index maxSubTotalsPerProc );
+						   Index myCritPointInfoTotal, CritPointInfo* allSubTotals, Index* procCritPointInfoTotals,
+						   Index maxSubTotalsPerProc );
 
 Node_RemappedGlobalIndex _FeEquationNumber_RemapNode( 
 	HexaMD* hexaMD,
@@ -136,7 +136,7 @@
 	Node_GlobalIndex gNode_I );
 
 /** Tests if the critical point from another processor is held by ours.
-Is complicated by the possibility of remapping. */
+    Is complicated by the possibility of remapping. */
 static Bool _FeEquationNumber_IHaveCritPoint(
 	FeEquationNumber* self,
 	Node_RemappedGlobalIndex critPoint );
@@ -147,37 +147,37 @@
 void* FeEquationNumber_DefaultNew( Name name )
 {
 	return _FeEquationNumber_New( sizeof(FeEquationNumber), FeEquationNumber_Type, _FeEquationNumber_Delete,
-		_FeEquationNumber_Print, _FeEquationNumber_Copy,
-		(Stg_Component_DefaultConstructorFunction*)FeEquationNumber_DefaultNew,
-		_FeEquationNumber_Construct, (Stg_Component_BuildFunction*)_FeEquationNumber_Build, 
-		(Stg_Component_InitialiseFunction*)_FeEquationNumber_Initialise,
-		_FeEquationNumber_Execute, _FeEquationNumber_Destroy, name, False, NULL, NULL, NULL, NULL );
+				      _FeEquationNumber_Print, _FeEquationNumber_Copy,
+				      (Stg_Component_DefaultConstructorFunction*)FeEquationNumber_DefaultNew,
+				      _FeEquationNumber_Construct, (Stg_Component_BuildFunction*)_FeEquationNumber_Build, 
+				      (Stg_Component_InitialiseFunction*)_FeEquationNumber_Initialise,
+				      _FeEquationNumber_Execute, _FeEquationNumber_Destroy, name, False, NULL, NULL, NULL, NULL );
 }
 
 FeEquationNumber* FeEquationNumber_New(
-		Name						name,
-		void* 						mesh,
-		DofLayout*					dofLayout,
-		VariableCondition*				bcs,
-		LinkedDofInfo*					linkedDofInfo )
+	Name						name,
+	void* 						mesh,
+	DofLayout*					dofLayout,
+	VariableCondition*				bcs,
+	LinkedDofInfo*					linkedDofInfo )
 {
 	return _FeEquationNumber_New( sizeof(FeEquationNumber), FeEquationNumber_Type, _FeEquationNumber_Delete,
-		_FeEquationNumber_Print, _FeEquationNumber_Copy, 
-		(Stg_Component_DefaultConstructorFunction*)FeEquationNumber_DefaultNew,
-		_FeEquationNumber_Construct, (Stg_Component_BuildFunction*)_FeEquationNumber_Build, 
-		(Stg_Component_InitialiseFunction*)_FeEquationNumber_Initialise,
-		_FeEquationNumber_Execute, _FeEquationNumber_Destroy, name, True, mesh, dofLayout, bcs, linkedDofInfo );
+				      _FeEquationNumber_Print, _FeEquationNumber_Copy, 
+				      (Stg_Component_DefaultConstructorFunction*)FeEquationNumber_DefaultNew,
+				      _FeEquationNumber_Construct, (Stg_Component_BuildFunction*)_FeEquationNumber_Build, 
+				      (Stg_Component_InitialiseFunction*)_FeEquationNumber_Initialise,
+				      _FeEquationNumber_Execute, _FeEquationNumber_Destroy, name, True, mesh, dofLayout, bcs, linkedDofInfo );
 }
 
 
 /** Constructor for when the FeEquationNumber is already allocated. */
 void FeEquationNumber_Init(
-		FeEquationNumber*				self, 
-		Name						name,
-		void* 						mesh,
-		DofLayout*					dofLayout,
-		VariableCondition*				bcs,
-		LinkedDofInfo*					linkedDofInfo )
+	FeEquationNumber*				self, 
+	Name						name,
+	void* 						mesh,
+	DofLayout*					dofLayout,
+	VariableCondition*				bcs,
+	LinkedDofInfo*					linkedDofInfo )
 {
 	/* General info */
 	self->type = FeEquationNumber_Type;
@@ -206,30 +206,30 @@
 
 /** Constructor implementation. */
 FeEquationNumber* _FeEquationNumber_New(
-		SizeT						_sizeOfSelf, 
-		Type						type,
-		Stg_Class_DeleteFunction*				_delete,
-		Stg_Class_PrintFunction*				_print, 
-		Stg_Class_CopyFunction*				_copy, 
-		Stg_Component_DefaultConstructorFunction*	_defaultConstructor,
-		Stg_Component_ConstructFunction*			_construct,
-		Stg_Component_BuildFunction*		_build,
-		Stg_Component_InitialiseFunction*		_initialise,
-		Stg_Component_ExecuteFunction*		_execute,
-		Stg_Component_DestroyFunction*		_destroy,
-		Name							name,
-		Bool						initFlag,
-		void*						mesh,
-		DofLayout*					dofLayout,
-		VariableCondition*				bcs,
-		LinkedDofInfo*					linkedDofInfo )
+	SizeT						_sizeOfSelf, 
+	Type						type,
+	Stg_Class_DeleteFunction*				_delete,
+	Stg_Class_PrintFunction*				_print, 
+	Stg_Class_CopyFunction*				_copy, 
+	Stg_Component_DefaultConstructorFunction*	_defaultConstructor,
+	Stg_Component_ConstructFunction*			_construct,
+	Stg_Component_BuildFunction*		_build,
+	Stg_Component_InitialiseFunction*		_initialise,
+	Stg_Component_ExecuteFunction*		_execute,
+	Stg_Component_DestroyFunction*		_destroy,
+	Name							name,
+	Bool						initFlag,
+	void*						mesh,
+	DofLayout*					dofLayout,
+	VariableCondition*				bcs,
+	LinkedDofInfo*					linkedDofInfo )
 {
 	FeEquationNumber* self;
 	
 	/* Allocate memory */
 	assert( _sizeOfSelf >= sizeof(FeEquationNumber) );
 	self = (FeEquationNumber*)_Stg_Component_New( _sizeOfSelf, type, _delete, _print, _copy, _defaultConstructor, _construct, _build, 
-			_initialise, _execute, _destroy, name, NON_GLOBAL );
+						      _initialise, _execute, _destroy, name, NON_GLOBAL );
 	
 	/* General info */
 	
@@ -247,13 +247,13 @@
 
 
 /** Constructor variables initialisation. Doesn't allocate any
-	memory, just saves arguments and sets counters to 0. */
+    memory, just saves arguments and sets counters to 0. */
 void _FeEquationNumber_Init(
-		FeEquationNumber*				self, 
-		void*						feMesh,
-		DofLayout*					dofLayout,
-		VariableCondition*				bcs,
-		LinkedDofInfo*					linkedDofInfo )
+	FeEquationNumber*				self, 
+	void*						feMesh,
+	DofLayout*					dofLayout,
+	VariableCondition*				bcs,
+	LinkedDofInfo*					linkedDofInfo )
 {
 	
 	/* General and Virtual info should already be set */
@@ -291,7 +291,7 @@
 	
 }
 	
-	/* Copy */
+/* Copy */
 
 /** Stg_Class_Delete implementation. */
 void _FeEquationNumber_Delete( void* feEquationNumber ) {
@@ -482,23 +482,29 @@
 
 void _FeEquationNumber_Build( void* feEquationNumber ) {
 	FeEquationNumber* self = (FeEquationNumber*) feEquationNumber;
-	#if DEBUG
+#if DEBUG
 	assert(self);
-	#endif
+#endif
 	
 	Journal_DPrintf( self->debug, "In %s:\n",  __func__ );
 	Stream_IndentBranch( StG_FEM_Debug );
 
-	_FeEquationNumber_BuildRemappedNodeInfoTable( self );
-	_FeEquationNumber_BuildDestinationArray( self );
-	_FeEquationNumber_CalculateGlobalUnconstrainedDofTotal( self );
-	_FeEquationNumber_CalculateEqNumsDecomposition( self );
+	/* If we have new mesh topology information, do this differently. */
+	if( self->feMesh->topo->domains && self->feMesh->topo->domains[MT_VERTEX] ) {
+		FeEquationNumber_BuildWithTopology( self );
+	}
+	else {
+		_FeEquationNumber_BuildRemappedNodeInfoTable( self );
+		_FeEquationNumber_BuildDestinationArray( self );
+		_FeEquationNumber_CalculateGlobalUnconstrainedDofTotal( self );
+		_FeEquationNumber_CalculateEqNumsDecomposition( self );
+	}
 
-	#if DEBUG
+#if DEBUG
 	if ( Stream_IsPrintableLevel( self->debug, 3 ) ) {
 		FeEquationNumber_PrintDestinationArray( self, self->debug );
 	}
-	#endif
+#endif
 	
 	Stream_UnIndentBranch( StG_FEM_Debug );
 }
@@ -513,10 +519,10 @@
 
 /** Initialise implementation. Currently does nothing. */
 void _FeEquationNumber_Initialise( void* feEquationNumber ) {
-	#if DEBUG
+#if DEBUG
 	FeEquationNumber* self = (FeEquationNumber*) feEquationNumber;
 	assert(self);
-	#endif
+#endif
 }
 
 
@@ -553,15 +559,15 @@
 	Node_DomainIndex nodeDomainCount = meshDecomp->nodeDomainCount;
 	Node_GlobalIndex gNode_I = 0;
 	Node_DomainIndex dNode_I = 0;
-	#if DEBUG
+#if DEBUG
 	RemappedNodeInfo_Index rNodeInfo_I;
-	#endif
+#endif
 	
 	Journal_DPrintfL( self->debug, 1, "In %s:\n",  __func__ );
 	Stream_IndentBranch( StG_FEM_Debug );
 
 	self->remappedNodeInfos = Memory_Alloc_Array( RemappedNodeInfo, nodeDomainCount, 
-		"FeEquationNumber->remappedNodeInfos" );
+						      "FeEquationNumber->remappedNodeInfos" );
 	
 	if ( 1 == meshDecomp->procsInUse ) {
 		Journal_DPrintfL( self->debug, 1, "Serial code: No remapping required...filling remapping table with normal values.\n" );
@@ -576,7 +582,7 @@
 		RemappedNodeInfo_Index	insertIndex;
 	
 		Journal_DPrintfL( self->debug, 1, "Parallel, but non-hexa decomp: building the remapping table by "
-			"insertion-sorting based on global node number order.\n" );
+				  "insertion-sorting based on global node number order.\n" );
 		for (dNode_I = 0; dNode_I < nodeDomainCount; dNode_I++ ) {
 			gNode_I = mesh->nodeD2G[dNode_I];
 
@@ -590,7 +596,7 @@
 				insertIndex = 0;
 				while ( gNode_I > self->remappedNodeInfos[insertIndex].remappedGlobal ) insertIndex++;
 				memmove( &self->remappedNodeInfos[insertIndex+1], &self->remappedNodeInfos[insertIndex],
-					sizeof(RemappedNodeInfo) * (dNode_I - insertIndex + 1) );
+					 sizeof(RemappedNodeInfo) * (dNode_I - insertIndex + 1) );
 				self->remappedNodeInfos[insertIndex].remappedGlobal = gNode_I;
 				self->remappedNodeInfos[insertIndex].domain = dNode_I;
 			}
@@ -604,8 +610,8 @@
 		RemappedNodeInfo_Index	insertIndex = 0;
 	
 		Journal_DPrintfL( self->debug, 1, "Parallel, hexa decomp but multiple partition dims:\n"
-			"filling remapping table with insertion sort\n"
-			"of local & shadow elements.\n" );
+				  "filling remapping table with insertion sort\n"
+				  "of local & shadow elements.\n" );
 		
 		if ( meshDecomp->shadowDepth > 0 ) {
 			nextShadowGlobalMap = mesh->nodeD2G[sNode_I];
@@ -639,11 +645,11 @@
 			self->remappedNodeInfos[insertIndex++].domain = sNode_I;
 		}
 		Journal_Firewall( (insertIndex == mesh->nodeDomainCount), self->debug,
-			"Stuffup: should have inserted exactly the right number of values by here.\n" );
+				  "Stuffup: should have inserted exactly the right number of values by here.\n" );
 	}
 	else {
 		/* Otherwise, we remap each of the global node values, so the eqNums
-		will cause minimal communication during mesh assembly */
+		   will cause minimal communication during mesh assembly */
 		Node_RemappedGlobalIndex currRemappedGNode_I = 0;
 		Node_RemappedGlobalIndex firstRemappedGNode_I = 0;
 		Index newDimOrder[3] = {0,0,0};
@@ -656,10 +662,10 @@
 		self->remappingActivated = True;
 
 		Journal_DPrintfL( self->debug, 1, "Parallel, hexa decomp with 1 partition dim:\n"
-			"remapping eqNum traversal order to be aligned with mesh decomposition:\n" );
+				  "remapping eqNum traversal order to be aligned with mesh decomposition:\n" );
 
 		/* Work out the new dimension order for the EqNums. The partitioned index
-		goes last */
+		   goes last */
 		for ( dim_I = I_AXIS; dim_I < 3; dim_I++ ) {
 			if (True == ((HexaMD*)meshDecomp)->partitionedAxis[dim_I]) {
 				partitionedAxis = dim_I;
@@ -669,25 +675,25 @@
 			}
 		}
 		Journal_DPrintfL( self->debug, 1, "Given partition axis is %c, calc. remapping dimension order as %c,%c,%c\n",
-			IJKTopology_DimNumToDimLetter[partitionedAxis],
-			IJKTopology_DimNumToDimLetter[newDimOrder[0]],
-			IJKTopology_DimNumToDimLetter[newDimOrder[1]],
-			IJKTopology_DimNumToDimLetter[newDimOrder[2]] );
+				  IJKTopology_DimNumToDimLetter[partitionedAxis],
+				  IJKTopology_DimNumToDimLetter[newDimOrder[0]],
+				  IJKTopology_DimNumToDimLetter[newDimOrder[1]],
+				  IJKTopology_DimNumToDimLetter[newDimOrder[2]] );
 
 		Journal_DPrintfL( self->debug, 4, "Calculating over %d domain nodes, %d local, %d shadow.\n",
-			mesh->nodeDomainCount, mesh->nodeLocalCount, mesh->nodeShadowCount );
+				  mesh->nodeDomainCount, mesh->nodeLocalCount, mesh->nodeShadowCount );
 
 		/* Work out the lowest remapped global index (so we know where to insert into the table) */
 		gNode_I = mesh->nodeD2G[0];
 		firstRemappedGNode_I = _FeEquationNumber_RemapNode( (HexaMD*)meshDecomp, newDimOrder, gNode_I );
 		/* if shadow elements are enabled, check if the first shadow element has lower
-		global index than first local element */
+		   global index than first local element */
 		if ( meshDecomp->shadowDepth > 0 ) {
 			Node_RemappedGlobalIndex firstRemappedShadowGNode_I;
 		
 			gNode_I = mesh->nodeD2G[mesh->nodeLocalCount];
 			firstRemappedShadowGNode_I = _FeEquationNumber_RemapNode( (HexaMD*)meshDecomp,
-				newDimOrder, gNode_I );
+										  newDimOrder, gNode_I );
 			
 			if ( firstRemappedShadowGNode_I < firstRemappedGNode_I ) {
 				firstRemappedGNode_I = firstRemappedShadowGNode_I;
@@ -695,7 +701,7 @@
 			}
 		}
 		Journal_DPrintfL( self->debug, 4, "Calculated lowest remapped global index is %d",
-			firstRemappedGNode_I );
+				  firstRemappedGNode_I );
 		if ( lowestIsShadow ) {
 			Journal_DPrintfL( self->debug, 4, " (a shadow node.)\n" );
 		}
@@ -712,22 +718,22 @@
 			self->remappedNodeInfos[insertIndex].remappedGlobal = currRemappedGNode_I;
 			self->remappedNodeInfos[insertIndex].domain = dNode_I;
 			Journal_DPrintfL( self->debug, 4, "Processing domain node %d: original global = %d, remaps to %d.\n",
-				dNode_I, gNode_I, currRemappedGNode_I );
+					  dNode_I, gNode_I, currRemappedGNode_I );
 		}	
 		Stream_UnIndentBranch( StG_FEM_Debug );
 	}
 
-	#if DEBUG
+#if DEBUG
 	if ( Stream_IsPrintableLevel( self->debug, 3 ) ) {
 		Journal_DPrintf( self->debug, "Calculated remapped node info table as:\n{\n" );
 		for (rNodeInfo_I = 0; rNodeInfo_I < nodeDomainCount; rNodeInfo_I++ ) {
 			Journal_DPrintf( self->debug, " %d:(remap=%d,domain=%d),\n", rNodeInfo_I,
-				self->remappedNodeInfos[rNodeInfo_I].remappedGlobal,
-				self->remappedNodeInfos[rNodeInfo_I].domain );
+					 self->remappedNodeInfos[rNodeInfo_I].remappedGlobal,
+					 self->remappedNodeInfos[rNodeInfo_I].domain );
 		}
 		Journal_DPrintf( self->debug, "}\n");
 	}
-	#endif
+#endif
 	Stream_UnIndentBranch( StG_FEM_Debug );
 }	
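
 The remapping table above is kept sorted by repeatedly opening a gap with memmove and dropping the new entry in, which is hard to follow in diff form. A stripped-down sketch of that insertion idiom (toy data, not the routine itself):

    #include <stdio.h>
    #include <string.h>

    /* Insert 'value' into sorted[0..count-1], keeping ascending order.
       The caller must ensure capacity for count+1 entries. */
    static void insertSorted( unsigned* sorted, unsigned count, unsigned value ) {
        unsigned insertIndex = 0;

        while( insertIndex < count && sorted[insertIndex] < value )
            insertIndex++;
        /* Shift the tail up one slot to open a gap, then drop the value in. */
        memmove( &sorted[insertIndex + 1], &sorted[insertIndex],
                 (count - insertIndex) * sizeof(unsigned) );
        sorted[insertIndex] = value;
    }

    int main( void ) {
        unsigned table[5];
        unsigned incoming[5] = { 42, 7, 19, 3, 25 };
        unsigned i;

        for( i = 0; i < 5; i++ )
            insertSorted( table, i, incoming[i] );
        for( i = 0; i < 5; i++ )
            printf( "%u ", table[i] );   /* prints: 3 7 19 25 42 */
        printf( "\n" );
        return 0;
    }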
 
@@ -746,20 +752,20 @@
 	Stream_IndentBranch( StG_FEM_Debug );
 
 	Journal_Firewall( ( nodeDomainCount == self->dofLayout->_numItemsInLayout ),
-		errorStream, "Error: In %s: DofLayout's size is %d, which isn't equal to "
-		"Node domain count of %d. Did you create the dofLayout of size node local count? "
-		"It should be set up to size node domain count.\n",
-		__func__, self->dofLayout->_numItemsInLayout, nodeDomainCount );
+			  errorStream, "Error: In %s: DofLayout's size is %d, which isn't equal to "
+			  "Node domain count of %d. Did you create the dofLayout of size node local count? "
+			  "It should be set up to size node domain count.\n",
+			  __func__, self->dofLayout->_numItemsInLayout, nodeDomainCount );
 		
 
 	self->destinationArray = Memory_Alloc_2DComplex( Dof_EquationNumber, nodeDomainCount,
-		self->dofLayout->dofCounts, "FeEquationNumber->destinationArray" );
+							 self->dofLayout->dofCounts, "FeEquationNumber->destinationArray" );
 
 
 	if (meshDecomp->procsInUse == 1)
 	{
 		/* for serial jobs, don't worry about the critical points, just calculate
-		the totals normally */
+		   the totals normally */
 		critPointsIHave = Memory_Alloc( CritPointInfo, "critPointsIHave (BuildDestinationArray)" );
 		critPointsToSend = Memory_Alloc( CritPointInfo, "critPointsToSend (BuildDestinationArray)" );
 		critPointsIHave[0].index = meshDecomp->nodeGlobalCount;
@@ -775,44 +781,44 @@
 		Index maxSendCritPointsPerProc = 0;
 
 		critPointsIHave = Memory_Alloc_Array( CritPointInfo, nodeDomainCount,
-			"critPointsIHave (BuildDestinationArray)" );
+						      "critPointsIHave (BuildDestinationArray)" );
 		critPointsToSend = Memory_Alloc_Array( CritPointInfo, nodeDomainCount,
-			"critPointsToSend (BuildDestinationArray)" );
+						       "critPointsToSend (BuildDestinationArray)" );
 		myWantedCriticalPoints = Memory_Alloc_Array( Node_GlobalIndex, nodeDomainCount,
-			"myWantedCriticalPoints (BuildDestinationArray)" );
+							     "myWantedCriticalPoints (BuildDestinationArray)" );
 
 		/* Go through all domain nodes, work out end of runs, and interfaces needed */
 		_FeEquationNumber_CalculateDomainKnownCriticalPoints( self, nodeDomainCount, critPointsIHave, &critPointsIHaveTotal,
-		myWantedCriticalPoints, &myWantedCriticalPointsTotal );
+								      myWantedCriticalPoints, &myWantedCriticalPointsTotal );
 	
 		/* initialise the below to the set end critical nodes I already worked out I have */
 		critPointsToSend = (CritPointInfo*) memcpy( critPointsToSend, critPointsIHave, (critPointsIHaveTotal * sizeof(CritPointInfo)) );
 		critPointsToSendTotal = critPointsIHaveTotal;
 		
 		/* work out critical points (inc. those needed by others) I hold, and those to send.
-		The list I have could be greater than my end of runs, in an irregular mesh case */
+		   The list I have could be greater than my end of runs, in an irregular mesh case */
 		_FeEquationNumber_CalculateCritPointsIHave( self, 
-			&myWantedCriticalPoints, myWantedCriticalPointsTotal, &critPointsIHave, &critPointsIHaveTotal,
-			critPointsToSend, &critPointsToSendTotal );
+							    &myWantedCriticalPoints, myWantedCriticalPointsTotal, &critPointsIHave, &critPointsIHaveTotal,
+							    critPointsToSend, &critPointsToSendTotal );
 	
 		Memory_Free( myWantedCriticalPoints );
 
 		/* OK: We now know all the critical points on this processor, so go ahead and work out the partial ID number
-		at all of them. */
+		   at all of them. */
 
 		Journal_DPrintfL( self->debug, 2, "Looping through nodes again, calculating eq nums (re-setting to 0 at critical points)\n,"
-			"and storing subtotal of each each critical node:\n" );
+				  "and storing subtotal of each each critical node:\n" );
 		_FeEquationNumber_DoPartialTotals( self, critPointsIHave, critPointsIHaveTotal,
-			critPointsToSend, critPointsToSendTotal );
+						   critPointsToSend, critPointsToSendTotal );
 
 		Journal_DPrintfL( self->debug, 2, "Now sub-totals have been calculated, share all of my critical point sub-totals "
-			"and get any that I need:\n" );
+				  "and get any that I need:\n" );
 		_FeEquationNumber_ShareCritPointInfo( self, &critPointsToSend, critPointsToSendTotal,
-			&allSendCriticalPoints, &procSendCritPointsTotals, &maxSendCritPointsPerProc, PRINT_VALUES );
+						      &allSendCriticalPoints, &procSendCritPointsTotals, &maxSendCritPointsPerProc, PRINT_VALUES );
 
 		Journal_DPrintfL( self->debug, 2, "Final loop: add the sub-totals at all the critical points to the existing values:\n" );
 		_FeEquationNumber_AddAllPartialTotals( self, critPointsIHave, critPointsIHaveTotal,
-			allSendCriticalPoints, procSendCritPointsTotals, maxSendCritPointsPerProc );
+						       allSendCriticalPoints, procSendCritPointsTotals, maxSendCritPointsPerProc );
 
 		/* Post process the linked dofs */
 		if ( self->linkedDofInfo ) {
@@ -857,7 +863,7 @@
 
 
 /** Works out 'critical nodes' I know from domain information (those I want are the start
-	of my runs -1, those I know others want are the end of my runs). */
+    of my runs -1, those I know others want are the end of my runs). */
 static void _FeEquationNumber_CalculateDomainKnownCriticalPoints(
 	FeEquationNumber* self,
 	Node_DomainIndex nodeDomainCount,
@@ -875,18 +881,18 @@
 
 	if ( self->remappingActivated ) {
 		/* if remapping has been done, the only possible crit points are the first and last
-		points */
+		   points */
 		remappedGlobal_I = self->remappedNodeInfos[0].remappedGlobal;
 		if (remappedGlobal_I != 0) {
 			Journal_DPrintfL( self->debug, 3, "Adding interface c.p. to myWantedCriticalPoints[%d] = %d\n", 
-				(*myWantedCriticalPointsTotal), remappedGlobal_I-1 );
+					  (*myWantedCriticalPointsTotal), remappedGlobal_I-1 );
 			myWantedCriticalPoints[(*myWantedCriticalPointsTotal)++] = remappedGlobal_I-1;
 		}	
 
 		remappedGlobal_I = self->remappedNodeInfos[nodeDomainCount-1].remappedGlobal;
 		if ( remappedGlobal_I != (meshDecomp->nodeGlobalCount-1) ) {
 			Journal_DPrintfL( self->debug, 3, "Adding set end c.p. mySetEnds[%d] = %d\n", 
-				(*mySetEndsTotal), remappedGlobal_I );
+					  (*mySetEndsTotal), remappedGlobal_I );
 			mySetEnds[(*mySetEndsTotal)++].index = remappedGlobal_I;
 		}
 		Stream_UnIndentBranch( StG_FEM_Debug );
@@ -899,7 +905,7 @@
 
 		if (remappedGlobal_I != 0) {
 			Journal_DPrintfL( self->debug, 3, "Adding interface c.p. to myWantedCriticalPoints[%d] = %d\n", 
-				(*myWantedCriticalPointsTotal), remappedGlobal_I-1 );
+					  (*myWantedCriticalPointsTotal), remappedGlobal_I-1 );
 			myWantedCriticalPoints[(*myWantedCriticalPointsTotal)++] = remappedGlobal_I-1;
 		}	
 		rNodeInfo_I++;
@@ -907,11 +913,11 @@
 		/* skip to end of contiguous set */
 		while ( (rNodeInfo_I < nodeDomainCount)
 			&& (self->remappedNodeInfos[rNodeInfo_I].remappedGlobal ==
-				(self->remappedNodeInfos[rNodeInfo_I-1].remappedGlobal + 1) ) )
+			    (self->remappedNodeInfos[rNodeInfo_I-1].remappedGlobal + 1) ) )
 		{
 			Journal_DPrintfL( self->debug, 4, "skipping remapped gNode[%d] domain = %d\n", 
-				self->remappedNodeInfos[rNodeInfo_I].remappedGlobal,
-				self->remappedNodeInfos[rNodeInfo_I].domain );
+					  self->remappedNodeInfos[rNodeInfo_I].remappedGlobal,
+					  self->remappedNodeInfos[rNodeInfo_I].domain );
 			rNodeInfo_I++;
 		}	
 
@@ -919,12 +925,12 @@
 		remappedGlobal_I = self->remappedNodeInfos[rNodeInfo_I-1].remappedGlobal;
 		if ( remappedGlobal_I != (meshDecomp->nodeGlobalCount-1) ) {
 			Journal_DPrintfL( self->debug, 3, "Adding set end c.p. mySetEnds[%d] = %d\n", 
-				(*mySetEndsTotal), remappedGlobal_I );
+					  (*mySetEndsTotal), remappedGlobal_I );
 			mySetEnds[(*mySetEndsTotal)++].index = remappedGlobal_I;
 		}
 	}
 
-	#if DEBUG
+#if DEBUG
 	if ( Stream_IsPrintableLevel( self->debug, 2 ) ) {
 		Index cPoint_I;
 
@@ -940,17 +946,17 @@
 		}
 		Journal_DPrintf( self->debug, "]\n" );
 	}
-	#endif
+#endif
 	Stream_UnIndentBranch( StG_FEM_Debug );
 }
 
 
 /* work out critical points (inc. those needed by others) I hold, and those to send.
-	The list I have could be greater than my end of runs, in an irregular mesh case */
+   The list I have could be greater than my end of runs, in an irregular mesh case */
 static void _FeEquationNumber_CalculateCritPointsIHave( FeEquationNumber* self,
-	Node_GlobalIndex** const myWantedCriticalPointsPtr, Node_GlobalIndex myWantedCriticalPointsTotal,
-	CritPointInfo** const critPointsIHavePtr, Index* const critPointsIHaveTotal,
-	CritPointInfo* const critPointsToSend, Index* const critPointsToSendTotal )
+							Node_GlobalIndex** const myWantedCriticalPointsPtr, Node_GlobalIndex myWantedCriticalPointsTotal,
+							CritPointInfo** const critPointsIHavePtr, Index* const critPointsIHaveTotal,
+							CritPointInfo* const critPointsToSend, Index* const critPointsToSendTotal )
 {
 	MeshDecomp* meshDecomp = self->feMesh->layout->decomp;
 	Partition_Index proc_I; 
@@ -968,11 +974,11 @@
 
 	Journal_DPrintfL( self->debug, 2, "Globally sharing locally known wanted C.N. lists:\n",  __func__ );
 	_FeEquationNumber_ShareCriticalPoints( self, myWantedCriticalPointsPtr, myWantedCriticalPointsTotal,
-		 &allWantedCriticalPoints, &procWantedCritPointsTotals, &maxWantedCritPointsPerProc );
+					       &allWantedCriticalPoints, &procWantedCritPointsTotals, &maxWantedCritPointsPerProc );
 
 	Journal_DPrintfL( self->debug, 2, "Globally sharing locally known 'have' C.N. lists:\n",  __func__ );
 	_FeEquationNumber_ShareCritPointInfo( self, critPointsIHavePtr, (*critPointsIHaveTotal),
-		&allHaveCriticalPoints, &procHaveCritPointsTotals, &maxHaveCritPointsPerProc, DONT_PRINT_VALUES );
+					      &allHaveCriticalPoints, &procHaveCritPointsTotals, &maxHaveCritPointsPerProc, DONT_PRINT_VALUES );
 
 	/* now we build our lists of crit points on my processor, and crit points on my processor to send to others */
 	for (proc_I = 0; proc_I < meshDecomp->nproc; proc_I++) {
@@ -1000,12 +1006,12 @@
 				}
 				else if ( otherCritPoint != (*critPointsIHavePtr)[myCritPoint_I].index ) {
 					memmove( &((*critPointsIHavePtr)[myCritPoint_I+1]),
-						&((*critPointsIHavePtr)[myCritPoint_I]),
-						((*critPointsIHaveTotal) - myCritPoint_I) * sizeof(CritPointInfo) );
+						 &((*critPointsIHavePtr)[myCritPoint_I]),
+						 ((*critPointsIHaveTotal) - myCritPoint_I) * sizeof(CritPointInfo) );
 					(*critPointsIHavePtr)[myCritPoint_I].index = otherCritPoint;
 					(*critPointsIHaveTotal)++;
 					memmove( &critPointsToSend[myCritPoint_I+1], &critPointsToSend[myCritPoint_I],
-						((*critPointsToSendTotal) - myCritPoint_I) * sizeof(CritPointInfo) );
+						 ((*critPointsToSendTotal) - myCritPoint_I) * sizeof(CritPointInfo) );
 					critPointsToSend[myCritPoint_I].index = otherCritPoint;
 					(*critPointsToSendTotal)++;
 				}
@@ -1039,8 +1045,8 @@
 				}
 				else if ( otherCritPoint != (*critPointsIHavePtr)[myCritPoint_I].index ) {
 					memmove( &((*critPointsIHavePtr)[myCritPoint_I+1]),
-						&((*critPointsIHavePtr)[myCritPoint_I]),
-						((*critPointsIHaveTotal) - myCritPoint_I) * sizeof(CritPointInfo) );
+						 &((*critPointsIHavePtr)[myCritPoint_I]),
+						 ((*critPointsIHaveTotal) - myCritPoint_I) * sizeof(CritPointInfo) );
 					(*critPointsIHavePtr)[myCritPoint_I].index = otherCritPoint;
 					(*critPointsIHaveTotal)++;
 				}
@@ -1049,7 +1055,7 @@
 			}	
 		}		
 	}			
-	#if DEBUG
+#if DEBUG
 	if ( Stream_IsPrintableLevel( self->debug, 2 ) ) {
 		Journal_DPrintf( self->debug, "Calculated crit points I Have:\n[" );
 		for ( myCritPoint_I = 0; myCritPoint_I < (*critPointsIHaveTotal); myCritPoint_I++ ) {
@@ -1061,7 +1067,7 @@
 		}
 		Journal_DPrintf( self->debug, "]\n");
 	}	
-	#endif
+#endif
 	
 	Memory_Free( allWantedCriticalPoints );
 	Memory_Free( procWantedCritPointsTotals );
@@ -1082,7 +1088,7 @@
 	/* Case 1: for remapped hexa mesh, just check if it's between the max and min numbers */
 	if ( self->remappingActivated ) {
 		if ( ( self->remappedNodeInfos[0].remappedGlobal <= critPoint ) &&
-			( critPoint <= self->remappedNodeInfos[nodeDomainCount-1].remappedGlobal ) )
+		     ( critPoint <= self->remappedNodeInfos[nodeDomainCount-1].remappedGlobal ) )
 		{
 			return True;
 		}
@@ -1096,7 +1102,7 @@
 			return True;
 		}
 		else if ( (meshDecomp->shadowDepth > 0) 
-			&& IndexSet_IsMember( meshDecomp->shadowNodeSets[myRank], critPoint ) ) {
+			  && IndexSet_IsMember( meshDecomp->shadowNodeSets[myRank], critPoint ) ) {
 			return True;
 		}
 		else {
@@ -1107,7 +1113,7 @@
 
 
 /** Performs an AllGather on a set of critical points: Each processor will afterwards have all the critical points
-	of all the processors, in one large array. The user must then index into the array by processor carefully. */
+    of all the processors, in one large array. The user must then index into the array by processor carefully. */
 static void _FeEquationNumber_ShareCriticalPoints(
 	FeEquationNumber* self,
 	Node_GlobalIndex** const myCriticalPoints,
@@ -1118,16 +1124,16 @@
 {
 	MeshDecomp* meshDecomp = self->feMesh->layout->decomp;
 	Partition_Index proc_I; 
-	#if DEBUG
+#if DEBUG
 	Index point_I = 0;
-	#endif
+#endif
 
 	Journal_DPrintf( self->debug, "In %s\n", __func__ );
 	Stream_IndentBranch( StG_FEM_Debug );
 	
 	(*maxCritPointsPerProc) = 0;
 	
-	#if DEBUG
+#if DEBUG
 	if ( Stream_IsPrintableLevel( self->debug, 2 ) ) {
 		Journal_DPrintf( self->debug, "myCriticalPointsTotal=%u\n",  myCriticalPointsTotal);
 		Journal_DPrintf( self->debug, "myCritPoints:(" );
@@ -1137,59 +1143,59 @@
 		}	
 		Journal_DPrintf( self->debug, ")\n");
 	}	
-	#endif
+#endif
 
 	/* First, we allocate and calculate how many critical points are on each node (this way, we don't have to guess
-	how much memory to allocate in the main array.) */
+	   how much memory to allocate in the main array.) */
 	(*procCritPointsTotals) = Memory_Alloc_Array( Node_GlobalIndex, meshDecomp->nproc,
-			"*procCritPointsTotals (ShareCriticalPoints)" ); 
+						      "*procCritPointsTotals (ShareCriticalPoints)" ); 
 	
 	MPI_Allgather( &myCriticalPointsTotal, 1, MPI_UNSIGNED,
-		(*procCritPointsTotals), 1, MPI_UNSIGNED,
-		meshDecomp->communicator );
+		       (*procCritPointsTotals), 1, MPI_UNSIGNED,
+		       meshDecomp->communicator );
 	
 	for (proc_I = 0; proc_I < meshDecomp->nproc; proc_I++) {
 		if ( (*procCritPointsTotals)[proc_I] > (*maxCritPointsPerProc) )
 			(*maxCritPointsPerProc) = (*procCritPointsTotals)[proc_I];
 	}
 
-	#if DEBUG
-		Journal_DPrintfL( self->debug, 2, "procCritPoints totals:(");
-		for (proc_I = 0; proc_I < meshDecomp->nproc; proc_I++) {
-			Journal_DPrintfL( self->debug, 2, "%d, ", (*procCritPointsTotals)[proc_I]);
-		}
-		Journal_DPrintfL( self->debug, 2, ")\n");
-		Journal_DPrintfL( self->debug, 2, "MaxCritPointsPerProc = %d\n", (*maxCritPointsPerProc));
-	#endif
+#if DEBUG
+	Journal_DPrintfL( self->debug, 2, "procCritPoints totals:(");
+	for (proc_I = 0; proc_I < meshDecomp->nproc; proc_I++) {
+		Journal_DPrintfL( self->debug, 2, "%d, ", (*procCritPointsTotals)[proc_I]);
+	}
+	Journal_DPrintfL( self->debug, 2, ")\n");
+	Journal_DPrintfL( self->debug, 2, "MaxCritPointsPerProc = %d\n", (*maxCritPointsPerProc));
+#endif
 
 	/* Now share the actual point values */
 	(*myCriticalPoints) = Memory_Realloc_Array( (*myCriticalPoints), Node_GlobalIndex, *maxCritPointsPerProc );
 	(*allCriticalPoints) = Memory_Alloc_Array( Node_GlobalIndex, meshDecomp->nproc * (*maxCritPointsPerProc),
-		"*allCriticalPoints (ShareCriticalPoints)" );
+						   "*allCriticalPoints (ShareCriticalPoints)" );
 
 	MPI_Allgather( (*myCriticalPoints), (*maxCritPointsPerProc), MPI_UNSIGNED,
-		(*allCriticalPoints), (*maxCritPointsPerProc), MPI_UNSIGNED,
-		meshDecomp->communicator );
+		       (*allCriticalPoints), (*maxCritPointsPerProc), MPI_UNSIGNED,
+		       meshDecomp->communicator );
 
-	#if DEBUG
-		Journal_DPrintfL( self->debug, 2, "procCritPoints:(" );
-		for (proc_I = 0; proc_I < meshDecomp->nproc; proc_I++) {
-			for ( point_I = 0; point_I < (*procCritPointsTotals)[proc_I]; point_I++)
-			{
-				Journal_DPrintfL( self->debug, 2, "%u, ", 
-					(*allCriticalPoints)[(*maxCritPointsPerProc)*proc_I + point_I]);
-			}	
-		}
-		Journal_DPrintfL( self->debug, 2, ")\n");
-	#endif
+#if DEBUG
+	Journal_DPrintfL( self->debug, 2, "procCritPoints:(" );
+	for (proc_I = 0; proc_I < meshDecomp->nproc; proc_I++) {
+		for ( point_I = 0; point_I < (*procCritPointsTotals)[proc_I]; point_I++)
+		{
+			Journal_DPrintfL( self->debug, 2, "%u, ", 
+					  (*allCriticalPoints)[(*maxCritPointsPerProc)*proc_I + point_I]);
+		}	
+	}
+	Journal_DPrintfL( self->debug, 2, ")\n");
+#endif
 	Stream_UnIndentBranch( StG_FEM_Debug );
 }
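
 Because every rank contributes maxCritPointsPerProc padded slots to the Allgather, processor p's points are recovered by striding the flat result exactly as the debug loop above does: allCriticalPoints[maxCritPointsPerProc*p + i] for i below that processor's total. The same access pattern in isolation, with made-up counts:

    #include <stdio.h>

    int main( void ) {
        /* Hypothetical gather of 3 procs, padded to the largest contribution (3). */
        unsigned nProcs = 3, maxPerProc = 3;
        unsigned totals[3] = { 2, 3, 1 };        /* real entries per proc  */
        unsigned all[9]    = { 5, 9, 0,          /* proc 0 (last is pad)   */
                               1, 4, 8,          /* proc 1                 */
                               6, 0, 0 };        /* proc 2 (two pads)      */
        unsigned p, i;

        for( p = 0; p < nProcs; p++ )
            for( i = 0; i < totals[p]; i++ )
                printf( "proc %u point %u = %u\n", p, i, all[maxPerProc * p + i] );
        return 0;
    }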
 
 
 
 /** Performs an AllGather on a set of CritPointInfo structs: Each processor will afterwards have all the CritPointInfo
-	structs of all the processors, in one large array. The user must then index into the array by processor
-	carefully. */
+    structs of all the processors, in one large array. The user must then index into the array by processor
+    carefully. */
 static void _FeEquationNumber_ShareCritPointInfo(
 	FeEquationNumber* self,
 	CritPointInfo** const myCritPointInfo,
@@ -1201,9 +1207,9 @@
 {
 	MeshDecomp* meshDecomp = self->feMesh->layout->decomp;
 	Partition_Index proc_I; 
-	#if DEBUG
-		Index point_I = 0;
-	#endif
+#if DEBUG
+	Index point_I = 0;
+#endif
 
 	Journal_DPrintfL( self->debug, 1, "In %s\n",  __func__ );
 	Stream_IndentBranch( StG_FEM_Debug );
@@ -1213,14 +1219,14 @@
 	(*procCritPointInfoTotals) = Memory_Alloc_Array( Node_GlobalIndex, meshDecomp->nproc, "*procCritPointInfoTotals" ); 
 
 	MPI_Allgather( &myCritPointInfoTotal, 1, MPI_INT, (*procCritPointInfoTotals), 1, MPI_INT,
-		meshDecomp->communicator );
+		       meshDecomp->communicator );
 
 	for (proc_I = 0; proc_I < meshDecomp->nproc; proc_I++) {
 		if ( (*procCritPointInfoTotals)[proc_I] > (*maxCritPointInfoPerProc) )
 			(*maxCritPointInfoPerProc) = (*procCritPointInfoTotals)[proc_I];
 	}
 
-	#if DEBUG
+#if DEBUG
 	if ( Stream_IsPrintableLevel( self->debug, 2 ) ) {
 		Journal_DPrintf( self->debug, "procCritPointInfo totals:(" );
 		for (proc_I = 0; proc_I < meshDecomp->nproc; proc_I++) {
@@ -1229,18 +1235,18 @@
 		Journal_DPrintf( self->debug, ")\n");
 		Journal_DPrintf( self->debug, "MaxCritPointInfoPerProc = %d\n", (*maxCritPointInfoPerProc));
 	}	
-	#endif
+#endif
 
 	/* Now share the actual critPointInfo arrays */
 	(*allCritPointInfo) = Memory_Alloc_Array( CritPointInfo, meshDecomp->nproc * (*maxCritPointInfoPerProc), 
-		"allCritPointInfo (ShareCritPointInfo)" );
+						  "allCritPointInfo (ShareCritPointInfo)" );
 
 	/* below changed by PatrickSunter 27/1/2004 to work on grendel */
 	MPI_Allgather( (*myCritPointInfo), (*maxCritPointInfoPerProc), MPI_critPointInfoType,
-		(*allCritPointInfo), (*maxCritPointInfoPerProc), MPI_critPointInfoType,
-		meshDecomp->communicator );
+		       (*allCritPointInfo), (*maxCritPointInfoPerProc), MPI_critPointInfoType,
+		       meshDecomp->communicator );
 	
-	#if DEBUG
+#if DEBUG
 	if ( Stream_IsPrintableLevel( self->debug, 2 ) ) {	
 		Journal_DPrintf( self->debug, "procCritPointInfos" );
 		if ( DONT_PRINT_VALUES == printValuesFlag ) {
@@ -1260,16 +1266,16 @@
 		}
 		Journal_DPrintf( self->debug, ")\n");
 	}	
-	#endif
+#endif
 	Stream_UnIndentBranch( StG_FEM_Debug );
 }
 
 
 /** For each domain node, calculate the Equation number, restarting at 0 whenever a critical node is reached and
-	saving the value I was up to. */
+    saving the value I was up to. */
 static void _FeEquationNumber_DoPartialTotals( FeEquationNumber* self,
-	CritPointInfo* const critPointsIHave, Index critPointsIHaveTotal,
-	CritPointInfo* const critPointsToSend, Index critPointsToSendTotal )
+					       CritPointInfo* const critPointsIHave, Index critPointsIHaveTotal,
+					       CritPointInfo* const critPointsToSend, Index critPointsToSendTotal )
 {
 	MeshDecomp* meshDecomp = self->feMesh->layout->decomp;
 	Node_DomainIndex nodeDomainCount = meshDecomp->nodeDomainCount;
@@ -1291,16 +1297,16 @@
 
 		// if the node is the next critical point
 		if ( ( haveCritPoint_I < critPointsIHaveTotal ) 
-				&& ( remappedGlobal_I == critPointsIHave[haveCritPoint_I].index ) ) 
+		     && ( remappedGlobal_I == critPointsIHave[haveCritPoint_I].index ) ) 
 		{
 			Journal_DPrintfL( self->debug, 3, "storing c.p. subtotal at remapped global index %u = %d\n", 
-				remappedGlobal_I, currSubTotalEqNum );
+					  remappedGlobal_I, currSubTotalEqNum );
 			// store current value
 			critPointsIHave[haveCritPoint_I++].eqNum = currSubTotalEqNum;
 
 			// check if it's also a to-send point
 			if ( ( sendCritPoint_I < critPointsToSendTotal )
-					&& ( remappedGlobal_I == critPointsToSend[sendCritPoint_I].index ) )
+			     && ( remappedGlobal_I == critPointsToSend[sendCritPoint_I].index ) )
 			{
 				// store current value
 				critPointsToSend[sendCritPoint_I++].eqNum = currSubTotalEqNum;
@@ -1311,7 +1317,7 @@
 		rNodeInfo_I++;
 	}
 
-	#if DEBUG
+#if DEBUG
 	if ( Stream_IsPrintableLevel( self->debug, 2 ) ) {
 		Journal_DPrintf( self->debug, "Totals I have:[ " );
 		haveCritPoint_I = 0;
@@ -1328,16 +1334,16 @@
 		}	
 		Journal_Printf( self->debug, "]\n" );
 	}
-	#endif
+#endif
 	Stream_UnIndentBranch( StG_FEM_Debug );
 }
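
 In effect the pass above is a segmented count: number dofs from zero within each run, record the count reached at every critical point, and let the later pass (_FeEquationNumber_AddAllPartialTotals) turn those recorded counts into global offsets. A toy two-pass version with made-up data:

    #include <stdio.h>

    int main( void ) {
        /* Six nodes' dof counts in one list, split by a critical point after index 2. */
        unsigned dofCounts[6] = { 2, 2, 1, 2, 2, 2 };
        unsigned critAfter = 2;                  /* run boundary (hypothetical) */
        int      eqNum[6];
        int      curr = 0, subTotal = 0;
        unsigned n;

        /* Pass 1: number within runs, recording the subtotal at the boundary. */
        for( n = 0; n < 6; n++ ) {
            eqNum[n] = curr;
            curr += dofCounts[n];
            if( n == critAfter ) { subTotal = curr; curr = 0; }
        }
        /* Pass 2: shift the second run up by the first run's subtotal. */
        for( n = critAfter + 1; n < 6; n++ )
            eqNum[n] += subTotal;

        for( n = 0; n < 6; n++ )
            printf( "node %u -> first eqNum %d\n", n, eqNum[n] );  /* 0 2 4 5 7 9 */
        return 0;
    }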
 
 
 /** Called right at the end of calculating ID array, to add the locally calculated partial totals with the final
-	global information, and arrive at the complete figures. */
+    global information, and arrive at the complete figures. */
 static void _FeEquationNumber_AddAllPartialTotals( FeEquationNumber* self, CritPointInfo* mySubTotals,
-	Index myCritPointInfoTotal, CritPointInfo* allSubTotals, Index* procCritPointInfoTotals,
-	Index maxSubTotalsPerProc )
+						   Index myCritPointInfoTotal, CritPointInfo* allSubTotals, Index* procCritPointInfoTotals,
+						   Index maxSubTotalsPerProc )
 {	
 	Partition_Index proc_I;
 	FiniteElement_Mesh* mesh = self->feMesh;
@@ -1361,7 +1367,7 @@
 	Bool* haveUpdatedLinkedDofSetTbl = NULL;
 	Index linkedDof_I;
 	Index linkedDofSet;
-	#define NEXT_PROCESSOR_SECTION_START ( maxSubTotalsPerProc * (proc_I) + procCritPointInfoTotals[proc_I] )
+#define NEXT_PROCESSOR_SECTION_START ( maxSubTotalsPerProc * (proc_I) + procCritPointInfoTotals[proc_I] )
 
 	Journal_DPrintfL( self->debug, 1, "In %s:\n",  __func__ );
 	Stream_IndentBranch( StG_FEM_Debug );
@@ -1379,7 +1385,7 @@
 							
 	if ( self->linkedDofInfo ) {
 		haveUpdatedLinkedDofSetTbl = Memory_Alloc_Array( Bool, self->linkedDofInfo->linkedDofSetsCount,
-			"haveUpdatedLinkedDofSetTbl" );
+								 "haveUpdatedLinkedDofSetTbl" );
 		for ( linkedDof_I=0; linkedDof_I < self->linkedDofInfo->linkedDofSetsCount; linkedDof_I++ ) {
 			haveUpdatedLinkedDofSetTbl[linkedDof_I] = False;
 		}	
@@ -1409,7 +1415,7 @@
 
 			/* Only add critical node totals from other processors lower than current index */
 			while (( procCritPointInfo_Is[proc_I] < NEXT_PROCESSOR_SECTION_START ) &&
-				(newCritInfoIndex = allSubTotals[(procCritPointInfo_Is[proc_I])].index) < remappedGlobal_I )
+			       (newCritInfoIndex = allSubTotals[(procCritPointInfo_Is[proc_I])].index) < remappedGlobal_I )
 			{
 				/* See if we are scheduled to add this critical point yet, and if not where it should go */
 				while ( addListPtr && ( newCritInfoIndex > addListPtr->critPointInfo->index ) ) {
@@ -1420,19 +1426,19 @@
 				 * just progress on. */
 				if ( (addListPtr == NULL) || ( addListPtr->critPointInfo->index != newCritInfoIndex )) {
 					/*
-					if inside this condition we are either:
-					1. Adding the first crit node to be added
-					2. Have found a non-duplicate crit node to be added, either at the end or
-					partway through.
-					So go ahead and add to the addList, reconnecting both the prev and next.
+					  if inside this condition we are either:
+					  1. Adding the first crit node to be added
+					  2. Have found a non-duplicate crit node to be added, either at the end or
+					  partway through.
+					  So go ahead and add to the addList, reconnecting both the prev and next.
 					*/
 					AddListEntry* newAddListEntry = Memory_Alloc( AddListEntry,
-						"newAddListEntry (AddAllPartialTotals)" );
+										      "newAddListEntry (AddAllPartialTotals)" );
 					adjustSum += allSubTotals[procCritPointInfo_Is[proc_I]].eqNum;
 					Journal_DPrintfL( self->debug, 3, "at other c.p. (%d):incrementing adjustSum by %d to = %d\n", 
-						allSubTotals[procCritPointInfo_Is[proc_I]].index,
-						allSubTotals[procCritPointInfo_Is[proc_I]].eqNum,
-						adjustSum );
+							  allSubTotals[procCritPointInfo_Is[proc_I]].index,
+							  allSubTotals[procCritPointInfo_Is[proc_I]].eqNum,
+							  adjustSum );
 					newAddListEntry->critPointInfo = &allSubTotals[procCritPointInfo_Is[proc_I]];
 					newAddListEntry->next = addListPtr;
 					if ( addListPrev ) {
@@ -1461,8 +1467,8 @@
 
 		while ( (firstOfSet) ||
 			( (rNodeInfo_I < nodeDomainCount)
-			&& (self->remappedNodeInfos[rNodeInfo_I].remappedGlobal ==
-				(self->remappedNodeInfos[rNodeInfo_I-1].remappedGlobal + 1) ) ) )
+			  && (self->remappedNodeInfos[rNodeInfo_I].remappedGlobal ==
+			      (self->remappedNodeInfos[rNodeInfo_I-1].remappedGlobal + 1) ) ) )
 		{
 			remappedGlobal_I = self->remappedNodeInfos[rNodeInfo_I].remappedGlobal;
 			dNode_I = self->remappedNodeInfos[rNodeInfo_I].domain;
@@ -1470,24 +1476,24 @@
 			firstOfSet = False;
 			
 			Journal_DPrintfL( self->debug, 3, "At remapped gNode[%d]: I own it, so adjusting EqNums\n", 
-					remappedGlobal_I );
+					  remappedGlobal_I );
 
 			/* increment the value at each non-bc dof by adjustSum */
 			currNodeNumDofs = self->dofLayout->dofCounts[ dNode_I ];
 
 			for ( nodeLocalDof_I = 0; nodeLocalDof_I < currNodeNumDofs; nodeLocalDof_I++ ) {
 				if( !self->bcs || !VariableCondition_IsCondition( self->bcs, dNode_I,
-					self->dofLayout->varIndices[dNode_I][nodeLocalDof_I] ) )
+										  self->dofLayout->varIndices[dNode_I][nodeLocalDof_I] ) )
 				{
 					if ( (NULL == self->linkedDofInfo) ||	// TAG: bcs
-						( self->linkedDofInfo->linkedDofTbl[dNode_I][nodeLocalDof_I] == -1 ) )
+					     ( self->linkedDofInfo->linkedDofTbl[dNode_I][nodeLocalDof_I] == -1 ) )
 					{
 						/* A normal point, so increment */
 						self->destinationArray[dNode_I][nodeLocalDof_I] += adjustSum;
 					}
 					else {
 						/* Handling of linked dofs: the first time we encounter one of a set,
-						add the adjust sum. Afterwards, just set to the updated value. */
+						   add the adjust sum. Afterwards, just set to the updated value. */
 						linkedDofSet = self->linkedDofInfo->linkedDofTbl[dNode_I][nodeLocalDof_I];
 						if ( False == haveUpdatedLinkedDofSetTbl[linkedDofSet] ) {
 							self->destinationArray[dNode_I][nodeLocalDof_I] += adjustSum;
@@ -1522,11 +1528,11 @@
 
 			/* if the node is my next critical point, add the sub-total */
 			if ( ( myCritPointInfo_I < myCritPointInfoTotal ) &&
-				( remappedGlobal_I == mySubTotals[myCritPointInfo_I].index)  ) 
+			     ( remappedGlobal_I == mySubTotals[myCritPointInfo_I].index)  ) 
 			{
 				adjustSum += mySubTotals[myCritPointInfo_I].eqNum;
 				Journal_DPrintfL( self->debug, 3, "at   my    c.p. %d(%d):incrementing adjustSum by %d to = %d\n", 
-					myCritPointInfo_I, remappedGlobal_I, mySubTotals[myCritPointInfo_I].eqNum, adjustSum );
+						  myCritPointInfo_I, remappedGlobal_I, mySubTotals[myCritPointInfo_I].eqNum, adjustSum );
 				myCritPointInfo_I++;
 			}
 
@@ -1538,7 +1544,7 @@
 			for ( proc_I = 0; proc_I < meshDecomp->nproc; proc_I++ ) {
 
 				while (( procCritPointInfo_Is[proc_I] < NEXT_PROCESSOR_SECTION_START ) &&
-					allSubTotals[(procCritPointInfo_Is[proc_I])].index <= remappedGlobal_I )
+				       allSubTotals[(procCritPointInfo_Is[proc_I])].index <= remappedGlobal_I )
 				{
 					(procCritPointInfo_Is[proc_I])++;
 				}
@@ -1556,22 +1562,22 @@
 
 
 /** Updates the currEqNum argument for the number of dofs at the current node, and increments the ID Eq. Num for each
-	dof where appropriate (setting it to -1 if it's a B.C.) */
+    dof where appropriate (setting it to -1 if it's a B.C.) */
 static void _FeEquationNumber_HandleNode( FeEquationNumber* self, const Node_DomainIndex dNode_I,
-	Dof_EquationNumber* const currEqNum )
+					  Dof_EquationNumber* const currEqNum )
 {
 	Dof_Index	currNodeNumDofs;
 	Dof_Index	nodeLocalDof_I;
-	#if DEBUG
+#if DEBUG
 	Stream*		error;
-	#endif
+#endif
 	
 	Journal_DPrintfL( self->debug, 3, "In %s:",  __func__ );
-	#if DEBUG
+#if DEBUG
 	error = Journal_Register( Error_Type, self->type );
 	Journal_Firewall( (dNode_I < self->feMesh->nodeDomainCount), error, "Stuffup: %s() asked to operate on node %d, bigger "
-		"than domain count %d. Exiting.\n", __func__, dNode_I, self->feMesh->nodeDomainCount );
-	#endif
+			  "than domain count %d. Exiting.\n", __func__, dNode_I, self->feMesh->nodeDomainCount );
+#endif
 	
 	/* get the number of dofs */
 	currNodeNumDofs = self->dofLayout->dofCounts[ dNode_I ];
@@ -1594,8 +1600,8 @@
 			
 			if ( self->linkedDofInfo->eqNumsOfLinkedDofs[linkedDofSet] != -1 ) {
 				Journal_DPrintfL( self->debug, 3, "is a linked Dof, so setting ID[%d][%d] to the "
-					"previous value = %d.\n", dNode_I, nodeLocalDof_I, 
-					self->linkedDofInfo->eqNumsOfLinkedDofs[linkedDofSet] );
+						  "previous value = %d.\n", dNode_I, nodeLocalDof_I, 
+						  self->linkedDofInfo->eqNumsOfLinkedDofs[linkedDofSet] );
 				/* value is same as the first part of the linked node set */
 				self->destinationArray[dNode_I][nodeLocalDof_I] =
 					self->linkedDofInfo->eqNumsOfLinkedDofs[linkedDofSet];
@@ -1603,7 +1609,7 @@
 			else /* This is the first time we've hit this linked eq num, so treat as normal */
 			{
 				Journal_DPrintfL( self->debug, 3, "is a linked Dof hit the first time, so setting "
-					"ID[%d][%d] = %d.\n", dNode_I, nodeLocalDof_I, *currEqNum );
+						  "ID[%d][%d] = %d.\n", dNode_I, nodeLocalDof_I, *currEqNum );
 			
 				self->destinationArray[dNode_I][nodeLocalDof_I] = (*currEqNum)++;
 				if ( dNode_I < self->feMesh->nodeLocalCount ) {
@@ -1641,14 +1647,14 @@
 	
 	Journal_DPrintfL( self->debug, 1, "In Func %s():\n", __func__ );
 	adjustDueToNonLocalLinkedDofsTbl = Memory_Alloc_Array( Bool, self->linkedDofInfo->linkedDofSetsCount,
-		"adjustDueToNonLocalLinkedDofsTbl" );
+							       "adjustDueToNonLocalLinkedDofsTbl" );
 		
 	/* We need to re-calculate these, since the post processing may alter both the lowest and highest eqNum values */
 	self->_lowestLocalEqNum = -1;
 	self->_highestLocalEqNum = -1;
 		
 	Journal_DPrintfL( self->debug, 2, "Note: in reductions to follow, value of %d, the node global count, "
-		"denotes that this processor doesn't hold any of that linked dof.\n", self->feMesh->nodeGlobalCount );
+			  "denotes that this processor doesn't hold any of that linked dof.\n", self->feMesh->nodeGlobalCount );
 
 	Stream_Indent( self->debug );
 	for ( linkedDof_I=0; linkedDof_I < self->linkedDofInfo->linkedDofSetsCount; linkedDof_I++ ) {
@@ -1689,7 +1695,7 @@
 			Journal_DPrintfL( self->debug, 3, "At dof %d: ", nodeLocalDof_I );
 		
 			if( self->bcs && VariableCondition_IsCondition( self->bcs, dNode_I,
-				self->dofLayout->varIndices[dNode_I][nodeLocalDof_I] ) )
+									self->dofLayout->varIndices[dNode_I][nodeLocalDof_I] ) )
 			{
 				Journal_DPrintfL( self->debug, 3, "is a BC: ignoring.\n" );
 			}
@@ -1697,19 +1703,19 @@
 				linkedDofSet = self->linkedDofInfo->linkedDofTbl[dNode_I][nodeLocalDof_I];
 
 				Journal_DPrintfL( self->debug, 3, "is a linked Dof, so setting value to %d\n",
-					self->linkedDofInfo->eqNumsOfLinkedDofs[linkedDofSet] );
+						  self->linkedDofInfo->eqNumsOfLinkedDofs[linkedDofSet] );
 
 				self->destinationArray[dNode_I][nodeLocalDof_I] -= subtractionAdjustmentTotal;
 
 				if ( True == adjustDueToNonLocalLinkedDofsTbl[linkedDofSet] ) {
 					/* We now need to test, after the subtraction adjustment has been made, if the 
-					value of this linked dof now matches the minimum. If so, don't subtract further. */
+					   value of this linked dof now matches the minimum. If so, don't subtract further. */
 					if ( self->destinationArray[dNode_I][nodeLocalDof_I] != 
-						self->linkedDofInfo->eqNumsOfLinkedDofs[linkedDofSet] )
+					     self->linkedDofInfo->eqNumsOfLinkedDofs[linkedDofSet] )
 					{
 						subtractionAdjustmentTotal++;
 						Journal_DPrintfL( self->debug, 3, "And since first was non-local, increasing "
-							"subtraction adj. to %d\n", subtractionAdjustmentTotal );
+								  "subtraction adj. to %d\n", subtractionAdjustmentTotal );
 					}	
 						
 					/* Make sure we only adjust once */
@@ -1810,9 +1816,9 @@
 	}
 
 	self->locationMatrix = Memory_Alloc_3DComplex( Dof_EquationNumber, elementLocalCount, mesh->elementNodeCountTbl,
-		dofCountsAtElementNodesArray, "FeEquationNumber->locationMatrix" );
+						       dofCountsAtElementNodesArray, "FeEquationNumber->locationMatrix" );
 	/* Free the dof counts array:- we have to look up domain node numbers anyway later, might as
-	well just use the dof counts array then. */
+	   well just use the dof counts array then. */
 	Memory_Free( dofCountsAtElementNodesArray );
 
 	for ( lElement_I = 0; lElement_I < elementLocalCount; lElement_I++ ) {
@@ -1853,7 +1859,7 @@
 		}
 
 		localLocationMatrix = Memory_Alloc_2DComplex( Dof_EquationNumber, numNodesThisElement, numDofsEachNode,
-			"localLocationMatrix (set of ptrs to dof lists, indexed by element-local node)" );
+							      "localLocationMatrix (set of ptrs to dof lists, indexed by element-local node)" );
 		Memory_Free( numDofsEachNode );
 	}
 
@@ -1867,7 +1873,7 @@
 
 			/* copy pointers to dof eq nums from ID array relevant to that node */
 			Journal_DPrintfL( self->debugLM, 3, "copying %d dof eq. numbers from ID[%d] to LM[%d][%d]\n", 
-				numDofsThisNode, procDomainNode, lElement_I, elLocalNode_I );
+					  numDofsThisNode, procDomainNode, lElement_I, elLocalNode_I );
 			memcpy( localLocationMatrix[elLocalNode_I],
 				self->destinationArray[procDomainNode], numDofsThisNode * sizeof(Dof_EquationNumber) );
 		}
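
 The memcpy above is the classic ID-to-LM gather: each element's location-matrix row is simply the destination-array (ID) row of the corresponding node. A toy illustration with fixed counts (the real code handles per-node dof counts and BC-masked entries):

    #include <stdio.h>
    #include <string.h>

    int main( void ) {
        /* Toy ID array: 4 nodes x 2 dofs; -1 marks a boundary condition. */
        int      ID[4][2] = { {0,1}, {2,3}, {-1,4}, {5,6} };
        /* One element touching nodes 1,2,3: its LM rows are gathered from ID. */
        unsigned elNodes[3] = { 1, 2, 3 };
        int      LM[3][2];
        unsigned n, d;

        for( n = 0; n < 3; n++ )
            memcpy( LM[n], ID[elNodes[n]], 2 * sizeof(int) );

        for( n = 0; n < 3; n++ )
            for( d = 0; d < 2; d++ )
                printf( "LM[%u][%u] = %d\n", n, d, LM[n][d] );
        return 0;
    }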
@@ -1950,10 +1956,10 @@
 
 
 void FeEquationNumber_PrintElementLocationMatrix(
-		void*			feEquationNumber,
-		Dof_EquationNumber**	elementLM,
-		Element_LocalIndex	element_lI,
-		Stream*			stream )
+	void*			feEquationNumber,
+	Dof_EquationNumber**	elementLM,
+	Element_LocalIndex	element_lI,
+	Stream*			stream )
 {
 	FeEquationNumber*	self = (FeEquationNumber*)feEquationNumber;
 	Dof_Index		dof_elLocalI;
@@ -1999,14 +2005,14 @@
 
 	if ( (self->remappingActivated) && ( (self->linkedDofInfo == NULL) || (self->linkedDofInfo->linkedDofSetsCount == 0 ) ) ) {
 		/* If the remapping is activated, and things aren't complicated by periodic BCs,
-		then each processor should hold the Matrix/Vector
-		component corresponding to the lowest local eqNum, to the lowest eqNum of the next
-		processor. This means that _only shared boundary nodes_ will need to be communicated,
-		and the last processor will have no communication. */
+		   then each processor should hold the Matrix/Vector
+		   component corresponding to the lowest local eqNum, to the lowest eqNum of the next
+		   processor. This means that _only shared boundary nodes_ will need to be communicated,
+		   and the last processor will have no communication. */
 		Journal_DPrintfL( self->debug, 2, "Remapping active and no periodic bcs: using lowest local eqNums as boundaries.\n");
 
 		self->_lowestGlobalEqNums = Memory_Alloc_Array( Dof_EquationNumber, nProc,
-			"FeEquationNumber->_lowestGlobalEqNums" );
+								"FeEquationNumber->_lowestGlobalEqNums" );
 		MPI_Allgather(
 			&self->_lowestLocalEqNum, 1, MPI_INT,
 			self->_lowestGlobalEqNums, 1, MPI_INT,
@@ -2031,15 +2037,15 @@
 	}	
 	else {
 		/* If the remapping isn't activated and the eqNum ordering isn't aligned with the mesh
-		decomposition, or there are periodic BCs, we can't get a clear idea of where the processor boundaries are:
-		therefore just split up the EqNums equally between processors: still should be 
-		fairly good alignment. */
+		   decomposition, or there are periodic BCs, we can't get a clear idea of where the processor boundaries are:
+		   therefore just split up the EqNums equally between processors: still should be 
+		   fairly good alignment. */
 		Journal_DPrintfL( self->debug, 2, "Remapping inactive and/or periodic bcs used: just dividing eqNums as evenly as possible.\n");
 
 		self->_eqNumsPerProcDivisor = self->globalSumUnconstrainedDofs / nProc;
 		self->_eqNumsRemainder = self->globalSumUnconstrainedDofs % nProc;
 		Journal_DPrintfL( self->debug, 2, "Calculated %d eqNums per proc, with %d remainder\n",
-			self->_eqNumsPerProcDivisor, self->_eqNumsRemainder );
+				  self->_eqNumsPerProcDivisor, self->_eqNumsRemainder );
 		self->_remNotAddedChangeover = (self->_eqNumsPerProcDivisor+1) * self->_eqNumsRemainder;
 
 		self->localEqNumsOwnedCount = self->_eqNumsPerProcDivisor;
@@ -2058,9 +2064,9 @@
 	}	
 
 	Journal_DPrintfL( self->debug, 1, "Calculated I own %d eqNums, between indices %d to %d\n",
-		self->localEqNumsOwnedCount, self->firstOwnedEqNum, self->lastOwnedEqNum );
+			  self->localEqNumsOwnedCount, self->firstOwnedEqNum, self->lastOwnedEqNum );
 	Journal_DPrintfL( self->debug, 1, "(Range of eqNums on local mesh segment is %d to %d)\n",
-		self->_lowestLocalEqNum, self->_highestLocalEqNum );
+			  self->_lowestLocalEqNum, self->_highestLocalEqNum );
 
 	Stream_UnIndentBranch( StG_FEM_Debug );
 }
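
 The fallback branch is pure arithmetic: the first _eqNumsRemainder processors own _eqNumsPerProcDivisor+1 equation numbers each, the rest own _eqNumsPerProcDivisor, and _remNotAddedChangeover marks where the larger blocks end. For example, 10 unconstrained dofs over 3 processors gives divisor 3, remainder 1, changeover 4, so proc 0 owns eqNums 0-3 and procs 1 and 2 own 3 each. A standalone check of the owner formula used in the owner-lookup function later in this diff:

    #include <stdio.h>

    int main( void ) {
        unsigned globalDofs = 10, nProc = 3;
        unsigned divisor    = globalDofs / nProc;          /* 3 */
        unsigned remainder  = globalDofs % nProc;          /* 1 */
        unsigned changeover = (divisor + 1) * remainder;   /* 4 */
        unsigned eqNum, owner;

        for( eqNum = 0; eqNum < globalDofs; eqNum++ ) {
            if( eqNum < changeover )
                owner = eqNum / (divisor + 1);
            else
                owner = remainder + (eqNum - changeover) / divisor;
            printf( "eqNum %u -> proc %u\n", eqNum, owner );
        }
        return 0;
    }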
@@ -2071,42 +2077,56 @@
 
 	Partition_Index ownerProc = (unsigned int)-1;
 
-	if ( (self->remappingActivated) && ( (self->linkedDofInfo == NULL) || (self->linkedDofInfo->linkedDofSetsCount == 0 ) ) ) {
-		MeshDecomp*		meshDecomp = self->feMesh->layout->decomp;
-		Partition_Index		myRank = meshDecomp->rank;
-		Partition_Index		nProc = meshDecomp->nproc;
+	/* If we used the new mesh topology junk, do something a little different. */
+	if( self->feMesh->topo->domains && self->feMesh->topo->domains[MT_VERTEX] ) {
+		CommTopology*	commTopo = self->feMesh->topo->domains[0]->commTopo;
+		unsigned	p_i;
 
-		/* Expect it to be on the next processor, so try there first */
-		if ( eqNum > self->lastOwnedEqNum ) {
-			ownerProc = myRank + 1;
-			while ( (ownerProc+1) < nProc ) {
-				if ( eqNum >= self->_lowestGlobalEqNums[ownerProc+1] ) {
-					ownerProc++;
-				}	
-				else {
-					break;
-				}	
-			}
+		for( p_i = 1; p_i < commTopo->nProcs; p_i++ ) {
+			if( eqNum < self->_lowestGlobalEqNums[p_i] )
+				break;
 		}
-		/* otherwise count back from current */
-		else {
-			ownerProc = myRank;
-			while ( ownerProc > 0 ) {
-				if ( eqNum < self->_lowestGlobalEqNums[ownerProc] ) {
-					ownerProc--;
+
+		return p_i - 1;
+	}
+	else {
+		if ( (self->remappingActivated) && ( (self->linkedDofInfo == NULL) || (self->linkedDofInfo->linkedDofSetsCount == 0 ) ) ) {
+			MeshDecomp*		meshDecomp = self->feMesh->layout->decomp;
+			Partition_Index		myRank = meshDecomp->rank;
+			Partition_Index		nProc = meshDecomp->nproc;
+
+			/* Expect it to be on the next processor, so try there first */
+			if ( eqNum > self->lastOwnedEqNum ) {
+				ownerProc = myRank + 1;
+				while ( (ownerProc+1) < nProc ) {
+					if ( eqNum >= self->_lowestGlobalEqNums[ownerProc+1] ) {
+						ownerProc++;
+					}	
+					else {
+						break;
+					}	
 				}
-				else {
-					break;
-				}	
 			}
+			/* otherwise count back from current */
+			else {
+				ownerProc = myRank;
+				while ( ownerProc > 0 ) {
+					if ( eqNum < self->_lowestGlobalEqNums[ownerProc] ) {
+						ownerProc--;
+					}
+					else {
+						break;
+					}	
+				}
+			}
 		}
-	}
-	else {
-		if ( eqNum < self->_remNotAddedChangeover ) {
-			ownerProc = eqNum / (self->_eqNumsPerProcDivisor+1);
-		}
 		else {
-			ownerProc = self->_eqNumsRemainder + (eqNum - self->_remNotAddedChangeover) / self->_eqNumsPerProcDivisor;
+			if ( eqNum < self->_remNotAddedChangeover ) {
+				ownerProc = eqNum / (self->_eqNumsPerProcDivisor+1);
+			}
+			else {
+				ownerProc = self->_eqNumsRemainder + (eqNum - self->_remNotAddedChangeover) / self->_eqNumsPerProcDivisor;
+			}
 		}
 	}
 
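Since _lowestGlobalEqNums is stored in ascending rank order, the linear scan in the
new code path above could equally be a bisection; a hypothetical sketch (not part of
this commit):

	static unsigned OwnerByBisection( const int* lowest, unsigned nProcs, int eqNum ) {
		/* Invariant: lowest[lo] <= eqNum < lowest[hi], with hi == nProcs acting as +inf. */
		unsigned lo = 0, hi = nProcs;

		while( hi - lo > 1 ) {
			unsigned mid = lo + (hi - lo) / 2;
			if( eqNum >= lowest[mid] )
				lo = mid;
			else
				hi = mid;
		}
		return lo;
	}

For typical communicator sizes the linear scan costs much the same; a bisection would
only matter at very large processor counts.
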
@@ -2115,7 +2135,7 @@
 
 
 void FeEquationNumber_Create_CritPointInfo_MPI_Datatype( void ) {
-	#define CRIT_POINT_INFO_NBLOCKS 2
+#define CRIT_POINT_INFO_NBLOCKS 2
 	MPI_Aint indexExtent = 0;
 	MPI_Datatype critPointInfoTypes[CRIT_POINT_INFO_NBLOCKS] = {MPI_UNSIGNED, MPI_INT };
 	MPI_Aint critPointInfoBlockDisplacements[CRIT_POINT_INFO_NBLOCKS];
@@ -2126,8 +2146,154 @@
 	critPointInfoBlockDisplacements[1] = indexExtent;
 
 	MPI_Type_struct( CRIT_POINT_INFO_NBLOCKS, critPointInfoBlockLengths, critPointInfoBlockDisplacements,
-		critPointInfoTypes, &MPI_critPointInfoType );
+			 critPointInfoTypes, &MPI_critPointInfoType );
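+	/* Note: MPI_Type_struct()/MPI_Type_extent() are MPI-1 calls, deprecated
+	   since MPI-2 in favour of MPI_Type_create_struct()/MPI_Type_get_extent(). */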
 	
 	MPI_Type_commit( &MPI_critPointInfoType );
 }
 
+
+void FeEquationNumber_BuildWithTopology( FeEquationNumber* self ) {
+	MeshTopology*		topo;
+	CommTopology*		commTopo;
+	unsigned		nDomainNodes, nDomainEls;
+	unsigned		nLocalNodes;
+	unsigned*		nNodalDofs;
+	unsigned*		nElNodes;
+	unsigned**		elNodes;
+	int**			dstArray;
+	unsigned**		nLocMatDofs;
+	int***			locMat;
+	unsigned		varInd;
+	unsigned		curEqNum;
+	unsigned		base;
+	unsigned		subTotal;
+	MPI_Status		status;
+	unsigned		maxDofs;
+	unsigned*		tuples;
+	Decomp_Sync_Array*	array;
+	unsigned		e_i, n_i, dof_i;
+
+	assert( self );
+
+	/* Shortcuts. */
+	topo = self->feMesh->topo;
+	commTopo = topo->domains[MT_VERTEX]->commTopo;
+	nDomainNodes = self->feMesh->nodeDomainCount;
+	nDomainEls = self->feMesh->elementDomainCount;
+	nLocalNodes = MeshTopology_GetLocalSize( topo, MT_VERTEX );
+	nNodalDofs = self->dofLayout->dofCounts;
+	nElNodes = self->feMesh->elementNodeCountTbl;
+	elNodes = self->feMesh->elementNodeTbl;
+
+	/* Allocate for destination array. */
+	dstArray = Memory_Alloc_2DComplex( int, nDomainNodes, nNodalDofs, 
+					   "FeEquationNumber::destinationArray" );
+
+	/* Allocate for the location matrix. */
+	nLocMatDofs = Memory_Alloc_2DComplex_Unnamed( unsigned, nDomainEls, nElNodes );
+	for( e_i = 0; e_i < nDomainEls; e_i++ ) {
+		for( n_i = 0; n_i < nElNodes[e_i]; n_i++ )
+			nLocMatDofs[e_i][n_i] = nNodalDofs[elNodes[e_i][n_i]];
+	}
+	locMat = Memory_Alloc_3DComplex( int, nDomainEls, nElNodes, nLocMatDofs, 
+					 "FeEquationNumber::locationMatrix" );
+	FreeArray( nLocMatDofs );
+
+	/* Build initial destination array and store max dofs. */
+	curEqNum = 0;
+	maxDofs = 0;
+	for( n_i = 0; n_i < nLocalNodes; n_i++ ) {
+		unsigned	lInd;
+
+		lInd = MeshTopology_DomainToGlobal( topo, MT_VERTEX, n_i );
+		lInd = Mesh_NodeMapGlobalToDomain( self->feMesh, lInd );
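+		/* Note: the domain -> global -> domain round trip here presumably
+		   translates the new MeshTopology's domain numbering into the old
+		   Mesh's domain numbering, keeping dstArray consistent with the
+		   elNodes tables used below. */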
+
+		if( nNodalDofs[lInd] > maxDofs )
+			maxDofs = nNodalDofs[lInd];
+
+		for( dof_i = 0; dof_i < nNodalDofs[lInd]; dof_i++ ) {
+			varInd = self->dofLayout->varIndices[lInd][dof_i];
+			if( !self->bcs || !VariableCondition_IsCondition( self->bcs, lInd, varInd ) )
+				dstArray[lInd][dof_i] = curEqNum++;
+			else
+				dstArray[lInd][dof_i] = -1;
+		}
+	}
+
+	/* Order the equation numbers based on processor rank; cascade counts forward. */
+	base = 0;
+	subTotal = curEqNum;
+	if( commTopo->rank > 0 ) {
+		MPI_Recv( &base, 1, MPI_UNSIGNED, commTopo->rank - 1, 6669, commTopo->comm, &status );
+		subTotal = base + curEqNum;
+	}
+	if( commTopo->rank < commTopo->nProcs - 1 )
+		MPI_Send( &subTotal, 1, MPI_UNSIGNED, commTopo->rank + 1, 6669, commTopo->comm );
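+	/* Note: this Recv/Send chain is an exclusive prefix sum of the per-rank
+	   unconstrained-dof counts, serialised across ranks. A single collective
+	   would do the same job (hypothetical alternative, MPI-2 and later):
+	       MPI_Exscan( &curEqNum, &base, 1, MPI_UNSIGNED, MPI_SUM, commTopo->comm );
+	   noting that MPI_Exscan leaves `base` undefined on rank 0, so the zero
+	   initialisation above would still be needed. */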
+
+	/* Modify existing destination array and dump to a tuple array. */
+	tuples = Memory_Alloc_Array_Unnamed( unsigned, nDomainNodes * maxDofs );
+	for( n_i = 0; n_i < nLocalNodes; n_i++ ) {
+		unsigned	lInd;
+
+		lInd = MeshTopology_DomainToGlobal( topo, MT_VERTEX, n_i );
+		lInd = Mesh_NodeMapGlobalToDomain( self->feMesh, lInd );
+
+		for( dof_i = 0; dof_i < nNodalDofs[lInd]; dof_i++ ) {
+			varInd = self->dofLayout->varIndices[lInd][dof_i];
+			if( !self->bcs || !VariableCondition_IsCondition( self->bcs, lInd, varInd ) )
+				dstArray[lInd][dof_i] += base;
+			tuples[n_i * maxDofs + dof_i] = dstArray[lInd][dof_i];
+		}
+	}
+
+	/* Update all other procs. */
+	array = Decomp_Sync_AddArray( topo->domains[MT_VERTEX], tuples, tuples + nLocalNodes * maxDofs, 
+				      maxDofs * sizeof(unsigned), maxDofs * sizeof(unsigned), 
+				      maxDofs * sizeof(unsigned) );
+	Decomp_Sync_SyncArray( topo->domains[MT_VERTEX], array );
+	Decomp_Sync_RemoveArray( topo->domains[MT_VERTEX], array );
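+	/* After the sync, the tail of the tuple array (entries for domain nodes
+	   nLocalNodes..nDomainNodes-1) holds the eqNums received from the ranks
+	   owning the corresponding shadow nodes; they are copied out below. */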
+
+	/* Update destination array's domain indices. */
+	for( n_i = nLocalNodes; n_i < nDomainNodes; n_i++ ) {
+		unsigned	dInd;
+
+		dInd = MeshTopology_DomainToGlobal( topo, MT_VERTEX, n_i );
+		dInd = Mesh_NodeMapGlobalToDomain( self->feMesh, dInd );
+
+		for( dof_i = 0; dof_i < nNodalDofs[dInd]; dof_i++ ) {
+			varInd = self->dofLayout->varIndices[dInd][dof_i];
+			if( !self->bcs || !VariableCondition_IsCondition( self->bcs, dInd, varInd ) )
+				dstArray[dInd][dof_i] = tuples[n_i * maxDofs + dof_i];
+			else
+				dstArray[dInd][dof_i] = -1;
+		}
+	}
+
+	/* Destroy tuple array. */
+	FreeArray( tuples );
+
+	/* Build location matrix. */
+	for( e_i = 0; e_i < nDomainEls; e_i++ ) {
+		for( n_i = 0; n_i < nElNodes[e_i]; n_i++ ) {
+			for( dof_i = 0; dof_i < nNodalDofs[elNodes[e_i][n_i]]; dof_i++ )
+				locMat[e_i][n_i][dof_i] = dstArray[elNodes[e_i][n_i]][dof_i];
+		}
+	}
+
+	/* Store stuff on class. */
+	self->destinationArray = dstArray;
+	self->locationMatrix = locMat;
+	self->locationMatrixBuilt = True;
+	self->remappingActivated = False;
+	self->localEqNumsOwnedCount = curEqNum;
+	self->firstOwnedEqNum = base;
+	self->lastOwnedEqNum = subTotal - 1;
+
+	/* Bcast global sum from highest rank. The highest rank's running total is
+	   the global total, so store it on the class first; without this the
+	   broadcast root's buffer is never filled in this routine. */
+	if( commTopo->rank == commTopo->nProcs - 1 )
+		self->globalSumUnconstrainedDofs = subTotal;
+	MPI_Bcast( &self->globalSumUnconstrainedDofs, 1, MPI_UNSIGNED, commTopo->nProcs - 1, commTopo->comm );
+
+	/* Construct lowest global equation number list. */
+	self->_lowestGlobalEqNums = Memory_Alloc_Array_Unnamed( int, commTopo->nProcs );
+	MPI_Allgather( &self->firstOwnedEqNum, 1, MPI_UNSIGNED, self->_lowestGlobalEqNums, 1, MPI_UNSIGNED, 
+		       commTopo->comm );
+}

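To make the new numbering concrete: with three ranks holding 5, 3 and 4 unconstrained
dofs respectively, the cascade yields base offsets 0, 5 and 8, so the ranks own eqNums
0-4, 5-7 and 8-11, globalSumUnconstrainedDofs is 12, and _lowestGlobalEqNums ends up
as {0, 5, 8} on every rank.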
Modified: long/3D/Gale/trunk/src/StgFEM/Discretisation/src/FeEquationNumber.h
===================================================================
--- long/3D/Gale/trunk/src/StgFEM/Discretisation/src/FeEquationNumber.h	2006-10-11 20:50:37 UTC (rev 4907)
+++ long/3D/Gale/trunk/src/StgFEM/Discretisation/src/FeEquationNumber.h	2006-10-11 20:50:39 UTC (rev 4908)
@@ -281,4 +281,6 @@
 	TWiki page http://csd.vpac.org/twiki/bin/view/Stgermain/FeEquationNumber */
 	void _FeEquationNumber_BuildDestinationArray( FeEquationNumber* self );
 
+	void FeEquationNumber_BuildWithTopology( FeEquationNumber* self );
+
 #endif /* __StG_FEM_Discretisation_EquationNumber_h__ */
