[cig-commits] r4328 - in long/3D/Gale/trunk/src/StGermain: . Discretisation/Mesh/tests

walter at geodynamics.org walter at geodynamics.org
Thu Aug 17 17:17:18 PDT 2006


Author: walter
Date: 2006-08-17 17:17:17 -0700 (Thu, 17 Aug 2006)
New Revision: 4328

Added:
   long/3D/Gale/trunk/src/StGermain/Discretisation/Mesh/tests/testDecomp.c
Modified:
   long/3D/Gale/trunk/src/StGermain/
Log:
 r2706 at earth:  boo | 2006-08-17 17:14:20 -0700
  r2660 at earth (orig r3741):  LukeHodkinson | 2006-08-01 22:38:54 -0700
  Adding a test for the new Decomp class.
  
 



Property changes on: long/3D/Gale/trunk/src/StGermain
___________________________________________________________________
Name: svk:merge
   - 1ef209d2-b310-0410-a72d-e20c9eb0015c:/cig:2705
afb6c753-b9d0-0310-b4e7-dbd8d91cdd35:/trunk/StGermain:3740
   + 1ef209d2-b310-0410-a72d-e20c9eb0015c:/cig:2706
afb6c753-b9d0-0310-b4e7-dbd8d91cdd35:/trunk/StGermain:3741

Added: long/3D/Gale/trunk/src/StGermain/Discretisation/Mesh/tests/testDecomp.c
===================================================================
--- long/3D/Gale/trunk/src/StGermain/Discretisation/Mesh/tests/testDecomp.c	2006-08-18 00:17:14 UTC (rev 4327)
+++ long/3D/Gale/trunk/src/StGermain/Discretisation/Mesh/tests/testDecomp.c	2006-08-18 00:17:17 UTC (rev 4328)
@@ -0,0 +1,615 @@
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+**
+** Copyright (C), 2003, Victorian Partnership for Advanced Computing (VPAC) Ltd, 110 Victoria Street, Melbourne, 3053, Australia.
+**
+** Authors:
+**	Stevan M. Quenette, Senior Software Engineer, VPAC. (steve at vpac.org)
+**	Patrick D. Sunter, Software Engineer, VPAC. (pds at vpac.org)
+**	Luke J. Hodkinson, Computational Engineer, VPAC. (lhodkins at vpac.org)
+**	Siew-Ching Tan, Software Engineer, VPAC. (siew at vpac.org)
+**	Alan H. Lo, Computational Engineer, VPAC. (alan at vpac.org)
+**	Raquibul Hassan, Computational Engineer, VPAC. (raq at vpac.org)
+**
+**  This library is free software; you can redistribute it and/or
+**  modify it under the terms of the GNU Lesser General Public
+**  License as published by the Free Software Foundation; either
+**  version 2.1 of the License, or (at your option) any later version.
+**
+**  This library is distributed in the hope that it will be useful,
+**  but WITHOUT ANY WARRANTY; without even the implied warranty of
+**  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+**  Lesser General Public License for more details.
+**
+**  You should have received a copy of the GNU Lesser General Public
+**  License along with this library; if not, write to the Free Software
+**  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+**
+** $Id: testDecomp.c 2136 2004-09-30 02:47:13Z PatrickSunter $
+**
+**~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <mpi.h>
+
+#include "Base/Base.h"
+
+#include "Discretisation/Mesh/types.h"
+#include "Discretisation/Mesh/Decomp.h"
+
+/* Test entry points.  Each returns True on success.  'rank' and 'nProcs'
+** are this process' MPI rank and the communicator size; 'watch' selects
+** the single rank whose Decomp contents are actually inspected. */
+Bool testSetLocals( unsigned rank, unsigned nProcs, unsigned watch );
+Bool testSetLeased( unsigned rank, unsigned nProcs, unsigned watch );
+Bool testSetRemotes( unsigned rank, unsigned nProcs, unsigned watch );
+Bool testMappings( unsigned rank, unsigned nProcs, unsigned watch );
+Bool testLocals( unsigned rank, unsigned nProcs, unsigned watch );
+Bool testRemotes( unsigned rank, unsigned nProcs, unsigned watch );
+Bool testArrays( unsigned rank, unsigned nProcs, unsigned watch );
+
+/* Test driver: initialises MPI and the StGermain base layers, runs the
+** Decomp tests, and prints pass/fail from the watched rank only.
+** argv[1] (optional) chooses which rank reports; defaults to rank 0. */
+int main( int argc, char* argv[] ) {
+	unsigned	rank;
+	unsigned	nProcs;
+	unsigned	watch;
+	Bool		result;
+
+	/* Initialise MPI, get world info. */
+	MPI_Init( &argc, &argv );
+	/* NOTE(review): MPI expects int*; casting unsigned* assumes identical
+	** representation — works on common ABIs but is technically suspect. */
+	MPI_Comm_size( MPI_COMM_WORLD, (int*)&nProcs );
+	MPI_Comm_rank( MPI_COMM_WORLD, (int*)&rank );
+
+	/* Initialise StGermain. */
+	BaseFoundation_Init( &argc, &argv );
+	BaseIO_Init( &argc, &argv );
+	BaseContainer_Init( &argc, &argv );
+
+	/* Watching a particular processor? */
+	watch = (argc >= 2) ? atoi( argv[1] ) : 0;
+
+	/* Run some tests. */
+	/* The first four tests are currently disabled; only testArrays runs. */
+#if 0
+	result = testSetLocals( rank, nProcs, watch );
+	if( rank == watch )
+		printf( "Testing local setting/negotiation... %s\n", result ? "passed" : "failed" );
+
+	result = testSetLeased( rank, nProcs, watch );
+	if( rank == watch )
+		printf( "Testing leased setting/negotiation... %s\n", result ? "passed" : "failed" );
+
+	result = testSetRemotes( rank, nProcs, watch );
+	if( rank == watch )
+		printf( "Testing remotes setting/negotiation... %s\n", result ? "passed" : "failed" );
+
+	result = testMappings( rank, nProcs, watch );
+	if( rank == watch )
+		printf( "Testing mappings... %s\n", result ? "passed" : "failed" );
+#endif
+
+	result = testArrays( rank, nProcs, watch );
+	if( rank == watch )
+		printf( "Testing arrays... %s\n", result ? "passed" : "failed" );
+
+	/* Finalise StGermain. */
+	BaseContainer_Finalise();
+	BaseIO_Finalise();
+	BaseFoundation_Finalise();
+
+	/* Close off MPI */
+	MPI_Finalize();
+
+	return MPI_SUCCESS;
+}
+
+/* Gives each rank a disjoint, contiguous slab of 1000 global indices
+** (rank*1000 .. rank*1000+999), then repeatedly sets and negotiates the
+** decomposition, checking on the watched rank that decomp->locals always
+** matches what was supplied.  Returns True on success. */
+Bool testSetLocals( unsigned rank, unsigned nProcs, unsigned watch ) {
+	Decomp*		decomp = Decomp_New( "" );
+	unsigned	nLocals = 1000;
+	unsigned	nGlobals = nProcs * nLocals;
+	unsigned*	locals;
+	unsigned	nReps = 10;
+	unsigned	l_i, r_i;
+
+	locals = Memory_Alloc_Array_Unnamed( unsigned, nLocals );
+	for( l_i = 0; l_i < nLocals; l_i++ )
+		locals[l_i] = rank * nLocals + l_i;
+
+	Decomp_SetNGlobals( decomp, nGlobals );
+	for( r_i = 0; r_i < nReps; r_i++ ) {
+		Decomp_SetLocals( decomp, nLocals, locals, 0, NULL );
+		if( rank == watch ) {
+			/* Mismatch scan: l_i < nLocals afterwards means a miscompare. */
+			for( l_i = 0; l_i < nLocals; l_i++ )
+				if( decomp->locals[l_i] != locals[l_i] ) break;
+			if( l_i < nLocals ) {
+				FreeArray( locals );
+				FreeObject( decomp );
+				/* NOTE(review): only the watched rank returns early here;
+				** if Decomp_Negotiate is collective the other ranks will
+				** block in it — confirm against Decomp's implementation. */
+				return False;
+			}
+		}
+
+		Decomp_Negotiate( decomp );
+		if( rank == watch ) {
+			for( l_i = 0; l_i < nLocals; l_i++ )
+				if( decomp->locals[l_i] != locals[l_i] ) break;
+			if( l_i < nLocals ) {
+				FreeArray( locals );
+				FreeObject( decomp );
+				return False;
+			}
+		}
+	}
+
+	FreeArray( locals );
+	FreeObject( decomp );
+
+	return True;
+}
+
+/* Like testSetLocals, but each rank additionally "leases" indices from its
+** neighbours: the upper half of the previous rank's slab and the lower half
+** of the next rank's slab.  A middle rank therefore leases exactly
+** nLocals/2 + nLocals/2 entries, so the 'leased' array (sized nLocals)
+** fits exactly.  Verifies decomp->locals and decomp->leased on the watched
+** rank before and after negotiation.  Returns True on success. */
+Bool testSetLeased( unsigned rank, unsigned nProcs, unsigned watch ) {
+	Decomp*		decomp = Decomp_New( "" );
+	unsigned	nLocals = 1000;
+	unsigned	nGlobals = nProcs * nLocals;
+	unsigned	nLeased = 0;
+	unsigned*	locals;
+	unsigned*	leased;
+	unsigned	nReps = 10;
+	unsigned	l_i, r_i;
+
+	locals = Memory_Alloc_Array_Unnamed( unsigned, nLocals );
+	leased = Memory_Alloc_Array_Unnamed( unsigned, nLocals );
+	for( l_i = 0; l_i < nLocals; l_i++ ) {
+		locals[l_i] = rank * nLocals + l_i;
+
+		/* Lease the matching index one slab below/above. */
+		if( rank > 0 && l_i >= nLocals / 2 )
+			leased[nLeased++] = locals[l_i] - nLocals;
+		if( rank < nProcs - 1 && l_i < nLocals / 2 )
+			leased[nLeased++] = locals[l_i] + nLocals;
+	}
+
+	Decomp_SetNGlobals( decomp, nGlobals );
+	for( r_i = 0; r_i < nReps; r_i++ ) {
+		Decomp_SetLocals( decomp, nLocals, locals, nLeased, leased );
+		if( rank == watch ) {
+			for( l_i = 0; l_i < nLocals; l_i++ )
+				if( decomp->locals[l_i] != locals[l_i] ) break;
+			if( l_i < nLocals ) {
+				FreeArray( locals );
+				FreeArray( leased );
+				FreeObject( decomp );
+				return False;
+			}
+
+			for( l_i = 0; l_i < nLeased; l_i++ )
+				if( decomp->leased[l_i] != leased[l_i] ) break;
+			if( l_i < nLeased ) {
+				FreeArray( locals );
+				FreeArray( leased );
+				FreeObject( decomp );
+				return False;
+			}
+		}
+
+		Decomp_Negotiate( decomp );
+		if( rank == watch ) {
+			for( l_i = 0; l_i < nLocals; l_i++ )
+				if( decomp->locals[l_i] != locals[l_i] ) break;
+			if( l_i < nLocals ) {
+				FreeArray( locals );
+				FreeArray( leased );
+				FreeObject( decomp );
+				return False;
+			}
+
+			for( l_i = 0; l_i < nLeased; l_i++ )
+				if( decomp->leased[l_i] != leased[l_i] ) break;
+			if( l_i < nLeased ) {
+				FreeArray( locals );
+				FreeArray( leased );
+				FreeObject( decomp );
+				return False;
+			}
+		}
+	}
+
+	FreeArray( locals );
+	FreeArray( leased );
+	FreeObject( decomp );
+
+	return True;
+}
+
+/* Extends testSetLeased with "remotes": each rank also registers the lower
+** half of the previous slab and the upper half of the next slab via
+** Decomp_AddRemotes.  After each phase (SetLocals, AddRemotes, Negotiate)
+** the watched rank broadcasts a success/failure flag so that on failure
+** every rank leaves the loop together rather than one rank returning early
+** while the rest wait in the next collective call.  Returns True on
+** success (note: non-watched ranks also return True after a broadcast
+** failure — only the watched rank's result is reported). */
+Bool testSetRemotes( unsigned rank, unsigned nProcs, unsigned watch ) {
+	Decomp*		decomp = Decomp_New( "" );
+	unsigned	nLocals = 1000;
+	unsigned	nGlobals = nProcs * nLocals;
+	unsigned	nLeased = 0;
+	unsigned	nRemotes = 0;
+	unsigned*	locals;
+	unsigned*	leased;
+	unsigned*	remotes;
+	unsigned	nReps = 10;
+	unsigned	success = 0;
+	unsigned	failure = 1;
+	unsigned	l_i, r_i, rem_i;
+
+	locals = Memory_Alloc_Array_Unnamed( unsigned, nLocals );
+	leased = Memory_Alloc_Array_Unnamed( unsigned, nLocals );
+	remotes = Memory_Alloc_Array_Unnamed( unsigned, nLocals );
+	for( l_i = 0; l_i < nLocals; l_i++ ) {
+		locals[l_i] = rank * nLocals + l_i;
+
+		/* Leased: upper half of previous slab, lower half of next slab. */
+		if( rank > 0 && l_i >= nLocals / 2 )
+			leased[nLeased++] = locals[l_i] - nLocals;
+		if( rank < nProcs - 1 && l_i < nLocals / 2 )
+			leased[nLeased++] = locals[l_i] + nLocals;
+
+		/* Remotes: the complementary halves of the neighbouring slabs. */
+		if( rank > 0 && l_i < nLocals / 2 )
+			remotes[nRemotes++] = locals[l_i] - nLocals;
+		if( rank < nProcs - 1 && l_i >= nLocals / 2 )
+			remotes[nRemotes++] = locals[l_i] + nLocals;
+	}
+
+	Decomp_SetNGlobals( decomp, nGlobals );
+	for( r_i = 0; r_i < nReps; r_i++ ) {
+		Decomp_SetLocals( decomp, nLocals, locals, nLeased, leased );
+		if( rank == watch ) {
+			for( l_i = 0; l_i < nLocals; l_i++ )
+				if( decomp->locals[l_i] != locals[l_i] ) break;
+			if( l_i < nLocals ) {
+				FreeArray( locals );
+				FreeArray( leased );
+				FreeArray( remotes );
+				FreeObject( decomp );
+				/* Tell the other ranks to bail out of the loop too. */
+				MPI_Bcast( &failure, 1, MPI_UNSIGNED, watch, MPI_COMM_WORLD );
+				return False;
+			}
+
+			for( l_i = 0; l_i < nLeased; l_i++ )
+				if( decomp->leased[l_i] != leased[l_i] ) break;
+			if( l_i < nLeased ) {
+				FreeArray( locals );
+				FreeArray( leased );
+				FreeArray( remotes );
+				FreeObject( decomp );
+				MPI_Bcast( &failure, 1, MPI_UNSIGNED, watch, MPI_COMM_WORLD );
+				return False;
+			}
+
+			MPI_Bcast( &success, 1, MPI_UNSIGNED, watch, MPI_COMM_WORLD );
+		}
+		else {
+			unsigned	status;
+
+			/* Receive the watched rank's verdict; stop if it failed. */
+			MPI_Bcast( &status, 1, MPI_UNSIGNED, watch, MPI_COMM_WORLD );
+			if( status == failure ) break;
+		}
+
+		Decomp_AddRemotes( decomp, nRemotes, remotes );
+		if( rank == watch ) {
+			for( l_i = 0; l_i < nLocals; l_i++ )
+				if( decomp->locals[l_i] != locals[l_i] ) break;
+			if( l_i < nLocals ) {
+				FreeArray( locals );
+				FreeArray( leased );
+				FreeArray( remotes );
+				FreeObject( decomp );
+				MPI_Bcast( &failure, 1, MPI_UNSIGNED, watch, MPI_COMM_WORLD );
+				return False;
+			}
+
+			for( l_i = 0; l_i < nLeased; l_i++ )
+				if( decomp->leased[l_i] != leased[l_i] ) break;
+			if( l_i < nLeased ) {
+				FreeArray( locals );
+				FreeArray( leased );
+				FreeArray( remotes );
+				FreeObject( decomp );
+				MPI_Bcast( &failure, 1, MPI_UNSIGNED, watch, MPI_COMM_WORLD );
+				return False;
+			}
+
+			for( rem_i = 0; rem_i < nRemotes; rem_i++ )
+				if( decomp->remotes[rem_i] != remotes[rem_i] ) break;
+			if( rem_i < nRemotes ) {
+				FreeArray( locals );
+				FreeArray( leased );
+				FreeArray( remotes );
+				FreeObject( decomp );
+				MPI_Bcast( &failure, 1, MPI_UNSIGNED, watch, MPI_COMM_WORLD );
+				return False;
+			}
+
+			MPI_Bcast( &success, 1, MPI_UNSIGNED, watch, MPI_COMM_WORLD );
+		}
+		else {
+			unsigned	status;
+
+			MPI_Bcast( &status, 1, MPI_UNSIGNED, watch, MPI_COMM_WORLD );
+			if( status == failure ) break;
+		}
+
+		Decomp_Negotiate( decomp );
+		if( rank == watch ) {
+			for( l_i = 0; l_i < nLocals; l_i++ )
+				if( decomp->locals[l_i] != locals[l_i] ) break;
+			if( l_i < nLocals ) {
+				FreeArray( locals );
+				FreeArray( leased );
+				FreeArray( remotes );
+				FreeObject( decomp );
+				MPI_Bcast( &failure, 1, MPI_UNSIGNED, watch, MPI_COMM_WORLD );
+				return False;
+			}
+
+			for( l_i = 0; l_i < nLeased; l_i++ )
+				if( decomp->leased[l_i] != leased[l_i] ) break;
+			if( l_i < nLeased ) {
+				FreeArray( locals );
+				FreeArray( leased );
+				FreeArray( remotes );
+				FreeObject( decomp );
+				MPI_Bcast( &failure, 1, MPI_UNSIGNED, watch, MPI_COMM_WORLD );
+				return False;
+			}
+
+			for( rem_i = 0; rem_i < nRemotes; rem_i++ )
+				if( decomp->remotes[rem_i] != remotes[rem_i] ) break;
+			if( rem_i < nRemotes ) {
+				FreeArray( locals );
+				FreeArray( leased );
+				FreeArray( remotes );
+				FreeObject( decomp );
+				MPI_Bcast( &failure, 1, MPI_UNSIGNED, watch, MPI_COMM_WORLD );
+				return False;
+			}
+
+			MPI_Bcast( &success, 1, MPI_UNSIGNED, watch, MPI_COMM_WORLD );
+		}
+		else {
+			unsigned	status;
+
+			MPI_Bcast( &status, 1, MPI_UNSIGNED, watch, MPI_COMM_WORLD );
+			if( status == failure ) break;
+		}
+	}
+
+	FreeArray( locals );
+	FreeArray( leased );
+	FreeArray( remotes );
+	FreeObject( decomp );
+
+	return True;
+}
+
+/* Builds the same locals/leased/remotes layout as testSetRemotes, then
+** verifies Decomp's global<->domain index mappings on the watched rank.
+** The checks imply a domain numbering of [locals][leased][remotes]:
+** local l_i maps to domain l_i, leased entries start at nLocals, and
+** remote entries start at nLocals + nLeased.  The watched rank broadcasts
+** a pass/fail flag each repetition so all ranks exit together. */
+Bool testMappings( unsigned rank, unsigned nProcs, unsigned watch ) {
+	Decomp*		decomp = Decomp_New( "" );
+	unsigned	nLocals = 1000;
+	unsigned	nGlobals = nProcs * nLocals;
+	unsigned	nLeased = 0;
+	unsigned	nRemotes = 0;
+	unsigned*	locals;
+	unsigned*	leased;
+	unsigned*	remotes;
+	unsigned	nReps = 10;
+	unsigned	success = 0;
+	unsigned	failure = 1;
+	unsigned	l_i, r_i;
+
+	locals = Memory_Alloc_Array_Unnamed( unsigned, nLocals );
+	leased = Memory_Alloc_Array_Unnamed( unsigned, nLocals );
+	remotes = Memory_Alloc_Array_Unnamed( unsigned, nLocals );
+	for( l_i = 0; l_i < nLocals; l_i++ ) {
+		locals[l_i] = rank * nLocals + l_i;
+
+		if( rank > 0 && l_i >= nLocals / 2 )
+			leased[nLeased++] = locals[l_i] - nLocals;
+		if( rank < nProcs - 1 && l_i < nLocals / 2 )
+			leased[nLeased++] = locals[l_i] + nLocals;
+
+		if( rank > 0 && l_i < nLocals / 2 )
+			remotes[nRemotes++] = locals[l_i] - nLocals;
+		if( rank < nProcs - 1 && l_i >= nLocals / 2 )
+			remotes[nRemotes++] = locals[l_i] + nLocals;
+	}
+
+	for( r_i = 0; r_i < nReps; r_i++ ) {
+		Decomp_SetNGlobals( decomp, nGlobals );
+		Decomp_SetLocals( decomp, nLocals, locals, nLeased, leased );
+		Decomp_AddRemotes( decomp, nRemotes, remotes );
+		Decomp_Negotiate( decomp );
+
+		if( rank == watch ) {
+			/* Running positions within the leased/remote domain ranges;
+			** incremented as the matching entries are encountered. */
+			unsigned	curLeased = 0;
+			unsigned	curRemote = 0;
+
+			for( l_i = 0; l_i < nLocals; l_i++ ) {
+				unsigned	global = rank * nLocals + l_i;
+
+				/* Locals must round-trip: global <-> domain l_i. */
+				if( Decomp_GlobalToDomain( decomp, global ) != l_i || 
+				    Decomp_DomainToGlobal( decomp, l_i ) != global )
+				{
+					break;
+				}
+
+				/* Leased entries occupy domain slots nLocals..  Note the
+				** post-increment inside the condition advances curLeased
+				** only when the guard clauses pass (short-circuit). */
+				if( rank > 0 && l_i >= nLocals / 2 && 
+				    (Decomp_GlobalToDomain( decomp, global - nLocals ) != nLocals + curLeased || 
+				     Decomp_DomainToGlobal( decomp, nLocals + curLeased++ ) != global - nLocals) )
+				{
+					break;
+				}
+				if( rank < nProcs - 1 && l_i < nLocals / 2 && 
+				    (Decomp_GlobalToDomain( decomp, global + nLocals ) != nLocals + curLeased || 
+				     Decomp_DomainToGlobal( decomp, nLocals + curLeased++ ) != global + nLocals) )
+				{
+					break;
+				}
+
+				/* Remote entries occupy domain slots nLocals + nLeased.. */
+				if( rank > 0 && l_i < nLocals / 2 && 
+				    (Decomp_GlobalToDomain( decomp, global - nLocals ) != nLocals + nLeased + curRemote || 
+				     Decomp_DomainToGlobal( decomp, nLocals + nLeased + curRemote++ ) != global - nLocals) )
+				{
+					break;
+				}
+				if( rank < nProcs - 1 && l_i >= nLocals / 2 && 
+				    (Decomp_GlobalToDomain( decomp, global + nLocals ) != nLocals + nLeased + curRemote || 
+				     Decomp_DomainToGlobal( decomp, nLocals + nLeased + curRemote++ ) != global + nLocals) )
+				{
+					break;
+				}
+			}
+			if( l_i < nLocals ) {
+				FreeArray( locals );
+				FreeArray( leased );
+				FreeArray( remotes );
+				FreeObject( decomp );
+				MPI_Bcast( &failure, 1, MPI_UNSIGNED, watch, MPI_COMM_WORLD );
+				return False;
+			}
+
+			MPI_Bcast( &success, 1, MPI_UNSIGNED, watch, MPI_COMM_WORLD );
+		}
+		else {
+			unsigned	status;
+
+			MPI_Bcast( &status, 1, MPI_UNSIGNED, watch, MPI_COMM_WORLD );
+			if( status == failure ) break;
+		}
+	}
+
+	FreeArray( locals );
+	FreeArray( leased );
+	FreeArray( remotes );
+	FreeObject( decomp );
+
+	return True;
+}
+
+/* Dummy record for testArrays: only 'two' is written and checked, to
+** exercise strided (offset-within-struct) array synchronisation. */
+typedef struct {
+	int	one;
+	int	two;
+	int	three;
+} aStruct;
+
+/* Tests Decomp's array-synchronisation: registers a flat unsigned array
+** and a strided field (aStruct.two) via Decomp_AddArray, poisons the
+** shadow (leased + remote) entries with this rank's own id, calls
+** Decomp_Sync, and checks on the watched rank that each shadow entry now
+** holds the owning neighbour's rank (rank-1 or rank+1) while locals are
+** untouched.  Uses the same broadcast pass/fail protocol as the other
+** tests so all ranks leave the loop together.  Returns True on success. */
+Bool testArrays( unsigned rank, unsigned nProcs, unsigned watch ) {
+	Decomp*		decomp = Decomp_New( "" );
+	unsigned	nLocals = 10;
+	unsigned	nGlobals = nProcs * nLocals;
+	unsigned	nLeased = 0;
+	unsigned	nRemotes = 0;
+	unsigned*	locals;
+	unsigned*	leased;
+	unsigned*	remotes;
+	unsigned*	intLocals;
+	aStruct*	structLocals;
+	unsigned*	intShadows;
+	aStruct*	structShadows;
+	unsigned	nReps = 10;
+	unsigned	success = 0;
+	unsigned	failure = 1;
+	unsigned	l_i, s_i, r_i;
+
+	locals = Memory_Alloc_Array_Unnamed( unsigned, nLocals );
+	leased = Memory_Alloc_Array_Unnamed( unsigned, nLocals );
+	remotes = Memory_Alloc_Array_Unnamed( unsigned, nLocals );
+	for( l_i = 0; l_i < nLocals; l_i++ ) {
+		locals[l_i] = rank * nLocals + l_i;
+
+		/* Same neighbour-slab layout as the other tests. */
+		if( rank > 0 && l_i >= nLocals / 2 )
+			leased[nLeased++] = locals[l_i] - nLocals;
+		if( rank < nProcs - 1 && l_i < nLocals / 2 )
+			leased[nLeased++] = locals[l_i] + nLocals;
+
+		if( rank > 0 && l_i < nLocals / 2 )
+			remotes[nRemotes++] = locals[l_i] - nLocals;
+		if( rank < nProcs - 1 && l_i >= nLocals / 2 )
+			remotes[nRemotes++] = locals[l_i] + nLocals;
+	}
+
+	/* Payload arrays: locals hold this rank's id; shadows cover the
+	** leased + remote domain range. */
+	intLocals = Memory_Alloc_Array_Unnamed( unsigned, nLocals );
+	structLocals = Memory_Alloc_Array_Unnamed( aStruct, nLocals );
+	intShadows = Memory_Alloc_Array_Unnamed( unsigned, nLeased + nRemotes );
+	structShadows = Memory_Alloc_Array_Unnamed( aStruct, nLeased + nRemotes );
+	for( l_i = 0; l_i < nLocals; l_i++ ) {
+		intLocals[l_i] = rank;
+		structLocals[l_i].two = rank;
+	}
+
+	for( r_i = 0; r_i < nReps; r_i++ ) {
+		Decomp_SetNGlobals( decomp, nGlobals );
+		Decomp_SetLocals( decomp, nLocals, locals, nLeased, leased );
+		Decomp_AddRemotes( decomp, nRemotes, remotes );
+		Decomp_Negotiate( decomp );
+		/* Flat array: stride == item size == sizeof(int). */
+		Decomp_AddArray( decomp, intLocals, intShadows, sizeof(int), sizeof(int), sizeof(int) );
+		/* Strided field: base is &elem[0].two, stride sizeof(aStruct),
+		** item size sizeof(int) — syncs only the 'two' member. */
+		Decomp_AddArray( decomp, &structLocals[0].two, &structShadows[0].two, sizeof(aStruct), sizeof(aStruct), 
+				 sizeof(int) );
+
+		/* Poison shadows with our own rank; Sync must overwrite them. */
+		for( s_i = 0; s_i < nLeased + nRemotes; s_i++ ) {
+			intShadows[s_i] = rank;
+			structShadows[s_i].two = rank;
+		}
+
+		Decomp_Sync( decomp );
+
+		if( rank == watch ) {
+			unsigned	curLeased = 0;
+			unsigned	curRemote = 0;
+
+			for( l_i = 0; l_i < nLocals; l_i++ ) {
+				/* Locals must be untouched by the sync. */
+				if( intLocals[l_i] != rank || structLocals[l_i].two != rank )
+					break;
+
+				/* Leased shadows come from the neighbouring owner. */
+				if( rank > 0 && l_i >= nLocals / 2 && 
+				    (intShadows[curLeased] != rank - 1 || structShadows[curLeased++].two != rank - 1) )
+				{
+					break;
+				}
+				if( rank < nProcs - 1 && l_i < nLocals / 2 && 
+				    (intShadows[curLeased] != rank + 1 || structShadows[curLeased++].two != rank + 1) )
+				{
+					break;
+				}
+
+				/* Remote shadows follow the leased block. */
+				if( rank > 0 && l_i < nLocals / 2 && 
+				    (intShadows[nLeased + curRemote] != rank - 1 || 
+				     structShadows[nLeased + curRemote++].two != rank - 1) )
+				{
+					break;
+				}
+				if( rank < nProcs - 1 && l_i >= nLocals / 2 && 
+				    (intShadows[nLeased + curRemote] != rank + 1 || 
+				     structShadows[nLeased + curRemote++].two != rank + 1) )
+				{
+					break;
+				}
+			}
+			if( l_i < nLocals ) {
+				FreeArray( locals );
+				FreeArray( leased );
+				FreeArray( remotes );
+				FreeArray( intLocals );
+				FreeArray( intShadows );
+				FreeArray( structLocals );
+				FreeArray( structShadows );
+				FreeObject( decomp );
+				MPI_Bcast( &failure, 1, MPI_UNSIGNED, watch, MPI_COMM_WORLD );
+				return False;
+			}
+
+			MPI_Bcast( &success, 1, MPI_UNSIGNED, watch, MPI_COMM_WORLD );
+		}
+		else {
+			unsigned	status;
+
+			MPI_Bcast( &status, 1, MPI_UNSIGNED, watch, MPI_COMM_WORLD );
+			if( status == failure ) break;
+		}
+	}
+
+	FreeArray( locals );
+	FreeArray( leased );
+	FreeArray( remotes );
+	FreeArray( intLocals );
+	FreeArray( intShadows );
+	FreeArray( structLocals );
+	FreeArray( structShadows );
+	FreeObject( decomp );
+
+	return True;
+}



More information about the cig-commits mailing list