[cig-commits] r1402 - in branches/s-wang2: for_deal.II/include/deal.II/lac for_deal.II/source/lac source source/simulator

s-wang at dealii.org s-wang at dealii.org
Fri Nov 30 14:02:48 PST 2012


Author: s-wang
Date: 2012-11-30 15:02:48 -0700 (Fri, 30 Nov 2012)
New Revision: 1402

Added:
   branches/s-wang2/for_deal.II/include/deal.II/lac/petsc_parallel_vector.h
   branches/s-wang2/for_deal.II/source/lac/petsc_parallel_sparse_matrix.cc
   branches/s-wang2/for_deal.II/source/lac/petsc_parallel_vector.cc
   branches/s-wang2/for_deal.II/source/lac/petsc_precondition.cc
Removed:
   branches/s-wang2/for_deal.II/include/deal.II/lac/petsc_solver.h
Modified:
   branches/s-wang2/for_deal.II/include/deal.II/lac/petsc_parallel_block_vector.h
   branches/s-wang2/for_deal.II/source/lac/petsc_solver.cc
   branches/s-wang2/source/adiabatic_conditions.cc
   branches/s-wang2/source/main.cc
   branches/s-wang2/source/simulator/assembly.cc
   branches/s-wang2/source/simulator/checkpoint_restart.cc
   branches/s-wang2/source/simulator/core.cc
   branches/s-wang2/source/simulator/helper_functions.cc
   branches/s-wang2/source/simulator/initial_conditions.cc
   branches/s-wang2/source/simulator/solver.cc
Log:
passed several tests

Modified: branches/s-wang2/for_deal.II/include/deal.II/lac/petsc_parallel_block_vector.h
===================================================================
--- branches/s-wang2/for_deal.II/include/deal.II/lac/petsc_parallel_block_vector.h	2012-11-30 18:38:17 UTC (rev 1401)
+++ branches/s-wang2/for_deal.II/include/deal.II/lac/petsc_parallel_block_vector.h	2012-11-30 22:02:48 UTC (rev 1402)
@@ -367,7 +367,7 @@
       for (unsigned int i=0; i<this->n_blocks(); ++i)
         this->components[i] = v.components[i];
 
-      collect_sizes();     // shuqiangwang
+      //collect_sizes();     // shuqiangwang
     }
 
 
@@ -428,7 +428,7 @@
         this->components[i].reinit(communicator, block_sizes[i],
                                    local_sizes[i], fast);
 
-      collect_sizes();     // shuqiangwang
+      //collect_sizes();     // shuqiangwang
     }
 
     inline
@@ -440,7 +440,9 @@
     	if (this->components.size() != this->n_blocks())
     		this->components.resize(this->n_blocks());
 
-    	collect_sizes();     // shuqiangwang
+    	for (unsigned int i=0; i<this->n_blocks(); ++i)
+    		this->components[i].set_mpi_communicator(communicator);
+    	//collect_sizes();     // shuqiangwang
     }
 
     inline
@@ -455,7 +457,7 @@
       for (unsigned int i=0; i<this->n_blocks(); ++i)
         block(i).reinit(v.block(i), fast);
 
-      collect_sizes();     // shuqiangwang
+      //collect_sizes();     // shuqiangwang
     }
 
 

Added: branches/s-wang2/for_deal.II/include/deal.II/lac/petsc_parallel_vector.h
===================================================================
--- branches/s-wang2/for_deal.II/include/deal.II/lac/petsc_parallel_vector.h	                        (rev 0)
+++ branches/s-wang2/for_deal.II/include/deal.II/lac/petsc_parallel_vector.h	2012-11-30 22:02:48 UTC (rev 1402)
@@ -0,0 +1,580 @@
+//---------------------------------------------------------------------------
+//    $Id: petsc_parallel_vector.h 27628 2012-11-20 22:49:26Z heister $
+//
+//    Copyright (C) 2004, 2005, 2006, 2007, 2009, 2010, 2012 by the deal.II authors
+//
+//    This file is subject to QPL and may not be  distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the  text  and
+//    further information on this license.
+//
+//---------------------------------------------------------------------------
+#ifndef __deal2__petsc_parallel_vector_h
+#define __deal2__petsc_parallel_vector_h
+
+
+#include <deal.II/base/config.h>
+
+#ifdef DEAL_II_USE_PETSC
+
+#  include <deal.II/base/subscriptor.h>
+#  include <deal.II/lac/exceptions.h>
+#  include <deal.II/lac/vector.h>
+#  include <deal.II/lac/petsc_vector_base.h>
+#  include <deal.II/base/index_set.h>
+
+DEAL_II_NAMESPACE_OPEN
+
+
+
+// forward declaration
+template <typename> class Vector;
+class IndexSet;
+
+
+/*! @addtogroup PETScWrappers
+ *@{
+ */
+namespace PETScWrappers
+{
+  /**
+   * Namespace for PETSc classes that work in parallel over MPI, such as
+   * distributed vectors and matrices.
+   *
+   * @ingroup PETScWrappers
+   * @author Wolfgang Bangerth, 2004
+   */
+  namespace MPI
+  {
+
+    /**
+     * Implementation of a parallel vector class based on PETSc and using MPI
+     * communication to synchronise distributed operations. All the functionality
+     * is actually in the base class, except for the calls to generate a
+     * parallel vector. This is possible since PETSc only works on an abstract
+     * vector type and internally distributes to functions that do the actual work
+     * depending on the actual vector type (much like using virtual
+     * functions). Only the functions creating a vector of specific type differ,
+     * and are implemented in this particular class.
+     *
+     * <h3>Parallel communication model</h3>
+     *
+     * The parallel functionality of PETSc is built on top of the Message Passing
+     * Interface (MPI). MPI's communication model is built on collective
+     * communications: if one process wants something from another, that other
+     * process has to be willing to accept this communication. A process cannot
+     * query data from another process by calling a remote function, without that
+     * other process expecting such a transaction. The consequence is that most of
+     * the operations in the base class of this class have to be called
+     * collectively. For example, if you want to compute the l2 norm of a parallel
+     * vector, @em all processes across which this vector is shared have to call
+     * the @p l2_norm function. If you don't do this, but instead only call the @p
+     * l2_norm function on one process, then the following happens: This one
+     * process will call one of the collective MPI functions and wait for all the
+     * other processes to join in on this. Since the other processes don't call
+     * this function, you will either get a time-out on the first process, or,
+     * worse, by the time the next call to a PETSc function generates an MPI
+     * message on the other processes, you will get a cryptic message that only a
+     * subset of processes attempted a communication. These bugs can be very hard
+     * to figure out, unless you are well-acquainted with the communication model
+     * of MPI, and know which functions may generate MPI messages.
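+     *
+     * As a minimal sketch (assuming @p vec is a vector of this class, shared
+     * across all processes of its communicator), the collective call looks
+     * like this:
+     * @verbatim
+     *   // every process that shares vec must execute this line
+     *   const double norm = vec.l2_norm();
+     * @endverbatim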
+     *
+     * One particular case, where an MPI message may be generated unexpectedly is
+     * discussed below.
+     *
+     *
+     * <h3>Accessing individual elements of a vector</h3>
+     *
+     * PETSc does allow read access to individual elements of a vector, but in the
+     * distributed case only to elements that are stored locally. We implement
+     * this through calls like <tt>d=vec(i)</tt>. However, if you access an
+     * element outside the locally stored range, an exception is generated.
+     *
+     * In contrast to read access, PETSc (and the respective deal.II wrapper
+     * classes) allow writing (or adding) to individual elements of vectors, even if
+     * they are stored on a different process. You can do this by writing, for
+     * example, <tt>vec(i)=d</tt> or <tt>vec(i)+=d</tt>, or similar
+     * operations. There is one catch, however, that may lead to very confusing
+     * error messages: PETSc requires application programs to call the compress()
+     * function when they switch from adding to elements to writing to
+     * elements. The reasoning is that all processes might accumulate addition
+     * operations to elements, even if multiple processes write to the same
+     * elements. By the time we call compress() the next time, all these additions
+     * are executed. However, if one process adds to an element, and another
+     * overwrites it, the order of execution would yield non-deterministic
+     * behavior if we don't make sure that a synchronisation with compress()
+     * happens in between.
+     *
+     * In order to make sure these calls to compress() happen at the appropriate
+     * time, the deal.II wrappers keep a state variable that stores the
+     * presently allowed operation: additions or writes. If it encounters an
+     * operation of the opposite kind, it calls compress() and flips the
+     * state. This can sometimes lead to very confusing behavior, in code that may
+     * for example look like this:
+     * @verbatim
+     *   PETScWrappers::MPI::Vector vector;
+     *   ...
+     *                   // do some write operations on the vector
+     *   for (unsigned int i=0; i<vector.size(); ++i)
+     *     vector(i) = i;
+     *
+     *                   // do some additions to vector elements, but
+     *                   // only for some elements
+     *   for (unsigned int i=0; i<vector.size(); ++i)
+     *     if (some_condition(i) == true)
+     *       vector(i) += 1;
+     *
+     *                   // do another collective operation
+     *   const double norm = vector.l2_norm();
+     * @endverbatim
+     *
+     * This code can run into trouble: by the time we see the first addition
+     * operation, we need to flush the overwrite buffers for the vector, and the
+     * deal.II library will do so by calling compress(). However, it will only do
+     * so for all processes that actually do an addition -- if the condition is
+     * never true for one of the processes, then this one will not get to the
+     * actual compress() call, whereas all the other ones do. This gets us into
+     * trouble, since all the other processes hang in the call to flush the write
+     * buffers, while the one other process advances to the call to compute the l2
+     * norm. At this time, you will get an error that some operation was attempted
+     * by only a subset of processes. This behavior may seem surprising, unless
+     * you know that write/addition operations on single elements may trigger this
+     * behavior.
+     *
+     * The problem described here may be avoided by placing additional calls to
+     * compress(), or making sure that all processes do the same type of
+     * operations at the same time, for example by placing zero additions if
+     * necessary.
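+     *
+     * A minimal sketch of the second remedy, applied to the loop in the
+     * example above: every process performs an addition (possibly of zero),
+     * so the implied compress() is reached collectively.
+     * @verbatim
+     *   for (unsigned int i=0; i<vector.size(); ++i)
+     *     vector(i) += (some_condition(i) == true ? 1 : 0);
+     * @endverbatim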
+     *
+     * @ingroup PETScWrappers
+     * @ingroup Vectors
+     * @author Wolfgang Bangerth, 2004
+     */
+    class Vector : public VectorBase
+    {
+    public:
+      /**
+       * Default constructor. Initialize the
+       * vector as empty.
+       */
+      Vector ();
+
+      /**
+       * Constructor. Set dimension to
+       * @p n and initialize all
+       * elements with zero.
+       *
+       * @arg local_size denotes the size
+       * of the chunk that shall be stored
+       * on the present process.
+       *
+       * @arg communicator denotes the MPI
+       * communicator over which the
+       * different parts of the vector
+       * shall communicate
+       *
+       * The constructor is made explicit
+       * to avoid accidents like this:
+       * <tt>v=0;</tt>. Presumably, the user
+       * wants to set every element of the
+       * vector to zero, but instead, what
+       * happens is this call:
+       * <tt>v=Vector@<number@>(0);</tt>, i.e. the
+       * vector is replaced by one of
+       * length zero.
+       */
+      explicit Vector (const MPI_Comm     &communicator,
+                       const unsigned int  n,
+                       const unsigned int  local_size);
+
+
+      /**
+       * Copy-constructor from deal.II
+       * vectors. Sets the dimension to that
+       * of the given vector, and copies all
+       * elements.
+       *
+       * @arg local_size denotes the size
+       * of the chunk that shall be stored
+       * on the present process.
+       *
+       * @arg communicator denotes the MPI
+       * communicator over which the
+       * different parts of the vector
+       * shall communicate
+       */
+      template <typename Number>
+      explicit Vector (const MPI_Comm         &communicator,
+                       const dealii::Vector<Number> &v,
+                       const unsigned int      local_size);
+
+
+      /**
+       * Copy-constructor that takes the
+       * values from a PETSc wrapper vector
+       * class.
+       *
+       * @arg local_size denotes the size
+       * of the chunk that shall be stored
+       * on the present process.
+       *
+       * @arg communicator denotes the MPI
+       * communicator over which the
+       * different parts of the vector
+       * shall communicate
+       */
+      explicit Vector (const MPI_Comm     &communicator,
+                       const VectorBase   &v,
+                       const unsigned int  local_size);
+
+
+      /**
+       * Constructs a new parallel PETSc
+       * vector from an IndexSet. Note that
+       * @p local must be contiguous and
+       * the global size of the vector is
+       * determined by local.size(). The
+       * global indices in @p ghost are
+       * supplied as ghost indices that can
+       * also be read locally.
+       *
+       * Note that the @p ghost IndexSet
+       * may be empty and that any indices
+       * already contained in @p local are
+       * ignored during construction. That
+       * way, the ghost parameter can equal
+       * the set of locally relevant
+       * degrees of freedom, see step-32.
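+       *
+       * A minimal usage sketch (the index
+       * set and communicator names are
+       * only illustrative):
+       * @verbatim
+       *   IndexSet locally_owned (n_dofs);
+       *   IndexSet locally_relevant (n_dofs);
+       *   // ... fill the two index sets ...
+       *   PETScWrappers::MPI::Vector ghosted (mpi_communicator,
+       *                                       locally_owned,
+       *                                       locally_relevant);
+       * @endverbatim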
+       */
+      explicit Vector (const MPI_Comm     &communicator,
+                       const IndexSet   &local,
+                       const IndexSet &ghost = IndexSet(0));
+
+
+      /**
+       * Copy the given vector. Resize the
+       * present vector if necessary. Also
+       * take over the MPI communicator of
+       * @p v.
+       */
+      Vector &operator = (const Vector &v);
+
+
+      /**
+       * Copy the given sequential
+       * (non-distributed) vector
+       * into the present parallel
+       * vector. It is assumed that
+       * they have the same size,
+       * and this operation does
+       * not change the
+       * partitioning of the
+       * parallel vector by which
+       * its elements are
+       * distributed across several
+       * MPI processes. What this
+       * operation therefore does
+       * is to copy that chunk of
+       * the given vector @p v that
+       * corresponds to elements of
+       * the target vector that are
+       * stored locally. Elements that are
+       * not stored locally are not
+       * touched.
+       *
+       * This being a parallel
+       * vector, you must make sure
+       * that @em all processes
+       * call this function at the
+       * same time. It is not
+       * possible to change the
+       * local part of a parallel
+       * vector on only one
+       * process, independent of
+       * what other processes do,
+       * with this function.
+       */
+      Vector &operator = (const PETScWrappers::Vector &v);
+
+      /**
+       * Set all components of the vector to
+       * the given number @p s. Simply pass
+       * this down to the base class, but we
+       * still need to declare this function
+       * to make the example given in the
+       * discussion about making the
+       * constructor explicit work.
+       */
+      Vector &operator = (const PetscScalar s);
+
+      /**
+       * Copy the values of a deal.II vector
+       * (as opposed to those of the PETSc
+       * vector wrapper class) into this
+       * object.
+       *
+       * Contrary to the case of sequential
+       * vectors, this operator requires
+       * that the present vector already
+       * has the correct size, since we
+       * need to have a partition and a
+       * communicator present which we
+       * otherwise can't get from the
+       * source vector.
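+       *
+       * A minimal sketch (the size and
+       * communicator names are only
+       * illustrative):
+       * @verbatim
+       *   dealii::Vector<double> serial (n);
+       *   PETScWrappers::MPI::Vector parallel (mpi_communicator, n, local_n);
+       *   // ... fill 'serial' on every process ...
+       *   parallel = serial;   // sizes must already match
+       * @endverbatim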
+       */
+      template <typename number>
+      Vector &operator = (const dealii::Vector<number> &v);
+
+      /**
+       * Change the dimension of the vector
+       * to @p N. It is unspecified how
+       * resizing the vector affects the
+       * memory allocation of this object;
+       * i.e., it is not guaranteed that
+       * resizing it to a smaller size
+       * actually also reduces memory
+       * consumption, or if for efficiency
+       * the same amount of memory is used
+       * for less data.
+       *
+       * @p local_size denotes how many
+       * of the @p N values shall be
+       * stored locally on the present
+       * process.
+       *
+       * @p communicator denotes the MPI
+       * communicator henceforth to be used
+       * for this vector.
+       *
+       * If @p fast is false, the vector
+       * is filled with zeros. Otherwise, the
+       * elements are left in an unspecified
+       * state.
+       */
+      void reinit (const MPI_Comm     &communicator,
+                   const unsigned int  N,
+                   const unsigned int  local_size,
+                   const bool          fast = false);
+
+      /**
+       * Change the dimension to that of
+       * the vector @p v, and also take
+       * over the partitioning into local
+       * sizes as well as the MPI
+       * communicator. The same applies as
+       * for the other @p reinit function.
+       *
+       * The elements of @p v are not
+       * copied, i.e. this function is the
+       * same as calling
+       * <tt>reinit(v.size(),
+       * v.local_size(), fast)</tt>.
+       */
+      void reinit (const Vector &v,
+                   const bool    fast = false);
+
+      /**
+       * Reinit as a ghosted vector. See
+       * constructor with same signature
+       * for more details.
+       */
+      void reinit (const MPI_Comm     &communicator,
+                   const IndexSet   &local,
+                   const IndexSet &ghost = IndexSet(0));
+
+
+      /**
+       * Return a reference to the MPI
+       * communicator object in use with
+       * this vector.
+       */
+      const MPI_Comm &get_mpi_communicator () const;
+      void set_mpi_communicator (const MPI_Comm     &communicator);		// shuqiangwang
+
+    protected:
+      /**
+       * Create a vector of length
+       * @p n. For this class, we create a
+       * parallel vector. @p n denotes
+       * the total size of the vector to be
+       * created. @p local_size denotes
+       * how many of these elements shall
+       * be stored locally.
+       */
+      virtual void create_vector (const unsigned int n,
+                                  const unsigned int local_size);
+
+
+
+      /**
+       * Create a vector of global length
+       * @p n, local size @p local_size and
+       * with the specified ghost
+       * indices. Note that you need to
+       * call update_ghost_values() before
+       * accessing those.
+       */
+      virtual void create_vector (const unsigned int  n,
+                                  const unsigned int  local_size,
+                                  const IndexSet &ghostnodes);
+
+
+    private:
+      /**
+       * Copy of the communicator object to
+       * be used for this parallel vector.
+       */
+      MPI_Comm communicator;
+    };
+
+
+// ------------------ template and inline functions -------------
+
+
+    /**
+     * Global function @p swap which overloads the default implementation
+     * of the C++ standard library which uses a temporary object. The
+     * function simply exchanges the data of the two vectors.
+     *
+     * @relates PETScWrappers::MPI::Vector
+     * @author Wolfgang Bangerth, 2004
+     */
+    inline
+    void swap (Vector &u, Vector &v)
+    {
+      u.swap (v);
+    }
+
+
+#ifndef DOXYGEN
+
+    template <typename number>
+    Vector::Vector (const MPI_Comm         &communicator,
+                    const dealii::Vector<number> &v,
+                    const unsigned int      local_size)
+      :
+      communicator (communicator)
+    {
+      Vector::create_vector (v.size(), local_size);
+
+      *this = v;
+    }
+
+
+
+    inline
+    Vector &
+    Vector::operator = (const PetscScalar s)
+    {
+      VectorBase::operator = (s);
+
+      return *this;
+    }
+
+
+
+    inline
+    Vector &
+    Vector::operator = (const Vector &v)
+    {
+      // if the vectors have different sizes,
+      // then first resize the present one
+      if (size() != v.size())
+        reinit (v.communicator, v.size(), v.local_size(), true);
+
+      const int ierr = VecCopy (v.vector, vector);
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+      return *this;
+    }
+
+
+
+    template <typename number>
+    inline
+    Vector &
+    Vector::operator = (const dealii::Vector<number> &v)
+    {
+      Assert (size() == v.size(),
+              ExcDimensionMismatch (size(), v.size()));
+
+      // the following isn't necessarily fast,
+      // but this is due to the fact that PETSc
+      // doesn't offer an inlined access
+      // operator.
+      //
+      // if someone wants to contribute some
+      // code: to make this code faster, one
+      // could either first convert all values
+      // to PetscScalar, and then set them all
+      // at once using VecSetValues. This has
+      // the drawback that it could take quite
+      // some memory, if the vector is large,
+      // and it would in addition allocate
+      // memory on the heap, which is
+      // expensive. an alternative would be to
+      // split the vector into chunks of, say,
+      // 128 elements, convert a chunk at a
+      // time and set it in the output vector
+      // using VecSetValues. since 128 elements
+      // is small enough, this could easily be
+      // allocated on the stack (as a local
+      // variable) which would make the whole
+      // thing much more efficient.
+      //
+      // a second way to make things faster is
+      // for the special case that
+      // number==PetscScalar. we could then
+      // declare a specialization of this
+      // template, and omit the conversion. the
+      // problem with this is that the best we
+      // can do is to use VecSetValues, but
+      // this isn't very efficient either: it
+      // wants to see an array of indices,
+      // which in this case a) again takes up a
+      // whole lot of memory on the heap, and
+      // b) is totally dumb since its content
+      // would simply be the sequence
+      // 0,1,2,3,...,n. the best of all worlds
+      // would probably be a function in PETSc
+      // that would take a pointer to an array
+      // of PetscScalar values and simply copy
+      // n elements verbatim into the vector...
+      for (unsigned int i=0; i<v.size(); ++i)
+        (*this)(i) = v(i);
+
+      compress (::dealii::VectorOperation::insert);
+
+      return *this;
+    }
+
+
+
+    inline
+    const MPI_Comm &
+    Vector::get_mpi_communicator () const
+    {
+      return communicator;
+    }
+
+    inline
+    void Vector::set_mpi_communicator (const MPI_Comm     &communicator_)
+    {
+    	communicator = communicator_;
+    }
+
+#endif // DOXYGEN
+  }
+}
+
+/**@}*/
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif // DEAL_II_USE_PETSC
+
+/*----------------------------   petsc_parallel_vector.h     ---------------------------*/
+
+#endif
+/*----------------------------   petsc_parallel_vector.h     ---------------------------*/

Deleted: branches/s-wang2/for_deal.II/include/deal.II/lac/petsc_solver.h
===================================================================
--- branches/s-wang2/for_deal.II/include/deal.II/lac/petsc_solver.h	2012-11-30 18:38:17 UTC (rev 1401)
+++ branches/s-wang2/for_deal.II/include/deal.II/lac/petsc_solver.h	2012-11-30 22:02:48 UTC (rev 1402)
@@ -1,1272 +0,0 @@
-//---------------------------------------------------------------------------
-//    $Id: petsc_solver.h 27666 2012-11-21 22:05:49Z bangerth $
-//
-//    Copyright (C) 2004, 2005, 2006, 2007, 2009, 2010, 2012 by the deal.II authors
-//
-//    This file is subject to QPL and may not be  distributed
-//    without copyright and license information. Please refer
-//    to the file deal.II/doc/license.html for the  text  and
-//    further information on this license.
-//
-//---------------------------------------------------------------------------
-#ifndef __deal2__petsc_solver_h
-#define __deal2__petsc_solver_h
-
-
-#include <deal.II/base/config.h>
-
-#ifdef DEAL_II_USE_PETSC
-
-#  include <deal.II/lac/exceptions.h>
-#  include <deal.II/lac/solver_control.h>
-#  include <deal.II/base/std_cxx1x/shared_ptr.h>
-
-#  include <petscksp.h>
-
-DEAL_II_NAMESPACE_OPEN
-
-
-namespace PETScWrappers
-{
-  // forward declarations
-  class MatrixBase;
-  class VectorBase;
-  class PreconditionerBase;
-
-
-  /**
-   * Base class for solver classes using the PETSc solvers. Since solvers in
-   * PETSc are selected based on flags passed to a generic solver object,
-   * basically all the actual solver calls happen in this class, and derived
-   * classes simply set the right flags to select one solver or another, or to
-   * set certain parameters for individual solvers.
-   *
-   * Optionally, the user can create a solver derived from the
-   * SolverBase class and can set the default arguments necessary to
-   * solve the linear system of equations with SolverControl. These
-   * default options can be overridden by specifying command line
-   * arguments of the form @p -ksp_*. For example,
-   * @p -ksp_monitor_true_residual prints out true residual norm
-   * (unpreconditioned) at each iteration and @p -ksp_view provides
-   * information about the linear solver and the preconditioner used in
-   * the current context. The type of the solver can also be changed
-   * during runtime by specifying @p -ksp_type {richardson, cg, gmres,
-   * fgmres, ..} to dynamically test the optimal solver along with a
-   * suitable preconditioner set using @p -pc_type {jacobi, bjacobi,
-   * ilu, lu, ..}. There are several other command line options
-   * available to modify the behavior of the PETSc linear solver and can
-   * be obtained from the <a
-   * href="http://www.mcs.anl.gov/petsc">documentation and manual
-   * pages</a>.
-   *
-   * @note Repeated calls to solve() on a solver object with a Preconditioner
-   * must be used with care. The preconditioner is initialized in the first call
-   * to solve() and subsequent calls reuse the solver and preconditioner
-   * object. This is done for performance reasons. The solver and preconditioner
-   * can be reset by calling reset().
-   *
-   * One of the gotchas of PETSc is that -- in particular in MPI mode -- it
-   * often does not produce very helpful error messages. In order to save
-   * other users some time in searching a hard to track down error, here is
-   * one situation and the error message one gets there:
-   * when you don't specify an MPI communicator to your solver's constructor. In
-   * this case, you will get an error of the following form from each of your
-   * parallel processes:
-   * @verbatim
-   *   [1]PETSC ERROR: PCSetVector() line 1173 in src/ksp/pc/interface/precon.c
-   *   [1]PETSC ERROR:   Arguments must have same communicators!
-   *   [1]PETSC ERROR:   Different communicators in the two objects: Argument # 1 and 2!
-   *   [1]PETSC ERROR: KSPSetUp() line 195 in src/ksp/ksp/interface/itfunc.c
-   * @endverbatim
-   *
-   * This error, on which one can spend a very long time figuring out
-   * what exactly goes wrong, results from not specifying an MPI
-   * communicator. Note that the communicator @em must match that of the
-   * matrix and all vectors in the linear system which we want to
-   * solve. Aggravating the situation is the fact that the default
-   * argument to the solver classes, @p PETSC_COMM_SELF, is the
-   * appropriate argument for the sequential case (which is why it is
-   * the default argument), so this error only shows up in parallel
-   * mode.
-   *
-   * @ingroup PETScWrappers
-   * @author Wolfgang Bangerth, 2004
-   */
-  class SolverBase
-  {
-  public:
-    /**
-     * Constructor. Takes the solver
-     * control object and the MPI
-     * communicator over which parallel
-     * computations are to happen.
-     *
-     * Note that the communicator used here
-     * must match the communicator used in
-     * the system matrix, solution, and
-     * right hand side object of the solve
-     * to be done with this
-     * solver. Otherwise, PETSc will
-     * generate hard to track down errors,
-     * see the documentation of the
-     * SolverBase class.
-     */
-    SolverBase (SolverControl &cn,
-                const MPI_Comm &mpi_communicator);
-
-    /**
-     * Destructor.
-     */
-    virtual ~SolverBase ();
-
-    /**
-     * Solve the linear system
-     * <tt>Ax=b</tt>. Depending on the
-     * information provided by derived
-     * classes and the object passed as a
-     * preconditioner, one of the linear
-     * solvers and preconditioners of PETSc
-     * is chosen.  Repeated calls to
-     * solve() do not reconstruct the
-     * preconditioner for performance
-     * reasons. See class Documentation.
-     */
-    void
-    solve (const MatrixBase         &A,
-           VectorBase               &x,
-           const VectorBase         &b,
-           const PreconditionerBase &preconditioner);
-
-
-    /**
-     * Resets the contained preconditioner
-     * and solver object. See class
-     * description for more details.
-     */
-    virtual void reset();
-
-
-    /**
-      * Sets a prefix name for the solver
-      * object. Useful when customizing the
-      * PETSc KSP object with command-line
-      * options.
-      */
-    void set_prefix(const std::string &prefix);
-
-
-    /**
-     * Access to object that controls
-     * convergence.
-     */
-    SolverControl &control() const;
-
-    /**
-     * Exception
-     */
-    DeclException1 (ExcPETScError,
-                    int,
-                    << "An error with error number " << arg1
-                    << " occurred while calling a PETSc function");
-
-  protected:
-
-    /**
-     * Reference to the object that
-     * controls convergence of the
-     * iterative solver. In fact, for these
-     * PETSc wrappers, PETSc does so
-     * itself, but we copy the data from
-     * this object before starting the
-     * solution process, and copy the data
-     * back into it afterwards.
-     */
-    SolverControl &solver_control;
-
-    /**
-     * Copy of the MPI communicator object
-     * to be used for the solver.
-     */
-    const MPI_Comm mpi_communicator;
-
-    /**
-     * Function that takes a Krylov
-     * Subspace Solver context object, and
-     * sets the type of solver that is
-     * requested by the derived class.
-     */
-    virtual void set_solver_type (KSP &ksp) const = 0;
-
-    /**
-     * Solver prefix name to qualify options
-     * specific to the PETSc KSP object in the
-     * current context.
-     * Note: A hyphen (-) must NOT be given
-     * at the beginning of the prefix name.
-     * The first character of all runtime
-     * options is AUTOMATICALLY the hyphen.
-     */
-    std::string prefix_name;
-
-  private:
-    /**
-     * A function that is used in PETSc as
-     * a callback to check on
-     * convergence. It takes the
-     * information provided from PETSc and
-     * checks it against deal.II's own
-     * SolverControl objects to see if
-     * convergence has been reached.
-     */
-    static
-#ifdef PETSC_USE_64BIT_INDICES
-    PetscErrorCode
-#else
-    int
-#endif
-    convergence_test (KSP                 ksp,
-#ifdef PETSC_USE_64BIT_INDICES
-                      const PetscInt      iteration,
-#else
-                      const int           iteration,
-#endif
-                      const PetscReal     residual_norm,
-                      KSPConvergedReason *reason,
-                      void               *solver_control);
-
-    /**
-     * A structure that contains the PETSc
-     * solver and preconditioner
-     * objects. This object is preserved
-     * between subsequent calls to the
-     * solver if the same preconditioner is
-     * used as in the previous solver
-     * step. This may save some computation
-     * time, if setting up a preconditioner
-     * is expensive, such as in the case of
-     * an ILU for example.
-     *
-     * The actual declaration of this class
-     * is complicated by the fact that
-     * PETSc changed its solver interface
-     * completely and incompatibly between
-     * versions 2.1.6 and 2.2.0 :-(
-     *
-     * Objects of this type are explicitly
-     * created, but are destroyed when the
-     * surrounding solver object goes out
-     * of scope, or when we assign a new
-     * value to the pointer to this
-     * object. The respective *Destroy
-     * functions are therefore written into
-     * the destructor of this object, even
-     * though the object does not have a
-     * constructor.
-     */
-    struct SolverData
-    {
-      /**
-       * Destructor
-       */
-      ~SolverData ();
-
-      /**
-       * Objects for Krylov subspace
-       * solvers and preconditioners.
-       */
-      KSP  ksp;
-      PC   pc;
-    };
-
-    /**
-     * Pointer to an object that stores the
-     * solver context. This is recreated in
-     * the main solver routine if
-     * necessary.
-     */
-    std_cxx1x::shared_ptr<SolverData> solver_data;
-  };
-
-
-
-  /**
-   * An implementation of the solver interface using the PETSc Richardson
-   * solver.
-   *
-   * @ingroup PETScWrappers
-   * @author Wolfgang Bangerth, 2004
-   */
-  class SolverRichardson : public SolverBase
-  {
-  public:
-    /**
-     * Standardized data struct to
-     * pipe additional data to the
-     * solver.
-     */
-    struct AdditionalData
-    {
-      /**
-       * Constructor. By default,
-       * set the damping parameter
-       * to one.
-       */
-      AdditionalData (const double omega = 1);
-
-      /**
-       * Relaxation parameter.
-       */
-      double omega;
-    };
-
-    /**
-     * Constructor. In contrast to
-     * deal.II's own solvers, there is no
-     * need to give a vector memory
-     * object. However, PETSc solvers want
-     * to have an MPI communicator context
-     * over which computations are
-     * parallelized. By default,
-     * @p PETSC_COMM_SELF is used here,
-     * but you can change this. Note that
-     * for single processor (non-MPI)
-     * versions, this parameter does not
-     * have any effect.
-     *
-     * The last argument takes a structure
-     * with additional, solver dependent
-     * flags for tuning.
-     *
-     * Note that the communicator used here
-     * must match the communicator used in
-     * the system matrix, solution, and
-     * right hand side object of the solve
-     * to be done with this
-     * solver. Otherwise, PETSc will
-     * generate hard to track down errors,
-     * see the documentation of the
-     * SolverBase class.
-     */
-    SolverRichardson (SolverControl        &cn,
-                      const MPI_Comm       &mpi_communicator = PETSC_COMM_SELF,
-                      const AdditionalData &data = AdditionalData());
-
-  protected:
-    /**
-     * Store a copy of the flags for this
-     * particular solver.
-     */
-    const AdditionalData additional_data;
-
-    /**
-     * Function that takes a Krylov
-     * Subspace Solver context object, and
-     * sets the type of solver that is
-     *appropriate for this class.
-     */
-    virtual void set_solver_type (KSP &ksp) const;
-  };
-
-
-
-  /**
-   * An implementation of the solver interface using the PETSc Chebychev
-   * solver.
-   *
-   * @ingroup PETScWrappers
-   * @author Wolfgang Bangerth, 2004
-   */
-  class SolverChebychev : public SolverBase
-  {
-  public:
-    /**
-     * Standardized data struct to
-     * pipe additional data to the
-     * solver.
-     */
-    struct AdditionalData
-    {};
-
-    /**
-     * Constructor. In contrast to
-     * deal.II's own solvers, there is no
-     * need to give a vector memory
-     * object. However, PETSc solvers want
-     * to have an MPI communicator context
-     * over which computations are
-     * parallelized. By default,
-     * @p PETSC_COMM_SELF is used here,
-     * but you can change this. Note that
-     * for single processor (non-MPI)
-     * versions, this parameter does not
-     * have any effect.
-     *
-     * The last argument takes a structure
-     * with additional, solver dependent
-     * flags for tuning.
-     *
-     * Note that the communicator used here
-     * must match the communicator used in
-     * the system matrix, solution, and
-     * right hand side object of the solve
-     * to be done with this
-     * solver. Otherwise, PETSc will
-     * generate hard to track down errors,
-     * see the documentation of the
-     * SolverBase class.
-     */
-    SolverChebychev (SolverControl        &cn,
-                     const MPI_Comm       &mpi_communicator = PETSC_COMM_SELF,
-                     const AdditionalData &data = AdditionalData());
-
-  protected:
-    /**
-     * Store a copy of the flags for this
-     * particular solver.
-     */
-    const AdditionalData additional_data;
-
-    /**
-     * Function that takes a Krylov
-     * Subspace Solver context object, and
-     * sets the type of solver that is
-     *appropriate for this class.
-     */
-    virtual void set_solver_type (KSP &ksp) const;
-  };
-
-
-
-  /**
-   * An implementation of the solver interface using the PETSc CG
-   * solver.
-   *
-   * @ingroup PETScWrappers
-   * @author Wolfgang Bangerth, 2004
-   */
-  class SolverCG : public SolverBase
-  {
-  public:
-    /**
-     * Standardized data struct to
-     * pipe additional data to the
-     * solver.
-     */
-    struct AdditionalData
-    {};
-
-    /**
-     * Constructor. In contrast to
-     * deal.II's own solvers, there is no
-     * need to give a vector memory
-     * object. However, PETSc solvers want
-     * to have an MPI communicator context
-     * over which computations are
-     * parallelized. By default,
-     * @p PETSC_COMM_SELF is used here,
-     * but you can change this. Note that
-     * for single processor (non-MPI)
-     * versions, this parameter does not
-     * have any effect.
-     *
-     * The last argument takes a structure
-     * with additional, solver dependent
-     * flags for tuning.
-     *
-     * Note that the communicator used here
-     * must match the communicator used in
-     * the system matrix, solution, and
-     * right hand side object of the solve
-     * to be done with this
-     * solver. Otherwise, PETSc will
-     * generate hard to track down errors,
-     * see the documentation of the
-     * SolverBase class.
-     */
-    SolverCG (SolverControl        &cn,
-              const MPI_Comm       &mpi_communicator = PETSC_COMM_SELF,
-              const AdditionalData &data = AdditionalData());
-
-  protected:
-    /**
-     * Store a copy of the flags for this
-     * particular solver.
-     */
-    const AdditionalData additional_data;
-
-    /**
-     * Function that takes a Krylov
-     * Subspace Solver context object, and
-     * sets the type of solver that is
-     *appropriate for this class.
-     */
-    virtual void set_solver_type (KSP &ksp) const;
-  };
-
-
-
-  /**
-   * An implementation of the solver interface using the PETSc BiCG
-   * solver.
-   *
-   * @ingroup PETScWrappers
-   * @author Wolfgang Bangerth, 2004
-   */
-  class SolverBiCG : public SolverBase
-  {
-  public:
-    /**
-     * Standardized data struct to
-     * pipe additional data to the
-     * solver.
-     */
-    struct AdditionalData
-    {};
-
-    /**
-     * Constructor. In contrast to
-     * deal.II's own solvers, there is no
-     * need to give a vector memory
-     * object. However, PETSc solvers want
-     * to have an MPI communicator context
-     * over which computations are
-     * parallelized. By default,
-     * @p PETSC_COMM_SELF is used here,
-     * but you can change this. Note that
-     * for single processor (non-MPI)
-     * versions, this parameter does not
-     * have any effect.
-     *
-     * The last argument takes a structure
-     * with additional, solver dependent
-     * flags for tuning.
-     *
-     * Note that the communicator used here
-     * must match the communicator used in
-     * the system matrix, solution, and
-     * right hand side object of the solve
-     * to be done with this
-     * solver. Otherwise, PETSc will
-     * generate hard to track down errors,
-     * see the documentation of the
-     * SolverBase class.
-     */
-    SolverBiCG (SolverControl        &cn,
-                const MPI_Comm       &mpi_communicator = PETSC_COMM_SELF,
-                const AdditionalData &data = AdditionalData());
-
-  protected:
-    /**
-     * Store a copy of the flags for this
-     * particular solver.
-     */
-    const AdditionalData additional_data;
-
-    /**
-     * Function that takes a Krylov
-     * Subspace Solver context object, and
-     * sets the type of solver that is
-     *appropriate for this class.
-     */
-    virtual void set_solver_type (KSP &ksp) const;
-  };
-
-
-
-  /**
-   * An implementation of the solver interface using the PETSc GMRES
-   * solver.
-   *
-   * @author Wolfgang Bangerth, 2004
-   */
-  class SolverGMRES : public SolverBase
-  {
-  public:
-    /**
-     * Standardized data struct to
-     * pipe additional data to the
-     * solver.
-     */
-    struct AdditionalData
-    {
-      /**
-       * Constructor. By default, set the
-       * number of temporary vectors to
-       * 30, i.e. do a restart every 30
-       * iterations.
-       */
-      AdditionalData (const unsigned int restart_parameter = 30,
-                      const bool right_preconditioning = false);
-
-      /**
-       * Maximum number of
-       * tmp vectors.
-       */
-      unsigned int restart_parameter;
-
-      /**
-       * Flag for right
-       * preconditioning.
-       */
-      bool right_preconditioning;
-    };
-
-    /**
-     * Constructor. In contrast to
-     * deal.II's own solvers, there is no
-     * need to give a vector memory
-     * object. However, PETSc solvers want
-     * to have an MPI communicator context
-     * over which computations are
-     * parallelized. By default,
-     * @p PETSC_COMM_SELF is used here,
-     * but you can change this. Note that
-     * for single processor (non-MPI)
-     * versions, this parameter does not
-     * have any effect.
-     *
-     * The last argument takes a structure
-     * with additional, solver dependent
-     * flags for tuning.
-     *
-     * Note that the communicator used here
-     * must match the communicator used in
-     * the system matrix, solution, and
-     * right hand side object of the solve
-     * to be done with this
-     * solver. Otherwise, PETSc will
-     * generate hard to track down errors,
-     * see the documentation of the
-     * SolverBase class.
-     */
-    SolverGMRES (SolverControl        &cn,
-                 const MPI_Comm       &mpi_communicator = PETSC_COMM_SELF,
-                 const AdditionalData &data = AdditionalData());
-
-  protected:
-    /**
-     * Store a copy of the flags for this
-     * particular solver.
-     */
-    const AdditionalData additional_data;
-
-    /**
-     * Function that takes a Krylov
-     * Subspace Solver context object, and
-     * sets the type of solver that is
-     *appropriate for this class.
-     */
-    virtual void set_solver_type (KSP &ksp) const;
-  };
-
-
-
-  /**
-   * An implementation of the solver interface using the PETSc BiCGStab
-   * solver.
-   *
-   * @ingroup PETScWrappers
-   * @author Wolfgang Bangerth, 2004
-   */
-  class SolverBicgstab : public SolverBase
-  {
-  public:
-    /**
-     * Standardized data struct to
-     * pipe additional data to the
-     * solver.
-     */
-    struct AdditionalData
-    {};
-
-    /**
-     * Constructor. In contrast to
-     * deal.II's own solvers, there is no
-     * need to give a vector memory
-     * object. However, PETSc solvers want
-     * to have an MPI communicator context
-     * over which computations are
-     * parallelized. By default,
-     * @p PETSC_COMM_SELF is used here,
-     * but you can change this. Note that
-     * for single processor (non-MPI)
-     * versions, this parameter does not
-     * have any effect.
-     *
-     * The last argument takes a structure
-     * with additional, solver dependent
-     * flags for tuning.
-     *
-     * Note that the communicator used here
-     * must match the communicator used in
-     * the system matrix, solution, and
-     * right hand side object of the solve
-     * to be done with this
-     * solver. Otherwise, PETSc will
-     * generate hard to track down errors,
-     * see the documentation of the
-     * SolverBase class.
-     */
-    SolverBicgstab (SolverControl        &cn,
-                    const MPI_Comm       &mpi_communicator = PETSC_COMM_SELF,
-                    const AdditionalData &data = AdditionalData());
-
-  protected:
-    /**
-     * Store a copy of the flags for this
-     * particular solver.
-     */
-    const AdditionalData additional_data;
-
-    /**
-     * Function that takes a Krylov
-     * Subspace Solver context object, and
-     * sets the type of solver that is
-     *appropriate for this class.
-     */
-    virtual void set_solver_type (KSP &ksp) const;
-  };
-
-  /**
-   * An implementation of the solver interface using the PETSc CG Squared
-   * solver.
-   *
-   * @ingroup PETScWrappers
-   * @author Wolfgang Bangerth, 2004
-   */
-  class SolverCGS : public SolverBase
-  {
-  public:
-    /**
-     * Standardized data struct to
-     * pipe additional data to the
-     * solver.
-     */
-    struct AdditionalData
-    {};
-
-    /**
-     * Constructor. In contrast to
-     * deal.II's own solvers, there is no
-     * need to give a vector memory
-     * object. However, PETSc solvers want
-     * to have an MPI communicator context
-     * over which computations are
-     * parallelized. By default,
-     * @p PETSC_COMM_SELF is used here,
-     * but you can change this. Note that
-     * for single processor (non-MPI)
-     * versions, this parameter does not
-     * have any effect.
-     *
-     * The last argument takes a structure
-     * with additional, solver dependent
-     * flags for tuning.
-     *
-     * Note that the communicator used here
-     * must match the communicator used in
-     * the system matrix, solution, and
-     * right hand side object of the solve
-     * to be done with this
-     * solver. Otherwise, PETSc will
-     * generate hard to track down errors,
-     * see the documentation of the
-     * SolverBase class.
-     */
-    SolverCGS (SolverControl        &cn,
-               const MPI_Comm       &mpi_communicator = PETSC_COMM_SELF,
-               const AdditionalData &data = AdditionalData());
-
-  protected:
-    /**
-     * Store a copy of the flags for this
-     * particular solver.
-     */
-    const AdditionalData additional_data;
-
-    /**
-     * Function that takes a Krylov
-     * Subspace Solver context object, and
-     * sets the type of solver that is
-     *appropriate for this class.
-     */
-    virtual void set_solver_type (KSP &ksp) const;
-  };
-
-
-
-  /**
-   * An implementation of the solver interface using the PETSc TFQMR
-   * solver.
-   *
-   * @ingroup PETScWrappers
-   * @author Wolfgang Bangerth, 2004
-   */
-  class SolverTFQMR : public SolverBase
-  {
-  public:
-    /**
-     * Standardized data struct to
-     * pipe additional data to the
-     * solver.
-     */
-    struct AdditionalData
-    {};
-
-    /**
-     * Constructor. In contrast to
-     * deal.II's own solvers, there is no
-     * need to give a vector memory
-     * object. However, PETSc solvers want
-     * to have an MPI communicator context
-     * over which computations are
-     * parallelized. By default,
-     * @p PETSC_COMM_SELF is used here,
-     * but you can change this. Note that
-     * for single processor (non-MPI)
-     * versions, this parameter does not
-     * have any effect.
-     *
-     * The last argument takes a structure
-     * with additional, solver dependent
-     * flags for tuning.
-     *
-     * Note that the communicator used here
-     * must match the communicator used in
-     * the system matrix, solution, and
-     * right hand side object of the solve
-     * to be done with this
-     * solver. Otherwise, PETSc will
-     * generate hard to track down errors,
-     * see the documentation of the
-     * SolverBase class.
-     */
-    SolverTFQMR (SolverControl        &cn,
-                 const MPI_Comm       &mpi_communicator = PETSC_COMM_SELF,
-                 const AdditionalData &data = AdditionalData());
-
-  protected:
-    /**
-     * Store a copy of the flags for this
-     * particular solver.
-     */
-    const AdditionalData additional_data;
-
-    /**
-     * Function that takes a Krylov
-     * Subspace Solver context object, and
-     * sets the type of solver that is
-     *appropriate for this class.
-     */
-    virtual void set_solver_type (KSP &ksp) const;
-  };
-
-
-
-
-  /**
-   * An implementation of the solver interface using the PETSc TFQMR-2 solver
-   * (called TCQMR in PETSc). Note that this solver had a serious bug in
-   * versions up to and including PETSc 2.1.6, in that it did not check
-   * convergence and always returned an error code. Thus, this class will abort
-   * with an error indicating failure to converge with PETSc 2.1.6 and
-   * prior. This should be fixed in later versions of PETSc, though.
-   *
-   * @ingroup PETScWrappers
-   * @author Wolfgang Bangerth, 2004
-   */
-  class SolverTCQMR : public SolverBase
-  {
-  public:
-    /**
-     * Standardized data struct to
-     * pipe additional data to the
-     * solver.
-     */
-    struct AdditionalData
-    {};
-
-    /**
-     * Constructor. In contrast to
-     * deal.II's own solvers, there is no
-     * need to give a vector memory
-     * object. However, PETSc solvers want
-     * to have an MPI communicator context
-     * over which computations are
-     * parallelized. By default,
-     * @p PETSC_COMM_SELF is used here,
-     * but you can change this. Note that
-     * for single processor (non-MPI)
-     * versions, this parameter does not
-     * have any effect.
-     *
-     * The last argument takes a structure
-     * with additional, solver dependent
-     * flags for tuning.
-     *
-     * Note that the communicator used here
-     * must match the communicator used in
-     * the system matrix, solution, and
-     * right hand side object of the solve
-     * to be done with this
-     * solver. Otherwise, PETSc will
-     * generate hard to track down errors,
-     * see the documentation of the
-     * SolverBase class.
-     */
-    SolverTCQMR (SolverControl        &cn,
-                 const MPI_Comm       &mpi_communicator = PETSC_COMM_SELF,
-                 const AdditionalData &data = AdditionalData());
-
-  protected:
-    /**
-     * Store a copy of the flags for this
-     * particular solver.
-     */
-    const AdditionalData additional_data;
-
-    /**
-     * Function that takes a Krylov
-     * Subspace Solver context object, and
-     * sets the type of solver that is
-     *appropriate for this class.
-     */
-    virtual void set_solver_type (KSP &ksp) const;
-  };
-
-
-
-  /**
-   * An implementation of the solver interface using the PETSc CR
-   * solver.
-   *
-   * @ingroup PETScWrappers
-   * @author Wolfgang Bangerth, 2004
-   */
-  class SolverCR : public SolverBase
-  {
-  public:
-    /**
-     * Standardized data struct to
-     * pipe additional data to the
-     * solver.
-     */
-    struct AdditionalData
-    {};
-
-    /**
-     * Constructor. In contrast to
-     * deal.II's own solvers, there is no
-     * need to give a vector memory
-     * object. However, PETSc solvers want
-     * to have an MPI communicator context
-     * over which computations are
-     * parallelized. By default,
-     * @p PETSC_COMM_SELF is used here,
-     * but you can change this. Note that
-     * for single processor (non-MPI)
-     * versions, this parameter does not
-     * have any effect.
-     *
-     * The last argument takes a structure
-     * with additional, solver dependent
-     * flags for tuning.
-     *
-     * Note that the communicator used here
-     * must match the communicator used in
-     * the system matrix, solution, and
-     * right hand side object of the solve
-     * to be done with this
-     * solver. Otherwise, PETSc will
-     * generate hard to track down errors,
-     * see the documentation of the
-     * SolverBase class.
-     */
-    SolverCR (SolverControl        &cn,
-              const MPI_Comm       &mpi_communicator = PETSC_COMM_SELF,
-              const AdditionalData &data = AdditionalData());
-
-  protected:
-    /**
-     * Store a copy of the flags for this
-     * particular solver.
-     */
-    const AdditionalData additional_data;
-
-    /**
-     * Function that takes a Krylov
-     * Subspace Solver context object, and
-     * sets the type of solver that is
-     *appropriate for this class.
-     */
-    virtual void set_solver_type (KSP &ksp) const;
-  };
-
-
-
-  /**
-   * An implementation of the solver interface using the PETSc Least Squares
-   * solver.
-   *
-   * @ingroup PETScWrappers
-   * @author Wolfgang Bangerth, 2004
-   */
-  class SolverLSQR : public SolverBase
-  {
-  public:
-    /**
-     * Standardized data struct to
-     * pipe additional data to the
-     * solver.
-     */
-    struct AdditionalData
-    {};
-
-    /**
-     * Constructor. In contrast to
-     * deal.II's own solvers, there is no
-     * need to give a vector memory
-     * object. However, PETSc solvers want
-     * to have an MPI communicator context
-     * over which computations are
-     * parallelized. By default,
-     * @p PETSC_COMM_SELF is used here,
-     * but you can change this. Note that
-     * for single processor (non-MPI)
-     * versions, this parameter does not
-     * have any effect.
-     *
-     * The last argument takes a structure
-     * with additional, solver dependent
-     * flags for tuning.
-     *
-     * Note that the communicator used here
-     * must match the communicator used in
-     * the system matrix, solution, and
-     * right hand side object of the solve
-     * to be done with this
-     * solver. Otherwise, PETSc will
-     * generate hard to track down errors,
-     * see the documentation of the
-     * SolverBase class.
-     */
-    SolverLSQR (SolverControl        &cn,
-                const MPI_Comm       &mpi_communicator = PETSC_COMM_SELF,
-                const AdditionalData &data = AdditionalData());
-
-  protected:
-    /**
-     * Store a copy of the flags for this
-     * particular solver.
-     */
-    const AdditionalData additional_data;
-
-    /**
-     * Function that takes a Krylov
-     * Subspace Solver context object, and
-     * sets the type of solver that is
-     *appropriate for this class.
-     */
-    virtual void set_solver_type (KSP &ksp) const;
-  };
-
-
-  /**
-   * An implementation of the solver interface using the PETSc PREONLY
-   * solver. Actually this is NOT a real solution algorithm. solve() only
-   * applies the preconditioner once and returns immediately. Its only purpose
-   * is to provide a solver object, when the preconditioner should be used as a
-   * real solver. It is very useful in conjunction with the complete LU
-   * decomposition preconditioner <tt> PreconditionLU </tt>, which in
-   * conjunction with this solver class becomes a direct solver.
-   *
-   * @ingroup PETScWrappers
-   * @author Wolfgang Bangerth, 2004, Oliver Kayser-Herold, 2004
-   */
-  class SolverPreOnly : public SolverBase
-  {
-  public:
-    /**
-     * Standardized data struct to
-     * pipe additional data to the
-     * solver.
-     */
-    struct AdditionalData
-    {};
-
-    /**
-     * Constructor. In contrast to
-     * deal.II's own solvers, there is no
-     * need to give a vector memory
-     * object. However, PETSc solvers want
-     * to have an MPI communicator context
-     * over which computations are
-     * parallelized. By default,
-     * @p PETSC_COMM_SELF is used here,
-     * but you can change this. Note that
-     * for single processor (non-MPI)
-     * versions, this parameter does not
-     * have any effect.
-     *
-     * The last argument takes a structure
-     * with additional, solver dependent
-     * flags for tuning.
-     *
-     * Note that the communicator used here
-     * must match the communicator used in
-     * the system matrix, solution, and
-     * right hand side object of the solve
-     * to be done with this
-     * solver. Otherwise, PETSc will
-     * generate hard to track down errors,
-     * see the documentation of the
-     * SolverBase class.
-     */
-    SolverPreOnly (SolverControl        &cn,
-                   const MPI_Comm       &mpi_communicator = PETSC_COMM_SELF,
-                   const AdditionalData &data = AdditionalData());
-
-  protected:
-    /**
-     * Store a copy of the flags for this
-     * particular solver.
-     */
-    const AdditionalData additional_data;
-
-    /**
-     * Function that takes a Krylov
-     * Subspace Solver context object, and
-     * sets the type of solver that is
-     * appropriate for this class.
-     */
-    virtual void set_solver_type (KSP &ksp) const;
-  };
-
-  /**
-   * An implementation of the solver interface using the sparse direct MUMPS
-   * solver through PETSc. This class has the usual interface of all other
-   * solver classes but it is of course different in that it doesn't implement
-   * an iterative solver. As a consequence, things like the SolverControl object
-   * have no particular meaning here.
-   *
-   * MUMPS allows to make use of symmetry in this matrix. In this class this is
-   * made possible by the set_symmetric_mode() function. If your matrix is
-   * symmetric, you can use this class as follows:
-   * @code
-   *    SolverControl cn;
-   *    PETScWrappers::SparseDirectMUMPS solver(cn, mpi_communicator);
-   *    solver.set_symmetric_mode(true);
-   *    solver.solve(system_matrix, solution, system_rhs);
-   * @endcode
-   *
-   * @note The class internally calls KSPSetFromOptions thus you are
-   * able to use all the PETSc parameters for MATSOLVERMUMPS package.
-   * See http://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/Mat/MATSOLVERMUMPS.html
-   *
-   * @ingroup PETScWrappers
-   * @author Daniel Brauss, Alexander Grayver, 2012
-   */
-  class SparseDirectMUMPS : public SolverBase
-  {
-  public:
-    /**
-     * Standardized data structure
-     * to pipe additional data to
-     * the solver.
-     */
-    struct AdditionalData
-    {};
-    /**
-     * Constructor
-     */
-    SparseDirectMUMPS (SolverControl        &cn,
-                       const MPI_Comm       &mpi_communicator = PETSC_COMM_SELF,
-                       const AdditionalData &data = AdditionalData());
-
-    /**
-     * The method to solve the
-     * linear system.
-     */
-    void solve (const MatrixBase &A,
-                VectorBase       &x,
-                const VectorBase &b);
-
-    /**
-    * The method allows to take advantage
-    * if the system matrix is symmetric by
-    * using LDL^T decomposition unstead of
-    * more expensive LU. The argument
-    * indicates whether the matrix is
-    * symmetric or not.
-    */
-    void set_symmetric_mode (const bool flag);
-
-  protected:
-    /**
-     * Store a copy of flags for this
-     * particular solver.
-     */
-    const AdditionalData additional_data;
-
-    virtual void set_solver_type (KSP &ksp) const;
-
-  private:
-    /**
-     * A function that is used in PETSc
-     * as a callback to check convergence.
-     * It takes the information provided
-     * from PETSc and checks it against
-     * deal.II's own SolverControl objects
-     * to see if convergence has been reached.
-     */
-    static
-#ifdef PETSC_USE_64BIT_INDICES
-    PetscErrorCode
-#else
-    int
-#endif
-    convergence_test (KSP                ksp,
-#ifdef PETSC_USE_64BIT_INDICES
-                      const PetscInt     iteration,
-#else
-                      const int          iteration,
-#endif
-                      const PetscReal    residual_norm,
-                      KSPConvergedReason *reason,
-                      void               *solver_control);
-    /**
-     * A structure that contains the
-     * PETSc solver and preconditioner
-     * objects.  Since the solve member
-     * function in the base is not used
-     * here, the private SolverData struct
-     * located in the base could not be used
-     * either
-     */
-    struct SolverDataMUMPS
-    {
-      KSP ksp;
-      PC  pc;
-    };
-
-    std_cxx1x::shared_ptr<SolverDataMUMPS> solver_data;
-
-    /**
-     * Flag specifies whether matrix
-     * being factorized is symmetric
-     * or not. It influences the type
-     * of the used preconditioner
-     * (PCLU or PCCHOLESKY)
-     */
-    bool symmetric_mode;
-  };
-}
-
-DEAL_II_NAMESPACE_CLOSE
-
-#endif // DEAL_II_USE_PETSC
-
-/*----------------------------   petsc_solver.h     ---------------------------*/
-
-#endif
-/*----------------------------   petsc_solver.h     ---------------------------*/

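The convergence_test() callback declared in the (now removed) petsc_solver.h above is the hook through which PETSc's KSP iteration reports back to deal.II's SolverControl. A rough, standalone sketch of that mechanism (not the deal.II implementation: the callback body, the fixed tolerance, the iteration cap and the function names are invented, and the four-argument KSPSetConvergenceTest() signature is the PETSc 3.x interface of this era):

    #include <petscksp.h>

    // Hypothetical convergence test: declare success once the residual
    // norm falls below a tolerance passed in through the context pointer.
    static PetscErrorCode my_convergence_test(KSP /*ksp*/,
                                              PetscInt iteration,
                                              PetscReal residual_norm,
                                              KSPConvergedReason *reason,
                                              void *ctx)
    {
      const double tolerance = *static_cast<double *>(ctx);

      if (residual_norm < tolerance)
        *reason = KSP_CONVERGED_RTOL;       // tell PETSc we are done
      else if (iteration > 1000)
        *reason = KSP_DIVERGED_ITS;         // give up after too many steps
      else
        *reason = KSP_CONVERGED_ITERATING;  // keep iterating
      return 0;
    }

    void attach_convergence_test(KSP ksp, double *tolerance)
    {
      // last argument is an optional destructor for the context object
      KSPSetConvergenceTest(ksp, &my_convergence_test, tolerance, PETSC_NULL);
    }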
Added: branches/s-wang2/for_deal.II/source/lac/petsc_parallel_sparse_matrix.cc
===================================================================
--- branches/s-wang2/for_deal.II/source/lac/petsc_parallel_sparse_matrix.cc	                        (rev 0)
+++ branches/s-wang2/for_deal.II/source/lac/petsc_parallel_sparse_matrix.cc	2012-11-30 22:02:48 UTC (rev 1402)
@@ -0,0 +1,708 @@
+//---------------------------------------------------------------------------
+//    $Id: petsc_parallel_sparse_matrix.cc 27628 2012-11-20 22:49:26Z heister $
+//    Version: $Name$
+//
+//    Copyright (C) 2004, 2005, 2006, 2008, 2009, 2010, 2012 by the deal.II authors
+//
+//    This file is subject to QPL and may not be  distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the  text  and
+//    further information on this license.
+//
+//---------------------------------------------------------------------------
+
+
+#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
+
+#ifdef DEAL_II_USE_PETSC
+
+#  include <deal.II/lac/petsc_vector.h>
+#  include <deal.II/lac/sparsity_pattern.h>
+#  include <deal.II/lac/compressed_sparsity_pattern.h>
+#  include <deal.II/lac/compressed_simple_sparsity_pattern.h>
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace PETScWrappers
+{
+  namespace MPI
+  {
+
+    SparseMatrix::SparseMatrix ()
+    {
+      // just like for vectors: since we
+      // create an empty matrix, we can as
+      // well make it sequential
+      const int m=0, n=0, n_nonzero_per_row=0;
+      const int ierr
+        = MatCreateSeqAIJ(PETSC_COMM_SELF, m, n, n_nonzero_per_row,						// shuqiangwang
+                          0, &matrix);
+//      const int n = 0, d_nz = 0, o_nz = 0;
+//      const int ierr = MatCreateAIJ(MPI_COMM_WORLD,n,n,PETSC_DECIDE,PETSC_DECIDE,		// shuqiangwang
+//    		  d_nz,PETSC_NULL,o_nz,PETSC_NULL,&matrix);
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+    }
+
+
+
+    SparseMatrix::SparseMatrix (const MPI_Comm    &communicator,
+                                const unsigned int m,
+                                const unsigned int n,
+                                const unsigned int local_rows,
+                                const unsigned int local_columns,
+                                const unsigned int n_nonzero_per_row,
+                                const bool         is_symmetric)
+      :
+      communicator (communicator)
+    {
+      do_reinit (m, n, local_rows, local_columns,
+                 n_nonzero_per_row, is_symmetric);
+    }
+
+
+
+    SparseMatrix::SparseMatrix (const MPI_Comm    &communicator,
+                                const unsigned int m,
+                                const unsigned int n,
+                                const unsigned int local_rows,
+                                const unsigned int local_columns,
+                                const std::vector<unsigned int> &row_lengths,
+                                const bool         is_symmetric)
+      :
+      communicator (communicator)
+    {
+      do_reinit (m, n, local_rows, local_columns,
+                 row_lengths, is_symmetric);
+    }
+
+
+
+    template <typename SparsityType>
+    SparseMatrix::
+    SparseMatrix (const MPI_Comm                  &communicator,
+                  const SparsityType              &sparsity_pattern,
+                  const std::vector<unsigned int> &local_rows_per_process,
+                  const std::vector<unsigned int> &local_columns_per_process,
+                  const unsigned int               this_process,
+                  const bool                       preset_nonzero_locations)
+      :
+      communicator (communicator)
+    {
+      do_reinit (sparsity_pattern, local_rows_per_process,
+                 local_columns_per_process, this_process,
+                 preset_nonzero_locations);
+    }
+
+
+
+    SparseMatrix &
+    SparseMatrix::operator = (const value_type d)
+    {
+      MatrixBase::operator = (d);
+      return *this;
+    }
+
+
+
+    void
+    SparseMatrix::reinit (const MPI_Comm    &communicator,
+                          const unsigned int m,
+                          const unsigned int n,
+                          const unsigned int local_rows,
+                          const unsigned int local_columns,
+                          const unsigned int n_nonzero_per_row,
+                          const bool         is_symmetric)
+    {
+      this->communicator = communicator;
+
+      // get rid of old matrix and generate a
+      // new one
+#if DEAL_II_PETSC_VERSION_LT(3,2,0)
+      const int ierr = MatDestroy (matrix);
+#else
+      const int ierr = MatDestroy (&matrix);
+#endif
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+      do_reinit (m, n, local_rows, local_columns,
+                 n_nonzero_per_row, is_symmetric);
+    }
+
+
+
+    void
+    SparseMatrix::reinit (const MPI_Comm    &communicator,
+                          const unsigned int m,
+                          const unsigned int n,
+                          const unsigned int local_rows,
+                          const unsigned int local_columns,
+                          const std::vector<unsigned int> &row_lengths,
+                          const bool         is_symmetric)
+    {
+      this->communicator = communicator;
+
+      // get rid of old matrix and generate a
+      // new one
+#if DEAL_II_PETSC_VERSION_LT(3,2,0)
+      const int ierr = MatDestroy (matrix);
+#else
+      const int ierr = MatDestroy (&matrix);
+#endif
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+      do_reinit (m, n, local_rows, local_columns, row_lengths, is_symmetric);
+    }
+
+
+
+    template <typename SparsityType>
+    void
+    SparseMatrix::
+    reinit (const MPI_Comm                  &communicator,
+            const SparsityType              &sparsity_pattern,
+            const std::vector<unsigned int> &local_rows_per_process,
+            const std::vector<unsigned int> &local_columns_per_process,
+            const unsigned int               this_process,
+            const bool                       preset_nonzero_locations)
+    {
+      this->communicator = communicator;
+
+      // get rid of old matrix and generate a
+      // new one
+#if DEAL_II_PETSC_VERSION_LT(3,2,0)
+      const int ierr = MatDestroy (matrix);
+#else
+      const int ierr = MatDestroy (&matrix);
+#endif
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+      do_reinit (sparsity_pattern, local_rows_per_process,
+                 local_columns_per_process, this_process,
+                 preset_nonzero_locations);
+    }
+
+
+
+    void
+    SparseMatrix::do_reinit (const unsigned int m,
+                             const unsigned int n,
+                             const unsigned int local_rows,
+                             const unsigned int local_columns,
+                             const unsigned int n_nonzero_per_row,
+                             const bool         is_symmetric)
+    {
+      Assert (local_rows <= m, ExcLocalRowsTooLarge (local_rows, m));
+
+      // use the call sequence indicating only
+      // a maximal number of elements per row
+      // for all rows globally
+      int ierr;
+
+#if DEAL_II_PETSC_VERSION_LT(3,3,0)
+      ierr
+        = MatCreateMPIAIJ (communicator,
+                           local_rows, local_columns,
+                           m, n,
+                           n_nonzero_per_row, 0, 0, 0,
+                           &matrix);
+#else
+      ierr
+        = MatCreateAIJ (communicator,
+                        local_rows, local_columns,
+                        m, n,
+                        n_nonzero_per_row, 0, 0, 0,
+                        &matrix);
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+      ierr = MatSetOption (matrix, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
+#endif
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+      // set symmetric flag, if so requested
+      if (is_symmetric == true)
+        {
+#if DEAL_II_PETSC_VERSION_LT(3,0,0)
+          const int ierr
+            = MatSetOption (matrix, MAT_SYMMETRIC);
+#else
+          const int ierr
+            = MatSetOption (matrix, MAT_SYMMETRIC, PETSC_TRUE);
+#endif
+
+          AssertThrow (ierr == 0, ExcPETScError(ierr));
+        }
+    }
+
+
+
+    void
+    SparseMatrix::do_reinit (const unsigned int m,
+                             const unsigned int n,
+                             const unsigned int local_rows,
+                             const unsigned int local_columns,
+                             const std::vector<unsigned int> &row_lengths,
+                             const bool         is_symmetric)
+    {
+      Assert (local_rows <= m, ExcLocalRowsTooLarge (local_rows, m));
+
+      Assert (row_lengths.size() == m,
+              ExcDimensionMismatch (row_lengths.size(), m));
+
+      // For the case that
+      // local_columns is smaller
+      // than one of the row lengths
+      // MatCreateMPIAIJ throws an
+      // error. In this case use a
+      // PETScWrappers::SparseMatrix
+      for (unsigned int i=0; i<row_lengths.size(); ++i)
+        Assert(row_lengths[i]<=local_columns,
+               ExcIndexRange(row_lengths[i], 1, local_columns+1));
+
+      // use the call sequence indicating a
+      // maximal number of elements for each
+      // row individually. annoyingly, we
+      // always use unsigned ints for cases
+      // like this, while PETSc wants to see
+      // signed integers. so we have to
+      // convert, unless we want to play dirty
+      // tricks with conversions of pointers
+#ifdef PETSC_USE_64BIT_INDICES
+      const std::vector<PetscInt> int_row_lengths (row_lengths.begin(),
+                                                   row_lengths.end());
+#else
+      const std::vector<signed int> int_row_lengths (row_lengths.begin(),
+                                                     row_lengths.end());
+#endif
+
+//TODO: There must be a significantly better way to provide information about the off-diagonal blocks of the matrix. this way, petsc keeps allocating tiny chunks of memory, and gets completely hung up over this
+
+      int ierr;
+
+#if DEAL_II_PETSC_VERSION_LT(3,3,0)
+      ierr
+        = MatCreateMPIAIJ (communicator,
+                           local_rows, local_columns,
+                           m, n,
+                           0, &int_row_lengths[0], 0, 0,
+                           &matrix);
+#else
+      ierr
+        = MatCreateAIJ (communicator,
+                        local_rows, local_columns,
+                        m, n,
+                        0, &int_row_lengths[0], 0, 0,
+                        &matrix);
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+//TODO: Sometimes the actual number of nonzero entries allocated is greater than the number of nonzero entries, which petsc will complain about unless explicitly disabled with MatSetOption. There is probably a way to prevent a different number of nonzero elements being allocated in the first place. (See also previous TODO).
+
+      ierr = MatSetOption (matrix, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
+#endif
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+      // set symmetric flag, if so requested
+      if (is_symmetric == true)
+        {
+#if DEAL_II_PETSC_VERSION_LT(3,0,0)
+          const int ierr
+            = MatSetOption (matrix, MAT_SYMMETRIC);
+#else
+          const int ierr
+            = MatSetOption (matrix, MAT_SYMMETRIC, PETSC_TRUE);
+#endif
+
+          AssertThrow (ierr == 0, ExcPETScError(ierr));
+        }
+    }
+
+
+
+    template <typename SparsityType>
+    void
+    SparseMatrix::
+    do_reinit (const SparsityType              &sparsity_pattern,
+               const std::vector<unsigned int> &local_rows_per_process,
+               const std::vector<unsigned int> &local_columns_per_process,
+               const unsigned int               this_process,
+               const bool                       preset_nonzero_locations)
+    {
+      Assert (local_rows_per_process.size() == local_columns_per_process.size(),
+              ExcDimensionMismatch (local_rows_per_process.size(),
+                                    local_columns_per_process.size()));
+      Assert (this_process < local_rows_per_process.size(),
+              ExcInternalError());
+      // for each row that we own locally, we
+      // have to count how many of the
+      // entries in the sparsity pattern lie
+      // in the column area we have locally,
+      // and how many aren't. for this, we
+      // first have to know which areas are
+      // ours
+      unsigned int local_row_start = 0;
+      unsigned int local_col_start = 0;
+      for (unsigned int p=0; p<this_process; ++p)
+        {
+          local_row_start += local_rows_per_process[p];
+          local_col_start += local_columns_per_process[p];
+        }
+      const unsigned int
+      local_row_end = local_row_start + local_rows_per_process[this_process];
+
+#if DEAL_II_PETSC_VERSION_LT(2,3,3)
+      //old version to create the matrix, we
+      //can skip calculating the row length
+      //at least starting from 2.3.3 (tested,
+      //see below)
+
+      const unsigned int
+      local_col_end = local_col_start + local_columns_per_process[this_process];
+
+      // then count the elements in- and
+      // out-of-window for the rows we own
+#ifdef PETSC_USE_64BIT_INDICES
+      std::vector<PetscInt>
+#else
+      std::vector<int>
+#endif
+      row_lengths_in_window (local_row_end - local_row_start),
+                            row_lengths_out_of_window (local_row_end - local_row_start);
+      for (unsigned int row = local_row_start; row<local_row_end; ++row)
+        for (unsigned int c=0; c<sparsity_pattern.row_length(row); ++c)
+          {
+            const unsigned int column = sparsity_pattern.column_number(row,c);
+
+            if ((column >= local_col_start) &&
+                (column < local_col_end))
+              ++row_lengths_in_window[row-local_row_start];
+            else
+              ++row_lengths_out_of_window[row-local_row_start];
+          }
+
+
+      // create the matrix. completely
+      // confusingly, PETSc wants us to pass
+      // arrays for the local number of
+      // elements that starts with zero for
+      // the first _local_ row, i.e. it
+      // doesn't index into an array for
+      // _all_ rows.
+      const int ierr
+        = MatCreateMPIAIJ(communicator,
+                          local_rows_per_process[this_process],
+                          local_columns_per_process[this_process],
+                          sparsity_pattern.n_rows(),
+                          sparsity_pattern.n_cols(),
+                          0, &row_lengths_in_window[0],
+                          0, &row_lengths_out_of_window[0],
+                          &matrix);
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+#else //PETSC_VERSION>=2.3.3
+      // new version to create the matrix. We
+      // do not set row length but set the
+      // correct SparsityPattern later.
+      int ierr;
+
+      ierr = MatCreate(communicator,&matrix);
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+      ierr = MatSetSizes(matrix,
+                         local_rows_per_process[this_process],
+                         local_columns_per_process[this_process],
+                         sparsity_pattern.n_rows(),
+                         sparsity_pattern.n_cols());
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+      ierr = MatSetType(matrix,MATMPIAIJ);
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+#endif
+
+
+      // next preset the exact given matrix
+      // entries with zeros, if the user
+      // requested so. this doesn't avoid any
+      // memory allocations, but it at least
+      // avoids some searches later on. the
+      // key here is that we can use the
+      // matrix set routines that set an
+      // entire row at once, not a single
+      // entry at a time
+      //
+      // for the usefulness of this option
+      // read the documentation of this
+      // class.
+      if (preset_nonzero_locations == true)
+        {
+          // starting with petsc 2.2.1, there is
+          // a function
+          // MatMPIAIJSetPreallocationCSR that
+          // can be used to allocate the sparsity
+          // pattern of a matrix if it is already
+          // available. if we don't have this, we
+          // have to somehow clumsily work around
+          // the whole thing:
+#if DEAL_II_PETSC_VERSION_LT(2,2,1)
+
+#ifdef PETSC_USE_64BIT_INDICES
+          std::vector<PetscInt>
+#else
+          std::vector<int>
+#endif
+          row_entries;
+          std::vector<PetscScalar> row_values;
+          for (unsigned int i=0; i<sparsity_pattern.n_rows(); ++i)
+            {
+              row_entries.resize (sparsity_pattern.row_length(i));
+              row_values.resize (sparsity_pattern.row_length(i), 0.0);
+              for (unsigned int j=0; j<sparsity_pattern.row_length(i); ++j)
+                row_entries[j] = sparsity_pattern.column_number (i,j);
+
+              const int int_row = i;
+              MatSetValues (matrix, 1, &int_row,
+                            sparsity_pattern.row_length(i), &row_entries[0],
+                            &row_values[0], INSERT_VALUES);
+            }
+
+          compress ();
+
+#else
+
+          // first set up the column number
+          // array for the rows to be stored
+          // on the local processor. have one
+          // dummy entry at the end to make
+          // sure petsc doesn't read past the
+          // end
+#ifdef PETSC_USE_64BIT_INDICES
+          std::vector<PetscInt>
+#else
+          std::vector<int>
+#endif
+          rowstart_in_window (local_row_end - local_row_start + 1, 0),
+                             colnums_in_window;
+          {
+            unsigned int n_cols = 0;
+            for (unsigned int i=local_row_start; i<local_row_end; ++i)
+              {
+                const unsigned int row_length = sparsity_pattern.row_length(i);
+                rowstart_in_window[i+1-local_row_start]
+                  = rowstart_in_window[i-local_row_start] + row_length;
+                n_cols += row_length;
+              }
+            colnums_in_window.resize (n_cols+1, -1);
+          }
+
+          // now copy over the information
+          // from the sparsity pattern.
+          {
+#ifdef PETSC_USE_64BIT_INDICES
+            PetscInt
+#else
+            int
+#endif
+            * ptr = & colnums_in_window[0];
+
+            for (unsigned int i=local_row_start; i<local_row_end; ++i)
+              {
+                typename SparsityType::row_iterator
+                row_start = sparsity_pattern.row_begin(i),
+                row_end = sparsity_pattern.row_end(i);
+
+                std::copy(row_start, row_end, ptr);
+                ptr += row_end - row_start;
+              }
+          }
+
+
+          // then call the petsc function
+          // that summarily allocates these
+          // entries:
+          MatMPIAIJSetPreallocationCSR (matrix,
+                                        &rowstart_in_window[0],
+                                        &colnums_in_window[0],
+                                        0);
+
+#if DEAL_II_PETSC_VERSION_LT(2,3,3)
+          // this is only needed for old
+          // PETSc versions:
+
+          // for some reason, it does not
+          // seem to be possible to force
+          // actual allocation of actual
+          // entries by using the last
+          // arguments to the call above. if
+          // we don't initialize the entries
+          // like in the following loop, then
+          // the program is unbearably slow
+          // because elements are allocated
+          // and accessed in random order,
+          // which is not what PETSc likes
+          //
+          // note that we actually have to
+          // set the entries to something
+          // non-zero! do the allocation one
+          // row at a time
+          {
+            const std::vector<PetscScalar>
+            values (sparsity_pattern.max_entries_per_row(),
+                    1.);
+
+            for (unsigned int i=local_row_start; i<local_row_end; ++i)
+              {
+#ifdef PETSC_USE_64BIT_INDICES
+                PetscInt
+#else
+                int
+#endif
+                petsc_i = i;
+                MatSetValues (matrix, 1, &petsc_i,
+                              sparsity_pattern.row_length(i),
+                              &colnums_in_window[rowstart_in_window[i-local_row_start]],
+                              &values[0], INSERT_VALUES);
+              }
+          }
+
+          compress ();
+
+          // set the dummy entries set above
+          // back to zero
+          *this = 0;
+#endif // version <=2.3.3
+          compress ();
+
+#endif
+
+          // Tell PETSc that we are not
+          // planning on adding new entries
+          // to the matrix. Generate errors
+          // in debug mode.
+          int ierr;
+#if DEAL_II_PETSC_VERSION_LT(3,0,0)
+#ifdef DEBUG
+          ierr = MatSetOption (matrix, MAT_NEW_NONZERO_LOCATION_ERR);
+          AssertThrow (ierr == 0, ExcPETScError(ierr));
+#else
+          ierr = MatSetOption (matrix, MAT_NO_NEW_NONZERO_LOCATIONS);
+          AssertThrow (ierr == 0, ExcPETScError(ierr));
+#endif
+#else
+#ifdef DEBUG
+          ierr = MatSetOption (matrix, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE);
+          AssertThrow (ierr == 0, ExcPETScError(ierr));
+#else
+          ierr = MatSetOption (matrix, MAT_NEW_NONZERO_LOCATIONS, PETSC_FALSE);
+          AssertThrow (ierr == 0, ExcPETScError(ierr));
+#endif
+#endif
+
+          // Tell PETSc to keep the
+          // SparsityPattern entries even if
+          // we delete a row with
+          // clear_rows() which calls
+          // MatZeroRows(). Otherwise one can
+          // not write into that row
+          // afterwards.
+#if DEAL_II_PETSC_VERSION_LT(3,0,0)
+          ierr = MatSetOption (matrix, MAT_KEEP_ZEROED_ROWS);
+          AssertThrow (ierr == 0, ExcPETScError(ierr));
+#elif DEAL_II_PETSC_VERSION_LT(3,1,0)
+          ierr = MatSetOption (matrix, MAT_KEEP_ZEROED_ROWS, PETSC_TRUE);
+          AssertThrow (ierr == 0, ExcPETScError(ierr));
+#else
+          ierr = MatSetOption (matrix, MAT_KEEP_NONZERO_PATTERN, PETSC_TRUE);
+          AssertThrow (ierr == 0, ExcPETScError(ierr));
+#endif
+
+        }
+    }
+
+    // explicit instantiations
+    //
+    template
+    SparseMatrix::SparseMatrix (const MPI_Comm &,
+                                const SparsityPattern &,
+                                const std::vector<unsigned int> &,
+                                const std::vector<unsigned int> &,
+                                const unsigned int,
+                                const bool);
+    template
+    SparseMatrix::SparseMatrix (const MPI_Comm &,
+                                const CompressedSparsityPattern &,
+                                const std::vector<unsigned int> &,
+                                const std::vector<unsigned int> &,
+                                const unsigned int,
+                                const bool);
+    template
+    SparseMatrix::SparseMatrix (const MPI_Comm &,
+                                const CompressedSimpleSparsityPattern &,
+                                const std::vector<unsigned int> &,
+                                const std::vector<unsigned int> &,
+                                const unsigned int,
+                                const bool);
+
+    template void
+    SparseMatrix::reinit (const MPI_Comm &,
+                          const SparsityPattern &,
+                          const std::vector<unsigned int> &,
+                          const std::vector<unsigned int> &,
+                          const unsigned int,
+                          const bool);
+    template void
+    SparseMatrix::reinit (const MPI_Comm &,
+                          const CompressedSparsityPattern &,
+                          const std::vector<unsigned int> &,
+                          const std::vector<unsigned int> &,
+                          const unsigned int,
+                          const bool);
+    template void
+    SparseMatrix::reinit (const MPI_Comm &,
+                          const CompressedSimpleSparsityPattern &,
+                          const std::vector<unsigned int> &,
+                          const std::vector<unsigned int> &,
+                          const unsigned int,
+                          const bool);
+
+    template void
+    SparseMatrix::do_reinit (const SparsityPattern &,
+                             const std::vector<unsigned int> &,
+                             const std::vector<unsigned int> &,
+                             const unsigned int ,
+                             const bool);
+    template void
+    SparseMatrix::do_reinit (const CompressedSparsityPattern &,
+                             const std::vector<unsigned int> &,
+                             const std::vector<unsigned int> &,
+                             const unsigned int ,
+                             const bool);
+    template void
+    SparseMatrix::do_reinit (const CompressedSimpleSparsityPattern &,
+                             const std::vector<unsigned int> &,
+                             const std::vector<unsigned int> &,
+                             const unsigned int ,
+                             const bool);
+
+
+    PetscScalar
+    SparseMatrix::matrix_norm_square (const Vector &v) const
+    {
+      Vector tmp (v);
+      vmult (tmp, v);
+      return tmp*v;
+    }
+
+    PetscScalar
+    SparseMatrix::matrix_scalar_product (const Vector &u,
+                                         const Vector &v) const
+    {
+      Vector tmp (v);
+      vmult (tmp, v);
+      return u*tmp;
+    }
+
+  }
+}
+
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif // DEAL_II_USE_PETSC

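The do_reinit() variant above hands the locally owned part of a deal.II sparsity pattern to PETSc through MatMPIAIJSetPreallocationCSR. A minimal standalone sketch of those PETSc calls (the one-entry-per-row toy pattern and the helper name are invented for illustration, error checking is omitted, and the MatDestroy(&matrix) call uses the >= 3.2 convention; the rest is the pre-3.5 PETSc C interface used in this file):

    #include <petscmat.h>
    #include <vector>

    // Preallocate an MPIAIJ matrix from CSR data for the locally owned
    // rows, mirroring SparseMatrix::do_reinit(). Toy pattern: each local
    // row contains only its diagonal entry.
    void preallocate_from_csr(MPI_Comm comm,
                              PetscInt n_local_rows,
                              PetscInt n_global_rows)
    {
      Mat matrix;
      MatCreate(comm, &matrix);
      MatSetSizes(matrix, n_local_rows, n_local_rows,
                  n_global_rows, n_global_rows);
      MatSetType(matrix, MATMPIAIJ);

      // figure out which global rows are ours, as the loop over
      // local_rows_per_process does above
      PetscInt end_row = 0;
      MPI_Scan(&n_local_rows, &end_row, 1, MPIU_INT, MPI_SUM, comm);
      const PetscInt first_row = end_row - n_local_rows;

      // CSR arrays indexed from zero for the *local* rows, which is the
      // layout MatMPIAIJSetPreallocationCSR expects
      std::vector<PetscInt> rowstart(n_local_rows + 1);
      std::vector<PetscInt> colnums(n_local_rows);
      for (PetscInt i = 0; i < n_local_rows; ++i)
        {
          rowstart[i] = i;                 // one entry per row ...
          colnums[i]  = first_row + i;     // ... on the diagonal
        }
      rowstart[n_local_rows] = n_local_rows;

      MatMPIAIJSetPreallocationCSR(matrix, &rowstart[0], &colnums[0],
                                   PETSC_NULL);   // structure only, no values
      MatDestroy(&matrix);
    }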
Added: branches/s-wang2/for_deal.II/source/lac/petsc_parallel_vector.cc
===================================================================
--- branches/s-wang2/for_deal.II/source/lac/petsc_parallel_vector.cc	                        (rev 0)
+++ branches/s-wang2/for_deal.II/source/lac/petsc_parallel_vector.cc	2012-11-30 22:02:48 UTC (rev 1402)
@@ -0,0 +1,296 @@
+//---------------------------------------------------------------------------
+//    $Id: petsc_parallel_vector.cc 27628 2012-11-20 22:49:26Z heister $
+//    Version: $Name$
+//
+//    Copyright (C) 2004, 2006, 2008, 2009, 2010, 2011, 2012 by the deal.II authors
+//
+//    This file is subject to QPL and may not be  distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the  text  and
+//    further information on this license.
+//
+//---------------------------------------------------------------------------
+
+
+#include <deal.II/lac/petsc_parallel_vector.h>
+
+#ifdef DEAL_II_USE_PETSC
+
+#  include <deal.II/lac/petsc_vector.h>
+#  include <cmath>
+#  include <algorithm>
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace PETScWrappers
+{
+  namespace MPI
+  {
+
+    Vector::Vector ()
+    {
+      // this is an invalid empty vector, so we
+      // can just as well create a sequential
+      // one to avoid all the overhead incurred
+      // by parallelism
+      const int n = 0;
+      const int ierr
+//        = VecCreateSeq (MPI_COMM_WORLD, n, &vector);		// shuqiangwang
+      = VecCreateMPI(MPI_COMM_WORLD, n, n, &vector);
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+    }
+
+
+
+    Vector::Vector (const MPI_Comm    &communicator,
+                    const unsigned int n,
+                    const unsigned int local_size)
+      :
+      communicator (communicator)
+    {
+      Vector::create_vector (n, local_size);
+    }
+
+
+
+    Vector::Vector (const MPI_Comm    &communicator,
+                    const VectorBase &v,
+                    const unsigned int local_size)
+      :
+      communicator (communicator)
+    {
+      Vector::create_vector (v.size(), local_size);
+
+      VectorBase::operator = (v);
+    }
+
+
+
+    Vector::Vector (const MPI_Comm     &communicator,
+                    const IndexSet   &local,
+                    const IndexSet &ghost)
+      :
+      communicator (communicator)
+    {
+      Assert(local.is_contiguous(), ExcNotImplemented());
+
+      IndexSet ghost_set = ghost;
+      ghost_set.subtract_set(local);
+
+      //possible optimization: figure out if
+      //there are ghost indices (collective
+      //operation!) and then create a
+      //non-ghosted vector.
+//      Vector::create_vector (local.size(), local.n_elements());
+
+      Vector::create_vector(local.size(), local.n_elements(), ghost_set);
+    }
+
+
+
+    void
+    Vector::reinit (const MPI_Comm    &comm,
+                    const unsigned int n,
+                    const unsigned int local_sz,
+                    const bool         fast)
+    {
+      communicator = comm;
+
+      // only do something if the sizes
+      // mismatch (may not be true for every proc)
+
+      int k_global, k = ((size() != n) || (local_size() != local_sz));
+      MPI_Allreduce (&k, &k_global, 1,
+                     MPI_INT, MPI_LOR, communicator);
+
+      if (k_global)
+        {
+          // FIXME: I'd like to use this here,
+          // but somehow it leads to odd errors
+          // somewhere down the line in some of
+          // the tests:
+//         const int ierr = VecSetSizes (vector, n, n);
+//         AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+          // so let's go the slow way:
+          int ierr;
+
+#if DEAL_II_PETSC_VERSION_LT(3,2,0)
+          ierr = VecDestroy (vector);
+#else
+          ierr = VecDestroy (&vector);
+#endif
+
+          AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+          create_vector (n, local_sz);
+        }
+
+      // finally clear the new vector if so
+      // desired
+      if (fast == false)
+        *this = 0;
+    }
+
+
+
+    void
+    Vector::reinit (const Vector &v,
+                    const bool    fast)
+    {
+      communicator = v.communicator;
+
+      reinit (communicator, v.size(), v.local_size(), fast);
+    }
+
+
+
+    void
+    Vector::reinit (const MPI_Comm     &comm,
+                    const IndexSet   &local,
+                    const IndexSet &ghost)
+    {
+      communicator = comm;
+
+      Assert(local.is_contiguous(), ExcNotImplemented());
+
+      IndexSet ghost_set = ghost;
+      ghost_set.subtract_set(local);
+
+      create_vector(local.size(), local.n_elements(), ghost_set);
+    }
+
+
+
+    Vector &
+    Vector::operator = (const PETScWrappers::Vector &v)
+    {
+      // first flush buffers
+      compress ();
+
+      int ierr;
+
+      // get a pointer to the local memory of
+      // this vector
+      PetscScalar *dest_array;
+      ierr = VecGetArray (vector, &dest_array);
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+      // then also a pointer to the source
+      // vector
+      PetscScalar *src_array;
+      ierr = VecGetArray (static_cast<const Vec &>(v), &src_array);
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+      // then copy:
+      const std::pair<unsigned int, unsigned int>
+      local_elements = local_range ();
+      std::copy (src_array + local_elements.first,
+                 src_array + local_elements.second,
+                 dest_array);
+
+      // finally restore the arrays
+      ierr = VecRestoreArray (vector, &dest_array);
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+      ierr = VecRestoreArray (static_cast<const Vec &>(v), &src_array);
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+      return *this;
+    }
+
+
+    void
+    Vector::create_vector (const unsigned int  n,
+                           const unsigned int  local_size)
+    {
+      Assert (local_size <= n, ExcIndexRange (local_size, 0, n));
+
+      const int ierr
+        = VecCreateMPI (communicator, local_size, PETSC_DETERMINE,
+                        &vector);
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+      Assert (size() == n,
+              ExcDimensionMismatch (size(), n));
+    }
+
+
+
+    void
+    Vector::create_vector (const unsigned int  n,
+                           const unsigned int  local_size,
+                           const IndexSet &ghostnodes)
+    {
+      Assert (local_size <= n, ExcIndexRange (local_size, 0, n));
+      ghosted = true;
+      ghost_indices = ghostnodes;
+
+      //64bit indices won't work yet:
+      Assert (sizeof(unsigned int)==sizeof(PetscInt), ExcInternalError());
+
+
+      std::vector<unsigned int> ghostindices;
+      ghostnodes.fill_index_vector(ghostindices);
+
+      const PetscInt *ptr
+        = (ghostindices.size() > 0
+           ?
+           (const PetscInt *)(&(ghostindices[0]))
+           :
+           0);
+
+      int ierr
+        = VecCreateGhost(communicator,
+                         local_size,
+                         PETSC_DETERMINE,
+                         ghostindices.size(),
+                         ptr,
+                         &vector);
+
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+      Assert (size() == n,
+              ExcDimensionMismatch (size(), n));
+
+#if DEBUG
+      // test ghost allocation in debug mode
+
+#ifdef PETSC_USE_64BIT_INDICES
+      PetscInt
+#else
+      int
+#endif
+      begin, end;
+
+      ierr = VecGetOwnershipRange (vector, &begin, &end);
+
+      Assert(local_size==(unsigned int)(end-begin), ExcInternalError());
+
+      Vec l;
+      ierr = VecGhostGetLocalForm(vector, &l);
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+      PetscInt lsize;
+      ierr = VecGetSize(l, &lsize);
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+      ierr = VecGhostRestoreLocalForm(vector, &l);
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+      Assert( lsize==end-begin+(PetscInt)ghost_indices.n_elements() ,ExcInternalError());
+
+#endif
+
+
+    }
+
+
+
+  }
+
+}
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif // DEAL_II_USE_PETSC

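Vector::create_vector() above relies on VecCreateGhost so that the entries listed in the ghost IndexSet are mirrored locally next to the owned range, and the debug check reads them back through VecGhostGetLocalForm. A small standalone sketch of that mechanism (the ownership split and the single invented ghost index are for illustration only, error checking is omitted, and it assumes at least two MPI ranks so the ghost entry really is remote):

    #include <petscvec.h>

    // Each rank owns n_local entries and additionally mirrors the first
    // entry owned by the next rank as a ghost value.
    void ghosted_vector_demo(MPI_Comm comm)
    {
      int rank, size;
      MPI_Comm_rank(comm, &rank);
      MPI_Comm_size(comm, &size);

      const PetscInt n_local = 4;
      const PetscInt ghost   = ((rank + 1) % size) * n_local;

      Vec v;
      VecCreateGhost(comm, n_local, PETSC_DETERMINE, 1, &ghost, &v);

      // ... set the owned entries with VecSetValues() followed by
      // VecAssemblyBegin()/VecAssemblyEnd() ...

      // pull the current values of the ghost entries from their owners
      VecGhostUpdateBegin(v, INSERT_VALUES, SCATTER_FORWARD);
      VecGhostUpdateEnd(v, INSERT_VALUES, SCATTER_FORWARD);

      // the local form has length n_local + number of ghosts, exactly
      // what the debug check in create_vector() verifies
      Vec local_form;
      VecGhostGetLocalForm(v, &local_form);
      // ... read owned and ghost values through local_form ...
      VecGhostRestoreLocalForm(v, &local_form);

      VecDestroy(&v);   // >= 3.2 calling convention
    }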
Added: branches/s-wang2/for_deal.II/source/lac/petsc_precondition.cc
===================================================================
--- branches/s-wang2/for_deal.II/source/lac/petsc_precondition.cc	                        (rev 0)
+++ branches/s-wang2/for_deal.II/source/lac/petsc_precondition.cc	2012-11-30 22:02:48 UTC (rev 1402)
@@ -0,0 +1,709 @@
+//---------------------------------------------------------------------------
+//    $Id: petsc_precondition.cc 27628 2012-11-20 22:49:26Z heister $
+//    Version: $Name$
+//
+//    Copyright (C) 2004, 2006, 2008, 2009, 2010, 2012 by the deal.II authors
+//
+//    This file is subject to QPL and may not be  distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the  text  and
+//    further information on this license.
+//
+//---------------------------------------------------------------------------
+
+
+#include <deal.II/lac/petsc_precondition.h>
+
+#ifdef DEAL_II_USE_PETSC
+
+#  include <deal.II/base/utilities.h>
+#  include <deal.II/lac/petsc_matrix_base.h>
+#  include <deal.II/lac/petsc_vector_base.h>
+#  include <deal.II/lac/petsc_solver.h>
+#  include <petscconf.h>
+#  include <cmath>
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace PETScWrappers
+{
+  PreconditionerBase::PreconditionerBase ()
+    :
+    pc(NULL), matrix(NULL)
+  {}
+
+
+  PreconditionerBase::~PreconditionerBase ()
+  {
+    if (pc!=NULL)
+      {
+#if DEAL_II_PETSC_VERSION_LT(3,2,0)
+        int ierr = PCDestroy(pc);
+#else
+        int ierr = PCDestroy(&pc);
+#endif
+        AssertThrow (ierr == 0, ExcPETScError(ierr));
+      }
+  }
+
+
+  void
+  PreconditionerBase::vmult (VectorBase       &dst,
+                             const VectorBase &src) const
+  {
+    AssertThrow (pc != NULL, StandardExceptions::ExcInvalidState ());
+
+    int ierr;
+    ierr = PCApply(pc, src, dst);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+  }
+
+
+  void
+  PreconditionerBase::create_pc ()
+  {
+    // only allow the creation of the
+    // preconditioner once
+    AssertThrow (pc == NULL, StandardExceptions::ExcInvalidState ());
+
+    MPI_Comm comm;
+    int ierr;
+    // this ugly cast is necessary because the
+    // type Mat and PETScObject are
+    // unrelated.
+    ierr = PetscObjectGetComm(reinterpret_cast<PetscObject>(matrix), &comm);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    ierr = PCCreate(comm, &pc);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    ierr = PCSetOperators(pc , matrix, matrix, SAME_PRECONDITIONER);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+  }
+
+
+  const PC &
+  PreconditionerBase::get_pc () const
+  {
+    return pc;
+  }
+
+
+  PreconditionerBase::operator Mat () const
+  {
+    return matrix;
+  }
+
+
+  /* ----------------- PreconditionJacobi -------------------- */
+
+  PreconditionJacobi::PreconditionJacobi ()
+  {}
+
+
+  PreconditionJacobi::PreconditionJacobi (const MatrixBase     &matrix,
+                                          const AdditionalData &additional_data)
+  {
+    initialize(matrix, additional_data);
+  }
+
+
+  void
+  PreconditionJacobi::initialize (const MatrixBase     &matrix_,
+                                  const AdditionalData &additional_data_)
+  {
+    matrix = static_cast<Mat>(matrix_);
+    additional_data = additional_data_;
+
+    create_pc();
+
+    int ierr;
+    ierr = PCSetType (pc, const_cast<char *>(PCJACOBI));
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    ierr = PCSetFromOptions (pc);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    ierr = PCSetUp (pc);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+  }
+
+
+  /* ----------------- PreconditionBlockJacobi -------------------- */
+
+  PreconditionBlockJacobi::PreconditionBlockJacobi ()
+  {}
+
+
+  PreconditionBlockJacobi::
+  PreconditionBlockJacobi (const MatrixBase     &matrix,
+                           const AdditionalData &additional_data)
+  {
+    initialize(matrix, additional_data);
+  }
+
+  void
+  PreconditionBlockJacobi::initialize (const MatrixBase     &matrix_,
+                                       const AdditionalData &additional_data_)
+  {
+    matrix = static_cast<Mat>(matrix_);
+    additional_data = additional_data_;
+
+    create_pc();
+
+    int ierr;
+    ierr = PCSetType (pc, const_cast<char *>(PCBJACOBI));
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    ierr = PCSetFromOptions (pc);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    ierr = PCSetUp (pc);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+  }
+
+
+  /* ----------------- PreconditionSOR -------------------- */
+
+  PreconditionSOR::AdditionalData::
+  AdditionalData (const double omega)
+    :
+    omega (omega)
+  {}
+
+
+  PreconditionSOR::PreconditionSOR ()
+  {}
+
+
+  PreconditionSOR::PreconditionSOR (const MatrixBase     &matrix,
+                                    const AdditionalData &additional_data)
+  {
+    initialize(matrix, additional_data);
+  }
+
+
+  void
+  PreconditionSOR::initialize (const MatrixBase     &matrix_,
+                               const AdditionalData &additional_data_)
+  {
+    matrix = static_cast<Mat>(matrix_);
+    additional_data = additional_data_;
+
+    create_pc();
+
+    int ierr;
+    ierr = PCSetType (pc, const_cast<char *>(PCSOR));
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    // then set flags as given
+    ierr = PCSORSetOmega (pc, additional_data.omega);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    ierr = PCSetFromOptions (pc);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    ierr = PCSetUp (pc);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+  }
+
+
+  /* ----------------- PreconditionSSOR -------------------- */
+
+  PreconditionSSOR::AdditionalData::
+  AdditionalData (const double omega)
+    :
+    omega (omega)
+  {}
+
+
+  PreconditionSSOR::PreconditionSSOR ()
+  {}
+
+
+  PreconditionSSOR::PreconditionSSOR (const MatrixBase     &matrix,
+                                      const AdditionalData &additional_data)
+  {
+    initialize(matrix, additional_data);
+  }
+
+
+  void
+  PreconditionSSOR::initialize (const MatrixBase     &matrix_,
+                                const AdditionalData &additional_data_)
+  {
+    matrix = static_cast<Mat>(matrix_);
+    additional_data = additional_data_;
+
+    create_pc();
+
+    int ierr;
+    ierr = PCSetType (pc, const_cast<char *>(PCSOR));
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    // then set flags as given
+    ierr = PCSORSetOmega (pc, additional_data.omega);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    // convert SOR to SSOR
+    ierr = PCSORSetSymmetric (pc, SOR_SYMMETRIC_SWEEP);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    ierr = PCSetFromOptions (pc);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    ierr = PCSetUp (pc);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+  }
+
+
+  /* ----------------- PreconditionEisenstat -------------------- */
+
+  PreconditionEisenstat::AdditionalData::
+  AdditionalData (const double omega)
+    :
+    omega (omega)
+  {}
+
+
+  PreconditionEisenstat::PreconditionEisenstat ()
+  {}
+
+
+  PreconditionEisenstat::PreconditionEisenstat (const MatrixBase     &matrix,
+                                                const AdditionalData &additional_data)
+  {
+    initialize(matrix, additional_data);
+  }
+
+
+  void
+  PreconditionEisenstat::initialize (const MatrixBase     &matrix_,
+                                     const AdditionalData &additional_data_)
+  {
+    matrix = static_cast<Mat>(matrix_);
+    additional_data = additional_data_;
+
+    create_pc();
+
+    int ierr;
+    ierr = PCSetType (pc, const_cast<char *>(PCEISENSTAT));
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    // then set flags as given
+    ierr = PCEisenstatSetOmega (pc, additional_data.omega);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    ierr = PCSetFromOptions (pc);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    ierr = PCSetUp (pc);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+  }
+
+
+  /* ----------------- PreconditionICC -------------------- */
+
+
+  PreconditionICC::AdditionalData::
+  AdditionalData (const unsigned int levels)
+    :
+    levels (levels)
+  {}
+
+
+  PreconditionICC::PreconditionICC ()
+  {}
+
+
+  PreconditionICC::PreconditionICC (const MatrixBase     &matrix,
+                                    const AdditionalData &additional_data)
+  {
+    initialize(matrix, additional_data);
+  }
+
+
+  void
+  PreconditionICC::initialize (const MatrixBase     &matrix_,
+                               const AdditionalData &additional_data_)
+  {
+    matrix = static_cast<Mat>(matrix_);
+    additional_data = additional_data_;
+
+    create_pc();
+
+    int ierr;
+    ierr = PCSetType (pc, const_cast<char *>(PCICC));
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    // then set flags
+    ierr = PCFactorSetLevels (pc, additional_data.levels);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    ierr = PCSetFromOptions (pc);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    ierr = PCSetUp (pc);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+  }
+
+
+  /* ----------------- PreconditionILU -------------------- */
+
+  PreconditionILU::AdditionalData::
+  AdditionalData (const unsigned int levels)
+    :
+    levels (levels)
+  {}
+
+
+  PreconditionILU::PreconditionILU ()
+  {}
+
+
+  PreconditionILU::PreconditionILU (const MatrixBase     &matrix,
+                                    const AdditionalData &additional_data)
+  {
+    initialize(matrix, additional_data);
+  }
+
+
+  void
+  PreconditionILU::initialize (const MatrixBase     &matrix_,
+                               const AdditionalData &additional_data_)
+  {
+//    matrix = static_cast<Mat>(matrix_);
+	  matrix = matrix_;
+    additional_data = additional_data_;
+
+    create_pc();
+
+    int ierr;
+//    ierr = PCSetType (pc, const_cast<char *>(PCILU));
+//    ierr = PCSetType (pc, PCSOR);
+//    ierr = PCSetType (pc, PCILU);
+//    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    ierr = PCSetType (pc, const_cast<char *>(PCHYPRE));
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    ierr = PCHYPRESetType(pc, "pilut");
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    // then set flags
+    ierr = PCFactorSetLevels (pc, additional_data.levels);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    ierr = PCSetFromOptions (pc);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+//    PCFactorSetShiftType(pc,MAT_SHIFT_NONZERO);
+//    PCFactorSetShiftAmount(pc, 1);
+
+    ierr = PCSetUp (pc);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+  }
+
+
+  /* ----------------- PreconditionBoomerAMG -------------------- */
+
+  PreconditionBoomerAMG::AdditionalData::
+  AdditionalData(const bool symmetric_operator,
+                 const double strong_threshold,
+                 const double max_row_sum,
+                 const unsigned int aggressive_coarsening_num_levels,
+                 const bool output_details
+                )
+    :
+    symmetric_operator(symmetric_operator),
+    strong_threshold(strong_threshold),
+    max_row_sum(max_row_sum),
+    aggressive_coarsening_num_levels(aggressive_coarsening_num_levels),
+    output_details(output_details)
+  {}
+
+
+  PreconditionBoomerAMG::PreconditionBoomerAMG ()
+  {}
+
+
+  PreconditionBoomerAMG::PreconditionBoomerAMG (const MatrixBase     &matrix,
+                                                const AdditionalData &additional_data)
+  {
+    initialize(matrix, additional_data);
+  }
+
+
+  void
+  PreconditionBoomerAMG::initialize (const MatrixBase     &matrix_,
+                                     const AdditionalData &additional_data_)
+  {
+    matrix = static_cast<Mat>(matrix_);
+    additional_data = additional_data_;
+
+#ifdef PETSC_HAVE_HYPRE
+    //std::cout<<"here"<<std::endl;
+    create_pc();
+
+    int ierr;
+    ierr = PCSetType (pc, const_cast<char *>(PCHYPRE));
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    ierr = PCHYPRESetType(pc, "boomeramg");
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    if (additional_data.output_details)
+      PetscOptionsSetValue("-pc_hypre_boomeramg_print_statistics","1");
+
+    PetscOptionsSetValue("-pc_hypre_boomeramg_agg_nl",
+                         Utilities::int_to_string(
+                           additional_data.aggressive_coarsening_num_levels
+                         ).c_str());
+
+    std::stringstream ssStream;
+    ssStream << additional_data.max_row_sum;
+    PetscOptionsSetValue("-pc_hypre_boomeramg_max_row_sum", ssStream.str().c_str());
+
+    ssStream.str(""); // empty the stringstream
+    ssStream << additional_data.strong_threshold;
+    PetscOptionsSetValue("-pc_hypre_boomeramg_strong_threshold", ssStream.str().c_str());
+
+    if (additional_data.symmetric_operator)
+      {
+        PetscOptionsSetValue("-pc_hypre_boomeramg_relax_type_up", "symmetric-SOR/Jacobi");
+        PetscOptionsSetValue("-pc_hypre_boomeramg_relax_type_down", "symmetric-SOR/Jacobi");
+        PetscOptionsSetValue("-pc_hypre_boomeramg_relax_type_coarse", "Gaussian-elimination");
+      }
+    else
+      {
+        PetscOptionsSetValue("-pc_hypre_boomeramg_relax_type_up", "SOR/Jacobi");
+        PetscOptionsSetValue("-pc_hypre_boomeramg_relax_type_down", "SOR/Jacobi");
+        PetscOptionsSetValue("-pc_hypre_boomeramg_relax_type_coarse", "Gaussian-elimination");
+      }
+
+    ierr = PCSetFromOptions (pc);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    ierr = PCSetUp (pc);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+#else // PETSC_HAVE_HYPRE
+    (void)pc;
+    Assert (false,
+            ExcMessage ("Your PETSc installation does not include a copy of "
+                        "the hypre package necessary for this preconditioner."));
+#endif
+  }
+
+
+  /* ----------------- PreconditionParaSails -------------------- */
+
+  PreconditionParaSails::AdditionalData::
+  AdditionalData(const unsigned int symmetric,
+                 const unsigned int n_levels,
+                 const double threshold,
+                 const double filter,
+                 const bool output_details)
+    :
+    symmetric(symmetric),
+    n_levels(n_levels),
+    threshold(threshold),
+    filter(filter),
+    output_details(output_details)
+  {}
+
+
+  PreconditionParaSails::PreconditionParaSails ()
+  {}
+
+
+  PreconditionParaSails::PreconditionParaSails (const MatrixBase     &matrix,
+                                                const AdditionalData &additional_data)
+  {
+    initialize(matrix, additional_data);
+  }
+
+
+  void
+  PreconditionParaSails::initialize (const MatrixBase     &matrix_,
+                                     const AdditionalData &additional_data_)
+  {
+    matrix = static_cast<Mat>(matrix_);
+    additional_data = additional_data_;
+
+#ifdef PETSC_HAVE_HYPRE
+    create_pc();
+
+    int ierr;
+    ierr = PCSetType (pc, const_cast<char *>(PCHYPRE));
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    ierr = PCHYPRESetType(pc, "parasails");
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    if (additional_data.output_details)
+      PetscOptionsSetValue("-pc_hypre_parasails_logging","1");
+
+    Assert ((additional_data.symmetric == 0 ||
+             additional_data.symmetric == 1 ||
+             additional_data.symmetric == 2),
+            ExcMessage("ParaSails parameter symmetric can only be equal to 0, 1, 2!"));
+
+    std::stringstream ssStream;
+
+    switch (additional_data.symmetric)
+      {
+      case 0:
+      {
+        ssStream << "nonsymmetric";
+        break;
+      }
+
+      case 1:
+      {
+        ssStream << "SPD";
+        break;
+      }
+
+      case 2:
+      {
+        ssStream << "nonsymmetric,SPD";
+        break;
+      }
+
+      default:
+        Assert (false,
+                ExcMessage("ParaSails parameter symmetric can only be equal to 0, 1, 2!"));
+      };
+
+    PetscOptionsSetValue("-pc_hypre_parasails_sym",ssStream.str().c_str());
+
+    PetscOptionsSetValue("-pc_hypre_parasails_nlevels",
+                         Utilities::int_to_string(
+                           additional_data.n_levels
+                         ).c_str());
+
+    ssStream.str(""); // empty the stringstream
+    ssStream << additional_data.threshold;
+    PetscOptionsSetValue("-pc_hypre_parasails_thresh", ssStream.str().c_str());
+
+    ssStream.str(""); // empty the stringstream
+    ssStream << additional_data.filter;
+    PetscOptionsSetValue("-pc_hypre_parasails_filter", ssStream.str().c_str());
+
+    ierr = PCSetFromOptions (pc);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    ierr = PCSetUp (pc);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+#else // PETSC_HAVE_HYPRE
+    (void)pc;
+    Assert (false,
+            ExcMessage ("Your PETSc installation does not include a copy of "
+                        "the hypre package necessary for this preconditioner."));
+#endif
+  }
+
+
+  /* ----------------- PreconditionNone ------------------------- */
+
+  PreconditionNone::PreconditionNone ()
+  {}
+
+
+  PreconditionNone::PreconditionNone (const MatrixBase     &matrix,
+                                      const AdditionalData &additional_data)
+  {
+    initialize (matrix, additional_data);
+  }
+
+
+  void
+  PreconditionNone::initialize (const MatrixBase     &matrix_,
+                                const AdditionalData &additional_data_)
+  {
+    matrix = static_cast<Mat>(matrix_);
+    additional_data = additional_data_;
+
+    create_pc();
+
+    int ierr;
+    ierr = PCSetType (pc, const_cast<char *>(PCNONE));
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    ierr = PCSetFromOptions (pc);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    ierr = PCSetUp (pc);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+  }
+
+
+  /* ----------------- PreconditionLU -------------------- */
+
+  PreconditionLU::AdditionalData::
+  AdditionalData (const double pivoting,
+                  const double zero_pivot,
+                  const double damping)
+    :
+    pivoting (pivoting),
+    zero_pivot (zero_pivot),
+    damping (damping)
+  {}
+
+
+  PreconditionLU::PreconditionLU ()
+  {}
+
+
+  PreconditionLU::PreconditionLU (const MatrixBase     &matrix,
+                                  const AdditionalData &additional_data)
+  {
+    initialize(matrix, additional_data);
+  }
+
+
+  void
+  PreconditionLU::initialize (const MatrixBase     &matrix_,
+                              const AdditionalData &additional_data_)
+  {
+    matrix = static_cast<Mat>(matrix_);
+    additional_data = additional_data_;
+
+    create_pc();
+
+    int ierr;
+    ierr = PCSetType (pc, const_cast<char *>(PCLU));
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    // set flags as given
+#if DEAL_II_PETSC_VERSION_LT(3,0,1)
+    ierr = PCFactorSetPivoting (pc, additional_data.pivoting);
+#else
+    ierr = PCFactorSetColumnPivot (pc, additional_data.pivoting);
+#endif
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    ierr = PCFactorSetZeroPivot (pc, additional_data.zero_pivot);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+#if DEAL_II_PETSC_VERSION_LT(3,0,1)
+    ierr = PCFactorSetShiftNonzero (pc, additional_data.damping);
+#else
+    ierr = PCFactorSetShiftAmount (pc, additional_data.damping);
+#endif
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    ierr = PCSetFromOptions (pc);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+    ierr = PCSetUp (pc);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+  }
+
+}
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif // DEAL_II_USE_PETSC
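
For reference, the PCLU configuration that PreconditionLU::initialize() performs above can be reproduced with the plain PETSc API. The stand-alone sketch below assumes a PETSc 3.2-3.4 installation (matching the four-argument KSPSetOperators used elsewhere in this commit) and maps the three AdditionalData fields onto the corresponding PCFactor calls; it is illustrative only and not part of the commit.

// Sketch: serial LU "preconditioner" applied once via KSPPREONLY, with the
// same factor options that PreconditionLU::initialize() sets above.
// Error checking is omitted for brevity.
#include <petscksp.h>

int main (int argc, char **argv)
{
  PetscInitialize (&argc, &argv, 0, 0);

  // small SPD tridiagonal test matrix
  const PetscInt n = 5;
  Mat A;
  MatCreateSeqAIJ (PETSC_COMM_SELF, n, n, 3, 0, &A);
  for (PetscInt i=0; i<n; ++i)
    {
      MatSetValue (A, i, i, 2.0, INSERT_VALUES);
      if (i > 0)   MatSetValue (A, i, i-1, -1.0, INSERT_VALUES);
      if (i < n-1) MatSetValue (A, i, i+1, -1.0, INSERT_VALUES);
    }
  MatAssemblyBegin (A, MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd   (A, MAT_FINAL_ASSEMBLY);

  Vec b, x;
  VecCreateSeq (PETSC_COMM_SELF, n, &b);
  VecDuplicate (b, &x);
  VecSet (b, 1.0);

  KSP ksp;
  PC  pc;
  KSPCreate (PETSC_COMM_SELF, &ksp);
  KSPSetOperators (ksp, A, A, SAME_PRECONDITIONER);   // pre-3.5 signature
  KSPSetType (ksp, KSPPREONLY);                       // just apply the factorization
  KSPGetPC (ksp, &pc);

  PCSetType (pc, PCLU);
  PCFactorSetColumnPivot (pc, 1.e-2);    // 'pivoting' in AdditionalData
  PCFactorSetZeroPivot   (pc, 1.e-12);   // 'zero_pivot'
  PCFactorSetShiftAmount (pc, 0.0);      // 'damping'
  PCSetFromOptions (pc);

  KSPSolve (ksp, b, x);

  KSPDestroy (&ksp);
  VecDestroy (&x);
  VecDestroy (&b);
  MatDestroy (&A);
  PetscFinalize ();
  return 0;
}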

Modified: branches/s-wang2/for_deal.II/source/lac/petsc_solver.cc
===================================================================
--- branches/s-wang2/for_deal.II/source/lac/petsc_solver.cc	2012-11-30 18:38:17 UTC (rev 1401)
+++ branches/s-wang2/for_deal.II/source/lac/petsc_solver.cc	2012-11-30 22:02:48 UTC (rev 1402)
@@ -76,8 +76,7 @@
         // last argument is irrelevant here,
         // since we use the solver only once
         // anyway
-        ierr = KSPSetOperators (solver_data->ksp, A, preconditioner,
-                                SAME_PRECONDITIONER);
+        ierr = KSPSetOperators (solver_data->ksp, A, preconditioner, SAME_PRECONDITIONER);
         AssertThrow (ierr == 0, ExcPETScError(ierr));
 
         // let derived classes set the solver

Modified: branches/s-wang2/source/adiabatic_conditions.cc
===================================================================
--- branches/s-wang2/source/adiabatic_conditions.cc	2012-11-30 18:38:17 UTC (rev 1401)
+++ branches/s-wang2/source/adiabatic_conditions.cc	2012-11-30 22:02:48 UTC (rev 1402)
@@ -75,6 +75,7 @@
         // approximation here.
         const double density = material_model.density(temperatures[i-1], pressures[i-1],
                                                       initial_composition,representative_point);
+//        std::cout << density << std::endl;
         const double alpha = material_model.thermal_expansion_coefficient(temperatures[i-1], pressures[i-1],
                                                                           initial_composition,representative_point);
         const double cp = material_model.specific_heat(temperatures[i-1], pressures[i-1],

Modified: branches/s-wang2/source/main.cc
===================================================================
--- branches/s-wang2/source/main.cc	2012-11-30 18:38:17 UTC (rev 1401)
+++ branches/s-wang2/source/main.cc	2012-11-30 22:02:48 UTC (rev 1402)
@@ -31,7 +31,7 @@
 {
   using namespace dealii;
   Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv);
-  PetscInitialize(&argc,&argv,0,0);
+//  PetscInitialize(&argc,&argv,0,0);
 
   try
     {
@@ -173,7 +173,7 @@
 
   dealii::GrowingVectorMemory<dealii::PETScWrappers::MPI::Vector>::release_unused_memory ();
   dealii::GrowingVectorMemory<dealii::PETScWrappers::Vector>::release_unused_memory ();
-  PetscFinalize();
+//  PetscFinalize();
 
   return 0;
 }
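
The explicit PetscInitialize()/PetscFinalize() pair is commented out above, presumably because Utilities::MPI::MPI_InitFinalize already brings PETSc up and down when deal.II is configured with PETSc support. If one wanted to keep an explicit call without risking a double initialization, a guard along the following lines would do; this is a sketch under that assumption, not part of the commit.

// Sketch: initialize PETSc only if nothing else (e.g. deal.II's
// MPI_InitFinalize) has already done so.
#include <petscsys.h>

void ensure_petsc_is_initialized (int &argc, char **&argv)
{
  PetscBool already_up = PETSC_FALSE;
  PetscInitialized (&already_up);           // query PETSc's global state
  if (already_up == PETSC_FALSE)
    PetscInitialize (&argc, &argv, 0, 0);
}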

Modified: branches/s-wang2/source/simulator/assembly.cc
===================================================================
--- branches/s-wang2/source/simulator/assembly.cc	2012-11-30 18:38:17 UTC (rev 1401)
+++ branches/s-wang2/source/simulator/assembly.cc	2012-11-30 22:02:48 UTC (rev 1402)
@@ -838,6 +838,7 @@
     Mp_preconditioner->initialize (system_preconditioner_matrix.block(1,1));
     Amg_preconditioner->initialize (system_preconditioner_matrix.block(0,0),
                                     Amg_data);
+//    exit(0);
 
     rebuild_stokes_preconditioner = false;
 
@@ -1023,7 +1024,7 @@
          internal::Assembly::CopyData::
          StokesSystem<dim> (finite_element));
 
-    system_matrix.compress(dealii::VectorOperation::add);	//shuqiangwang
+    system_matrix.compress();
     system_rhs.compress(dealii::VectorOperation::add);
 
     if (material_model->is_compressible())
@@ -1045,7 +1046,9 @@
       computing_timer.enter_section ("   Build composition preconditioner");
     {
       preconditioner.reset (new LinearAlgebra::PreconditionILU());
-      preconditioner->initialize (system_matrix.block(2+index,2+index));
+//      system_matrix.block(2+index,2+index).write_ascii();
+      if(system_matrix.block(2+index,2+index).linfty_norm()>1e-50)		// non-zero
+    	  preconditioner->initialize (system_matrix.block(2+index,2+index));
     }
     computing_timer.exit_section();
   }

Modified: branches/s-wang2/source/simulator/checkpoint_restart.cc
===================================================================
--- branches/s-wang2/source/simulator/checkpoint_restart.cc	2012-11-30 18:38:17 UTC (rev 1401)
+++ branches/s-wang2/source/simulator/checkpoint_restart.cc	2012-11-30 22:02:48 UTC (rev 1402)
@@ -160,9 +160,9 @@
 
     system_trans.deserialize (x_system);
 
-    solution = distributed_system;
-    old_solution = old_distributed_system;
-    old_old_solution = old_old_distributed_system;
+    solution = distributed_system;						solution.update_ghost_values();
+    old_solution = old_distributed_system;				old_solution.update_ghost_values();
+    old_old_solution = old_old_distributed_system;		old_old_solution.update_ghost_values();
 
     //read zlib compressed resume.z
     {
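
The pattern "ghosted_vector = fully_distributed_vector; ghosted_vector.update_ghost_values();" recurs throughout this commit: the assignment fills the locally owned entries, and the extra call refreshes the ghost copies held by other processes. For PETSc-backed vectors, refreshing ghosts presumably amounts to a forward scatter from owners to ghost entries, as in the stand-alone raw-PETSc sketch below (sizes and values are made up; this is not the branch's actual implementation of update_ghost_values()).

// Sketch: a PETSc ghosted vector whose ghost copies are refreshed after the
// owned entries change -- the raw-PETSc analogue of update_ghost_values().
#include <petscvec.h>

int main (int argc, char **argv)
{
  PetscInitialize (&argc, &argv, 0, 0);

  PetscMPIInt rank, size;
  MPI_Comm_rank (PETSC_COMM_WORLD, &rank);
  MPI_Comm_size (PETSC_COMM_WORLD, &size);

  // each process owns two entries and ghosts the first entry of the next
  // process (no ghosts are needed when running on a single process)
  const PetscInt n_local     = 2;
  PetscInt       ghost_index = ((rank + 1) % size) * n_local;
  const PetscInt n_ghost     = (size > 1) ? 1 : 0;

  Vec v;
  VecCreateGhost (PETSC_COMM_WORLD, n_local, PETSC_DECIDE,
                  n_ghost, &ghost_index, &v);

  // modify the locally owned entries
  const PetscInt i0 = rank * n_local;
  VecSetValue (v, i0,     1.0 + rank, INSERT_VALUES);
  VecSetValue (v, i0 + 1, 2.0 + rank, INSERT_VALUES);
  VecAssemblyBegin (v);
  VecAssemblyEnd   (v);

  // refresh the ghost copies from the owning processes
  VecGhostUpdateBegin (v, INSERT_VALUES, SCATTER_FORWARD);
  VecGhostUpdateEnd   (v, INSERT_VALUES, SCATTER_FORWARD);

  VecDestroy (&v);
  PetscFinalize ();
  return 0;
}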

Modified: branches/s-wang2/source/simulator/core.cc
===================================================================
--- branches/s-wang2/source/simulator/core.cc	2012-11-30 18:38:17 UTC (rev 1401)
+++ branches/s-wang2/source/simulator/core.cc	2012-11-30 22:02:48 UTC (rev 1402)
@@ -152,6 +152,7 @@
     :
     parameters (prm),
     mpi_communicator (Utilities::MPI::duplicate_communicator (mpi_communicator_)),
+//    mpi_communicator(mpi_communicator_),
     pcout (std::cout,
            (Utilities::MPI::
             this_mpi_process(mpi_communicator)
@@ -956,6 +957,8 @@
                 }
             }
 
+        vec_distributed.compress();	// shuqiangwang
+
         LinearAlgebra::BlockVector vec (solution);
         vec = vec_distributed;
 
@@ -1179,8 +1182,8 @@
       system_tmp[1] = &(old_distributed_system);
 
       system_trans.interpolate (system_tmp);
-      solution     = distributed_system;
-      old_solution = old_distributed_system;
+      solution     = distributed_system;				solution.update_ghost_values();
+      old_solution = old_distributed_system;			old_solution.update_ghost_values();
     }
 
     computing_timer.exit_section();
@@ -1194,7 +1197,7 @@
   {
     // start any scheme with an extrapolated value from the previous
     // two time steps if those are available
-    current_linearization_point = old_solution;
+    current_linearization_point = old_solution;			current_linearization_point.update_ghost_values();
     if (timestep_number > 1)
       {
         //TODO: Trilinos sadd does not like ghost vectors even as input. Copy
@@ -1206,7 +1209,7 @@
         distr_solution .sadd ((1 + time_step/old_time_step),
                               -time_step/old_time_step,
                               distr_old_solution);
-        current_linearization_point = distr_solution;
+        current_linearization_point = distr_solution;		current_linearization_point.update_ghost_values();
       }
 
     switch (parameters.nonlinear_solver)
@@ -1217,14 +1220,14 @@
           build_advection_preconditioner(0,T_preconditioner);
           solve_advection(0);
 
-          current_linearization_point.block(2) = solution.block(2);
+          current_linearization_point.block(2) = solution.block(2);		current_linearization_point.block(2).update_ghost_values();
 
           for (unsigned int c=0; c<parameters.n_compositional_fields; ++c)
             {
               assemble_advection_system (1+c);
               build_advection_preconditioner(1+c,C_preconditioner);
               solve_advection(1+c); // this is correct, 0 would be temperature
-              current_linearization_point.block(3+c) = solution.block(3+c);
+              current_linearization_point.block(3+c) = solution.block(3+c);		current_linearization_point.block(3+c).update_ghost_values();
             }
 
           assemble_stokes_system();
@@ -1251,7 +1254,7 @@
 
               const double temperature_residual = solve_advection(0);
 
-              current_linearization_point.block(2) = solution.block(2);
+              current_linearization_point.block(2) = solution.block(2);				current_linearization_point.block(2).update_ghost_values();
               rebuild_stokes_matrix = true;
               std::vector<double> composition_residual (parameters.n_compositional_fields,0);
 
@@ -1261,7 +1264,7 @@
                   build_advection_preconditioner(c+1,C_preconditioner);
                   composition_residual[c]
                     = solve_advection(1+c); // 1+n is correct, because 0 is for temperature
-                  current_linearization_point.block(3+c) = solution.block(3+c);
+                  current_linearization_point.block(3+c) = solution.block(3+c);		current_linearization_point.block(3+c).update_ghost_values();
                 }
 
               assemble_stokes_system();
@@ -1270,7 +1273,7 @@
 
               const double stokes_residual = solve_stokes();
 
-              current_linearization_point = solution;
+              current_linearization_point = solution;				current_linearization_point.update_ghost_values();
 
               pcout << "   Nonlinear residuals: " << temperature_residual
                     << ", " << stokes_residual;
@@ -1330,7 +1333,7 @@
               assemble_stokes_system();
               build_stokes_preconditioner();
               solve_stokes();
-              old_solution = solution;
+              old_solution = solution;				old_solution.update_ghost_values();
 
               pcout << std::endl;
             }
@@ -1480,10 +1483,8 @@
         time += time_step;
         ++timestep_number;
         {
-          old_old_solution      = old_solution;
-          old_solution          = solution;
-          old_old_solution.update_ghost_values();			//shuqiangwang: need to check when this is needed.
-          old_solution.update_ghost_values();
+          old_old_solution      = old_solution;			old_old_solution.update_ghost_values();			//shuqiangwang: need to check when this is needed.
+          old_solution          = solution;				old_solution.update_ghost_values();
         }
 
         // periodically generate snapshots so that we can resume here
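
The vec_distributed.compress() added above flushes contributions that were written into entries owned by other processes; until that call they only live in a local cache. In raw PETSc terms this roughly corresponds to the vector assembly calls sketched below (illustrative only, with made-up sizes).

// Sketch: off-process additions are cached by PETSc until the vector is
// "assembled" -- approximately what deal.II's compress() does for these vectors.
#include <petscvec.h>

int main (int argc, char **argv)
{
  PetscInitialize (&argc, &argv, 0, 0);

  PetscMPIInt rank, size;
  MPI_Comm_rank (PETSC_COMM_WORLD, &rank);
  MPI_Comm_size (PETSC_COMM_WORLD, &size);

  Vec v;
  VecCreateMPI (PETSC_COMM_WORLD, 2, PETSC_DECIDE, &v);
  VecSet (v, 0.0);

  // every process adds into an entry owned by the *next* process; the value
  // is only communicated by the assembly (compress) step below
  const PetscInt remote = ((rank + 1) % size) * 2;
  VecSetValue (v, remote, 1.0, ADD_VALUES);

  VecAssemblyBegin (v);   // ~ compress(VectorOperation::add)
  VecAssemblyEnd   (v);

  VecDestroy (&v);
  PetscFinalize ();
  return 0;
}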

Modified: branches/s-wang2/source/simulator/helper_functions.cc
===================================================================
--- branches/s-wang2/source/simulator/helper_functions.cc	2012-11-30 18:38:17 UTC (rev 1401)
+++ branches/s-wang2/source/simulator/helper_functions.cc	2012-11-30 22:02:48 UTC (rev 1402)
@@ -541,6 +541,7 @@
               distributed_vector(local_dof_indices[first_pressure_dof]) += adjust;
             }
       }
+    distributed_vector.compress();
 
     // now get back to the original vector
     vector = distributed_vector;

Modified: branches/s-wang2/source/simulator/initial_conditions.cc
===================================================================
--- branches/s-wang2/source/simulator/initial_conditions.cc	2012-11-30 18:38:17 UTC (rev 1401)
+++ branches/s-wang2/source/simulator/initial_conditions.cc	2012-11-30 22:02:48 UTC (rev 1402)
@@ -162,9 +162,9 @@
 //        	exit(0);
 
         // copy temperature/composition block only
-        solution.block(2+n) = initial_solution.block(2+n);
-        old_solution.block(2+n) = initial_solution.block(2+n);
-        old_old_solution.block(2+n) = initial_solution.block(2+n);
+        solution.block(2+n) = initial_solution.block(2+n);				solution.block(2+n).update_ghost_values();
+        old_solution.block(2+n) = initial_solution.block(2+n);			old_solution.block(2+n).update_ghost_values();
+        old_old_solution.block(2+n) = initial_solution.block(2+n);		old_old_solution.block(2+n).update_ghost_values();
       }
   }
 
@@ -207,8 +207,6 @@
         // we may have hanging nodes, so apply constraints
         constraints.distribute (system_tmp);
         
-        system_tmp.compress();
-
         old_solution.block(1) = system_tmp.block(1);
       }
     else
@@ -295,18 +293,22 @@
               // then set the global solution vector to the values just computed
               cell->set_dof_values (local_projection, system_tmp);
             }
-
+        system_tmp.compress();
+//        system_tmp.print(std::cout,7,true,false);
         old_solution.block(1) = system_tmp.block(1);
       }
 
-    old_solution.compress();
-
+//    old_solution.block(1).print(std::cout,7,true,false);
+    //old_solution.compress();
     // normalize the pressure in such a way that the surface pressure
     // equals a known and desired value
+    old_solution.update_ghost_values();
     normalize_pressure(old_solution);
+    old_solution.update_ghost_values();
 
     // set the current solution to the same value as the previous solution
     solution = old_solution;
+    solution.update_ghost_values();
   }
 }
 

Modified: branches/s-wang2/source/simulator/solver.cc
===================================================================
--- branches/s-wang2/source/simulator/solver.cc	2012-11-30 18:38:17 UTC (rev 1401)
+++ branches/s-wang2/source/simulator/solver.cc	2012-11-30 22:02:48 UTC (rev 1402)
@@ -222,7 +222,7 @@
       {
         SolverControl solver_control(5000, 1e-6 * src.block(1).l2_norm());
 
-    	  PETScWrappers::SolverCG solver(solver_control);
+    	  PETScWrappers::SolverCG solver(solver_control, src.get_mpi_communicator());
 
         // Trilinos reports a breakdown
         // in case src=dst=0, even
@@ -247,7 +247,7 @@
       if (do_solve_A == true)
         {
           SolverControl solver_control(5000, utmp.l2_norm()*1e-2);
-          PETScWrappers::SolverCG solver(solver_control);
+          PETScWrappers::SolverCG solver(solver_control, src.get_mpi_communicator());
           solver.solve(stokes_matrix.block(0,0), dst.block(0), utmp,
                        a_preconditioner);
         }
@@ -286,26 +286,33 @@
 // overwrite the vector in residual(), then call set_zero again, and then throw away
 // the result
       LinearAlgebra::BlockVector
-      distributed_solution (system_rhs);	distributed_solution.compress();
-      current_constraints.set_zero(distributed_solution);		distributed_solution.compress();
+      distributed_solution (system_rhs);	//distributed_solution.compress();
+      current_constraints.set_zero(distributed_solution);			distributed_solution.compress();
       // create vector with distribution of system_rhs.
       LinearAlgebra::Vector block_remap (system_rhs.block (index+2));
       // copy block of current_linearization_point into it, because
       // current_linearization is distributed differently.
-      block_remap = current_linearization_point.block (index+2);	block_remap.compress();
+      block_remap = current_linearization_point.block (index+2);	//block_remap.compress();
       // (ab)use the distributed solution vector to temporarily put a residual in
       initial_residual = system_matrix.block(index+2,index+2).residual (distributed_solution.block(index+2),
                                                                         block_remap,
                                                                         system_rhs.block(index+2));
-      current_constraints.set_zero(distributed_solution);
+      current_constraints.set_zero(distributed_solution);			distributed_solution.compress();
 
       // then overwrite it again with the current best guess and solve the linear system
-      distributed_solution.block(index+2) = block_remap;		distributed_solution.compress();
-      solver.solve (system_matrix.block(index+2,index+2), distributed_solution.block(index+2),
-                    system_rhs.block(index+2), index==0?*T_preconditioner:*C_preconditioner);
+      distributed_solution.block(index+2) = block_remap;		//distributed_solution.compress();
+      if(system_rhs.block(index+2).linfty_norm()<1e-50)
+      {
+    	  distributed_solution.block(index+2) = system_rhs.block(index+2);
+    	  if(system_matrix.block(index+2,index+2).linfty_norm()>1e-50)
+    		  pcout << "Simulator<dim>::solve_advection(): something wrong" << std::endl;
+      }
+      else
+    	  solver.solve (system_matrix.block(index+2,index+2), distributed_solution.block(index+2),
+    			  system_rhs.block(index+2), index==0?*T_preconditioner:*C_preconditioner);
 
       current_constraints.distribute (distributed_solution);
-      solution.block(index+2) = distributed_solution.block(index+2);		solution.compress();
+      solution.block(index+2) = distributed_solution.block(index+2);		solution.block(index+2).update_ghost_values();
 
       // print number of iterations and also record it in the
       // statistics file
@@ -368,13 +375,13 @@
 
     // then overwrite it again with the current best guess and solve the linear system
     distributed_stokes_solution.block(0) = remap.block(0);
-    distributed_stokes_solution.block(1) = remap.block(1);		distributed_stokes_solution.compress();
+    distributed_stokes_solution.block(1) = remap.block(1);		//distributed_stokes_solution.compress();
 
     // extract Stokes parts of rhs vector
     LinearAlgebra::BlockVector distributed_stokes_rhs;
     distributed_stokes_rhs.reinit(system_rhs);
     distributed_stokes_rhs.block(0) = system_rhs.block(0);
-    distributed_stokes_rhs.block(1) = system_rhs.block(1);		distributed_stokes_rhs.compress();
+    distributed_stokes_rhs.block(1) = system_rhs.block(1);		//distributed_stokes_rhs.compress();
 
     PrimitiveVectorMemory< LinearAlgebra::BlockVector > mem;
 
@@ -432,8 +439,10 @@
 
     // now rescale the pressure back to real physical units
     solution.block(1) *= pressure_scaling;
+    solution.update_ghost_values();
 
     normalize_pressure(solution);
+    solution.update_ghost_values();
 
     // print the number of iterations to screen and record it in the
     // statistics file
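
Two patterns recur in the solver.cc changes above: the PETSc CG solvers are now given the communicator of the parallel objects (instead of the default PETSC_COMM_SELF), and a solve is skipped when its right hand side is identically zero, since the solver can report a breakdown for src = dst = 0. A compact sketch of that pattern, with illustrative names, tolerances and preconditioner type (not the simulator's actual code), is:

// Sketch: guarded parallel CG solve in the style of the changes above.
#include <deal.II/lac/solver_control.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
#include <deal.II/lac/petsc_parallel_vector.h>

using namespace dealii;

void solve_block (const PETScWrappers::MPI::SparseMatrix &A,
                  PETScWrappers::MPI::Vector             &x,
                  const PETScWrappers::MPI::Vector       &b,
                  const PETScWrappers::PreconditionILU   &preconditioner)
{
  if (b.linfty_norm() < 1e-50)
    {
      // a zero right hand side means a zero solution; calling the Krylov
      // solver here may trigger a spurious breakdown, so just copy b
      x = b;
      return;
    }

  SolverControl control (1000, 1e-12 * b.l2_norm());

  // pass the communicator of the parallel matrix rather than relying on
  // the default PETSC_COMM_SELF
  PETScWrappers::SolverCG cg (control, A.get_mpi_communicator());
  cg.solve (A, x, b, preconditioner);
}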


