diff --git a/src/eigen/unsupported/Eigen/SparseExtra b/src/eigen/unsupported/Eigen/SparseExtra
new file mode 100644
index 000000000..819cffa27
--- /dev/null
+++ b/src/eigen/unsupported/Eigen/SparseExtra
@@ -0,0 +1,53 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_EXTRA_MODULE_H
+#define EIGEN_SPARSE_EXTRA_MODULE_H
+
+#include "../../Eigen/Sparse"
+
+#include "../../Eigen/src/Core/util/DisableStupidWarnings.h"
+
+#include <vector>
+#include <map>
+#include <cstdlib>
+#include <cstring>
+#include <algorithm>
+#include <fstream>
+#include <sstream>
+
+#ifdef EIGEN_GOOGLEHASH_SUPPORT
+  #include <google/dense_hash_map>
+#endif
+
+/**
+  * \defgroup SparseExtra_Module SparseExtra module
+  *
+  * This module contains some experimental features extending the sparse module.
+  *
+  * \code
+  * #include <Eigen/SparseExtra>
+  * \endcode
+  */
+
+
+#include "src/SparseExtra/DynamicSparseMatrix.h"
+#include "src/SparseExtra/BlockOfDynamicSparseMatrix.h"
+#include "src/SparseExtra/RandomSetter.h"
+
+#include "src/SparseExtra/MarketIO.h"
+
+#if !defined(_WIN32)
+#include <dirent.h>
+#include "src/SparseExtra/MatrixMarketIterator.h"
+#endif
+
+#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_SPARSE_EXTRA_MODULE_H
diff --git a/src/eigen/unsupported/Eigen/src/SparseExtra/BlockOfDynamicSparseMatrix.h b/src/eigen/unsupported/Eigen/src/SparseExtra/BlockOfDynamicSparseMatrix.h
new file mode 100644
index 000000000..e9ec746e3
--- /dev/null
+++ b/src/eigen/unsupported/Eigen/src/SparseExtra/BlockOfDynamicSparseMatrix.h
@@ -0,0 +1,122 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_BLOCKFORDYNAMICMATRIX_H
+#define EIGEN_SPARSE_BLOCKFORDYNAMICMATRIX_H
+
+namespace Eigen { 
+
+#if 0
+
+// NOTE This has to be reimplemented as a specialization of BlockImpl< DynamicSparseMatrix<_Scalar, _Options, _Index>, ... >
+// See SparseBlock.h for an example
+
+
+/***************************************************************************
+* specialisation for DynamicSparseMatrix
+***************************************************************************/
+
+template<typename _Scalar, int _Options, typename _Index, int Size>
+class SparseInnerVectorSet<DynamicSparseMatrix<_Scalar, _Options, _Index>, Size>
+  : public SparseMatrixBase<SparseInnerVectorSet<DynamicSparseMatrix<_Scalar, _Options, _Index>, Size> >
+{
+    typedef DynamicSparseMatrix<_Scalar, _Options, _Index> MatrixType;
+  public:
+
+    enum { IsRowMajor = internal::traits<SparseInnerVectorSet>::IsRowMajor };
+
+    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseInnerVectorSet)
+    class InnerIterator: public MatrixType::InnerIterator
+    {
+      public:
+        inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer)
+          : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+        {}
+        inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
+        inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
+      protected:
+        Index m_outer;
+    };
+
+    inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize)
+      : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize)
+    {
+      eigen_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) );
+    }
+
+    inline SparseInnerVectorSet(const MatrixType& matrix, Index outer)
+      : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size)
+    {
+      eigen_assert(Size!=Dynamic);
+      eigen_assert( (outer>=0) && (outer<matrix.outerSize()) );
+    }
+
+    template<typename OtherDerived>
+    inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
+    {
+      if (IsRowMajor != ((OtherDerived::Flags&RowMajorBit)==RowMajorBit))
+      {
+        // need to transpose => perform a block evaluation followed by a big swap
+        DynamicSparseMatrix<Scalar,IsRowMajor?RowMajorBit:0> aux(other);
+        *this = aux.markAsRValue();
+      }
+      else
+      {
+        // evaluate/copy vector per vector
+        for (Index j=0; j<m_outerSize.value(); ++j)
+        {
+          SparseVector<Scalar,IsRowMajor ? RowMajorBit : 0> aux(other.innerVector(j));
+          m_matrix.const_cast_derived()._data()[m_outerStart+j].swap(aux._data());
+        }
+      }
+      return *this;
+    }
+
+    inline SparseInnerVectorSet& operator=(const SparseInnerVectorSet& other)
+    {
+      return operator=<SparseInnerVectorSet>(other);
+    }
+
+    Index nonZeros() const
+    {
+      Index count = 0;
+      for (Index j=0; j<m_outerSize.value(); ++j)
+        count += m_matrix._data()[m_outerStart+j].size();
+      return count;
+    }
+
+    const Scalar& lastCoeff() const
+    {
+      EIGEN_STATIC_ASSERT_VECTOR_ONLY(SparseInnerVectorSet);
+      eigen_assert(m_matrix.data()[m_outerStart].size()>0);
+      return m_matrix.data()[m_outerStart].value(m_matrix.data()[m_outerStart].size()-1);
+    }
+
+//     template<typename Sparse>
+//     inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
+//     {
+//       return *this;
+//     }
+
+    EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
+    EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
+
+  protected:
+
+    const typename MatrixType::Nested m_matrix;
+    Index m_outerStart;
+    const internal::variable_if_dynamic<Index, Size> m_outerSize;
+
+};
+
+#endif
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_BLOCKFORDYNAMICMATRIX_H
diff --git a/src/eigen/unsupported/Eigen/src/SparseExtra/BlockSparseMatrix.h b/src/eigen/unsupported/Eigen/src/SparseExtra/BlockSparseMatrix.h
new file mode 100644
index 000000000..0e8350a7d
--- /dev/null
+++ b/src/eigen/unsupported/Eigen/src/SparseExtra/BlockSparseMatrix.h
@@ -0,0 +1,1079 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2013 Desire Nuentsa <desire.nuentsa_wakam@inria.fr>
+// Copyright (C) 2013 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSEBLOCKMATRIX_H
+#define EIGEN_SPARSEBLOCKMATRIX_H
+
+namespace Eigen { 
+/** \ingroup SparseCore_Module
+  *
+  * \class BlockSparseMatrix
+  *
+  * \brief A versatile sparse matrix representation where each element is a block
+  *
+  * This class provides routines to manipulate block sparse matrices stored in a
+  * BSR-like representation. There are two main cases:
+  *
+  * 1. All blocks have the same number of rows and columns, called block size
+  * in the following. In this case, if this block size is known at compile time,
+  * it can be given as a template parameter like
+  * \code
+  * BlockSparseMatrix<Scalar, 3, ColMajor> bmat(b_rows, b_cols);
+  * \endcode
+  * Here, bmat is a b_rows x b_cols block sparse matrix
+  * where each coefficient is a 3x3 dense matrix.
+  * If the block size is fixed but will be given at runtime,
+  * \code
+  * BlockSparseMatrix<Scalar, Dynamic, ColMajor> bmat(b_rows, b_cols);
+  * bmat.setBlockSize(block_size);
+  * \endcode
+  *
+  * 2. The second case is for variable-block sparse matrices.
+  * Here each block has its own dimensions. The only restriction is that all the blocks
+  * in a row (resp. a column) should have the same number of rows (resp. of columns).
+  * It is thus required in this case to describe the layout of the matrix by calling
+  * setBlockLayout(rowBlocks, colBlocks).
+  *
+  * In either of the previous cases, the matrix can be filled by calling setFromTriplets().
+  * A regular sparse matrix can be converted to a block sparse matrix and vice versa.
+  * It is of course required to describe the block layout beforehand, by calling either
+  * setBlockSize() for fixed-size blocks or setBlockLayout() for variable-size blocks.
+  *
+  * \tparam _Scalar The Scalar type
+  * \tparam _BlockAtCompileTime The block layout option. It takes the following values:
+  * Dynamic : the block size is known at runtime
+  * a positive integer : the fixed block size, known at compile time
+  */
+template<typename _Scalar, int _BlockAtCompileTime=Dynamic, int _Options=ColMajor, typename _StorageIndex=int> class BlockSparseMatrix;
+
+template<typename BlockSparseMatrixT> class BlockSparseMatrixView;
+
+namespace internal {
+template<typename _Scalar, int _BlockAtCompileTime, int _Options, typename _Index>
+struct traits<BlockSparseMatrix<_Scalar,_BlockAtCompileTime,_Options, _Index> >
+{
+  typedef _Scalar Scalar;
+  typedef _Index Index;
+  typedef Sparse StorageKind; // FIXME Where is it used ??
+  typedef MatrixXpr XprKind;
+  enum {
+    RowsAtCompileTime = Dynamic,
+    ColsAtCompileTime = Dynamic,
+    MaxRowsAtCompileTime = Dynamic,
+    MaxColsAtCompileTime = Dynamic,
+    BlockSize = _BlockAtCompileTime,
+    Flags = _Options | NestByRefBit | LvalueBit,
+    CoeffReadCost = NumTraits<Scalar>::ReadCost,
+    SupportedAccessPatterns = InnerRandomAccessPattern
+  };
+};
+template<typename BlockSparseMatrixT>
+struct traits<BlockSparseMatrixView<BlockSparseMatrixT> >
+{
+  typedef Ref<Matrix<typename BlockSparseMatrixT::Scalar, BlockSparseMatrixT::BlockSize, BlockSparseMatrixT::BlockSize> > Scalar;
+  typedef Ref<Matrix<typename BlockSparseMatrixT::RealScalar, BlockSparseMatrixT::BlockSize, BlockSparseMatrixT::BlockSize> > RealScalar;
+
+};
+
+// Function object to sort a triplet list
+template<typename Iterator, bool IsColMajor>
+struct TripletComp
+{
+  typedef typename Iterator::value_type Triplet;
+  bool operator()(const Triplet& a, const Triplet& b)
+  { if(IsColMajor)
+      return ((a.col() == b.col() && a.row() < b.row()) || (a.col() < b.col()));
+    else
+      return ((a.row() == b.row() && a.col() < b.col()) || (a.row() < b.row()));
+  }
+};
+} // end namespace internal
+
+
+/* Proxy to view the block sparse matrix as a regular sparse matrix */
+template<typename BlockSparseMatrixT>
+class BlockSparseMatrixView : public SparseMatrixBase<BlockSparseMatrixT>
+{
+  public:
+    typedef Ref<typename BlockSparseMatrixT::BlockScalar> Scalar;
+    typedef Ref<typename BlockSparseMatrixT::BlockRealScalar> RealScalar;
+    typedef typename BlockSparseMatrixT::Index Index;
+    typedef  BlockSparseMatrixT Nested;
+    enum {
+      Flags = BlockSparseMatrixT::Options,
+      Options = BlockSparseMatrixT::Options,
+      RowsAtCompileTime = BlockSparseMatrixT::RowsAtCompileTime,
+      ColsAtCompileTime = BlockSparseMatrixT::ColsAtCompileTime,
+      MaxColsAtCompileTime = BlockSparseMatrixT::MaxColsAtCompileTime,
+      MaxRowsAtCompileTime = BlockSparseMatrixT::MaxRowsAtCompileTime
+    };
+  public:
+    BlockSparseMatrixView(const BlockSparseMatrixT& spblockmat)
+     : m_spblockmat(spblockmat)
+    {}
+
+    Index outerSize() const
+    {
+      return (Flags&RowMajorBit) ? this->rows() : this->cols();
+    }
+    Index cols() const
+    {
+      return m_spblockmat.blockCols();
+    }
+    Index rows() const
+    {
+      return m_spblockmat.blockRows();
+    }
+    Scalar coeff(Index row, Index col)
+    {
+      return m_spblockmat.coeff(row, col);
+    }
+    Scalar coeffRef(Index row, Index col)
+    {
+      return m_spblockmat.coeffRef(row, col);
+    }
+    // Wrapper to iterate over all blocks
+    class InnerIterator : public BlockSparseMatrixT::BlockInnerIterator
+    {
+      public:
+      InnerIterator(const BlockSparseMatrixView& mat, Index outer)
+          : BlockSparseMatrixT::BlockInnerIterator(mat.m_spblockmat, outer)
+      {}
+
+    };
+
+  protected:
+    const BlockSparseMatrixT& m_spblockmat;
+};
+
+// Proxy to view a regular vector as a block vector
+template<typename BlockSparseMatrixT, typename VectorType>
+class BlockVectorView
+{
+  public:
+    enum {
+      BlockSize = BlockSparseMatrixT::BlockSize,
+      ColsAtCompileTime = VectorType::ColsAtCompileTime,
+      RowsAtCompileTime = VectorType::RowsAtCompileTime,
+      Flags = VectorType::Flags
+    };
+    typedef Ref<const Matrix<typename BlockSparseMatrixT::Scalar, (RowsAtCompileTime==1)? 1 : BlockSize, (ColsAtCompileTime==1)? 1 : BlockSize> > Scalar;
+    typedef typename BlockSparseMatrixT::Index Index;
+  public:
+    BlockVectorView(const BlockSparseMatrixT& spblockmat, const VectorType& vec)
+    : m_spblockmat(spblockmat),m_vec(vec)
+    { }
+    inline Index cols() const
+    {
+      return m_vec.cols();
+    }
+    inline Index size() const
+    {
+      return m_spblockmat.blockRows();
+    }
+    inline Scalar coeff(Index bi) const
+    {
+      Index startRow = m_spblockmat.blockRowsIndex(bi);
+      Index rowSize = m_spblockmat.blockRowsIndex(bi+1) - startRow;
+      return m_vec.middleRows(startRow, rowSize);
+    }
+    inline Scalar coeff(Index bi, Index j) const
+    {
+      Index startRow = m_spblockmat.blockRowsIndex(bi);
+      Index rowSize = m_spblockmat.blockRowsIndex(bi+1) - startRow;
+      return m_vec.block(startRow, j, rowSize, 1);
+    }
+  protected:
+    const BlockSparseMatrixT& m_spblockmat;
+    const VectorType& m_vec;
+};
+
+template<typename BlockSparseMatrixT, typename VectorType> class BlockVectorReturn;
+
+
+// Proxy to view a regular vector as a block vector
+template<typename BlockSparseMatrixT, typename VectorType>
+class BlockVectorReturn
+{
+  public:
+    enum {
+      ColsAtCompileTime = VectorType::ColsAtCompileTime,
+      RowsAtCompileTime = VectorType::RowsAtCompileTime,
+      Flags = VectorType::Flags
+    };
+    typedef Ref<Matrix<typename VectorType::Scalar, RowsAtCompileTime, ColsAtCompileTime> > Scalar;
+    typedef typename BlockSparseMatrixT::Index Index;
+  public:
+    BlockVectorReturn(const BlockSparseMatrixT& spblockmat, VectorType& vec)
+    : m_spblockmat(spblockmat),m_vec(vec)
+    { }
+    inline Index size() const
+    {
+      return m_spblockmat.blockRows();
+    }
+    inline Scalar coeffRef(Index bi)
+    {
+      Index startRow = m_spblockmat.blockRowsIndex(bi);
+      Index rowSize = m_spblockmat.blockRowsIndex(bi+1) - startRow;
+      return m_vec.middleRows(startRow, rowSize);
+    }
+    inline Scalar coeffRef(Index bi, Index j)
+    {
+      Index startRow = m_spblockmat.blockRowsIndex(bi);
+      Index rowSize = m_spblockmat.blockRowsIndex(bi+1) - startRow;
+      return m_vec.block(startRow, j, rowSize, 1);
+    }
+
+  protected:
+    const BlockSparseMatrixT& m_spblockmat;
+    VectorType& m_vec;
+};
+
+// Block version of the sparse dense product
+template<typename Lhs, typename Rhs>
+class BlockSparseTimeDenseProduct;
+
+namespace internal {
+
+template<typename BlockSparseMatrixT, typename VecType>
+struct traits<BlockSparseTimeDenseProduct<BlockSparseMatrixT, VecType> >
+{
+  typedef Dense StorageKind;
+  typedef MatrixXpr XprKind;
+  typedef typename BlockSparseMatrixT::Scalar Scalar;
+  typedef typename BlockSparseMatrixT::Index Index;
+  enum {
+    RowsAtCompileTime = Dynamic,
+    ColsAtCompileTime = Dynamic,
+    MaxRowsAtCompileTime = Dynamic,
+    MaxColsAtCompileTime = Dynamic,
+    Flags = 0,
+    CoeffReadCost = internal::traits<BlockSparseMatrixT>::CoeffReadCost
+  };
+};
+} // end namespace internal
+
+template<typename Lhs, typename Rhs>
+class BlockSparseTimeDenseProduct
+  : public ProductBase<BlockSparseTimeDenseProduct<Lhs,Rhs>, Lhs, Rhs>
+{
+  public:
+    EIGEN_PRODUCT_PUBLIC_INTERFACE(BlockSparseTimeDenseProduct)
+
+    BlockSparseTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+    {}
+
+    template<typename Dest> void scaleAndAddTo(Dest& dest, const typename Rhs::Scalar& alpha) const
+    {
+      BlockVectorReturn<Lhs,Dest> tmpDest(m_lhs, dest);
+      internal::sparse_time_dense_product( BlockSparseMatrixView<Lhs>(m_lhs),  BlockVectorView<Lhs, Rhs>(m_lhs, m_rhs), tmpDest, alpha);
+    }
+
+  private:
+    BlockSparseTimeDenseProduct& operator=(const BlockSparseTimeDenseProduct&);
+};
+
+template<typename _Scalar, int _BlockAtCompileTime, int _Options, typename _StorageIndex>
+class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_BlockAtCompileTime, _Options,_StorageIndex> >
+{
+  public:
+    typedef _Scalar Scalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+    typedef _StorageIndex StorageIndex;
+    typedef typename internal::ref_selector<BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _StorageIndex> >::type Nested;
+
+    enum {
+      Options = _Options,
+      Flags = Options,
+      BlockSize=_BlockAtCompileTime,
+      RowsAtCompileTime = Dynamic,
+      ColsAtCompileTime = Dynamic,
+      MaxRowsAtCompileTime = Dynamic,
+      MaxColsAtCompileTime = Dynamic,
+      IsVectorAtCompileTime = 0,
+      IsColMajor = Flags&RowMajorBit ? 0 : 1
+    };
+    typedef Matrix<Scalar, _BlockAtCompileTime, _BlockAtCompileTime,IsColMajor ? ColMajor : RowMajor> BlockScalar;
+    typedef Matrix<RealScalar, _BlockAtCompileTime, _BlockAtCompileTime,IsColMajor ? ColMajor : RowMajor> BlockRealScalar;
+    typedef typename internal::conditional<_BlockAtCompileTime==Dynamic, Scalar, BlockScalar>::type BlockScalarReturnType;
+    typedef BlockSparseMatrix<Scalar, BlockSize, IsColMajor ? ColMajor : RowMajor, StorageIndex> PlainObject;
+  public:
+    // Default constructor
+    BlockSparseMatrix()
+    : m_innerBSize(0),m_outerBSize(0),m_innerOffset(0),m_outerOffset(0),
+      m_nonzerosblocks(0),m_values(0),m_blockPtr(0),m_indices(0),
+      m_outerIndex(0),m_blockSize(BlockSize)
+    { }
+
+
+    /**
+     * \brief Construct and resize
+     *
+     */
+    BlockSparseMatrix(Index brow, Index bcol)
+      : m_innerBSize(IsColMajor ? brow : bcol),
+        m_outerBSize(IsColMajor ? bcol : brow),
+        m_innerOffset(0),m_outerOffset(0),m_nonzerosblocks(0),
+        m_values(0),m_blockPtr(0),m_indices(0),
+        m_outerIndex(0),m_blockSize(BlockSize)
+    { }
+
+    /**
+     * \brief Copy-constructor
+     */
+    BlockSparseMatrix(const BlockSparseMatrix& other)
+      : m_innerBSize(other.m_innerBSize),m_outerBSize(other.m_outerBSize),
+        m_innerOffset(0),m_outerOffset(0),
+        m_nonzerosblocks(other.m_nonzerosblocks),m_nonzeros(other.m_nonzeros),
+        m_values(0),m_blockPtr(0),m_indices(0),
+        m_outerIndex(0),m_blockSize(other.m_blockSize)
+    {
+      // should we allow copying between variable-size blocks and fixed-size blocks ??
+      eigen_assert(m_blockSize == BlockSize && " CAN NOT COPY BETWEEN FIXED-SIZE AND VARIABLE-SIZE BLOCKS");
+
+      // Allocate the internal arrays before copying into them
+      m_values = new Scalar[m_nonzeros];
+      m_indices = new StorageIndex[m_nonzerosblocks+1];
+      m_outerIndex = new StorageIndex[m_outerBSize+1];
+      std::copy(other.m_values, other.m_values+m_nonzeros, m_values);
+      std::copy(other.m_indices, other.m_indices+m_nonzerosblocks, m_indices);
+      std::copy(other.m_outerIndex, other.m_outerIndex+m_outerBSize+1, m_outerIndex);
+
+      // The offset and per-block pointer arrays only exist for variable-size blocks
+      if(m_blockSize == Dynamic)
+      {
+        m_innerOffset = new StorageIndex[m_innerBSize+1];
+        m_outerOffset = new StorageIndex[m_outerBSize+1];
+        m_blockPtr = new StorageIndex[m_nonzerosblocks+1];
+        std::copy(other.m_innerOffset, other.m_innerOffset+m_innerBSize+1, m_innerOffset);
+        std::copy(other.m_outerOffset, other.m_outerOffset+m_outerBSize+1, m_outerOffset);
+        std::copy(other.m_blockPtr, other.m_blockPtr+m_nonzerosblocks+1, m_blockPtr);
+      }
+    }
+
+    friend void swap(BlockSparseMatrix& first, BlockSparseMatrix& second)
+    {
+      std::swap(first.m_innerBSize, second.m_innerBSize);
+      std::swap(first.m_outerBSize, second.m_outerBSize);
+      std::swap(first.m_innerOffset, second.m_innerOffset);
+      std::swap(first.m_outerOffset, second.m_outerOffset);
+      std::swap(first.m_nonzerosblocks, second.m_nonzerosblocks);
+      std::swap(first.m_nonzeros, second.m_nonzeros);
+      std::swap(first.m_values, second.m_values);
+      std::swap(first.m_blockPtr, second.m_blockPtr);
+      std::swap(first.m_indices, second.m_indices);
+      std::swap(first.m_outerIndex, second.m_outerIndex);
+      std::swap(first.m_blockSize, second.m_blockSize);
+    }
+
+    BlockSparseMatrix& operator=(BlockSparseMatrix other)
+    {
+      //Copy-and-swap paradigm ... avoid leaked data if thrown
+      swap(*this, other);
+      return *this;
+    }
+
+    // Destructor
+    ~BlockSparseMatrix()
+    {
+      delete[] m_outerIndex;
+      delete[] m_innerOffset;
+      delete[] m_outerOffset;
+      delete[] m_indices;
+      delete[] m_blockPtr;
+      delete[] m_values;
+    }
+
+
+    /**
+      * \brief Constructor from a sparse matrix
+      *
+      */
+    template<typename MatrixType>
+    inline BlockSparseMatrix(const MatrixType& spmat)
+      : m_innerBSize(spmat.innerSize()/BlockSize),
+        m_outerBSize(spmat.outerSize()/BlockSize),
+        m_innerOffset(0),m_outerOffset(0),m_nonzerosblocks(0),
+        m_values(0),m_blockPtr(0),m_indices(0),
+        m_outerIndex(0),m_blockSize(BlockSize)
+    {
+      EIGEN_STATIC_ASSERT((BlockSize != Dynamic), THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE);
+
+      *this = spmat;
+    }
+
+    /**
+      * \brief Assignment from a sparse matrix with the same storage order
+      *
+      * Convert a sparse matrix into a block sparse matrix.
+      * \warning Before calling this function, it is necessary to call
+      * either setBlockLayout() (for matrices with variable-size blocks)
+      * or setBlockSize() (for fixed-size blocks).
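+      *
+      * A minimal sketch, assuming fixed-size 2x2 blocks and a matrix whose
+      * dimensions are multiples of the block size:
+      * \code
+      * SparseMatrix<double> spmat(8, 8);
+      * // ... fill spmat ...
+      * BlockSparseMatrix<double, 2> bmat(spmat.rows()/2, spmat.cols()/2);
+      * bmat = spmat; // gather the scalar coefficients into 2x2 blocks
+      * \endcode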
+      */
+    template<typename MatrixType>
+    inline BlockSparseMatrix& operator=(const MatrixType& spmat)
+    {
+      eigen_assert((m_innerBSize != 0 && m_outerBSize != 0)
+                   && "Trying to assign to a zero-size matrix, call resize() first");
+      eigen_assert(((MatrixType::Options&RowMajorBit) != IsColMajor) && "Wrong storage order");
+      typedef SparseMatrix<bool,MatrixType::Options,typename MatrixType::Index> MatrixPatternType;
+      MatrixPatternType  blockPattern(blockRows(), blockCols());
+      m_nonzeros = 0;
+
+      // First, compute the number of nonzero blocks and their locations
+      for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
+      {
+        // Browse each outer block and compute the structure
+        std::vector<bool> nzblocksFlag(m_innerBSize,false);  // Record the existing blocks
+        blockPattern.startVec(bj);
+        for(StorageIndex j = blockOuterIndex(bj); j < blockOuterIndex(bj+1); ++j)
+        {
+          typename MatrixType::InnerIterator it_spmat(spmat, j);
+          for(; it_spmat; ++it_spmat)
+          {
+            StorageIndex bi = innerToBlock(it_spmat.index()); // Index of the current nonzero block
+            if(!nzblocksFlag[bi])
+            {
+              // Save the index of this nonzero block
+              nzblocksFlag[bi] = true;
+              blockPattern.insertBackByOuterInnerUnordered(bj, bi) = true;
+              // Compute the total number of nonzeros (including explicit zeros in blocks)
+              m_nonzeros += blockOuterSize(bj) * blockInnerSize(bi);
+            }
+          }
+        } // end current outer block
+      }
+      blockPattern.finalize();
+
+      // Allocate the internal arrays
+      setBlockStructure(blockPattern);
+
+      for(StorageIndex nz = 0; nz < m_nonzeros; ++nz) m_values[nz] = Scalar(0);
+      for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
+      {
+        // Now copy the values
+        for(StorageIndex j = blockOuterIndex(bj); j < blockOuterIndex(bj+1); ++j)
+        {
+          // Browse the outer block column by column (for column-major matrices)
+          typename MatrixType::InnerIterator it_spmat(spmat, j);
+          for(; it_spmat; ++it_spmat)
+          {
+            StorageIndex idx = 0; // Position of this block in the column block
+            StorageIndex bi = innerToBlock(it_spmat.index()); // Index of the current nonzero block
+            // Go to the inner block where this element belongs to
+            while(bi > m_indices[m_outerIndex[bj]+idx]) ++idx; // Not expensive for ordered blocks
+            StorageIndex idxVal;// Get the right position in the array of values for this element
+            if(m_blockSize == Dynamic)
+            {
+              // Offset from all blocks before ...
+              idxVal =  m_blockPtr[m_outerIndex[bj]+idx];
+              // ... and offset inside the block
+              idxVal += (j - blockOuterIndex(bj)) * blockOuterSize(bj) + it_spmat.index() - m_innerOffset[bi];
+            }
+            else
+            {
+              // All blocks before
+              idxVal = (m_outerIndex[bj] + idx) * m_blockSize * m_blockSize;
+              // inside the block
+              idxVal += (j - blockOuterIndex(bj)) * m_blockSize + (it_spmat.index()%m_blockSize);
+            }
+            // Insert the value
+            m_values[idxVal] = it_spmat.value();
+          } // end of this column
+        } // end of this block
+      } // end of this outer block
+
+      return *this;
+    }
+
+    /**
+      * \brief Set the nonzero block pattern of the matrix
+      *
+      * Given a sparse matrix describing the nonzero block pattern,
+      * this function prepares the internal pointers for values.
+      * After calling this function, any *nonzero* block (bi, bj) can be set
+      * with a simple call to coeffRef(bi,bj).
+      *
+      * \warning Before calling this function, it is necessary to call
+      * either setBlockLayout() (for matrices with variable-size blocks)
+      * or setBlockSize() (for fixed-size blocks).
+      *
+      * \param blockPattern Sparse matrix of boolean elements describing the block structure
+      *
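+      * A small sketch with fixed-size 2x2 blocks (the pattern below is illustrative):
+      * \code
+      * SparseMatrix<bool> pattern(4, 4); // one boolean per block
+      * pattern.insert(0,0) = true;
+      * pattern.insert(2,1) = true;
+      * pattern.makeCompressed();
+      * BlockSparseMatrix<double, 2> bmat(4, 4);
+      * bmat.setBlockStructure(pattern);
+      * bmat.coeffRef(0,0) = Matrix2d::Identity();
+      * \endcode
+      *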
+      * \sa setBlockLayout() \sa setBlockSize()
+      */
+    template<typename MatrixType>
+    void setBlockStructure(const MatrixType& blockPattern)
+    {
+      resize(blockPattern.rows(), blockPattern.cols());
+      reserve(blockPattern.nonZeros());
+
+      // Browse the block pattern and set up the various pointers
+      m_outerIndex[0] = 0;
+      if(m_blockSize == Dynamic) m_blockPtr[0] = 0;
+      for(StorageIndex nz = 0; nz < m_nonzeros; ++nz) m_values[nz] = Scalar(0);
+      for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
+      {
+        //Browse each outer block
+
+        //First, copy and save the indices of nonzero blocks
+        //FIXME : find a way to avoid this ...
+        std::vector<int> nzBlockIdx;
+        typename MatrixType::InnerIterator it(blockPattern, bj);
+        for(; it; ++it)
+        {
+          nzBlockIdx.push_back(it.index());
+        }
+        std::sort(nzBlockIdx.begin(), nzBlockIdx.end());
+
+        // Now, fill block indices and (eventually) pointers to blocks
+        for(StorageIndex idx = 0; idx < nzBlockIdx.size(); ++idx)
+        {
+          StorageIndex offset = m_outerIndex[bj]+idx; // offset in m_indices
+          m_indices[offset] = nzBlockIdx[idx];
+          if(m_blockSize == Dynamic)
+            m_blockPtr[offset+1] = m_blockPtr[offset] + blockInnerSize(nzBlockIdx[idx]) * blockOuterSize(bj);
+          // There is no blockPtr for fixed-size blocks (the offset is simply idx * blockSize^2)
+        }
+        // Save the pointer to the next outer block
+        m_outerIndex[bj+1] = m_outerIndex[bj] + nzBlockIdx.size();
+      }
+    }
+
+    /**
+      * \brief Set the number of row and column blocks
+      */
+    inline void resize(Index brow, Index bcol)
+    {
+      m_innerBSize = IsColMajor ? brow : bcol;
+      m_outerBSize = IsColMajor ? bcol : brow;
+    }
+
+    /**
+      * \brief Set the block size at runtime for a fixed-size block layout
+      *
+      * Call this only for fixed-size blocks
+      */
+    inline void setBlockSize(Index blockSize)
+    {
+      m_blockSize = blockSize;
+    }
+
+    /**
+      * \brief Set the row and column block layouts.
+      *
+      * This function sets the size of each row and column block.
+      * It should therefore be used only for blocks with variable size.
+      * \param rowBlocks : Number of rows per row block
+      * \param colBlocks : Number of columns per column block
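+      *
+      * A small sketch (the block sizes are illustrative):
+      * \code
+      * VectorXi rowBlocks(2); rowBlocks << 2, 3; // block-row heights
+      * VectorXi colBlocks(2); colBlocks << 4, 1; // block-column widths
+      * BlockSparseMatrix<double, Dynamic> bmat(2, 2);
+      * bmat.setBlockLayout(rowBlocks, colBlocks);
+      * \endcode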
+      * \sa resize(), setBlockSize()
+      */
+    inline void setBlockLayout(const VectorXi& rowBlocks, const VectorXi& colBlocks)
+    {
+      const VectorXi& innerBlocks = IsColMajor ? rowBlocks : colBlocks;
+      const VectorXi& outerBlocks = IsColMajor ? colBlocks : rowBlocks;
+      eigen_assert(m_innerBSize == innerBlocks.size() && "CHECK THE NUMBER OF ROW OR COLUMN BLOCKS");
+      eigen_assert(m_outerBSize == outerBlocks.size() && "CHECK THE NUMBER OF ROW OR COLUMN BLOCKS");
+      m_outerBSize = outerBlocks.size();
+      //  starting index of blocks... cumulative sums
+      m_innerOffset = new StorageIndex[m_innerBSize+1];
+      m_outerOffset = new StorageIndex[m_outerBSize+1];
+      m_innerOffset[0] = 0;
+      m_outerOffset[0] = 0;
+      std::partial_sum(&innerBlocks[0], &innerBlocks[m_innerBSize-1]+1, &m_innerOffset[1]);
+      std::partial_sum(&outerBlocks[0], &outerBlocks[m_outerBSize-1]+1, &m_outerOffset[1]);
+
+      // Compute the total number of nonzeros
+      m_nonzeros = 0;
+      for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
+        for(StorageIndex bi = 0; bi < m_innerBSize; ++bi)
+          m_nonzeros += outerBlocks[bj] * innerBlocks[bi];
+
+    }
+
+    /**
+      * \brief Allocate the internal array of pointers to blocks and their inner indices
+      *
+      * \note For fixed-size blocks, call setBlockSize() to set the block size,
+      * and for variable-size blocks, call setBlockLayout() before using this function.
+      *
+      * \param nonzerosblocks Number of nonzero blocks. The total number of nonzeros
+      * is computed in setBlockLayout() for variable-size blocks
+      * \sa setBlockSize()
+      */
+    inline void reserve(const Index nonzerosblocks)
+    {
+      eigen_assert((m_innerBSize != 0 && m_outerBSize != 0) &&
+          "TRYING TO RESERVE ZERO-SIZE MATRICES, CALL resize() first");
+
+      // Free any previously allocated arrays before reallocating
+      delete[] m_outerIndex;
+      m_outerIndex = new StorageIndex[m_outerBSize+1];
+
+      m_nonzerosblocks = nonzerosblocks;
+      if(m_blockSize != Dynamic)
+      {
+        m_nonzeros = nonzerosblocks * (m_blockSize * m_blockSize);
+        delete[] m_blockPtr;
+        m_blockPtr = 0;
+      }
+      else
+      {
+        // m_nonzeros is already computed in setBlockLayout()
+        delete[] m_blockPtr;
+        m_blockPtr = new StorageIndex[m_nonzerosblocks+1];
+      }
+      delete[] m_indices;
+      m_indices = new StorageIndex[m_nonzerosblocks+1];
+      delete[] m_values;
+      m_values = new Scalar[m_nonzeros];
+    }
+
+
+    /**
+      * \brief Fill values in a matrix from a triplet list.
+      *
+      * Each triplet item has a block stored in an Eigen dense matrix.
+      * The InputIterator class should provide the functions row(), col() and value()
+      *
+      * \note For fixed-size blocks, call setBlockSize() before this function.
+      *
+      * FIXME Do not accept duplicates
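+      *
+      * A rough sketch with fixed-size 2x2 blocks, reusing Triplet to carry a
+      * dense block as its value (any type providing row(), col() and value()
+      * would do):
+      * \code
+      * std::vector<Triplet<Matrix2d> > triplets;
+      * triplets.push_back(Triplet<Matrix2d>(0, 0, Matrix2d::Identity()));
+      * triplets.push_back(Triplet<Matrix2d>(1, 2, Matrix2d::Constant(0.5)));
+      * BlockSparseMatrix<double, 2> bmat(3, 3);
+      * bmat.setFromTriplets(triplets.begin(), triplets.end());
+      * \endcode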
+      */
+    template<typename InputIterator>
+    void setFromTriplets(const InputIterator& begin, const InputIterator& end)
+    {
+      eigen_assert((m_innerBSize!=0 && m_outerBSize !=0) && "ZERO BLOCKS, PLEASE CALL resize() before");
+
+      /* First, sort the triplet list
+        * FIXME This can be unnecessarily expensive since only the inner indices have to be sorted
+        * The best approach is like in SparseMatrix::setFromTriplets()
+        */
+      internal::TripletComp<InputIterator, IsColMajor> tripletcomp;
+      std::sort(begin, end, tripletcomp);
+
+      /* Count the number of rows and column blocks,
+       * and the number of nonzero blocks per outer dimension
+       */
+      VectorXi rowBlocks(m_innerBSize); // Size of each block row
+      VectorXi colBlocks(m_outerBSize); // Size of each block column
+      rowBlocks.setZero(); colBlocks.setZero();
+      VectorXi nzblock_outer(m_outerBSize); // Number of nz blocks per outer vector
+      VectorXi nz_outer(m_outerBSize); // Number of nz per outer vector...for variable-size blocks
+      nzblock_outer.setZero();
+      nz_outer.setZero();
+      for(InputIterator it(begin); it !=end; ++it)
+      {
+        eigen_assert(it->row() >= 0 && it->row() < this->blockRows() && it->col() >= 0 && it->col() < this->blockCols());
+        eigen_assert((it->value().rows() == it->value().cols() && (it->value().rows() == m_blockSize))
+                     || (m_blockSize == Dynamic));
+
+        if(m_blockSize == Dynamic)
+        {
+          eigen_assert((rowBlocks[it->row()] == 0 || rowBlocks[it->row()] == it->value().rows()) &&
+              "NON CORRESPONDING SIZES FOR ROW BLOCKS");
+          eigen_assert((colBlocks[it->col()] == 0 || colBlocks[it->col()] == it->value().cols()) &&
+              "NON CORRESPONDING SIZES FOR COLUMN BLOCKS");
+          rowBlocks[it->row()] = it->value().rows();
+          colBlocks[it->col()] = it->value().cols();
+        }
+        nz_outer(IsColMajor ? it->col() : it->row()) += it->value().rows() * it->value().cols();
+        nzblock_outer(IsColMajor ? it->col() : it->row())++;
+      }
+      // Allocate member arrays
+      if(m_blockSize == Dynamic) setBlockLayout(rowBlocks, colBlocks);
+      StorageIndex nzblocks = nzblock_outer.sum();
+      reserve(nzblocks);
+
+       // Temporary markers
+      VectorXi block_id(m_outerBSize); // To be used as a block marker during insertion
+
+      // Setup outer index pointers and markers
+      m_outerIndex[0] = 0;
+      if (m_blockSize == Dynamic)  m_blockPtr[0] =  0;
+      for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
+      {
+        m_outerIndex[bj+1] = m_outerIndex[bj] + nzblock_outer(bj);
+        block_id(bj) = m_outerIndex[bj];
+        if(m_blockSize==Dynamic)
+        {
+          m_blockPtr[m_outerIndex[bj+1]] = m_blockPtr[m_outerIndex[bj]] + nz_outer(bj);
+        }
+      }
+
+      // Fill the matrix
+      for(InputIterator it(begin); it!=end; ++it)
+      {
+        StorageIndex outer = IsColMajor ? it->col() : it->row();
+        StorageIndex inner = IsColMajor ? it->row() : it->col();
+        m_indices[block_id(outer)] = inner;
+        StorageIndex block_size = it->value().rows()*it->value().cols();
+        StorageIndex nz_marker = blockPtr(block_id[outer]);
+        memcpy(&(m_values[nz_marker]), it->value().data(), block_size * sizeof(Scalar));
+        if(m_blockSize == Dynamic)
+        {
+          m_blockPtr[block_id(outer)+1] = m_blockPtr[block_id(outer)] + block_size;
+        }
+        block_id(outer)++;
+      }
+
+      // An alternative when the outer indices are sorted...no need to use an array of markers
+//      for(Index bcol = 0; bcol < m_outerBSize; ++bcol)
+//      {
+//      Index id = 0, id_nz = 0, id_nzblock = 0;
+//      for(InputIterator it(begin); it!=end; ++it)
+//      {
+//        while (id<bcol) // one pass should do the job unless there are empty columns
+//        {
+//          id++;
+//          m_outerIndex[id+1]=m_outerIndex[id];
+//        }
+//        m_outerIndex[id+1] += 1;
+//        m_indices[id_nzblock]=brow;
+//        Index block_size = it->value().rows()*it->value().cols();
+//        m_blockPtr[id_nzblock+1] = m_blockPtr[id_nzblock] + block_size;
+//        id_nzblock++;
+//        memcpy(&(m_values[id_nz]),it->value().data(), block_size*sizeof(Scalar));
+//        id_nz += block_size;
+//      }
+//      while(id < m_outerBSize-1) // Empty columns at the end
+//      {
+//        id++;
+//        m_outerIndex[id+1]=m_outerIndex[id];
+//      }
+//      }
+    }
+
+
+    /**
+      * \returns the number of rows
+      */
+    inline Index rows() const
+    {
+//      return blockRows();
+      return (IsColMajor ? innerSize() : outerSize());
+    }
+
+    /**
+      * \returns the number of cols
+      */
+    inline Index cols() const
+    {
+//      return blockCols();
+      return (IsColMajor ? outerSize() : innerSize());
+    }
+
+    inline Index innerSize() const
+    {
+      if(m_blockSize == Dynamic) return m_innerOffset[m_innerBSize];
+      else return  (m_innerBSize * m_blockSize) ;
+    }
+
+    inline Index outerSize() const
+    {
+      if(m_blockSize == Dynamic) return m_outerOffset[m_outerBSize];
+      else return  (m_outerBSize * m_blockSize) ;
+    }
+    /** \returns the number of rows grouped by blocks */
+    inline Index blockRows() const
+    {
+      return (IsColMajor ? m_innerBSize : m_outerBSize);
+    }
+    /** \returns the number of columns grouped by blocks */
+    inline Index blockCols() const
+    {
+      return (IsColMajor ? m_outerBSize : m_innerBSize);
+    }
+
+    inline Index outerBlocks() const { return m_outerBSize; }
+    inline Index innerBlocks() const { return m_innerBSize; }
+
+    /** \returns the block index where outer belongs to */
+    inline Index outerToBlock(Index outer) const
+    {
+      eigen_assert(outer < outerSize() && "OUTER INDEX OUT OF BOUNDS");
+
+      if(m_blockSize != Dynamic)
+        return (outer / m_blockSize); // Integer division
+
+      StorageIndex b_outer = 0;
+      while(m_outerOffset[b_outer] <= outer) ++b_outer;
+      return b_outer - 1;
+    }
+    /** \returns  the block index where inner belongs to */
+    inline Index innerToBlock(Index inner) const
+    {
+      eigen_assert(inner < innerSize() && "OUTER INDEX OUT OF BOUNDS");
+
+      if(m_blockSize != Dynamic)
+        return (inner / m_blockSize); // Integer division
+
+      StorageIndex b_inner = 0;
+      while(m_innerOffset[b_inner] <= inner) ++b_inner;
+      return b_inner - 1;
+    }
+
+    /**
+      *\returns a reference to the (i,j) block as an Eigen Dense Matrix
+      */
+    Ref<BlockScalar> coeffRef(Index brow, Index bcol)
+    {
+      eigen_assert(brow < blockRows() && "BLOCK ROW INDEX OUT OF BOUNDS");
+      eigen_assert(bcol < blockCols() && "BLOCK nzblocksFlagCOLUMN OUT OF BOUNDS");
+
+      StorageIndex rsize = IsColMajor ? blockInnerSize(brow): blockOuterSize(bcol);
+      StorageIndex csize = IsColMajor ? blockOuterSize(bcol) : blockInnerSize(brow);
+      StorageIndex inner = IsColMajor ? brow : bcol;
+      StorageIndex outer = IsColMajor ? bcol : brow;
+      StorageIndex offset = m_outerIndex[outer];
+      while(offset < m_outerIndex[outer+1] && m_indices[offset] != inner)
+        offset++;
+      if(m_indices[offset] == inner)
+      {
+        return Map<BlockScalar>(&(m_values[blockPtr(offset)]), rsize, csize);
+      }
+      else
+      {
+        //FIXME the block does not exist, insert it !!!!!!!!!
+        eigen_assert(false && "DYNAMIC INSERTION IS NOT YET SUPPORTED");
+      }
+    }
+
+    /**
+      * \returns the value of the (i,j) block as an Eigen Dense Matrix
+      */
+    Map<const BlockScalar> coeff(Index brow, Index bcol) const
+    {
+      eigen_assert(brow < blockRows() && "BLOCK ROW INDEX OUT OF BOUNDS");
+      eigen_assert(bcol < blockCols() && "BLOCK COLUMN OUT OF BOUNDS");
+
+      StorageIndex rsize = IsColMajor ? blockInnerSize(brow): blockOuterSize(bcol);
+      StorageIndex csize = IsColMajor ? blockOuterSize(bcol) : blockInnerSize(brow);
+      StorageIndex inner = IsColMajor ? brow : bcol;
+      StorageIndex outer = IsColMajor ? bcol : brow;
+      StorageIndex offset = m_outerIndex[outer];
+      while(offset < m_outerIndex[outer+1] && m_indices[offset] != inner) offset++;
+      if(m_indices[offset] == inner)
+      {
+        return Map<const BlockScalar> (&(m_values[blockPtr(offset)]), rsize, csize);
+      }
+      else
+//        return BlockScalar::Zero(rsize, csize);
+        eigen_assert("NOT YET SUPPORTED");
+    }
+
+    // Block Matrix times vector product
+    template<typename VecType>
+    BlockSparseTimeDenseProduct<BlockSparseMatrix, VecType> operator*(const VecType& lhs) const
+    {
+      return BlockSparseTimeDenseProduct<BlockSparseMatrix, VecType>(*this, lhs);
+    }
+
+    /** \returns the number of nonzero blocks */
+    inline Index nonZerosBlocks() const { return m_nonzerosblocks; }
+    /** \returns the total number of nonzero elements, including any explicit zeros in blocks */
+    inline Index nonZeros() const { return m_nonzeros; }
+
+    inline BlockScalarReturnType *valuePtr() {return reinterpret_cast<BlockScalarReturnType *>(m_values);}
+//    inline Scalar *valuePtr(){ return m_values; }
+    inline StorageIndex *innerIndexPtr() {return m_indices; }
+    inline const StorageIndex *innerIndexPtr() const {return m_indices; }
+    inline StorageIndex *outerIndexPtr() {return m_outerIndex; }
+    inline const StorageIndex* outerIndexPtr() const {return m_outerIndex; }
+
+    /** \brief for compatibility purposes with the SparseMatrix class */
+    inline bool isCompressed() const {return true;}
+    /**
+      * \returns the starting index of the bi row block
+      */
+    inline Index blockRowsIndex(Index bi) const
+    {
+      return IsColMajor ? blockInnerIndex(bi) : blockOuterIndex(bi);
+    }
+
+    /**
+      * \returns the starting index of the bj col block
+      */
+    inline Index blockColsIndex(Index bj) const
+    {
+      return IsColMajor ? blockOuterIndex(bj) : blockInnerIndex(bj);
+    }
+
+    inline Index blockOuterIndex(Index bj) const
+    {
+      return (m_blockSize == Dynamic) ? m_outerOffset[bj] : (bj * m_blockSize);
+    }
+    inline Index blockInnerIndex(Index bi) const
+    {
+      return (m_blockSize == Dynamic) ? m_innerOffset[bi] : (bi * m_blockSize);
+    }
+
+    // Not needed ???
+    inline Index blockInnerSize(Index bi) const
+    {
+      return (m_blockSize == Dynamic) ? (m_innerOffset[bi+1] - m_innerOffset[bi]) : m_blockSize;
+    }
+    inline Index blockOuterSize(Index bj) const
+    {
+      return (m_blockSize == Dynamic) ? (m_outerOffset[bj+1]- m_outerOffset[bj]) : m_blockSize;
+    }
+
+    /**
+      * \brief Browse the matrix by outer index
+      */
+    class InnerIterator; // Browse column by column
+
+    /**
+      * \brief Browse the matrix by block outer index
+      */
+    class BlockInnerIterator; // Browse block by block
+
+    friend std::ostream & operator << (std::ostream & s, const BlockSparseMatrix& m)
+    {
+      for (StorageIndex j = 0; j < m.outerBlocks(); ++j)
+      {
+        BlockInnerIterator itb(m, j);
+        for(; itb; ++itb)
+        {
+          s << "("<<itb.row() << ", " << itb.col() << ")\n";
+          s << itb.value() <<"\n";
+        }
+      }
+      s << std::endl;
+      return s;
+    }
+
+    /**
+      * \returns the starting position of the block \a id in the array of values
+      */
+    Index blockPtr(Index id) const
+    {
+      if(m_blockSize == Dynamic) return m_blockPtr[id];
+      else return id * m_blockSize * m_blockSize;
+      //return blockDynIdx(id, typename internal::conditional<(BlockSize==Dynamic), internal::true_type, internal::false_type>::type());
+    }
+
+
+  protected:
+//    inline Index blockDynIdx(Index id, internal::true_type) const
+//    {
+//      return m_blockPtr[id];
+//    }
+//    inline Index blockDynIdx(Index id, internal::false_type) const
+//    {
+//      return id * BlockSize * BlockSize;
+//    }
+
+    // To be implemented
+    // Insert a block at a particular location... need to make room for that
+    Map<BlockScalar> insert(Index brow, Index bcol);
+
+    Index m_innerBSize; // Number of block rows
+    Index m_outerBSize; // Number of block columns
+    StorageIndex *m_innerOffset; // Starting index of each inner block (size m_innerBSize+1)
+    StorageIndex *m_outerOffset; // Starting index of each outer block (size m_outerBSize+1)
+    Index m_nonzerosblocks; // Total number of nonzero blocks (at most m_innerBSize x m_outerBSize)
+    Index m_nonzeros; // Total number of nonzero elements
+    Scalar *m_values; //Values stored block column after block column (size m_nonzeros)
+    StorageIndex *m_blockPtr; // Pointer to the beginning of each block in m_values, size m_nonzeroblocks ... null for fixed-size blocks
+    StorageIndex *m_indices; //Inner block indices, size m_nonzerosblocks ... OK
+    StorageIndex *m_outerIndex; // Starting pointer of each block column in m_indices (size m_outerBSize)... OK
+    Index m_blockSize; // Size of a block for fixed-size blocks, otherwise -1
+};
+
+template<typename _Scalar, int _BlockAtCompileTime, int _Options, typename _StorageIndex>
+class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _StorageIndex>::BlockInnerIterator
+{
+  public:
+
+    enum{
+      Flags = _Options
+    };
+
+    BlockInnerIterator(const BlockSparseMatrix& mat, const Index outer)
+    : m_mat(mat),m_outer(outer),
+      m_id(mat.m_outerIndex[outer]),
+      m_end(mat.m_outerIndex[outer+1])
+    {
+    }
+
+    inline BlockInnerIterator& operator++() {m_id++; return *this; }
+
+    inline const Map<const BlockScalar> value() const
+    {
+      return Map<const BlockScalar>(&(m_mat.m_values[m_mat.blockPtr(m_id)]),
+          rows(),cols());
+    }
+    inline Map<BlockScalar> valueRef()
+    {
+      return Map<BlockScalar>(&(m_mat.m_values[m_mat.blockPtr(m_id)]),
+          rows(),cols());
+    }
+    // Block inner index
+    inline Index index() const {return m_mat.m_indices[m_id]; }
+    inline Index outer() const { return m_outer; }
+    // block row index
+    inline Index row() const  {return index(); }
+    // block column index
+    inline Index col() const {return outer(); }
+    // FIXME Number of rows in the current block
+    inline Index rows() const { return (m_mat.m_blockSize==Dynamic) ? (m_mat.m_innerOffset[index()+1] - m_mat.m_innerOffset[index()]) : m_mat.m_blockSize; }
+    // Number of columns in the current block ...
+    inline Index cols() const { return (m_mat.m_blockSize==Dynamic) ? (m_mat.m_outerOffset[m_outer+1]-m_mat.m_outerOffset[m_outer]) : m_mat.m_blockSize;}
+    inline operator bool() const { return (m_id < m_end); }
+
+  protected:
+    const BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, StorageIndex>& m_mat;
+    const Index m_outer;
+    Index m_id;
+    Index m_end;
+};
+
+template<typename _Scalar, int _BlockAtCompileTime, int _Options, typename _StorageIndex>
+class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _StorageIndex>::InnerIterator
+{
+  public:
+    InnerIterator(const BlockSparseMatrix& mat, Index outer)
+    : m_mat(mat),m_outerB(mat.outerToBlock(outer)),m_outer(outer),
+      itb(mat, mat.outerToBlock(outer)),
+      m_offset(outer - mat.blockOuterIndex(m_outerB))
+     {
+        if (itb)
+        {
+          m_id = m_mat.blockInnerIndex(itb.index());
+          m_start = m_id;
+          m_end = m_mat.blockInnerIndex(itb.index()+1);
+        }
+     }
+    inline InnerIterator& operator++()
+    {
+      m_id++;
+      if (m_id >= m_end)
+      {
+        ++itb;
+        if (itb)
+        {
+          m_id = m_mat.blockInnerIndex(itb.index());
+          m_start = m_id;
+          m_end = m_mat.blockInnerIndex(itb.index()+1);
+        }
+      }
+      return *this;
+    }
+    inline const Scalar& value() const
+    {
+      return itb.value().coeff(m_id - m_start, m_offset);
+    }
+    inline Scalar& valueRef()
+    {
+      return itb.valueRef().coeffRef(m_id - m_start, m_offset);
+    }
+    inline Index index() const { return m_id; }
+    inline Index outer() const {return m_outer; }
+    inline Index col() const {return outer(); }
+    inline Index row() const { return index();}
+    inline operator bool() const
+    {
+      return itb;
+    }
+  protected:
+    const BlockSparseMatrix& m_mat;
+    const Index m_outer;
+    const Index m_outerB;
+    BlockInnerIterator itb; // Iterator through the blocks
+    const Index m_offset; // Position of this column in the block
+    Index m_start; // starting inner index of this block
+    Index m_id; // current inner index in the block
+    Index m_end; // starting inner index of the next block
+
+};
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSEBLOCKMATRIX_H
diff --git a/src/eigen/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h b/src/eigen/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h
new file mode 100644
index 000000000..037a13f86
--- /dev/null
+++ b/src/eigen/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h
@@ -0,0 +1,392 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_DYNAMIC_SPARSEMATRIX_H
+#define EIGEN_DYNAMIC_SPARSEMATRIX_H
+
+namespace Eigen { 
+
+/** \deprecated use a SparseMatrix in an uncompressed mode
+  *
+  * \class DynamicSparseMatrix
+  *
+  * \brief A sparse matrix class designed for matrix assembly purposes
+  *
+  * \param _Scalar the scalar type, i.e. the type of the coefficients
+  *
+  * Unlike SparseMatrix, this class provides a much higher degree of flexibility. In particular, it allows
+  * random read/write accesses in log(rho*inner_size) where \c rho is the probability that a coefficient is
+  * nonzero and inner_size is the number of rows if the matrix is column-major and the number of columns
+  * otherwise.
+  *
+  * Internally, the data are stored as a std::vector of compressed vectors. The performance of random writes might
+  * decrease as the number of nonzeros per inner vector increases. In practice, we observed very good performance
+  * up to about 100 nonzeros/vector, and it remains relatively good up to about 500 nonzeros/vector.
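+  *
+  * A minimal sketch of random assembly (sizes are illustrative):
+  * \code
+  * DynamicSparseMatrix<double> mat(1000, 1000);
+  * mat.reserve(10000);
+  * mat.coeffRef(42, 7) += 1.0;           // random write via sorted insertion
+  * SparseMatrix<double> compressed(mat); // compress once assembly is done
+  * \endcode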
+  *
+  * \see SparseMatrix
+  */
+
+namespace internal {
+template<typename _Scalar, int _Options, typename _StorageIndex>
+struct traits<DynamicSparseMatrix<_Scalar, _Options, _StorageIndex> >
+{
+  typedef _Scalar Scalar;
+  typedef _StorageIndex StorageIndex;
+  typedef Sparse StorageKind;
+  typedef MatrixXpr XprKind;
+  enum {
+    RowsAtCompileTime = Dynamic,
+    ColsAtCompileTime = Dynamic,
+    MaxRowsAtCompileTime = Dynamic,
+    MaxColsAtCompileTime = Dynamic,
+    Flags = _Options | NestByRefBit | LvalueBit,
+    CoeffReadCost = NumTraits<Scalar>::ReadCost,
+    SupportedAccessPatterns = OuterRandomAccessPattern
+  };
+};
+}
+
+template<typename _Scalar, int _Options, typename _StorageIndex>
+ class  DynamicSparseMatrix
+  : public SparseMatrixBase<DynamicSparseMatrix<_Scalar, _Options, _StorageIndex> >
+{
+    typedef SparseMatrixBase<DynamicSparseMatrix> Base;
+    using Base::convert_index;
+  public:
+    EIGEN_SPARSE_PUBLIC_INTERFACE(DynamicSparseMatrix)
+    // FIXME: why are these operators already available ???
+    // EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(DynamicSparseMatrix, +=)
+    // EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(DynamicSparseMatrix, -=)
+    typedef MappedSparseMatrix<Scalar,Flags> Map;
+    using Base::IsRowMajor;
+    using Base::operator=;
+    enum {
+      Options = _Options
+    };
+
+  protected:
+
+    typedef DynamicSparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0), StorageIndex> TransposedSparseMatrix;
+
+    Index m_innerSize;
+    std::vector<internal::CompressedStorage<Scalar,StorageIndex> > m_data;
+
+  public:
+
+    inline Index rows() const { return IsRowMajor ? outerSize() : m_innerSize; }
+    inline Index cols() const { return IsRowMajor ? m_innerSize : outerSize(); }
+    inline Index innerSize() const { return m_innerSize; }
+    inline Index outerSize() const { return convert_index(m_data.size()); }
+    inline Index innerNonZeros(Index j) const { return m_data[j].size(); }
+
+    std::vector<internal::CompressedStorage<Scalar,StorageIndex> >& _data() { return m_data; }
+    const std::vector<internal::CompressedStorage<Scalar,StorageIndex> >& _data() const { return m_data; }
+
+    /** \returns the coefficient value at given position \a row, \a col
+      * This operation involves a log(rho*inner_size) binary search.
+      */
+    inline Scalar coeff(Index row, Index col) const
+    {
+      const Index outer = IsRowMajor ? row : col;
+      const Index inner = IsRowMajor ? col : row;
+      return m_data[outer].at(inner);
+    }
+
+    /** \returns a reference to the coefficient value at given position \a row, \a col
+      * This operation involves a log(rho*inner_size) binary search. If the coefficient does not
+      * exist yet, then a sorted insertion into a sequential buffer is performed.
+      */
+    inline Scalar& coeffRef(Index row, Index col)
+    {
+      const Index outer = IsRowMajor ? row : col;
+      const Index inner = IsRowMajor ? col : row;
+      return m_data[outer].atWithInsertion(inner);
+    }
+
+    class InnerIterator;
+    class ReverseInnerIterator;
+
+    void setZero()
+    {
+      for (Index j=0; j<outerSize(); ++j)
+        m_data[j].clear();
+    }
+
+    /** \returns the number of non zero coefficients */
+    Index nonZeros() const
+    {
+      Index res = 0;
+      for (Index j=0; j<outerSize(); ++j)
+        res += m_data[j].size();
+      return res;
+    }
+
+
+
+    void reserve(Index reserveSize = 1000)
+    {
+      if (outerSize()>0)
+      {
+        Index reserveSizePerVector = (std::max)(reserveSize/outerSize(),Index(4));
+        for (Index j=0; j<outerSize(); ++j)
+        {
+          m_data[j].reserve(reserveSizePerVector);
+        }
+      }
+    }
+
+    /** Does nothing: provided for compatibility with SparseMatrix */
+    inline void startVec(Index /*outer*/) {}
+
+    /** \returns a reference to the non zero coefficient at position \a row, \a col assuming that:
+      * - the nonzero does not already exist
+      * - the new coefficient is the last one of the given inner vector.
+      *
+      * \sa insert, insertBackByOuterInner */
+    inline Scalar& insertBack(Index row, Index col)
+    {
+      return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
+    }
+
+    /** \sa insertBack */
+    inline Scalar& insertBackByOuterInner(Index outer, Index inner)
+    {
+      eigen_assert(outer<Index(m_data.size()) && inner<m_innerSize && "out of range");
+      eigen_assert(((m_data[outer].size()==0) || (m_data[outer].index(m_data[outer].size()-1)<inner))
+                && "wrong sorted insertion");
+      m_data[outer].append(0, inner);
+      return m_data[outer].value(m_data[outer].size()-1);
+    }
+
+    inline Scalar& insert(Index row, Index col)
+    {
+      const Index outer = IsRowMajor ? row : col;
+      const Index inner = IsRowMajor ? col : row;
+
+      Index startId = 0;
+      Index id = static_cast<Index>(m_data[outer].size()) - 1;
+      m_data[outer].resize(id+2,1);
+
+      while ( (id >= startId) && (m_data[outer].index(id) > inner) )
+      {
+        m_data[outer].index(id+1) = m_data[outer].index(id);
+        m_data[outer].value(id+1) = m_data[outer].value(id);
+        --id;
+      }
+      m_data[outer].index(id+1) = inner;
+      m_data[outer].value(id+1) = 0;
+      return m_data[outer].value(id+1);
+    }
+
+    /** Does nothing: provided for compatibility with SparseMatrix */
+    inline void finalize() {}
+
+    /** Suppress all nonzeros which are smaller than \a reference under the tolerance \a epsilon */
+    void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
+    {
+      for (Index j=0; j<outerSize(); ++j)
+        m_data[j].prune(reference,epsilon);
+    }
+
+    /** Resize the matrix without preserving the data (the matrix is set to zero)
+      */
+    void resize(Index rows, Index cols)
+    {
+      const Index outerSize = IsRowMajor ? rows : cols;
+      m_innerSize = convert_index(IsRowMajor ? cols : rows);
+      setZero();
+      if (Index(m_data.size()) != outerSize)
+      {
+        m_data.resize(outerSize);
+      }
+    }
+
+    void resizeAndKeepData(Index rows, Index cols)
+    {
+      const Index outerSize = IsRowMajor ? rows : cols;
+      const Index innerSize = IsRowMajor ? cols : rows;
+      if (m_innerSize>innerSize)
+      {
+        // remove all coefficients with innerCoord>=innerSize
+        // TODO: not implemented yet
+        eigen_assert(false && "resizeAndKeepData: shrinking the inner size is not implemented yet");
+        exit(2);
+      }
+      if (Index(m_data.size()) != outerSize)
+      {
+        m_data.resize(outerSize);
+      }
+    }
+
+    /** The class DynamicSparseMatrix is deprecated */
+    EIGEN_DEPRECATED inline DynamicSparseMatrix()
+      : m_innerSize(0), m_data(0)
+    {
+      eigen_assert(innerSize()==0 && outerSize()==0);
+    }
+
+    /** The class DynamicSparseMatrix is deprecated */
+    EIGEN_DEPRECATED inline DynamicSparseMatrix(Index rows, Index cols)
+      : m_innerSize(0)
+    {
+      resize(rows, cols);
+    }
+
+    /** The class DynamicSparseMatrix is deprecated */
+    template<typename OtherDerived>
+    EIGEN_DEPRECATED explicit inline DynamicSparseMatrix(const SparseMatrixBase<OtherDerived>& other)
+      : m_innerSize(0)
+    {
+      Base::operator=(other.derived());
+    }
+
+    inline DynamicSparseMatrix(const DynamicSparseMatrix& other)
+      : Base(), m_innerSize(0)
+    {
+      *this = other.derived();
+    }
+
+    inline void swap(DynamicSparseMatrix& other)
+    {
+      //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
+      std::swap(m_innerSize, other.m_innerSize);
+      //std::swap(m_outerSize, other.m_outerSize);
+      m_data.swap(other.m_data);
+    }
+
+    inline DynamicSparseMatrix& operator=(const DynamicSparseMatrix& other)
+    {
+      if (other.isRValue())
+      {
+        swap(other.const_cast_derived());
+      }
+      else
+      {
+        resize(other.rows(), other.cols());
+        m_data = other.m_data;
+      }
+      return *this;
+    }
+
+    /** Destructor */
+    inline ~DynamicSparseMatrix() {}
+
+  public:
+
+    /** \deprecated
+      * Set the matrix to zero and reserve the memory for \a reserveSize nonzero coefficients. */
+    EIGEN_DEPRECATED void startFill(Index reserveSize = 1000)
+    {
+      setZero();
+      reserve(reserveSize);
+    }
+
+    /** \deprecated use insert()
+      * Inserts a nonzero coefficient at the given coordinates \a row, \a col and returns its reference assuming that:
+      *  1 - the coefficient does not exist yet
+      *  2 - this is the coefficient with the greatest inner coordinate for the given outer coordinate.
+      * In other words, assuming \c *this is column-major, there must not exist any nonzero coefficient of coordinates
+      * \c i \c x \a col such that \c i >= \a row. Otherwise the matrix is invalid.
+      *
+      * \see fillrand(), coeffRef()
+      */
+    EIGEN_DEPRECATED Scalar& fill(Index row, Index col)
+    {
+      const Index outer = IsRowMajor ? row : col;
+      const Index inner = IsRowMajor ? col : row;
+      return insertBackByOuterInner(outer,inner);
+    }
+
+    /** \deprecated use insert()
+      * Like fill() but with random inner coordinates.
+      * Compared to the generic coeffRef(), the only limitation is that we assume
+      * the coefficient does not exist yet.
+      */
+    EIGEN_DEPRECATED Scalar& fillrand(Index row, Index col)
+    {
+      return insert(row,col);
+    }
+
+    /** \deprecated use finalize()
+      * Does nothing. Provided for compatibility with SparseMatrix. */
+    EIGEN_DEPRECATED void endFill() {}
+    
+#   ifdef EIGEN_DYNAMICSPARSEMATRIX_PLUGIN
+#     include EIGEN_DYNAMICSPARSEMATRIX_PLUGIN
+#   endif
+ };
+
+template<typename Scalar, int _Options, typename _StorageIndex>
+class DynamicSparseMatrix<Scalar,_Options,_StorageIndex>::InnerIterator : public SparseVector<Scalar,_Options,_StorageIndex>::InnerIterator
+{
+    typedef typename SparseVector<Scalar,_Options,_StorageIndex>::InnerIterator Base;
+  public:
+    InnerIterator(const DynamicSparseMatrix& mat, Index outer)
+      : Base(mat.m_data[outer]), m_outer(outer)
+    {}
+
+    inline Index row() const { return IsRowMajor ? m_outer : Base::index(); }
+    inline Index col() const { return IsRowMajor ? Base::index() : m_outer; }
+    inline Index outer() const { return m_outer; }
+
+  protected:
+    const Index m_outer;
+};
+
+template<typename Scalar, int _Options, typename _StorageIndex>
+class DynamicSparseMatrix<Scalar,_Options,_StorageIndex>::ReverseInnerIterator : public SparseVector<Scalar,_Options,_StorageIndex>::ReverseInnerIterator
+{
+    typedef typename SparseVector<Scalar,_Options,_StorageIndex>::ReverseInnerIterator Base;
+  public:
+    ReverseInnerIterator(const DynamicSparseMatrix& mat, Index outer)
+      : Base(mat.m_data[outer]), m_outer(outer)
+    {}
+
+    inline Index row() const { return IsRowMajor ? m_outer : Base::index(); }
+    inline Index col() const { return IsRowMajor ? Base::index() : m_outer; }
+    inline Index outer() const { return m_outer; }
+
+  protected:
+    const Index m_outer;
+};
+
+namespace internal {
+
+template<typename _Scalar, int _Options, typename _StorageIndex>
+struct evaluator<DynamicSparseMatrix<_Scalar,_Options,_StorageIndex> >
+  : evaluator_base<DynamicSparseMatrix<_Scalar,_Options,_StorageIndex> >
+{
+  typedef _Scalar Scalar;
+  typedef DynamicSparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType;
+  typedef typename SparseMatrixType::InnerIterator InnerIterator;
+  typedef typename SparseMatrixType::ReverseInnerIterator ReverseInnerIterator;
+  
+  enum {
+    CoeffReadCost = NumTraits<_Scalar>::ReadCost,
+    Flags = SparseMatrixType::Flags
+  };
+  
+  evaluator() : m_matrix(0) {}
+  evaluator(const SparseMatrixType &mat) : m_matrix(&mat) {}
+  
+  operator SparseMatrixType&() { return m_matrix->const_cast_derived(); }
+  operator const SparseMatrixType&() const { return *m_matrix; }
+  
+  Scalar coeff(Index row, Index col) const { return m_matrix->coeff(row,col); }
+  
+  Index nonZerosEstimate() const { return m_matrix->nonZeros(); }
+
+  const SparseMatrixType *m_matrix;
+};
+
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_DYNAMIC_SPARSEMATRIX_H
diff --git a/src/eigen/unsupported/Eigen/src/SparseExtra/MarketIO.h b/src/eigen/unsupported/Eigen/src/SparseExtra/MarketIO.h
new file mode 100644
index 000000000..41e4af4a4
--- /dev/null
+++ b/src/eigen/unsupported/Eigen/src/SparseExtra/MarketIO.h
@@ -0,0 +1,275 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2012 Desire NUENTSA WAKAM <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_MARKET_IO_H
+#define EIGEN_SPARSE_MARKET_IO_H
+
+#include <iostream>
+
+namespace Eigen { 
+
+namespace internal 
+{
+  template <typename Scalar>
+  inline bool GetMarketLine (std::stringstream& line, Index& M, Index& N, Index& i, Index& j, Scalar& value)
+  {
+    line >> i >> j >> value;
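+    // Matrix Market indices are 1-based; convert them to 0-based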
+    i--;
+    j--;
+    if(i>=0 && j>=0 && i<M && j<N)
+    {
+      return true; 
+    }
+    else
+      return false;
+  }
+  template <typename Scalar>
+  inline bool GetMarketLine (std::stringstream& line, Index& M, Index& N, Index& i, Index& j, std::complex<Scalar>& value)
+  {
+    Scalar valR, valI;
+    line >> i >> j >> valR >> valI;
+    i--;
+    j--;
+    if(i>=0 && j>=0 && i<M && j<N)
+    {
+      value = std::complex<Scalar>(valR, valI);
+      return true; 
+    }
+    else
+      return false;
+  }
+
+  template <typename RealScalar>
+  inline void  GetVectorElt (const std::string& line, RealScalar& val)
+  {
+    std::istringstream newline(line);
+    newline >> val;  
+  }
+
+  template <typename RealScalar>
+  inline void GetVectorElt (const std::string& line, std::complex<RealScalar>& val)
+  {
+    RealScalar valR, valI; 
+    std::istringstream newline(line);
+    newline >> valR >> valI; 
+    val = std::complex<RealScalar>(valR, valI);
+  }
+  
+  template<typename Scalar>
+  inline void putMarketHeader(std::string& header,int sym)
+  {
+    header= "%%MatrixMarket matrix coordinate ";
+    if(internal::is_same<Scalar, std::complex<float> >::value || internal::is_same<Scalar, std::complex<double> >::value)
+    {
+      header += "complex";
+      if(sym == Symmetric) header += " symmetric";
+      else if (sym == SelfAdjoint) header += " Hermitian";
+      else header += " general";
+    }
+    else
+    {
+      header += "real";
+      if(sym == Symmetric) header += " symmetric";
+      else header += " general";
+    }
+  }
+
+  template<typename Scalar>
+  inline void PutMatrixElt(Scalar value, int row, int col, std::ofstream& out)
+  {
+    out << row << " "<< col << " " << value << "\n";
+  }
+  template<typename Scalar>
+  inline void PutMatrixElt(std::complex<Scalar> value, int row, int col, std::ofstream& out)
+  {
+    out << row << " " << col << " " << value.real() << " " << value.imag() << "\n";
+  }
+
+
+  template<typename Scalar>
+  inline void putVectorElt(Scalar value, std::ofstream& out)
+  {
+    out << value << "\n"; 
+  }
+  template<typename Scalar>
+  inline void putVectorElt(std::complex<Scalar> value, std::ofstream& out)
+  {
+    out << value.real() << " " << value.imag() << "\n"; 
+  }
+
+} // end namespace internal
+
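+/** Reads the banner of the Matrix Market file \a filename and reports in \a sym,
+  * \a iscomplex and \a isvector whether the stored object is symmetric/Hermitian,
+  * complex, and a dense array (vector).
+  * \returns false if the file cannot be opened. */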
+inline bool getMarketHeader(const std::string& filename, int& sym, bool& iscomplex, bool& isvector)
+{
+  sym = 0; 
+  iscomplex = false;
+  isvector = false;
+  std::ifstream in(filename.c_str(),std::ios::in);
+  if(!in)
+    return false;
+  
+  std::string line; 
+  // The matrix header is always the first line in the file 
+  std::getline(in, line); eigen_assert(in.good());
+  
+  std::stringstream fmtline(line); 
+  std::string substr[5];
+  fmtline>> substr[0] >> substr[1] >> substr[2] >> substr[3] >> substr[4];
+  if(substr[2].compare("array") == 0) isvector = true;
+  if(substr[3].compare("complex") == 0) iscomplex = true;
+  if(substr[4].compare("symmetric") == 0) sym = Symmetric;
+  else if (substr[4].compare("Hermitian") == 0) sym = SelfAdjoint;
+  
+  return true;
+}
+  
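+/** Loads a sparse matrix \a mat from a Matrix Market file \a filename
+  * (coordinate format). \returns false if the file cannot be opened. */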
+template<typename SparseMatrixType>
+bool loadMarket(SparseMatrixType& mat, const std::string& filename)
+{
+  typedef typename SparseMatrixType::Scalar Scalar;
+  typedef typename SparseMatrixType::Index Index;
+  std::ifstream input(filename.c_str(),std::ios::in);
+  if(!input)
+    return false;
+  
+  const int maxBuffersize = 2048;
+  char buffer[maxBuffersize];
+  
+  bool readsizes = false;
+
+  typedef Triplet<Scalar,Index> T;
+  std::vector<T> elements;
+  
+  Index M(-1), N(-1), NNZ(-1);
+  Index count = 0;
+  while(input.getline(buffer, maxBuffersize))
+  {
+    // skip comments   
+    //NOTE An appropriate test should be done on the header to get the  symmetry
+    if(buffer[0]=='%')
+      continue;
+    
+    std::stringstream line(buffer);
+    
+    if(!readsizes)
+    {
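+      // the first non-comment line gives the matrix dimensions and the number of nonzeros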
+      line >> M >> N >> NNZ;
+      if(M > 0 && N > 0 && NNZ > 0) 
+      {
+        readsizes = true;
+        //std::cout << "sizes: " << M << "," << N << "," << NNZ << "\n";
+        mat.resize(M,N);
+        mat.reserve(NNZ);
+      }
+    }
+    else
+    { 
+      Index i(-1), j(-1);
+      Scalar value; 
+      if( internal::GetMarketLine(line, M, N, i, j, value) ) 
+      {
+        ++ count;
+        elements.push_back(T(i,j,value));
+      }
+      else 
+        std::cerr << "Invalid read: " << i << "," << j << "\n";        
+    }
+  }
+  mat.setFromTriplets(elements.begin(), elements.end());
+  if(count!=NNZ)
+    std::cerr << "Warning: read " << count << " nonzeros, but the header announced " << NNZ << "\n";
+  
+  input.close();
+  return true;
+}
+
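+/** Loads a dense vector \a vec from a Matrix Market file \a filename
+  * (array format). \returns false if the file cannot be opened or if fewer
+  * elements than announced could be read. */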
+template<typename VectorType>
+bool loadMarketVector(VectorType& vec, const std::string& filename)
+{
+  typedef typename VectorType::Scalar Scalar;
+  std::ifstream in(filename.c_str(), std::ios::in);
+  if(!in)
+    return false;
+  
+  std::string line; 
+  int n(0), col(0); 
+  do 
+  { // Skip comments
+    std::getline(in, line); eigen_assert(in.good());
+  } while (line[0] == '%');
+  std::istringstream newline(line);
+  newline  >> n >> col; 
+  eigen_assert(n>0 && col>0);
+  vec.resize(n);
+  int i = 0; 
+  Scalar value; 
+  while ( std::getline(in, line) && (i < n) ){
+    internal::GetVectorElt(line, value); 
+    vec(i++) = value; 
+  }
+  in.close();
+  if (i!=n){
+    std::cerr<< "Unable to read all elements from file " << filename << "\n";
+    return false;
+  }
+  return true;
+}
+
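+/** Saves the sparse matrix \a mat to a Matrix Market file \a filename
+  * (coordinate format). Pass Symmetric or SelfAdjoint as \a sym to emit a
+  * symmetric, respectively Hermitian, banner.
+  * \returns false if the file cannot be opened. */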
+template<typename SparseMatrixType>
+bool saveMarket(const SparseMatrixType& mat, const std::string& filename, int sym = 0)
+{
+  typedef typename SparseMatrixType::Scalar Scalar;
+  std::ofstream out(filename.c_str(),std::ios::out);
+  if(!out)
+    return false;
+  
+  out.flags(std::ios_base::scientific);
+  out.precision(64);
+  std::string header; 
+  internal::putMarketHeader<Scalar>(header, sym); 
+  out << header << std::endl; 
+  out << mat.rows() << " " << mat.cols() << " " << mat.nonZeros() << "\n";
+  int count = 0;
+  for(int j=0; j<mat.outerSize(); ++j)
+    for(typename SparseMatrixType::InnerIterator it(mat,j); it; ++it)
+    {
+      ++ count;
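+      // convert back to the 1-based indices of the Matrix Market format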
+      internal::PutMatrixElt(it.value(), it.row()+1, it.col()+1, out);
+    }
+  out.close();
+  return true;
+}
+
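+/** Saves the dense vector \a vec to a Matrix Market file \a filename
+  * (array format). \returns false if the file cannot be opened. */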
+template<typename VectorType>
+bool saveMarketVector (const VectorType& vec, const std::string& filename)
+{
+  typedef typename VectorType::Scalar Scalar;
+  std::ofstream out(filename.c_str(),std::ios::out);
+  if(!out)
+    return false;
+  
+  out.flags(std::ios_base::scientific);
+  out.precision(64);
+  if(internal::is_same<Scalar, std::complex<float> >::value || internal::is_same<Scalar, std::complex<double> >::value)
+    out << "%%MatrixMarket matrix array complex general\n";
+  else
+    out << "%%MatrixMarket matrix array real general\n";
+  out << vec.size() << " " << 1 << "\n";
+  for (int i=0; i < vec.size(); i++){
+    internal::putVectorElt(vec(i), out); 
+  }
+  out.close();
+  return true; 
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_MARKET_IO_H
diff --git a/src/eigen/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h b/src/eigen/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h
new file mode 100644
index 000000000..02916ea6f
--- /dev/null
+++ b/src/eigen/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h
@@ -0,0 +1,247 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Desire NUENTSA WAKAM <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_BROWSE_MATRICES_H
+#define EIGEN_BROWSE_MATRICES_H
+
+namespace Eigen {
+
+enum {
+  SPD = 0x100,
+  NonSymmetric = 0x0
+}; 
+
+/** 
+ * @brief Iterator to browse matrices from a specified folder
+ * 
+ * This is used to load all the matrices from a folder.
+ * The matrices should be in Matrix Market format.
+ * It is assumed that a matrix is named matname.mtx,
+ * or matname_SPD.mtx if it is symmetric positive definite (or Hermitian).
+ * The right hand side vectors are loaded as well, if they exist.
+ * They should be named matname_b.mtx; note that the right hand side of an
+ * SPD matrix is accordingly named matname_SPD_b.mtx.
+ * 
+ * Sometimes a reference solution is available, in which case it should be named matname_x.mtx.
+ * 
+ * Sample code (a minimal sketch; \c folder is assumed to be a path containing Matrix Market files)
+ * \code
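+ * MatrixMarketIterator<double> it(folder);
+ * for( ; it; ++it)
+ * {
+ *   std::cout << "Reading matrix: " << it.matname() << std::endl;
+ *   solveProblem(it.matrix(), it.rhs()); // 'solveProblem' stands for arbitrary user code
+ * }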
+ * \endcode
+ * 
+ * \tparam Scalar The scalar type 
+ */
+template <typename Scalar>
+class MatrixMarketIterator 
+{
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+  public:
+    typedef Matrix<Scalar,Dynamic,1> VectorType; 
+    typedef SparseMatrix<Scalar,ColMajor> MatrixType; 
+  
+  public:
+    MatrixMarketIterator(const std::string &folder)
+      : m_sym(0), m_isvalid(false), m_matIsLoaded(false), m_hasRhs(false), m_hasrefX(false), m_folder(folder)
+    {
+      m_folder_id = opendir(folder.c_str());
+      if(m_folder_id)
+        Getnextvalidmatrix();
+    }
+    
+    ~MatrixMarketIterator()
+    {
+      if (m_folder_id) closedir(m_folder_id); 
+    }
+    
+    inline MatrixMarketIterator& operator++()
+    {
+      m_matIsLoaded = false;
+      m_hasrefX = false;
+      m_hasRhs = false;
+      Getnextvalidmatrix();
+      return *this;
+    }
+    inline operator bool() const { return m_isvalid;}
+    
+    /** Return the sparse matrix corresponding to the current file */
+    inline MatrixType& matrix() 
+    { 
+      // Read the matrix
+      if (m_matIsLoaded) return m_mat;
+      
+      std::string matrix_file = m_folder + "/" + m_matname + ".mtx";
+      if ( !loadMarket(m_mat, matrix_file)) 
+      {
+        std::cerr << "Warning: loadMarket failed when loading \"" << matrix_file << "\"" << std::endl;
+        m_matIsLoaded = false;
+        return m_mat;
+      }
+      m_matIsLoaded = true; 
+
+      if (m_sym != NonSymmetric) 
+      {
+        // Check whether we need to restore a full matrix:
+        RealScalar diag_norm  = m_mat.diagonal().norm();
+        RealScalar lower_norm = m_mat.template triangularView<Lower>().norm();
+        RealScalar upper_norm = m_mat.template triangularView<Upper>().norm();
+        if(lower_norm>diag_norm && upper_norm==diag_norm)
+        {
+          // only the lower part is stored
+          MatrixType tmp(m_mat);
+          m_mat = tmp.template selfadjointView<Lower>();
+        }
+        else if(upper_norm>diag_norm && lower_norm==diag_norm)
+        {
+          // only the upper part is stored
+          MatrixType tmp(m_mat);
+          m_mat = tmp.template selfadjointView<Upper>();
+        }
+      }
+      return m_mat; 
+    }
+    
+    /** Return the right hand side corresponding to the current matrix. 
+     * If the rhs file is not provided, a random rhs is generated
+     */
+    inline VectorType& rhs() 
+    { 
+       // Get the right hand side
+      if (m_hasRhs) return m_rhs;
+      
+      std::string rhs_file;
+      rhs_file = m_folder + "/" + m_matname + "_b.mtx"; // The pattern is matname_b.mtx
+      m_hasRhs = Fileexists(rhs_file);
+      if (m_hasRhs)
+      {
+        m_rhs.resize(m_mat.cols());
+        m_hasRhs = loadMarketVector(m_rhs, rhs_file);
+      }
+      if (!m_hasRhs)
+      {
+        // Generate a random right hand side
+        if (!m_matIsLoaded) this->matrix(); 
+        m_refX.resize(m_mat.cols());
+        m_refX.setRandom();
+        m_rhs = m_mat * m_refX;
+        m_hasrefX = true;
+        m_hasRhs = true;
+      }
+      return m_rhs; 
+    }
+    
+    /** Return a reference solution.
+     * If no reference solution file is provided, refX is only available when
+     * the right hand side had to be generated by rhs(); in that case refX is
+     * the random vector used to build it, so that A*refX = b where A and b
+     * are the current matrix and right hand side.
+     */
+    inline VectorType& refX() 
+    { 
+      // Check if a reference solution is provided
+      if (m_hasrefX) return m_refX;
+      
+      std::string lhs_file;
+      lhs_file = m_folder + "/" + m_matname + "_x.mtx"; 
+      m_hasrefX = Fileexists(lhs_file);
+      if (m_hasrefX)
+      {
+        m_refX.resize(m_mat.cols());
+        m_hasrefX = loadMarketVector(m_refX, lhs_file);
+      }
+      else
+        m_refX.resize(0);
+      return m_refX; 
+    }
+    
+    inline std::string& matname() { return m_matname; }
+    
+    inline int sym() { return m_sym; }
+    
+    bool hasRhs() {return m_hasRhs; }
+    bool hasrefX() {return m_hasrefX; }
+    bool isFolderValid() { return bool(m_folder_id); }
+    
+  protected:
+    
+    inline bool Fileexists(std::string file)
+    {
+      std::ifstream file_id(file.c_str());
+      if (!file_id.good() ) 
+      {
+        return false;
+      }
+      else 
+      {
+        file_id.close();
+        return true;
+      }
+    }
+    
+    void Getnextvalidmatrix( )
+    {
+      m_isvalid = false;
+      // Advance to the next valid matrix in the folder
+      while ( (m_curs_id = readdir(m_folder_id)) != NULL) {
+        m_isvalid = false;
+        std::string curfile;
+        curfile = m_folder + "/" + m_curs_id->d_name;
+        // Discard if it is a folder
+        if (m_curs_id->d_type == DT_DIR) continue; //FIXME This may not be available on non BSD systems
+//         struct stat st_buf; 
+//         stat (curfile.c_str(), &st_buf);
+//         if (S_ISDIR(st_buf.st_mode)) continue;
+        
+        // Determine from the header if it is a matrix or a right hand side 
+        bool isvector = false, iscomplex = false;
+        if(!getMarketHeader(curfile,m_sym,iscomplex,isvector)) continue;
+        if(isvector) continue;
+        if (!iscomplex)
+        {
+          if(internal::is_same<Scalar, std::complex<float> >::value || internal::is_same<Scalar, std::complex<double> >::value)
+            continue; 
+        }
+        if (iscomplex)
+        {
+          if(internal::is_same<Scalar, float>::value || internal::is_same<Scalar, double>::value)
+            continue; 
+        }
+        
+        // Get the matrix name
+        std::string filename = m_curs_id->d_name;
+        m_matname = filename.substr(0, filename.length()-4); 
+        
+        // Find if the matrix is SPD 
+        size_t found = m_matname.find("SPD");
+        if( (found!=std::string::npos) && (m_sym != NonSymmetric) )
+          m_sym = SPD;
+       
+        m_isvalid = true;
+        break; 
+      }
+    }
+    int m_sym; // Symmetry of the matrix
+    MatrixType m_mat; // Current matrix  
+    VectorType m_rhs;  // Current right hand side
+    VectorType m_refX; // The reference solution, if it exists
+    std::string m_matname; // Matrix Name
+    bool m_isvalid; 
+    bool m_matIsLoaded; // Whether the matrix has already been loaded from the file
+    bool m_hasRhs; // The right hand side exists
+    bool m_hasrefX; // A reference solution is provided
+    std::string m_folder;
+    DIR * m_folder_id;
+    struct dirent *m_curs_id; 
+    
+};
+
+} // end namespace Eigen
+
+#endif
diff --git a/src/eigen/unsupported/Eigen/src/SparseExtra/RandomSetter.h b/src/eigen/unsupported/Eigen/src/SparseExtra/RandomSetter.h
new file mode 100644
index 000000000..ee97299af
--- /dev/null
+++ b/src/eigen/unsupported/Eigen/src/SparseExtra/RandomSetter.h
@@ -0,0 +1,327 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_RANDOMSETTER_H
+#define EIGEN_RANDOMSETTER_H
+
+namespace Eigen { 
+
+/** Represents a std::map
+  *
+  * \see RandomSetter
+  */
+template<typename Scalar> struct StdMapTraits
+{
+  typedef int KeyType;
+  typedef std::map<KeyType,Scalar> Type;
+  enum {
+    IsSorted = 1
+  };
+
+  static void setInvalidKey(Type&, const KeyType&) {}
+};
+
+#ifdef EIGEN_UNORDERED_MAP_SUPPORT
+/** Represents a std::unordered_map
+  *
+  * To use it you need to both define EIGEN_UNORDERED_MAP_SUPPORT and include the unordered_map header file
+  * yourself, making sure that unordered_map is defined in the std namespace.
+  *
+  * For instance, with recent versions of GCC you can either enable the C++0x standard (-std=c++0x) or do:
+  * \code
+  * #include <tr1/unordered_map>
+  * #define EIGEN_UNORDERED_MAP_SUPPORT
+  * namespace std {
+  *   using std::tr1::unordered_map;
+  * }
+  * \endcode
+  *
+  * \see RandomSetter
+  */
+template<typename Scalar> struct StdUnorderedMapTraits
+{
+  typedef int KeyType;
+  typedef std::unordered_map<KeyType,Scalar> Type;
+  enum {
+    IsSorted = 0
+  };
+
+  static void setInvalidKey(Type&, const KeyType&) {}
+};
+#endif // EIGEN_UNORDERED_MAP_SUPPORT
+
+#ifdef _DENSE_HASH_MAP_H_
+/** Represents a google::dense_hash_map
+  *
+  * \see RandomSetter
+  */
+template<typename Scalar> struct GoogleDenseHashMapTraits
+{
+  typedef int KeyType;
+  typedef google::dense_hash_map<KeyType,Scalar> Type;
+  enum {
+    IsSorted = 0
+  };
+
+  static void setInvalidKey(Type& map, const KeyType& k)
+  { map.set_empty_key(k); }
+};
+#endif
+
+#ifdef _SPARSE_HASH_MAP_H_
+/** Represents a google::sparse_hash_map
+  *
+  * \see RandomSetter
+  */
+template<typename Scalar> struct GoogleSparseHashMapTraits
+{
+  typedef int KeyType;
+  typedef google::sparse_hash_map<KeyType,Scalar> Type;
+  enum {
+    IsSorted = 0
+  };
+
+  static void setInvalidKey(Type&, const KeyType&) {}
+};
+#endif
+
+/** \class RandomSetter
+  *
+  * \brief The RandomSetter is a wrapper object that allows setting/updating a sparse matrix with random access
+  *
+  * \tparam SparseMatrixType the type of the sparse matrix we are updating
+  * \tparam MapTraits a traits class representing the map implementation used for the temporary sparse storage.
+  *                  Its default value depends on the system.
+  * \tparam OuterPacketBits defines the number of rows (or columns) managed by a single map object,
+  *                        given as a power-of-two exponent.
+  *
+  * This class temporarily represents a sparse matrix object using a generic map implementation allowing for
+  * efficient random access. The conversion from the compressed representation to a hash_map object is performed
+  * in the RandomSetter constructor, while the sparse matrix is updated back at destruction time. This strategy
+  * suggests the use of nested blocks as in this example:
+  *
+  * \code
+  * SparseMatrix<double> m(rows,cols);
+  * {
+  *   RandomSetter<SparseMatrix<double> > w(m);
+  *   // don't use m but w instead with read/write random access to the coefficients:
+  *   for(;;)
+  *     w(rand(),rand()) = rand();
+  * }
+  * // when w is deleted, the data are copied back to m
+  * // and m is ready to use.
+  * \endcode
+  *
+  * Since hash_map objects are not fully sorted, representing a full matrix as a single hash_map would
+  * involve a big and costly sort to update the compressed matrix back. To overcome this issue, a RandomSetter
+  * uses multiple hash_maps, each representing 2^OuterPacketBits columns or rows according to the storage order.
+  * To reach optimal performance, this value should be adjusted according to the average number of nonzeros
+  * per row/column.
+  *
+  * The possible values for the template parameter MapTraits are:
+  *  - \b StdMapTraits: corresponds to std::map. (does not perform very well)
+  *  - \b GnuHashMapTraits: corresponds to __gnu_cxx::hash_map (available only with GCC)
+  *  - \b GoogleDenseHashMapTraits: corresponds to google::dense_hash_map (best efficiency, reasonable memory consumption)
+  *  - \b GoogleSparseHashMapTraits: corresponds to google::sparse_hash_map (best memory consumption, relatively good performance)
+  *
+  * The default map implementation depends on availability; the preferred order is:
+  * GoogleDenseHashMapTraits, GnuHashMapTraits, and finally StdMapTraits.
+  *
+  * For performance and memory consumption reasons it is highly recommended to use one of
+  * Google's hash_map implementations. To enable support for them, you have two options:
+  *  - \#include <google/dense_hash_map> yourself \b before the Eigen/Sparse header
+  *  - define EIGEN_GOOGLEHASH_SUPPORT
+  * In the latter case the inclusion of <google/dense_hash_map> is done for you.
+  *
+  * \see http://code.google.com/p/google-sparsehash/
+  */
+template<typename SparseMatrixType,
+         template <typename T> class MapTraits =
+#if defined _DENSE_HASH_MAP_H_
+          GoogleDenseHashMapTraits
+#elif defined _HASH_MAP
+          GnuHashMapTraits
+#else
+          StdMapTraits
+#endif
+         ,int OuterPacketBits = 6>
+class RandomSetter
+{
+    typedef typename SparseMatrixType::Scalar Scalar;
+    typedef typename SparseMatrixType::StorageIndex StorageIndex;
+
+    struct ScalarWrapper
+    {
+      ScalarWrapper() : value(0) {}
+      Scalar value;
+    };
+    typedef typename MapTraits<ScalarWrapper>::KeyType KeyType;
+    typedef typename MapTraits<ScalarWrapper>::Type HashMapType;
+    static const int OuterPacketMask = (1 << OuterPacketBits) - 1;
+    enum {
+      SwapStorage = 1 - MapTraits<ScalarWrapper>::IsSorted,
+      TargetRowMajor = (SparseMatrixType::Flags & RowMajorBit) ? 1 : 0,
+      SetterRowMajor = SwapStorage ? 1-TargetRowMajor : TargetRowMajor
+    };
+
+  public:
+
+    /** Constructs a random setter object from the sparse matrix \a target
+      *
+      * Note that the initial values of \a target are imported. If you want to re-set
+      * a sparse matrix from scratch, then you must set it to zero first using the
+      * setZero() function.
+      */
+    inline RandomSetter(SparseMatrixType& target)
+      : mp_target(&target)
+    {
+      const Index outerSize = SwapStorage ? target.innerSize() : target.outerSize();
+      const Index innerSize = SwapStorage ? target.outerSize() : target.innerSize();
+      m_outerPackets = outerSize >> OuterPacketBits;
+      if (outerSize&OuterPacketMask)
+        m_outerPackets += 1;
+      m_hashmaps = new HashMapType[m_outerPackets];
+      // compute number of bits needed to store inner indices
+      Index aux = innerSize - 1;
+      m_keyBitsOffset = 0;
+      while (aux)
+      {
+        ++m_keyBitsOffset;
+        aux = aux >> 1;
+      }
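+      // every valid key is strictly smaller than (1<<(OuterPacketBits+m_keyBitsOffset)),
+      // so that value can serve as the 'invalid key' marker (e.g. the empty key
+      // required by google::dense_hash_map)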
+      KeyType ik = (1<<(OuterPacketBits+m_keyBitsOffset));
+      for (Index k=0; k<m_outerPackets; ++k)
+        MapTraits<ScalarWrapper>::setInvalidKey(m_hashmaps[k],ik);
+
+      // insert current coeffs
+      for (Index j=0; j<mp_target->outerSize(); ++j)
+        for (typename SparseMatrixType::InnerIterator it(*mp_target,j); it; ++it)
+          (*this)(TargetRowMajor?j:it.index(), TargetRowMajor?it.index():j) = it.value();
+    }
+
+    /** Destructor updating back the sparse matrix target */
+    ~RandomSetter()
+    {
+      KeyType keyBitsMask = (1<<m_keyBitsOffset)-1;
+      if (!SwapStorage) // also means the map is sorted
+      {
+        mp_target->setZero();
+        mp_target->makeCompressed();
+        mp_target->reserve(nonZeros());
+        Index prevOuter = -1;
+        for (Index k=0; k<m_outerPackets; ++k)
+        {
+          const Index outerOffset = (1<<OuterPacketBits) * k;
+          typename HashMapType::iterator end = m_hashmaps[k].end();
+          for (typename HashMapType::iterator it = m_hashmaps[k].begin(); it!=end; ++it)
+          {
+            const Index outer = (it->first >> m_keyBitsOffset) + outerOffset;
+            const Index inner = it->first & keyBitsMask;
+            if (prevOuter!=outer)
+            {
+              for (Index j=prevOuter+1;j<=outer;++j)
+                mp_target->startVec(j);
+              prevOuter = outer;
+            }
+            mp_target->insertBackByOuterInner(outer, inner) = it->second.value;
+          }
+        }
+        mp_target->finalize();
+      }
+      else
+      {
+        VectorXi positions(mp_target->outerSize());
+        positions.setZero();
+        // pass 1
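+        // count the number of coefficients per outer vector of the target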
+        for (Index k=0; k<m_outerPackets; ++k)
+        {
+          typename HashMapType::iterator end = m_hashmaps[k].end();
+          for (typename HashMapType::iterator it = m_hashmaps[k].begin(); it!=end; ++it)
+          {
+            const Index outer = it->first & keyBitsMask;
+            ++positions[outer];
+          }
+        }
+        // prefix sum
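+        // turn the per-vector counts into start offsets within the compressed storage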
+        Index count = 0;
+        for (Index j=0; j<mp_target->outerSize(); ++j)
+        {
+          Index tmp = positions[j];
+          mp_target->outerIndexPtr()[j] = count;
+          positions[j] = count;
+          count += tmp;
+        }
+        mp_target->makeCompressed();
+        mp_target->outerIndexPtr()[mp_target->outerSize()] = count;
+        mp_target->resizeNonZeros(count);
+        // pass 2
+        for (Index k=0; k<m_outerPackets; ++k)
+        {
+          const Index outerOffset = (1<<OuterPacketBits) * k;
+          typename HashMapType::iterator end = m_hashmaps[k].end();
+          for (typename HashMapType::iterator it = m_hashmaps[k].begin(); it!=end; ++it)
+          {
+            const Index inner = (it->first >> m_keyBitsOffset) + outerOffset;
+            const Index outer = it->first & keyBitsMask;
+            // sorted insertion
+            // Note that we have to deal with at most 2^OuterPacketBits unsorted coefficients,
+            // moreover those 2^OuterPacketBits coeffs are likely to be sparse, and so only a
+            // small fraction of them have to be sorted, whence the following simple procedure:
+            Index posStart = mp_target->outerIndexPtr()[outer];
+            Index i = (positions[outer]++) - 1;
+            while ( (i >= posStart) && (mp_target->innerIndexPtr()[i] > inner) )
+            {
+              mp_target->valuePtr()[i+1] = mp_target->valuePtr()[i];
+              mp_target->innerIndexPtr()[i+1] = mp_target->innerIndexPtr()[i];
+              --i;
+            }
+            mp_target->innerIndexPtr()[i+1] = inner;
+            mp_target->valuePtr()[i+1] = it->second.value;
+          }
+        }
+      }
+      delete[] m_hashmaps;
+    }
+
+    /** \returns a reference to the coefficient at given coordinates \a row, \a col */
+    Scalar& operator() (Index row, Index col)
+    {
+      const Index outer = SetterRowMajor ? row : col;
+      const Index inner = SetterRowMajor ? col : row;
+      const Index outerMajor = outer >> OuterPacketBits; // index of the packet/map
+      const Index outerMinor = outer & OuterPacketMask;  // index of the inner vector in the packet
+      const KeyType key = internal::convert_index<KeyType>((outerMinor<<m_keyBitsOffset) | inner);
+      return m_hashmaps[outerMajor][key].value;
+    }
+
+    /** \returns the number of non zero coefficients
+      *
+      * \note Depending on the underlying map/hash_map implementation,
+      * this function might be quite expensive.
+      */
+    Index nonZeros() const
+    {
+      Index nz = 0;
+      for (Index k=0; k<m_outerPackets; ++k)
+        nz += static_cast<Index>(m_hashmaps[k].size());
+      return nz;
+    }
+
+
+  protected:
+
+    HashMapType* m_hashmaps;
+    SparseMatrixType* mp_target;
+    Index m_outerPackets;
+    unsigned char m_keyBitsOffset;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_RANDOMSETTER_H
diff --git a/src/libslic3r/SLA/SLASupportTree.cpp b/src/libslic3r/SLA/SLASupportTree.cpp
index 8615ab91c..37b0c0ffc 100644
--- a/src/libslic3r/SLA/SLASupportTree.cpp
+++ b/src/libslic3r/SLA/SLASupportTree.cpp
@@ -612,7 +612,9 @@ double ray_mesh_intersect(const Vec3d& s,
                           const Vec3d& dir,
                           const EigenMesh3D& m);
 
-PointSet normals(const PointSet& points, const EigenMesh3D& mesh);
+PointSet normals(const PointSet& points, const EigenMesh3D& mesh,
+                 double eps = 0.05,  // min distance from edges
+                 std::function<void()> throw_on_cancel = [](){});
 
 inline Vec2d to_vec2(const Vec3d& v3) {
     return {v3(X), v3(Y)};
@@ -1049,7 +1051,7 @@ bool SLASupportTree::generate(const PointSet &points,
         tifcl();
 
         // calculate the normals to the triangles belonging to filtered points
-        auto nmls = sla::normals(filt_pts, mesh);
+        auto nmls = sla::normals(filt_pts, mesh, cfg.head_front_radius_mm, tifcl);
 
         head_norm.resize(count, 3);
         head_pos.resize(count, 3);
diff --git a/src/libslic3r/SLA/SLASupportTreeIGL.cpp b/src/libslic3r/SLA/SLASupportTreeIGL.cpp
index 50d7775a2..5d40bb514 100644
--- a/src/libslic3r/SLA/SLASupportTreeIGL.cpp
+++ b/src/libslic3r/SLA/SLASupportTreeIGL.cpp
@@ -1,3 +1,4 @@
+#include <cmath>
 #include "SLA/SLASupportTree.hpp"
 #include "SLA/SLABoilerPlate.hpp"
 #include "SLA/SLASpatIndex.hpp"
@@ -9,15 +10,8 @@
 #include "boost/geometry/index/rtree.hpp"
 
 #include <igl/ray_mesh_intersect.h>
-
-//#if !defined(_MSC_VER) || defined(_WIN64)
-#if 1
-#define IGL_COMPATIBLE
-#endif
-
-#ifdef IGL_COMPATIBLE
 #include <igl/point_mesh_squared_distance.h>
-#endif
+#include <igl/remove_duplicate_vertices.h>
 
 #include "SLASpatIndex.hpp"
 #include "ClipperUtils.hpp"
@@ -84,33 +78,124 @@ size_t SpatIndex::size() const
     return m_impl->m_store.size();
 }
 
-PointSet normals(const PointSet& points, const EigenMesh3D& mesh) {
-    if(points.rows() == 0 || mesh.V.rows() == 0 || mesh.F.rows() == 0) return {};
-#ifdef IGL_COMPATIBLE
+bool point_on_edge(const Vec3d& p, const Vec3d& e1, const Vec3d& e2,
+                   double eps = 0.05)
+{
+    using Line3D = Eigen::ParametrizedLine<double, 3>;
+
+    auto line = Line3D::Through(e1, e2);
+    double d = line.distance(p);
+    return std::abs(d) < eps;
+}
+
+template<class Vec> double distance(const Vec& pp1, const Vec& pp2) {
+    return (pp2 - pp1).norm();
+}
+
+PointSet normals(const PointSet& points, const EigenMesh3D& emesh,
+                 double eps,
+                 std::function<void()> throw_on_cancel) {
+    if(points.rows() == 0 || emesh.V.rows() == 0 || emesh.F.rows() == 0)
+        return {};
+
     Eigen::VectorXd dists;
     Eigen::VectorXi I;
     PointSet C;
 
+    // We need to remove the duplicate vertices to obtain a true indexed
+    // triangle structure
+    EigenMesh3D  mesh;
+    Eigen::VectorXi SVI, SVJ;
+    igl::remove_duplicate_vertices(emesh.V, emesh.F, 1e-6,
+                                   mesh.V, SVI, SVJ, mesh.F);
+
     igl::point_mesh_squared_distance( points, mesh.V, mesh.F, dists, I, C);
 
     PointSet ret(I.rows(), 3);
     for(int i = 0; i < I.rows(); i++) {
+        throw_on_cancel();
         auto idx = I(i);
         auto trindex = mesh.F.row(idx);
 
-        auto& p1 = mesh.V.row(trindex(0));
-        auto& p2 = mesh.V.row(trindex(1));
-        auto& p3 = mesh.V.row(trindex(2));
+        const Vec3d& p1 = mesh.V.row(trindex(0));
+        const Vec3d& p2 = mesh.V.row(trindex(1));
+        const Vec3d& p3 = mesh.V.row(trindex(2));
 
-        Eigen::Vector3d U = p2 - p1;
-        Eigen::Vector3d V = p3 - p1;
-        ret.row(i) = U.cross(V).normalized();
+        // We should check if the point lies on an edge of the hosting triangle.
+        // If it does, then all the other triangles sharing those two vertices
+        // have to be searched and the final normal should be some kind of
+        // aggregation of the participating triangle normals. We should also
+        // consider the case where the support point lies right on a vertex
+        // of its triangle. The procedure is the same: get the neighbor
+        // triangles and calculate an average normal.
+
+        const Vec3d& p = C.row(i);
+
+        // Mark the vertex indices of the edge: ia and ib mark an edge, ic
+        // marks a single vertex.
+        int ia = -1, ib = -1, ic = -1;
+
+        if(std::abs(distance(p, p1)) < eps) {
+            ic = trindex(0);
+        }
+        else if(std::abs(distance(p, p2)) < eps) {
+            ic = trindex(1);
+        }
+        else if(std::abs(distance(p, p3)) < eps) {
+            ic = trindex(2);
+        }
+        else if(point_on_edge(p, p1, p2, eps)) {
+            ia = trindex(0); ib = trindex(1);
+        }
+        else if(point_on_edge(p, p2, p3, eps)) {
+            ia = trindex(1); ib = trindex(2);
+        }
+        else if(point_on_edge(p, p1, p3, eps)) {
+            ia = trindex(0); ib = trindex(2);
+        }
+
+        std::vector<Vec3i> neigh;
+        if(ic >= 0) { // The point is right on a vertex of the triangle
+            for(int n = 0; n < mesh.F.rows(); ++n) {
+                throw_on_cancel();
+                Vec3i ni = mesh.F.row(n);
+                if((ni(X) == ic || ni(Y) == ic || ni(Z) == ic))
+                    neigh.emplace_back(ni);
+            }
+        }
+        else if(ia >= 0 && ib >= 0) { // the point is on an edge
+            // now get all the neighboring triangles
+            for(int n = 0; n < mesh.F.rows(); ++n) {
+                throw_on_cancel();
+                Vec3i ni = mesh.F.row(n);
+                if((ni(X) == ia || ni(Y) == ia || ni(Z) == ia) &&
+                   (ni(X) == ib || ni(Y) == ib || ni(Z) == ib))
+                    neigh.emplace_back(ni);
+            }
+        }
+
+        if(!neigh.empty()) { // there are neighboring triangles to average
+            Vec3d sumnorm(0, 0, 0);
+            for(const Vec3i& tri : neigh) {
+                const Vec3d& pt1 = mesh.V.row(tri(0));
+                const Vec3d& pt2 = mesh.V.row(tri(1));
+                const Vec3d& pt3 = mesh.V.row(tri(2));
+                Eigen::Vector3d U = pt2 - pt1;
+                Eigen::Vector3d V = pt3 - pt1;
+                sumnorm += U.cross(V).normalized();
+            }
+            sumnorm /= neigh.size();
+            ret.row(i) = sumnorm;
+        }
+        else { // point lies safely within its triangle
+            Eigen::Vector3d U = p2 - p1;
+            Eigen::Vector3d V = p3 - p1;
+            ret.row(i) = U.cross(V).normalized();
+        }
     }
 
     return ret;
-#else // TODO:  do something on 32 bit windows
-    return {};
-#endif
 }
 
 double ray_mesh_intersect(const Vec3d& s,
@@ -223,7 +308,7 @@ Segments model_boundary(const EigenMesh3D& emesh, double offs)
         pp.emplace_back(p);
     }
 
-    ExPolygons merged = union_ex(offset(pp, float(scale_(offs))), true);
+    ExPolygons merged = union_ex(Slic3r::offset(pp, float(scale_(offs))), true);
 
     for(auto& expoly : merged) {
         auto lines = expoly.lines();