Bugzilla – Attachment 681 Details for Bug 1179 – Hypersparse matrix class proposal
This bugzilla service is closed. All entries have been migrated to https://gitlab.com/libeigen/eigen
HyperSparseMatrix class
Description: HyperSparseMatrix class
Filename: dcs.h
MIME Type: text/x-c
Creator: Dmitry Zhdanov
Created: 2016-03-29 22:52:07 UTC
Size: 48.60 KB
//
// Copyright (C) 2014-2016 Dmitry Zhdanov <dmitry.zhdanov@northwestern.edu>
// based on sparse matrix code by Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef DCS_H
#define DCS_H

#include "sparse_accumulator.h"

namespace dcs //Forward declarations
{
  /*! \ingroup SparseCore_Module
   *
   * \class HyperSparseMatrix
   *
   * \brief Implementation of the doubly compressed sparse row/column format (DCSR(C))
   *
   * The doubly compressed sparse row/column format (DCSR(C)) was originally proposed in:
   *
   * A. Buluc and J. R. Gilbert, "On the representation and multiplication of hypersparse matrices",
   * in IPDPS'08: Proceedings of the 2008 IEEE International Symposium on Parallel and Distributed Processing, IEEE Computer Society, Washington, DC, 2008, pp. 1-11.
   *
   * This implementation is intended to be maximally compatible with Eigen::SparseMatrix.
   *
   * Key limitations:
   * - No support for the Eigen::SparseMatrix-like uncompressed mode for fast insertions (the InnerNNZs array is omitted).
   *
   * Major enhancements:
   * + Added a memory-efficient transposeInPlace() function.
   * + Added a conservativeResize() function.
   * + Reimplemented and generalized InnerIterator and ReverseInnerIterator.
   * + No need to call finalize() after using the low-level API functions.
   *
   * Some features:
   * + Optimized operator=(const Eigen::SparseMatrixBase<OtherDerived>& other) which avoids the need for an extra copy of the matrix.
   * + Optimized setFromTriplets() algorithm.
   *
   * \tparam _Scalar - type of matrix elements
   * \tparam _Options - storage order: either Eigen::ColMajor (default) or Eigen::RowMajor
   * \tparam _Index - type of indices (should be a signed type)
   */

  //template<typename _Scalar, int _Options, typename _Index> class HyperSparseMatrix;
}


namespace dcs
{

  template<typename _Scalar, int _Options, typename _Index>
  class HyperSparseMatrix
    : public Eigen::SparseCompressedBase<dcs::HyperSparseMatrix<_Scalar, _Options, _Index> >
  {
  public:

    typedef Eigen::SparseCompressedBase<HyperSparseMatrix> Base;
    using Base::isCompressed;
    using Base::nonZeros;
    EIGEN_SPARSE_PUBLIC_INTERFACE(HyperSparseMatrix)
    typedef _Index Index;
    using Base::operator+=;
    using Base::operator-=;
    using Base::IsRowMajor;

    //???!!! do we need this typedef for compatibility
    //typedef Eigen::MappedSparseMatrix<Scalar, Flags> Map;
    typedef Eigen::Diagonal<HyperSparseMatrix> DiagonalReturnType;
    typedef Eigen::Diagonal<const HyperSparseMatrix> ConstDiagonalReturnType;
    class InnerIterator;
    class ReverseInnerIterator;

    typedef Eigen::internal::CompressedStorage<Scalar, Index> Storage;
    enum {
      Options = _Options,
      RowMajorBit = Eigen::RowMajorBit
    };

    typedef HyperSparseMatrix NestedExpression;
    typedef typename Base::IndexVector IndexVector;
    typedef typename Base::ScalarVector ScalarVector;
  protected:

//============ data ===========
    //Same as in Eigen::SparseMatrix

    Index m_outerSize;
    Index m_innerSize;
    Index* m_outerIndex;
    Storage m_data;

    //!!!NEW HyperSparseMatrix - specific
    Index* m_outerAuxIndex;
    Index m_outerRealSize;
    size_t m_allocatedOuterSize;
    //line 112
  public:
//============ functions ===========
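    // Illustrative sketch (editor's example, not part of the original attachment):
    // the DCSC layout for a 6x6 column-major matrix with non-zeros
    // A(0,1)=3, A(2,1)=5, A(4,4)=7. Only the two non-empty columns are stored:
    //
    //   m_outerAuxIndex = [1, 4]       // real indices of the non-empty columns
    //   m_outerIndex    = [0, 2, 3]    // start positions in m_data (size m_outerSize+1)
    //   m_data.index    = [0, 2, 4]    // inner (row) indices
    //   m_data.value    = [3, 5, 7]
    //
    // so m_outerSize==2 (non-empty columns) while m_outerRealSize==6 (logical columns);
    // a plain CSC m_outerIndex would need m_outerRealSize+1 entries instead.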
    /*! \returns the number of rows of the matrix */
    inline Index rows() const { return IsRowMajor ? m_outerRealSize : m_innerSize; }
    /*! \returns the number of columns of the matrix */
    inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerRealSize; }

    /*! \returns the number of rows (resp. columns) of the matrix if the storage order is column major (resp. row major) */
    inline Index innerSize() const { return m_innerSize; }
    /*! \returns the number of columns (resp. rows) of the matrix if the storage order is column major (resp. row major) */
    inline Index outerSize() const { return m_outerRealSize; }

    //NEW!
    /*! \returns the number of non-empty columns (resp. rows) of the matrix if the storage order is column major (resp. row major).
     * This function is aimed at interoperability with other libraries.
     */
    inline Index outerAuxSize() const { return m_outerSize; }

    /*! \returns a const pointer to the array of values.
     * This function is aimed at interoperability with other libraries.
     * \sa innerIndexPtr(), outerIndexPtr() */
    inline const Scalar* valuePtr() const { return &m_data.value(0); }
    /*! \returns a non-const pointer to the array of values.
     * This function is aimed at interoperability with other libraries.
     * \sa innerIndexPtr(), outerIndexPtr() */
    inline Scalar* valuePtr() { return &m_data.value(0); }

    /*! \returns a const pointer to the array of inner indices.
     * This function is aimed at interoperability with other libraries.
     * \sa valuePtr(), outerIndexPtr() */
    inline const Index* innerIndexPtr() const { return &m_data.index(0); }
    /*! \returns a non-const pointer to the array of inner indices.
     * This function is aimed at interoperability with other libraries.
     * \sa valuePtr(), outerIndexPtr() */
    inline Index* innerIndexPtr() { return &m_data.index(0); }

    /*! \returns a const pointer to the array of the starting positions of the inner vectors.
     * This function is aimed at interoperability with other libraries.
     * \sa valuePtr(), innerIndexPtr() */
    inline const Index* outerIndexPtr() const { return m_outerIndex; }
    /*! \returns a non-const pointer to the array of the starting positions of the inner vectors.
     * This function is aimed at interoperability with other libraries.
     * \sa valuePtr(), innerIndexPtr() */
    inline Index* outerIndexPtr() { return m_outerIndex; }

    /*! \returns a const pointer to the array of indices of the non-empty outer vectors.
     * \sa valuePtr(), innerIndexPtr() */
    inline const Index* outerAuxIndexPtr() const { return m_outerAuxIndex; }
    /*! \returns a non-const pointer to the array of indices of the non-empty outer vectors.
     * \sa valuePtr(), innerIndexPtr() */
    inline Index* outerAuxIndexPtr() { return m_outerAuxIndex; }

    /*! \internal
     * \returns a reference to the inner storage structure (which is identical to the one used by Eigen::SparseMatrix)
     */
    inline Storage& data() { return m_data; }
    /*! \internal
     * \returns a \c const reference to the inner storage structure (which is identical to the one used by Eigen::SparseMatrix)
     */
    inline const Storage& data() const { return m_data; }

    //NEW!
    /*! \internal
     * \returns the position of the maximal auxiliary index lower than \c key over the whole aux index domain
     */
    inline Index searchLowerAuxIndex(Index key) const
    {
      return searchLowerAuxIndex(0, m_outerSize, key);
    }
    //NEW!
    /*! \internal
     * \returns the maximal auxiliary index in the range [\c start, \c end] which corresponds to an outer index lower than \c key (or \c start if nothing is found)
     */
    inline Index searchLowerAuxIndex(ptrdiff_t start, ptrdiff_t end, Index key) const
    {
      while (end > start)
      {
        size_t mid = (end + start) >> 1;
        if (m_outerAuxIndex[mid] < key)
          start = mid + 1;
        else
          end = mid;
      }
      return static_cast<Index>(start);
    }

    //NEW!
    /*! \internal
     * \returns either the auxIndex corresponding to the given outer index (key) or m_outerSize if no match is found.
     */
    inline Index auxIndex(Index key) const
    {
      return auxIndex(0, m_outerSize, key);
    }

    //NEW!
    /*! \internal
     * \returns the auxIndex corresponding to the given outer index (key) in the range [start, end] or m_outerSize if no match is found
     */
    inline Index auxIndex(size_t start, size_t end, Index key) const
    {
      if (start >= end)
        return Index(0);
      else if (end > start && key == m_outerAuxIndex[end - 1])
        return Index(end - 1);
      // ^^ optimization: let's first check if it is the last coefficient
      // (very common in high level algorithms)
      const Index id = searchLowerAuxIndex(start, end - 1, key);
      return ((id < (Index)end) && (m_outerAuxIndex[id] == key)) ? id : m_outerSize;
    }

    /*! \returns the value of the matrix at position \a i, \a j
     * This function returns Scalar(0) if the element is an explicit \em zero */
    inline Scalar coeff(Index row, Index col) const
    {
      eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
      const Index auxOuter = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;
      Index outer=auxIndex(auxOuter);
      if (outer < m_outerSize)
      {
        return m_data.atInRange(m_outerIndex[outer], m_outerIndex[outer + 1], inner);
      }
      else
      {
        return Scalar(0);
      }
    }

    //NEW!
    /*!
     * \returns the row/column pointer index for the arrays \c m_outerIndex and \c m_outerAuxIndex.
     * \c auxOuter is the row/column auxIndex.
     * If the column/row does not exist then it will be inserted.
     */
    Index auxIndexRef(Index auxOuter)
    {
      Index outerPtr=searchLowerAuxIndex(0, m_outerSize, auxOuter);
      // Updating matrix size and m_outerIndex and m_outerAuxIndex pointers
      if (outerPtr == m_outerSize || m_outerAuxIndex[outerPtr] != auxOuter)
      {
        resizeOuterIndices(m_outerSize + 1);
        Index p = m_outerSize;
        m_outerIndex[p]=m_outerIndex[p - 1];
        p--;
        while (p > outerPtr)
        {
          m_outerAuxIndex[p]=m_outerAuxIndex[p - 1];
          m_outerIndex[p]=m_outerIndex[p - 1];
          p--;
        }
        m_outerAuxIndex[p]=auxOuter;
        return p;
      }
      return outerPtr;
    }
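    // Illustrative sketch (editor's example, not part of the original attachment):
    // with the layout shown above (m_outerAuxIndex = [1, 4]), auxIndex() performs
    // a binary search over the non-empty outer indices only:
    //
    //   auxIndex(4); // == 1: column 4 is stored at position 1
    //   auxIndex(3); // == m_outerSize (== 2): column 3 is empty, no match
    //
    // so a coeff(row, col) lookup costs O(log(outerAuxSize())) to locate the
    // column plus a binary search over that column's non-zeros to locate the row.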
    /*! \returns a non-const reference to the value of the matrix at position \a i, \a j
     *
     * If the element does not exist then it is inserted via the insert(Index,Index) function.
     *
     * This is a O(log(nnz_j)) operation (binary search) plus the cost of the insert(Index,Index)
     * function if the element does not already exist.
     * NEW! This function does NOT convert the matrix into Uncompressed mode!
     */
    inline Scalar& coeffRef(Index row, Index col)
    {
      eigen_assert(row >= 0 && row<rows() && col >= 0 && col<cols());

      const Index auxOuter = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      Index outer=auxIndexRef(auxOuter);

      Index start = m_outerIndex[outer];
      Index end = m_outerIndex[outer + 1];
      eigen_assert(end >= start && "you probably called coeffRef on a non finalized matrix");
      if (end <= start)
        return insert(row, col);
      const Index p = (Index)m_data.searchLowerIndex(start, end - 1, inner);
      if ((p<end) && (m_data.index(p) == inner))
        return m_data.value(p);
      else
        return insert(row, col);
    }


    /*! \returns a reference to a novel non-zero coefficient with coordinates \c row x \c col.
     * The non-zero coefficient must \b not already exist.
     *
     * It is strongly recommended to first call reserve(const SizesType &) to reserve
     * a more appropriate number of elements per inner vector that better matches your scenario.
     *
     * This function performs a sorted insertion in O(1) if the elements of each inner vector are
     * inserted in increasing inner index order, and in O(nnz_j) for a random insertion.
     *
     * NEW! This function does not convert the matrix to Uncompressed mode!
     */
    Scalar& insert(Index row, Index col)
    {
      eigen_assert(row >= 0 && row<rows() && col >= 0 && col<cols());
      return insertCompressed(row, col);
    }

    /*! Removes all non-zeros but keeps the allocated memory.
     * NEW! Also keeps the auxIndex structure unchanged.
     * \sa squeeze()
     */
    inline void setZero()
    {
      m_data.clear();
      resizeOuterIndices(0);
      if (m_outerIndex)
        m_outerIndex[0]=0;
    }

    /*! \returns the number of non-zero coefficients */
    inline Index nonZeros() const
    {
      return static_cast<Index>(m_data.size());
    }

// Memory management
    /*! Preallocates \c reserveSize non-zeros.
     * \sa reserveOuter(), squeeze(), squeezeOuter()
     */
    inline void reserve(Index reserveSize, Index reserveOuterSize=0)
    {
      m_data.reserve(reserveSize);
      if (reserveOuterSize > 0)
        reserveOuter(reserveOuterSize);
    }

    /*! Reallocates the internal arrays to accommodate \a reserveOuterSize additional non-empty rows/columns.
     * \sa reserve(), squeeze(), squeezeOuter()
     */
    inline void reserveOuter(Index reserveOuterSize)
    {
      size_t new_size=m_outerSize + reserveOuterSize;
      if (new_size > m_allocatedOuterSize)
        reallocateOuterIndices(new_size);
    }

    /*! Releases all extra unused memory.
     * \sa squeezeOuter(), reserve(), reserveOuter()
     */
    inline void squeeze()
    {
      squeezeOuter();
      m_data.squeeze();
    }

    /*! Releases the memory reserved for adding new rows/columns (does not release the memory reserved for adding non-zeros).
     * \sa squeeze(), reserve(), reserveOuter()
     */
    inline void squeezeOuter()
    {
      if (m_allocatedOuterSize>m_outerSize)
        reallocateOuterIndices(m_outerSize);
    }
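    /*! Usage sketch (editor's example, not part of the original attachment):
     * random-access filling via coeffRef(), which inserts missing elements and,
     * unlike Eigen::SparseMatrix, never switches to an uncompressed mode.
     * \code
     * dcs::HyperSparseMatrix<double, Eigen::ColMajor, int> A(1000000, 1000000);
     * A.reserve(16, 4);           // hint: ~16 non-zeros in ~4 non-empty columns
     * A.coeffRef(3, 141) = 1.0;   // inserts a new element
     * A.coeffRef(3, 141) += 0.5;  // updates the existing element in place
     * double v = A.coeff(7, 141); // returns 0.0 without inserting anything
     * \endcode
     */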

////////////////////////////////////////////////////////
// LOW-LEVEL API
////////////////////////////////////////////////////////

  public:

    //--- low level purely coherent filling ---

    /*! \internal
     * \returns a reference to the non-zero coefficient at position \c row, \c col assuming that:
     * - the non-zero does not already exist
     * - the new coefficient is the last one according to the storage order
     *
     * Before filling a given inner vector you must call the startVec(Index) function.
     *
     * Unlike SparseMatrix, after an insertion session you do not need to call the finalize() function.
     *
     * \sa insert, insertBackByOuterInner, startVec */
    inline Scalar& insertBack(Index row, Index col)
    {
      return insertBackByOuterInner(IsRowMajor ? row : col, IsRowMajor ? col : row);
    }

    /*! \internal
     * \sa insertBack, startVec */
    inline Scalar& insertBackByOuterInner(Index realOuter, Index inner)
    {
      Index outer=auxIndex(realOuter); //NEW!
      eigen_assert(_Index(m_outerIndex[outer + 1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
      eigen_assert((m_outerIndex[outer + 1] - m_outerIndex[outer] == 0 || m_data.index(m_data.size() - 1)<inner) && "Invalid ordered insertion (invalid inner index)");
      Index p = m_outerIndex[outer + 1];
      ++m_outerIndex[outer + 1];
      m_data.append(0, inner);
      return m_data.value(p);
    }

    /*! \internal
     * \warning use it only if you know what you are doing */
    inline Scalar& insertBackByOuterInnerUnordered(Index realOuter, Index inner)
    {
      Index outer=auxIndex(realOuter); //NEW!
      Index p = m_outerIndex[outer + 1];
      ++m_outerIndex[outer + 1];
      m_data.append(0, inner);
      return m_data.value(p);
    }

    /*! \internal
     * \sa insertBack, insertBackByOuterInner
     * NEW! Totally rewritten. Rows still must be inserted in incremental order, but zero rows can be skipped. */
    inline void startVec(Index realOuter)
    {
      if (m_outerSize <= 0) m_outerSize=0;
      eigen_assert((m_outerSize <= 0 || m_outerAuxIndex[m_outerSize - 1] < realOuter) && "You must call startVec for each inner vector sequentially.");
      if (m_outerSize==0 || m_outerIndex[m_outerSize - 1] != m_outerIndex[m_outerSize])
      {
        resizeOuterIndices(m_outerSize + 1);
        m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize - 1];
        eigen_assert((m_outerIndex[m_outerSize] == Index(m_data.size())) && "You must call startVec for each inner vector sequentially. There probably was an inconsistent call before.");
      }
      m_outerAuxIndex[m_outerSize - 1]=realOuter;
    }

    /*! \internal
     * Must be called after inserting a set of non-zero entries using the low-level compressed API.
     * NEW! NOT required in the new version (does nothing). Preserved for interface compatibility with Eigen::SparseMatrix.
     */
    inline void finalize(){}

    //---


////////////////////////////////////////////////////////
// END OF LOW-LEVEL API
////////////////////////////////////////////////////////
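    /*! Usage sketch of the low-level API (editor's example, not part of the original attachment):
     * outer vectors are opened with startVec() in increasing order, empty ones may be
     * skipped entirely, and no finalize() call is needed afterwards.
     * \code
     * dcs::HyperSparseMatrix<double, Eigen::ColMajor, int> A(100000, 100000);
     * A.startVec(10);                // open column 10 (columns 0..9 are skipped, not stored)
     * A.insertBack(2, 10) = 1.0;     // inner indices must increase within a column
     * A.insertBack(5, 10) = 2.0;
     * A.startVec(99999);             // jump directly to the last column
     * A.insertBack(0, 99999) = 3.0;
     * \endcode
     */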
    //NEW!
    /*! Batch insertion of new elements from
     * \code std::vector<Eigen::Triplet<double>> tripletList \endcode
     * or any similar container.
     * The arguments \c begin and \c end should be appropriate container begin and end iterators;
     * the optional arguments \c acc_idx and \c acc_val are pointers to memory buffers for intermediate operations (their contents will be destroyed).
     * NEW! An enhanced, memory-efficient algorithm is used compared to the Eigen::SparseMatrix analog.
     */
    template<typename InputIterator>
    void setFromTriplets(const InputIterator& begin, const InputIterator& end, util::sparse_accumulator<Index, Index>* acc_idx=NULL, util::sparse_accumulator<Scalar, Index>* acc_val=NULL)
    {
      bool own_acc_idx=(acc_idx == NULL);
      if (own_acc_idx)
        acc_idx= new util::sparse_accumulator<Index, Index>(std::max(cols(), rows()));
      else if (acc_idx->size() != std::max(cols(), rows()))
        acc_idx->resize(std::max(cols(), rows()));
      bool own_acc_val=(acc_val == NULL);
      if (own_acc_val)
        acc_val= new util::sparse_accumulator<Scalar, Index>(std::max(cols(), rows()));
      else if (acc_val->size() != std::max(cols(), rows()))
        acc_val->resize(std::max(cols(), rows()));

      // pass1a: count non-empty columns or rows
      for (InputIterator it(begin); it != end; ++it)
      {
        eigen_assert(it->row() >= 0 && it->row()<rows() && it->col() >= 0 && it->col()<cols());
        acc_idx->add(IsRowMajor ? it->row() : it->col(), 1);
      }
      //acc_idx->sort_indices();
      //std::free(m_outerIndex);
      if (m_outerSize != (_Index)acc_idx->active_size())
        resizeOuterIndices(acc_idx->active_size());

      // pass1b: preliminary initialization of m_outerAuxIndex, m_outerIndex (no duplicates accounted)
      acc_idx->sort_indices();
      m_outerIndex[0]=0;
      acc_idx->store(m_outerAuxIndex, m_outerIndex+1);

      Index nnz=0;
      for (ptrdiff_t i=0; i < (ptrdiff_t)m_outerSize; i++)
      {
        nnz+=acc_idx->value(m_outerAuxIndex[i]);
        acc_idx->value(m_outerAuxIndex[i])=nnz - acc_idx->value(m_outerAuxIndex[i]);
      }

      // pass2: sorting the triplet elements by outer index
      Index* idx_tmp=static_cast<Index*>(std::malloc((nnz)* sizeof(Index)));
      Index k=0;
      for (InputIterator it(begin); it != end; ++it)
      {
        idx_tmp[(acc_idx->value(IsRowMajor ? it->row() : it->col()))++]=k++;
      }

      // pass3: determining the actual number of non-zeros (accounting for duplicates)
      acc_idx->reset();
      k=0;
      nnz=0;
      for (size_t i=0; i < (size_t)m_outerSize; i++)
      {
        for (size_t j=0; j < (size_t)m_outerIndex[i + 1]; j++)
        {
          acc_idx->set_flag(IsRowMajor ? (begin + idx_tmp[k++])->col() : (begin + idx_tmp[k++])->row());
        }
        nnz+=(_Index)acc_idx->active_size();
        acc_idx->reset_flags();
      }

      // pass4: writing data
      data().resize(nnz);
      //data().clear();
      k=0;
      for (size_t i=0; i < (size_t)m_outerSize; i++)
      {
        for (size_t j=0; j < (size_t)m_outerIndex[i + 1]; j++)
        {
          InputIterator ptr=begin + idx_tmp[k++];
          acc_val->add(IsRowMajor ? ptr->col() : ptr->row(), ptr->value());
        }
        m_outerIndex[i + 1]=m_outerIndex[i]+(_Index)acc_val->active_size();
        acc_val->sort_indices();
        acc_val->store_and_reset(&data().index(m_outerIndex[i]), &data().value(m_outerIndex[i]));
      }

      std::free(idx_tmp);
      if (own_acc_idx)
        delete acc_idx;
      else
        acc_idx->reset();
      if (own_acc_val)
        delete acc_val;
      else
        acc_val->reset();
    }

    /*! \internal
     * Same as insert(Index,Index) except that the indices are given relative to the storage order.
     * \returns a reference to the [\a j,\a i]-th matrix element.
     */
    Scalar& insertByOuterInner(Index j, Index i)
    {
      return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
    }

    /*! Suppresses all non-zeros which are much smaller than \c reference under the tolerance \c epsilon */
    void prune(const Scalar& reference, const RealScalar& epsilon = Eigen::NumTraits<RealScalar>::dummy_precision())
    {
      prune(default_prunning_func(reference, epsilon));
    }
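    /*! Usage sketch for setFromTriplets() (editor's example, not part of the original attachment):
     * \code
     * std::vector<Eigen::Triplet<double> > tripletList;
     * tripletList.push_back(Eigen::Triplet<double>(0, 1, 3.0));
     * tripletList.push_back(Eigen::Triplet<double>(2, 1, 5.0));
     * tripletList.push_back(Eigen::Triplet<double>(2, 1, 1.0)); // duplicates are summed
     * dcs::HyperSparseMatrix<double, Eigen::ColMajor, int> A(6, 6);
     * A.setFromTriplets(tripletList.begin(), tripletList.end()); // A(2,1) == 6.0
     * \endcode
     */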
    /*! Suppresses all non-zeros which do not satisfy the predicate \c keep.
     * Rearranges the auxiliary structures by eliminating empty rows/columns if necessary.
     * The functor type \a KeepFunc must implement the following function:
     * \code
     * bool operator() (const Index& row, const Index& col, const Scalar& value) const;
     * \endcode
     * \sa prune(const Scalar&, const RealScalar&)
     */
    template<typename KeepFunc>
    void prune(const KeepFunc& keep = KeepFunc())
    {
      // TODO also implement a unit test
      Index k = 0;
      for (Index j=0; j<m_outerSize; ++j)
      {
        Index previousStart = m_outerIndex[j];
        m_outerIndex[j] = k;
        Index end = m_outerIndex[j + 1];
        for (Index i=previousStart; i<end; ++i)
        {
          if (keep(IsRowMajor ? j : m_data.index(i), IsRowMajor ? m_data.index(i) : j, m_data.value(i)))
          {
            m_data.value(k) = m_data.value(i);
            m_data.index(k) = m_data.index(i);
            ++k;
          }
        }
      }
      m_outerIndex[m_outerSize] = k;
      m_data.resize(k, 0);

      //NEW! Pruning m_outerIndex and m_outerAuxIndex
      pruneIndices();
    }

    //NEW!
    /*! Rearranges the auxiliary structures by eliminating empty rows/columns.
     */
    void pruneIndices()
    {
      Index k=0;
      while (k < m_outerSize && m_outerIndex[k] != m_outerIndex[k + 1])
        k++;
      Index l=k + 1;
      if (l > m_outerSize) return; // nothing needs to be done
      while (l < m_outerSize && m_outerIndex[l] == m_outerIndex[l + 1]) l++;
      while (l < m_outerSize)
      {
        m_outerIndex[k]= m_outerIndex[l];
        m_outerAuxIndex[k]= m_outerAuxIndex[l];
        k++;
        do { l++; } while (l < m_outerSize && m_outerIndex[l] == m_outerIndex[l + 1]);
      }
      m_outerIndex[k]= m_outerIndex[l];
      resizeOuterIndices(k);
    }

protected:

    //NEW!
    /*! \internal Unprotect only if you know what you are doing!
     * Reallocates the auxiliary structures to accommodate space for \c new_size non-empty rows/columns.
     */
    inline void reallocateOuterIndices(size_t new_size)
    {
      Index *newOuterIndex = static_cast<Index*>(std::realloc(m_outerIndex, (new_size + 1)* sizeof(Index)));
      if (!newOuterIndex) Eigen::internal::throw_std_bad_alloc();
      m_outerIndex = newOuterIndex;
      Index *newOuterAuxIndex = static_cast<Index*>(std::realloc(m_outerAuxIndex, (new_size)* sizeof(Index)));
      if (!newOuterAuxIndex) Eigen::internal::throw_std_bad_alloc();
      m_outerAuxIndex = newOuterAuxIndex;
      if (new_size > m_allocatedOuterSize) //setting newly allocated memory to zero !!!??? should we do it??? ATTENTION!!! Used in util::sparse_evaluator<_Scalar, _Index>::multiply with hypersparse matrices
      {
        if (m_allocatedOuterSize == 0) m_outerIndex[0]=0;
        std::memset(m_outerIndex + (m_allocatedOuterSize + 1), 0, (new_size - m_allocatedOuterSize)*sizeof(Index));
        std::memset(m_outerAuxIndex + m_allocatedOuterSize, 0, (new_size - m_allocatedOuterSize)*sizeof(Index));
      }
      m_allocatedOuterSize=new_size;
    }

public:

    //NEW!
    /*! \internal Call only if you know what you are doing!
     * Sets new sizes of the arrays m_outerIndex and m_outerAuxIndex and modifies the value of m_outerSize.
     */
    void resizeOuterIndices(size_t new_size)
    {
      if (m_allocatedOuterSize < new_size)
        reallocateOuterIndices(new_size);
      m_outerSize=(_Index)new_size;
    }
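    /*! Usage sketch for prune() with a custom predicate (editor's example, not part of the original attachment):
     * \code
     * // keep only the strictly upper triangular part of A
     * struct upper_func {
     *   bool operator() (const int& row, const int& col, const double&) const
     *   { return row < col; }
     * };
     * dcs::HyperSparseMatrix<double, Eigen::ColMajor, int> A(10, 10);
     * // ... fill A ...
     * A.prune(upper_func()); // empty rows/columns are removed from the aux structures
     * \endcode
     */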
    /*! Resizes the matrix to a \c rows x \c cols matrix and initializes it to zero.
     * This completely destroys the auxiliary structures and frees the associated memory.
     * However, the memory allocated for storing non-zeros is preserved.
     * \sa conservativeResize(Index,Index), reserve(), reserveOuter(), setZero()
     */
    void resize(Index rows, Index cols)
    {
      m_outerRealSize = IsRowMajor ? rows : cols;
      m_innerSize = IsRowMajor ? cols : rows;
      m_data.clear();
      if (m_outerIndex != NULL)
      {
        std::free(m_outerIndex);
        m_outerIndex = NULL;
      }
      if (m_outerAuxIndex != NULL)
      {
        std::free(m_outerAuxIndex);
        m_outerAuxIndex=NULL;
      }
      m_allocatedOuterSize=0;
      m_outerSize = 0;
    }

    //NEW!
    /*! Changes the number of columns while preserving all existing elements at columns \a i < \c nbCols. The elements in newly added columns are set to 0.
     * \param nbCols new number of columns
     */
    inline void conservativeResize(Eigen::NoChange_t, Index nbCols)
    {
      conservativeResize(rows(), nbCols);
    }


    //NEW!
    /*! Changes the number of rows while preserving all existing elements at rows \a i < \c nbRows. The elements in newly added rows are set to 0.
     * \param nbRows new number of rows
     * \sa resize()
     */
    inline void conservativeResize(Index nbRows, Eigen::NoChange_t)
    {
      conservativeResize(nbRows, cols());
    }

    //NEW!
    /*! Resizes the matrix while preserving all its non-zero elements at positions [\a i,\a j]<[\c nbRows,\c nbCols]. New elements are set to zero.
     * \param nbRows new number of rows
     * \param nbCols new number of columns
     * \sa resize()
     */
    void conservativeResize(Index nbRows, Index nbCols)
    {
      Index nbInner, nbOuter;
      if (IsRowMajor) { nbInner = nbCols; nbOuter = nbRows; }
      else { nbInner = nbRows; nbOuter = nbCols; }
      //cutting out extra outer lines (rows) if necessary
      if (nbOuter < outerSize())
      {
        Index nbAuxOuter = searchLowerAuxIndex(nbOuter) + 1;
        if (nbAuxOuter < outerAuxSize())
        {
          m_data.resize(m_outerIndex[nbAuxOuter], 0);
          m_outerSize = nbAuxOuter;
        }
      }
      //setting the new outer dimension [???!!!]TODO - this part can be optimized!
      m_outerRealSize = nbOuter;
      //cutting out extra inner dimensions (columns) if necessary
      if (nbInner < innerSize())
      {
        conservative_resizing_func inner_cut(nbInner);
        prune(inner_cut);
        pruneIndices();
      }
      //setting the new inner dimension
      m_innerSize = nbInner;
    }
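    /*! Usage sketch for conservativeResize() (editor's example, not part of the original attachment):
     * \code
     * dcs::HyperSparseMatrix<double, Eigen::ColMajor, int> A(100, 100);
     * A.coeffRef(2, 1) = 5.0;
     * A.conservativeResize(200, 200);           // grow: A(2,1) is preserved
     * A.conservativeResize(2, Eigen::NoChange); // shrink rows: A(2,1) is cut off
     * \endcode
     */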

    /*! \returns the diagonal coefficients as a const expression. */
    const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(*this); }

    /*! \returns the diagonal coefficients as a read-write expression.
     * \warning If the diagonal entries are written, then all diagonal
     * entries \b must already exist, otherwise an assertion will be raised.
     */
    DiagonalReturnType diagonal() { return DiagonalReturnType(*this); }

//constructors
    /*! Default constructor yielding an empty \c 0 \c x \c 0 matrix */
    inline HyperSparseMatrix()
      : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_outerAuxIndex(0), m_outerRealSize(0), m_allocatedOuterSize(0)
    {
      check_template_parameters();
      resize(0, 0);
    }

    /*! Constructs a \c rows \c x \c cols empty matrix */
    inline HyperSparseMatrix(Index rows, Index cols)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_outerAuxIndex(0), m_outerRealSize(0), m_allocatedOuterSize(0)
    {
      check_template_parameters();
      resize(rows, cols);
    }

    /*! Constructs a hypersparse matrix from the sparse expression \c other */
    template<typename OtherDerived>
    inline HyperSparseMatrix(const Eigen::SparseMatrixBase<OtherDerived>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_outerAuxIndex(0), m_outerRealSize(0), m_allocatedOuterSize(0)
    {
      EIGEN_STATIC_ASSERT((Eigen::internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
      check_template_parameters();
      *this = other.derived();
    }

    /*! Constructs a hypersparse matrix from the sparse selfadjoint view \c other */
    template<typename OtherDerived, unsigned int UpLo>
    inline HyperSparseMatrix(const Eigen::SparseSelfAdjointView<OtherDerived, UpLo>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_outerAuxIndex(0), m_outerRealSize(0), m_allocatedOuterSize(0)
    {
      check_template_parameters();
      *this = other;
    }

    /*! Copy constructor (it performs a deep copy) */
    inline HyperSparseMatrix(const HyperSparseMatrix& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_outerAuxIndex(0), m_outerRealSize(0), m_allocatedOuterSize(0)
    {
      check_template_parameters();
      *this = other.derived();
    }

    /*! \brief Copy constructor with in-place evaluation */
    template<typename OtherDerived>
    HyperSparseMatrix(const Eigen::ReturnByValue<OtherDerived>& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_outerAuxIndex(0), m_outerRealSize(0), m_allocatedOuterSize(0)
    {
      check_template_parameters();
      initAssignment(other);
      other.evalTo(*this);
    }

    /*! Swaps the content of two sparse matrices of the same type.
     * This is a fast operation that simply swaps the underlying pointers and parameters. */
    inline void swap(HyperSparseMatrix& other)
    {
      //EIGEN_DBG_SPARSE(std::cout << "HyperSparseMatrix:: swap\n");
      std::swap(m_outerIndex, other.m_outerIndex);
      std::swap(m_innerSize, other.m_innerSize);
      std::swap(m_outerSize, other.m_outerSize);
      std::swap(m_outerAuxIndex, other.m_outerAuxIndex);
      std::swap(m_outerRealSize, other.m_outerRealSize);
      std::swap(m_allocatedOuterSize, other.m_allocatedOuterSize);
      m_data.swap(other.m_data);
    }

    /*! Sets *this to the identity matrix */
    inline void setIdentity()
    {
      eigen_assert(rows() == cols() && "ONLY FOR SQUARE MATRICES");
      this->m_data.resize(rows());
      resizeOuterIndices(rows());
      Eigen::Map<Eigen::Matrix<Index, Eigen::Dynamic, 1> >(&this->m_data.index(0), rows()).setLinSpaced(0, rows() - 1);
      Eigen::Map<Eigen::Matrix<Scalar, Eigen::Dynamic, 1> >(&this->m_data.value(0), rows()).setOnes();
      Eigen::Map<Eigen::Matrix<Index, Eigen::Dynamic, 1> >(this->m_outerIndex, rows() + 1).setLinSpaced(0, rows());
      Index k=0;
      for (Index* i = m_outerAuxIndex; i < m_outerAuxIndex + cols(); i++) (*i)=k++;
      //std::for_each(m_outerAuxIndex, m_outerAuxIndex + cols(), [&](Index& i){i=k++; });
    }
    //NEW!
    /*! Replaces the matrix with its own transpose using minimal auxiliary storage.
     */
    void transposeInPlace()
    {
      if (data().size() == 0) return;

      util::sparse_accumulator<Index, Index> acc_idx(innerSize());

      for (Index i=0; i < data().size(); i++)
      {
        acc_idx.add(data().index(i), 1);
      }
      acc_idx.sort_indices();
      util::sparse_accumulator<Index, Index> row_indices(acc_idx);

      Index nnz=0;
      Index j=0;
      for (Index i=0; i < (Index)acc_idx.active_size(); i++)
      {
        while (nnz<outerIndexPtr()[j] || nnz >= outerIndexPtr()[j + 1])
          j++;
        row_indices.active_value(i)= j;

        nnz+=acc_idx.active_value(i);
        acc_idx.active_value(i)=nnz - acc_idx.active_value(i);
      }

      util::sparse_accumulator<Index, Index> acc_idx_offsets(acc_idx);
      Index s=0;
      Index s0=0;
      for (Index i=0; i < outerAuxSize(); i++)
      {
        for (Index k=outerIndexPtr()[i]; k < outerIndexPtr()[i + 1]; k++)
        {
          // checking if the current k value has already been processed
          while (s0<(Index)acc_idx.active_size()-1 && (Index)acc_idx.active_value(s0) <= k)
            s0++;
          if (k >= acc_idx_offsets.active_value(s0) && k < acc_idx.active_value(s0))
          {
            k=acc_idx.active_value(s0)-1;
            continue;
          }

          Index _s=k;
          Index j=i;
          Scalar _val=data().value(_s);
          Index _idx=data().index(_s);
          do
          {
            Index& new_index=acc_idx.value(_idx);

            Index& jj=row_indices.value(_idx);

            //assigning new values to _val, _idx and _s and performing the transposition update
            std::swap(data().value(new_index), _val);
            _idx=data().index(new_index);
            _s=new_index; // (_s != new_index ? new_index : k);
            data().index(new_index)=outerAuxIndexPtr()[j];

            while (_s < outerIndexPtr()[jj] || _s >= outerIndexPtr()[jj + 1]) jj++;
            j=jj;

            new_index++;
          } while (_s != k);
        }
      }
      std::swap(m_innerSize, m_outerRealSize);
      resizeOuterIndices(acc_idx_offsets.active_size());
      acc_idx_offsets.store_and_reset(m_outerAuxIndex, m_outerIndex);
      m_outerIndex[acc_idx.active_size()]=nnz;

      util::sparse_accumulator<Scalar, Index> inner_sorter(innerSize());
      for (Index i=0; i < outerAuxSize(); i++)
      {
        for (Index j=outerIndexPtr()[i]; j < outerIndexPtr()[i + 1]; j++)
        {
          inner_sorter.add(data().index(j), data().value(j));
        }
        inner_sorter.sort_indices();
        inner_sorter.store_and_reset(&data().index(outerIndexPtr()[i]), &data().value(outerIndexPtr()[i]));
      }
    }

    /*! Copies \c other into \c *this.
     * \returns a reference to \c *this.
     */
    inline HyperSparseMatrix& operator=(const HyperSparseMatrix& other)
    {
      if (other.isRValue())
      {
        swap(other.const_cast_derived());
      }
      else if (this != &other)
      {
        initAssignment(other);
        resizeOuterIndices(other.outerAuxSize());
        //[[[!!!???NEW!!!???]]]
        if (m_outerSize > 0)
        {
          memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize + 1)*sizeof(Index));
          memcpy(m_outerAuxIndex, other.m_outerAuxIndex, (m_outerSize)*sizeof(Index));
        }
        m_data = other.m_data;
      }
      return *this;
    }

    template<typename OtherDerived>
    EIGEN_DONT_INLINE HyperSparseMatrix& operator=(const Eigen::SparseMatrixBase<OtherDerived>& other);

    friend std::ostream & operator << (std::ostream & s, const HyperSparseMatrix& m)
    {
      for (Index o=0; o < m.outerAuxSize(); ++o)
      {
        s << "Outer index: " << m.outerAuxIndexPtr()[o] << "\n";
        for (Index i=m.outerIndexPtr()[o]; i < m.outerIndexPtr()[o + 1]; ++i)
        {
          s << " [" << i << "]->" << m.data().index((Eigen::Index)i) << ": " << m.data().value((Eigen::Index)i) << "\n";
        }
      }
      return s;
    }
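    /*! Usage sketch for transposeInPlace() (editor's example, not part of the original attachment):
     * the non-zeros are permuted in place, with only sparse accumulators of size
     * innerSize() used as scratch storage, instead of evaluating a full transposed copy.
     * \code
     * A.transposeInPlace(); // rows() and cols() are swapped, A(i,j) becomes A(j,i)
     * \endcode
     */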
    /*! Destructor. */
    inline ~HyperSparseMatrix()
    {
      if (m_outerIndex != NULL)
      {
        std::free(m_outerIndex);
        m_outerIndex=NULL;
      }
      if (m_outerAuxIndex != NULL)
      {
        std::free(m_outerAuxIndex);
        m_outerAuxIndex=NULL;
      }
      m_allocatedOuterSize=0;
    }



private:

    template<typename Other>
    void initAssignment(const Other& other)
    {
      resize(_Index(other.rows()), _Index(other.cols()));
    }

    static void check_template_parameters()
    {
      EIGEN_STATIC_ASSERT(Eigen::NumTraits<Index>::IsSigned, THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
      EIGEN_STATIC_ASSERT((Options&(Eigen::ColMajor | Eigen::RowMajor)) == Options, INVALID_MATRIX_TEMPLATE_PARAMETERS);
    }

    struct default_prunning_func {
      default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
      inline bool operator() (const Index&, const Index&, const Scalar& value) const
      {
        return !Eigen::internal::isMuchSmallerThan(value, reference, epsilon);
      }
      Scalar reference;
      RealScalar epsilon;
    };

    struct conservative_resizing_func {
      conservative_resizing_func(Index innerSize) : innerSize(innerSize) {}
      inline bool operator() (const Index& row, const Index& col, const Scalar& value) const
      {
        return ((_Options & Eigen::RowMajor) ? col<innerSize : row<innerSize);
      }
      Index innerSize;
    };

    /*! \internal
     * \sa insert(Index,Index) */
    EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
  };

/////////////////////////////////////////////////////
//&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
/////////////////////////////////////////////////////
//&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
/////////////////////////////////////////////////////
//&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
/////////////////////////////////////////////////////

  /*!
   * \class InnerIterator
   *
   * \brief Inner iterator for hypersparse matrices. Allows both row-by-row and non-empty-rows-only iteration modes.
   *
   * \tparam _Scalar - type of matrix elements
   * \tparam _Options - storage order: either Eigen::ColMajor (default) or Eigen::RowMajor
   * \tparam _Index - type of indices (should be a signed type)
   */
  template<typename Scalar, int _Options, typename _Index>
  class HyperSparseMatrix<Scalar, _Options, _Index>::InnerIterator
  {
  public:
    //NEW!
    /*! Constructs an inner iterator for the \c _outer row/column of the matrix \c mat.
     * The index \c _outer refers to the \c n-th matrix row/column (\c 0 \c <= \c n \c < \c mat.outerSize()) if the flag \c isAuxOuter is set to \c false (default),
     * or to the \c n-th non-empty matrix row/column (\c 0 \c <= \c n \c < \c mat.outerAuxSize()) if \c isAuxOuter \c = \c true.
     */
    InnerIterator(const HyperSparseMatrix& mat, Index _outer, bool isAuxOuter=false)
      : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr())
    {
      if (isAuxOuter)
      {
        m_outer=mat.m_outerAuxIndex[_outer];
        m_id=mat.m_outerIndex[_outer];
        m_end = mat.m_outerIndex[_outer + 1];
      }
      else
      {
        _Index outer=mat.auxIndex(_outer);
        m_outer=_outer;
        if (outer >= mat.outerAuxSize())
        {
          m_id=(_Index)mat.data().size();
          m_end=(_Index)mat.data().size();
        }
        else
        {
          m_id=mat.m_outerIndex[outer];
          m_end = mat.m_outerIndex[outer + 1];
        }
      }
    }

    /*! Iterator increment. Switches to the next non-zero row/column element.
     * \returns a reference to \c *this.
     */
    inline InnerIterator& operator++() { m_id++; return *this; }
    /*! \returns a const reference to the current value. */
    inline const Scalar& value() const { return m_values[m_id]; }
    /*! \returns a reference to the current value. */
    inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }
    /*! \returns the inner index. */
    inline Index index() const { return m_indices[m_id]; }
    /*! \returns the outer index. */
    inline Index outer() const { return m_outer; }
    /*! \returns the current row index. */
    inline Index row() const { return IsRowMajor ? m_outer : index(); }
    /*! \returns the current column index. */
    inline Index col() const { return IsRowMajor ? index() : m_outer; }
    /*! \returns \c false if the end of the row/column is reached and \c true otherwise. */
    inline operator bool() const { return (m_id < m_end); }

  protected:
    const Scalar* m_values;
    const Index* m_indices;
    Index m_outer;
    Index m_id;
    Index m_end;
  };

  /*!
   * \class ReverseInnerIterator
   *
   * \brief Reverse inner iterator for hypersparse matrices (iterates from the end of a row/column to its beginning). Allows both row-by-row and non-empty-rows-only iteration modes.
   *
   * \tparam _Scalar - type of matrix elements
   * \tparam _Options - storage order: either Eigen::ColMajor (default) or Eigen::RowMajor
   * \tparam _Index - type of indices (should be a signed type)
   */
  template<typename Scalar, int _Options, typename _Index>
  class HyperSparseMatrix<Scalar, _Options, _Index>::ReverseInnerIterator
  {
  public:
    //NEW!
    /*! Constructs a reverse inner iterator for the \c _outer row/column of the matrix \c mat.
     * The index \c _outer refers to the \c n-th matrix row/column (\c 0 \c <= \c n \c < \c mat.outerSize()) if the flag \c isAuxOuter is set to \c false (default),
     * or to the \c n-th non-empty matrix row/column (\c 0 \c <= \c n \c < \c mat.outerAuxSize()) if \c isAuxOuter \c = \c true.
     */
    ReverseInnerIterator(const HyperSparseMatrix& mat, Index _outer, bool isAuxOuter=false)
      : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr())
    {
      if (isAuxOuter)
      {
        m_outer=mat.m_outerAuxIndex[_outer];
        m_id = mat.m_outerIndex[_outer + 1];
        m_start=mat.m_outerIndex[_outer];
      }
      else
      {
        _Index outer=mat.auxIndex(_outer);
        m_outer=_outer;
        if (outer >= mat.outerAuxSize())
        {
          m_id=0;
          m_start=0;
        }
        else
        {
          m_id=mat.m_outerIndex[outer+1];
          m_start=mat.m_outerIndex[outer];
        }
      }
    }
    /*! Iterator decrement. Switches to the previous non-zero row/column element.
     * \returns a reference to \c *this.
     */
    inline ReverseInnerIterator& operator--() { --m_id; return *this; }
    /*! \returns a const reference to the current value. */
    inline const Scalar& value() const { return m_values[m_id - 1]; }
    /*! \returns a reference to the current value. */
    inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id - 1]); }
    /*! \returns the inner index. */
    inline Index index() const { return m_indices[m_id - 1]; }
    /*! \returns the outer index. */
    inline Index outer() const { return m_outer; }
    /*! \returns the current row index. */
    inline Index row() const { return IsRowMajor ? m_outer : index(); }
    /*! \returns the current column index. */
    inline Index col() const { return IsRowMajor ? index() : m_outer; }
    /*! \returns \c false if the beginning of the row/column is reached and \c true otherwise. */
    inline operator bool() const { return (m_id > m_start); }

  protected:
    const Scalar* m_values;
    const Index* m_indices;
    Index m_outer;
    Index m_id;
    Index m_start;
  };
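  /*! Usage sketch for the two iterator modes (editor's example, not part of the original attachment):
   * \code
   * typedef dcs::HyperSparseMatrix<double, Eigen::ColMajor, int> HSMat;
   * double sum = 0, last = 0;
   * // mode 1: visit only the non-empty columns (no work for empty ones)
   * for (int o = 0; o < A.outerAuxSize(); ++o)
   *   for (HSMat::InnerIterator it(A, o, true); it; ++it)
   *     sum += it.value(); // it.row(), it.col(), it.index() as in Eigen
   * // mode 2: address a column by its real index, as with Eigen::SparseMatrix
   * for (HSMat::InnerIterator it(A, 42); it; ++it)
   *   sum += it.value(); // empty iteration if column 42 has no non-zeros
   * // ReverseInnerIterator walks the same ranges from the end:
   * for (HSMat::ReverseInnerIterator rit(A, 42); rit; --rit)
   *   last = rit.value();
   * \endcode
   */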

/////////////////////////////////////////////////////
//&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
/////////////////////////////////////////////////////
//&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
/////////////////////////////////////////////////////
//&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
/////////////////////////////////////////////////////

  /*!
   * \internal Implementation of the \c insert(row,col) functionality.
   * Made a separate function for compatibility with Eigen::SparseMatrix (the essential piece of code is copied from SparseMatrix.h).
   */
  template<typename _Scalar, int _Options, typename _Index>
  EIGEN_DONT_INLINE typename HyperSparseMatrix<_Scalar, _Options, _Index>::Scalar& HyperSparseMatrix<_Scalar, _Options, _Index>::insertCompressed(Index row, Index col)
  {
    const Index auxOuter = IsRowMajor ? row : col;
    const Index inner = IsRowMajor ? col : row;
//new start
    Index outer=auxIndexRef(auxOuter);
//new end
    Index previousOuter = outer;
    if (m_outerIndex[outer + 1] == 0)
    {
      // we start a new inner vector
      while (previousOuter >= 0 && m_outerIndex[previousOuter] == 0)
      {
        m_outerIndex[previousOuter] = static_cast<Index>(m_data.size());
        --previousOuter;
      }
      m_outerIndex[outer + 1] = m_outerIndex[outer];
    }

    // here we have to handle the tricky case where the outerIndex array
    // starts with: [ 0 0 0 0 0 1 ...] and we are inserting in, e.g.,
    // the 2nd inner vector...
    bool isLastVec = (!(previousOuter == -1 && m_data.size() != 0))
      && (m_outerIndex[outer + 1] == m_data.size());

    size_t startId = m_outerIndex[outer];
    // FIXME let's make sure sizeof(long int) == sizeof(size_t)
    size_t p = m_outerIndex[outer + 1];
    ++m_outerIndex[outer + 1];

    double reallocRatio = 1;
    if (m_data.allocatedSize() <= m_data.size())
    {
      // if there is no preallocated memory, let's reserve a minimum of 32 elements
      if (m_data.size() == 0)
      {
        m_data.reserve(32);
      }
      else
      {
        // we need to reallocate the data; to reduce multiple reallocations
        // we use a smart resize algorithm based on the current filling ratio;
        // in addition, we use double to avoid integer overflows
        double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize) / double(outer + 1);
        reallocRatio = (nnzEstimate - double(m_data.size())) / double(m_data.size());
        // furthermore we bound the realloc ratio to:
        // 1) reduce multiple minor reallocs when the matrix is almost filled
        // 2) avoid allocating too much memory when the matrix is almost empty
        reallocRatio = (std::min)((std::max)(reallocRatio, 1.5), 8.);
      }
    }
    m_data.resize(m_data.size() + 1, reallocRatio);

    if (!isLastVec)
    {
      if (previousOuter == -1)
      {
        // oops, wrong guess.
        // let's correct the outer offsets
        for (Index k=0; k <= (outer + 1); ++k)
          m_outerIndex[k] = 0;
        Index k=outer + 1;
        while (m_outerIndex[k] == 0)
          m_outerIndex[k++] = 1;
        while (k <= m_outerSize && m_outerIndex[k] != 0)
          m_outerIndex[k++]++;
        p = 0;
        --k;
        k = m_outerIndex[k] - 1;
        while (k > 0)
        {
          m_data.index(k) = m_data.index(k - 1);
          m_data.value(k) = m_data.value(k - 1);
          k--;
        }
      }
      else
      {
        // we are not inserting into the last inner vec
        // update outer indices:
        Index j = outer + 2;
        while (j <= m_outerSize && m_outerIndex[j] != 0)
          m_outerIndex[j++]++;
        --j;
        // shift data of last vecs:
        Index k = m_outerIndex[j] - 1;
        while (k >= Index(p))
        {
          m_data.index(k) = m_data.index(k - 1);
          m_data.value(k) = m_data.value(k - 1);
          k--;
        }
      }
    }

    while ((p > startId) && (m_data.index(p - 1) > inner))
    {
      m_data.index(p) = m_data.index(p - 1);
      m_data.value(p) = m_data.value(p - 1);
      --p;
    }

    m_data.index(p) = inner;
    return (m_data.value(p) = 0);
  }

  //////////////////////////////
  /*! Copies the generic sparse expression \c other into \c *this.
   * The expression must provide a (templated) evalTo(Derived& dst) const function which does the actual job.
   * NEW! To copy an object with a different storage order, a hack involving transposeInPlace() is used, which avoids creating extra copies of the matrix.
   * \returns a reference to \c *this.
   */
  template<typename Scalar, int _Options, typename _Index>
  template<typename OtherDerived>
  EIGEN_DONT_INLINE HyperSparseMatrix<Scalar, _Options, _Index>& HyperSparseMatrix<Scalar, _Options, _Index>::operator=(const Eigen::SparseMatrixBase<OtherDerived>& other)
  {
    EIGEN_STATIC_ASSERT((Eigen::internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
      YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)

    const bool needToTranspose = (Flags & Eigen::RowMajorBit) != (Eigen::internal::evaluator<OtherDerived>::Flags & Eigen::RowMajorBit);
    //const bool needToTranspose = (this->IsRowMajor!=other.IsRowMajor);// ((Flags & Eigen::RowMajorBit) != (OtherDerived::Flags & Eigen::RowMajorBit));
    if (needToTranspose)
    {
      //???!!! test if this hack is always correct!!!
      const int _Options_t = (_Options & (~Eigen::RowMajorBit)) | (Eigen::internal::evaluator<OtherDerived>::Flags & Eigen::RowMajorBit);
      HyperSparseMatrix<Scalar, _Options_t, _Index>* _comp_this = (HyperSparseMatrix<Scalar, _Options_t, _Index>*)this;
      *_comp_this = other.derived();
      _comp_this->transposeInPlace();
      return *this;
    }
    else
    {
      if (other.isRValue())
        initAssignment(other.derived());
      // there is no special optimization
      return Base::operator=(other.derived());
    }
  }
}

namespace Eigen //Traits
{
  namespace internal
  {
    template<typename _Scalar, int _Options, typename _Index>
    struct traits<dcs::HyperSparseMatrix<_Scalar, _Options, _Index> >
    {
      typedef _Scalar Scalar;
      typedef _Index StorageIndex;
      //typedef _Index Index;
      typedef Sparse StorageKind;
      typedef MatrixXpr XprKind;
      enum {
        RowsAtCompileTime = Dynamic,
        ColsAtCompileTime = Dynamic,
        MaxRowsAtCompileTime = Dynamic,
        MaxColsAtCompileTime = Dynamic,
        Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit,
        CoeffReadCost = NumTraits<Scalar>::ReadCost,
        SupportedAccessPatterns = InnerRandomAccessPattern
      };
    };
    ////////////////////////////////////////////////////////
    template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
    struct traits<Diagonal<dcs::HyperSparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
    {
      typedef dcs::HyperSparseMatrix<_Scalar, _Options, _Index> MatrixType;
      typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
      typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;

      typedef _Scalar Scalar;
      typedef Dense StorageKind;
      typedef _Index StorageIndex;
      typedef MatrixXpr XprKind;

      enum {
        RowsAtCompileTime = Dynamic,
        ColsAtCompileTime = 1,
        MaxRowsAtCompileTime = Dynamic,
        MaxColsAtCompileTime = 1,
        Flags = LvalueBit
      };
    };


    template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
    struct traits<Diagonal<const dcs::HyperSparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
      : public traits<Diagonal<dcs::HyperSparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
    {
      enum {
        Flags = 0
      };
    };
  }

  ////////////////////////////////////////////////////////
}

namespace Eigen
{
  namespace internal {

    template<typename _Scalar, int _Options, typename _Index>
    struct evaluator<dcs::HyperSparseMatrix<_Scalar, _Options, _Index> >
      : evaluator<SparseCompressedBase<dcs::HyperSparseMatrix<_Scalar, _Options, _Index> > >
    {
      typedef evaluator<SparseCompressedBase<dcs::HyperSparseMatrix<_Scalar, _Options, _Index> > > Base;
      typedef dcs::HyperSparseMatrix<_Scalar, _Options, _Index> SparseMatrixType;
      evaluator() : Base() {}
      explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
    };

  }
}

#endif
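A minimal end-to-end usage sketch (editor's example, illustrative only, not part of the original attachment; it assumes dcs.h and its companion header sparse_accumulator.h are on the include path, together with the Eigen development branch this patch targets):

#include <Eigen/SparseCore>
#include "dcs.h"

int main()
{
  // a 10^6 x 10^6 matrix with a handful of non-zeros: the storage cost is
  // proportional to the number of non-empty columns, not to cols()
  dcs::HyperSparseMatrix<double, Eigen::ColMajor, int> A(1000000, 1000000);
  A.coeffRef(0, 999999) = 1.0;
  A.coeffRef(999999, 0) = 2.0;
  // A.outerAuxSize() == 2 while A.outerSize() == 1000000

  A.transposeInPlace(); // in place, no full transposed copy

  // interoperability with Eigen sparse expressions
  Eigen::SparseMatrix<double> B(3, 3);
  B.setIdentity();
  dcs::HyperSparseMatrix<double, Eigen::ColMajor, int> C(B);
  return 0;
}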