#ifndef EIGEN_SPARSEMATRIX_H
#define EIGEN_SPARSEMATRIX_H

#include "./InternalHeaderCheck.h"
template<typename Scalar_, int Options_, typename StorageIndex_>
struct traits<SparseMatrix<Scalar_, Options_, StorageIndex_> >
{
  typedef Scalar_ Scalar;
  typedef StorageIndex_ StorageIndex;
  typedef Sparse StorageKind;
  typedef MatrixXpr XprKind;
  enum {
    // ... (compile-time sizes and flags elided in this extract)
    SupportedAccessPatterns = InnerRandomAccessPattern
  };
};
template<typename Scalar_, int Options_, typename StorageIndex_, int DiagIndex>
struct traits<Diagonal<SparseMatrix<Scalar_, Options_, StorageIndex_>, DiagIndex> >
{
  typedef SparseMatrix<Scalar_, Options_, StorageIndex_> MatrixType;
  typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;

  typedef Scalar_ Scalar;
  typedef Dense StorageKind;
  typedef StorageIndex_ StorageIndex;
  typedef MatrixXpr XprKind;

  enum {
    // ... (row counts and flags elided in this extract)
    ColsAtCompileTime = 1,
    MaxColsAtCompileTime = 1
    // ...
  };
};
template<typename Scalar_, int Options_, typename StorageIndex_, int DiagIndex>
struct traits<Diagonal<const SparseMatrix<Scalar_, Options_, StorageIndex_>, DiagIndex> >
  : public traits<Diagonal<SparseMatrix<Scalar_, Options_, StorageIndex_>, DiagIndex> >
{
  // ...
};
template<typename Scalar_, int Options_, typename StorageIndex_>
class SparseMatrix
  : public SparseCompressedBase<SparseMatrix<Scalar_, Options_, StorageIndex_> >
{
    typedef SparseCompressedBase<SparseMatrix> Base;
    using Base::convert_index;
    // friend declaration required by the generic assignment machinery
    template<typename, typename, typename, typename, typename>
    friend struct internal::Assignment;
  public:
    using Base::operator+=;
    using Base::operator-=;

    typedef typename Base::InnerIterator InnerIterator;
    typedef typename Base::ReverseInnerIterator ReverseInnerIterator;

    using Base::IsRowMajor;
    typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
    inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
    inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }

    inline const Scalar* valuePtr() const { return m_data.valuePtr(); }
    inline Scalar* valuePtr() { return m_data.valuePtr(); }

    inline Storage& data() { return m_data; }
    inline const Storage& data() const { return m_data; }
    /** \returns the value of the matrix at position (\a row, \a col) */
    Scalar coeff(Index row, Index col) const
    {
      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;
      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
      return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));
    }

    /** \returns a non-const reference to the value at position (\a row, \a col),
      * inserting the coefficient if it is not stored yet. */
    Scalar& coeffRef(Index row, Index col)
    {
      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      Index start = m_outerIndex[outer];
      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
      eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
      if(end<=start)
        return insert(row,col);
      const Index p = m_data.searchLowerIndex(start,end-1,inner);
      if((p<end) && (m_data.index(p)==inner))
        return m_data.value(p);
      else
        return insert(row,col);
    }
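    // Usage sketch (illustrative comment, not part of the original header):
    // coeff() performs a read-only lookup and never allocates, while coeffRef()
    // inserts the coefficient when it is not stored yet. The matrix name and
    // scalar type below are assumptions.
    //
    //   Eigen::SparseMatrix<double> A(4, 4);
    //   A.coeffRef(1, 2) = 3.0;     // creates the (1,2) entry on the fly
    //   double v = A.coeff(1, 2);   // == 3.0, read-only access
    //   double z = A.coeff(0, 0);   // == 0.0, nothing is inserted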
    /** Removes all non zeros but keeps the allocated memory */
    inline void setZero()
    {
      m_data.clear();
      std::fill_n(m_outerIndex, m_outerSize + 1, StorageIndex(0));
      if(m_innerNonZeros) {
        std::fill_n(m_innerNonZeros, m_outerSize, StorageIndex(0));
      }
    }

    /** Preallocates \a reserveSize non zeros (compressed mode only) */
    inline void reserve(Index reserveSize)
    {
      eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
      m_data.reserve(reserveSize);
    }
    #ifdef EIGEN_PARSED_BY_DOXYGEN
    /** Preallocates \a reserveSizes[\c j] non zeros for each inner vector \c j. */
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes);
    #else
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
        typename SizesType::value_type())
    {
      EIGEN_UNUSED_VARIABLE(enableif);
      reserveInnerVectors(reserveSizes);
    }
    #endif // EIGEN_PARSED_BY_DOXYGEN
    template<class SizesType>
    inline void reserveInnerVectors(const SizesType& reserveSizes)
    {
      if(isCompressed())
      {
        Index totalReserveSize = 0;
        // turn the matrix into non-compressed mode; m_innerNonZeros temporarily
        // holds the new starting positions
        m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
        if (!m_innerNonZeros) internal::throw_std_bad_alloc();
        StorageIndex* newOuterIndex = m_innerNonZeros;

        StorageIndex count = 0;
        for(Index j=0; j<m_outerSize; ++j)
        {
          newOuterIndex[j] = count;
          count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
          totalReserveSize += reserveSizes[j];
        }
        m_data.reserve(totalReserveSize);
        StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
        for(Index j=m_outerSize-1; j>=0; --j)
        {
          StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
          for(Index i=innerNNZ-1; i>=0; --i)
          {
            m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
            m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
          }
          previousOuterIndex = m_outerIndex[j];
          m_outerIndex[j] = newOuterIndex[j];
          m_innerNonZeros[j] = innerNNZ;
        }
        m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];

        m_data.resize(m_outerIndex[m_outerSize]);
      }
      else
      {
        StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
        if (!newOuterIndex) internal::throw_std_bad_alloc();

        StorageIndex count = 0;
        for(Index j=0; j<m_outerSize; ++j)
        {
          newOuterIndex[j] = count;
          StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
          StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
          count += toReserve + m_innerNonZeros[j];
        }
        newOuterIndex[m_outerSize] = count;

        m_data.resize(count);
        for(Index j=m_outerSize-1; j>=0; --j)
        {
          Index offset = newOuterIndex[j] - m_outerIndex[j];
          if(offset>0)
          {
            StorageIndex innerNNZ = m_innerNonZeros[j];
            for(Index i=innerNNZ-1; i>=0; --i)
            {
              m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
              m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
            }
          }
        }

        std::swap(m_outerIndex, newOuterIndex);
        std::free(newOuterIndex);
      }
    }
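    // Usage sketch (illustrative comment, not part of the original header):
    // reserve() accepts a per inner-vector budget, which avoids repeated buffer
    // moves when filling with insert(). Sizes and helper names are assumptions.
    //
    //   Eigen::SparseMatrix<double> A(rows, cols);           // column-major by default
    //   Eigen::VectorXi budget = Eigen::VectorXi::Constant(cols, 6);
    //   A.reserve(budget);                                   // ~6 entries per column
    //   for (int j = 0; j < cols; ++j)
    //     for (int k = 0; k < 6; ++k)
    //       A.insert(some_row(j, k), j) = some_value(j, k);  // hypothetical helpers
    //   A.makeCompressed();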
    /** \internal sorted insertion at the end of the inner vector containing (\a row, \a col) */
    inline Scalar& insertBack(Index row, Index col)
    {
      return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
    }

    /** \internal */
    inline Scalar& insertBackByOuterInner(Index outer, Index inner)
    {
      eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
      eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];
      m_data.append(Scalar(0), inner);
      return m_data.value(p);
    }

    /** \internal */
    inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
    {
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];
      m_data.append(Scalar(0), inner);
      return m_data.value(p);
    }

    /** \internal */
    inline void startVec(Index outer)
    {
      eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
      eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
      m_outerIndex[outer+1] = m_outerIndex[outer];
    }

    /** \internal must be called once all entries have been pushed with startVec()/insertBack() */
    inline void finalize()
    {
      if(isCompressed())
      {
        StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
        Index i = m_outerSize;
        // find the last filled inner vector
        while (i>=0 && m_outerIndex[i]==0)
          --i;
        ++i;
        while (i<=m_outerSize)
        {
          m_outerIndex[i] = size;
          ++i;
        }
      }
    }
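    // Usage sketch (illustrative comment, not part of the original header):
    // the low-level assembly API requires strictly increasing outer indices and,
    // within each inner vector, strictly increasing inner indices. With assumed
    // sizes, a typical sequence is:
    //
    //   Eigen::SparseMatrix<double> A(3, 3);
    //   A.reserve(3);                        // rough non-zero estimate
    //   for (int j = 0; j < A.outerSize(); ++j) {
    //     A.startVec(j);
    //     A.insertBack(j, j) = 1.0;          // one diagonal entry per column
    //   }
    //   A.finalize();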
    /** Fills \c *this from the triplet range \a begin - \a end, summing duplicated
      * entries (see the out-of-line definitions below). */
    template<typename InputIterators>
    void setFromTriplets(const InputIterators& begin, const InputIterators& end);

    template<typename InputIterators, typename DupFunctor>
    void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);

    void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar,Scalar>()); }

    template<typename DupFunctor>
    void collapseDuplicates(DupFunctor dup_func = DupFunctor());

    /** \internal same as insert(Index,Index), but the indices are given relative to the storage order */
    Scalar& insertByOuterInner(Index j, Index i)
    {
      return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
    }
    /** Turns the matrix into the \em compressed format. */
    void makeCompressed()
    {
      if(isCompressed())
        return;

      eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0);

      Index oldStart = m_outerIndex[1];
      m_outerIndex[1] = m_innerNonZeros[0];
      for(Index j=1; j<m_outerSize; ++j)
      {
        Index nextOldStart = m_outerIndex[j+1];
        Index offset = oldStart - m_outerIndex[j];
        if(offset>0)
        {
          for(Index k=0; k<m_innerNonZeros[j]; ++k)
          {
            m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
            m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
          }
        }
        m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
        oldStart = nextOldStart;
      }
      std::free(m_innerNonZeros);
      m_innerNonZeros = 0;
      m_data.resize(m_outerIndex[m_outerSize]);
    }
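    // Usage sketch (illustrative comment, not part of the original header):
    // random insertions leave the matrix in the uncompressed format; call
    // makeCompressed() before handing it to code that expects the standard
    // compressed (CSC/CSR) layout, e.g. most solvers.
    //
    //   A.insert(i, j) = v;         // matrix becomes uncompressed
    //   A.makeCompressed();         // squeeze out the reserved holes
    //   bool c = A.isCompressed();  // now true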
    /** Turns the matrix into the uncompressed mode */
    void uncompress()
    {
      if(m_innerNonZeros != 0) return;
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      for (Index i = 0; i < m_outerSize; i++)
        m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
    }

    /** Suppresses all nonzeros which are \em much \em smaller than \a reference under the tolerance \a epsilon */
    void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
    {
      prune(default_prunning_func(reference,epsilon));
    }
    /** Suppresses all nonzeros for which \a keep returns false; the result is compressed. */
    template<typename KeepFunc>
    void prune(const KeepFunc& keep = KeepFunc())
    {
      makeCompressed();

      StorageIndex k = 0;
      for(Index j=0; j<m_outerSize; ++j)
      {
        Index previousStart = m_outerIndex[j];
        m_outerIndex[j] = k;
        Index end = m_outerIndex[j+1];
        for(Index i=previousStart; i<end; ++i)
        {
          if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
          {
            m_data.value(k) = m_data.value(i);
            m_data.index(k) = m_data.index(i);
            ++k;
          }
        }
      }
      m_outerIndex[m_outerSize] = k;
      m_data.resize(k,0);
    }
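    // Usage sketch (illustrative comment, not part of the original header):
    // prune() keeps only the entries selected by the predicate and recompresses
    // the matrix. The thresholds below are assumptions.
    //
    //   A.prune(1.0, 1e-12);  // drop entries much smaller than 1.0
    //   A.prune([](Eigen::Index /*row*/, Eigen::Index /*col*/, const double& v) {
    //     return std::abs(v) > 1e-9;          // keep-predicate variant
    //   });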
    /** Resizes the matrix to \a rows x \a cols while keeping the old values. */
    void conservativeResize(Index rows, Index cols)
    {
      // nothing to do if the dimensions do not change
      if (this->rows() == rows && this->cols() == cols) return;

      // dimension changes expressed with respect to the storage order
      Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
      Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
      StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows);

      // deal with the inner non-zero counts
      if (m_innerNonZeros)
      {
        StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
        if (!newInnerNonZeros) internal::throw_std_bad_alloc();
        m_innerNonZeros = newInnerNonZeros;

        for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
          m_innerNonZeros[i] = 0;
      }
      else if (innerChange < 0)
      {
        // the inner size decreased: allocate a new m_innerNonZeros
        m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize + outerChange) * sizeof(StorageIndex)));
        if (!m_innerNonZeros) internal::throw_std_bad_alloc();
        for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
          m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
        for(Index i = m_outerSize; i < m_outerSize + outerChange; i++)
          m_innerNonZeros[i] = 0;
      }

      // drop the entries that fall outside of the new inner size
      if (m_innerNonZeros && innerChange < 0)
      {
        for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
        {
          StorageIndex &n = m_innerNonZeros[i];
          StorageIndex start = m_outerIndex[i];
          while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
        }
      }

      m_innerSize = newInnerSize;

      // re-allocate the outer index structure if needed
      if (outerChange == 0)
        return;

      StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
      if (!newOuterIndex) internal::throw_std_bad_alloc();
      m_outerIndex = newOuterIndex;
      if (outerChange > 0)
      {
        StorageIndex lastIdx = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
        for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
          m_outerIndex[i] = lastIdx;
      }
      m_outerSize += outerChange;
    }
    /** Resizes the matrix to \a rows x \a cols and sets it to zero. */
    void resize(Index rows, Index cols)
    {
      const Index outerSize = IsRowMajor ? rows : cols;
      m_innerSize = IsRowMajor ? cols : rows;
      m_data.clear();
      if (m_outerSize != outerSize || m_outerSize==0)
      {
        std::free(m_outerIndex);
        m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
        if (!m_outerIndex) internal::throw_std_bad_alloc();
        m_outerSize = outerSize;
      }
      if(m_innerNonZeros)
      {
        std::free(m_innerNonZeros);
        m_innerNonZeros = 0;
      }
      std::fill_n(m_outerIndex, m_outerSize + 1, StorageIndex(0));
    }
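    // Usage sketch (illustrative comment, not part of the original header):
    // resize() discards all stored entries, whereas conservativeResize() keeps
    // the overlapping part. Dimensions m and n below are assumptions.
    //
    //   A.conservativeResize(m + 1, n);   // existing entries are preserved
    //   A.resize(m, n);                   // A is now empty (all zeros)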
    /** Default constructor yielding an empty \c 0 \c x \c 0 matrix */
    inline SparseMatrix()
      : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      resize(0, 0);
    }

    /** Constructs a \a rows \c x \a cols empty matrix */
    inline SparseMatrix(Index rows, Index cols)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      resize(rows, cols);
    }

    /** Constructs a sparse matrix from the sparse expression \a other */
    template<typename OtherDerived>
    inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
      #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
        EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
      #endif
      internal::call_assignment_no_alias(*this, other.derived());
    }

    /** Constructs a sparse matrix from the sparse selfadjoint view \a other */
    template<typename OtherDerived, unsigned int UpLo>
    inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      Base::operator=(other);
    }
    /** Copy constructor (it performs a deep copy) */
    inline SparseMatrix(const SparseMatrix& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      *this = other.derived();
    }

    /** \brief Copy constructor with in-place evaluation */
    template<typename OtherDerived>
    SparseMatrix(const ReturnByValue<OtherDerived>& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      initAssignment(other);
      other.evalTo(*this);
    }

    /** \brief Copy constructor with in-place evaluation */
    template<typename OtherDerived>
    explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      *this = other.derived();
    }

    /** Swaps the content of two sparse matrices of the same type (fast pointer swap). */
    inline void swap(SparseMatrix& other)
    {
      std::swap(m_outerIndex, other.m_outerIndex);
      std::swap(m_innerSize, other.m_innerSize);
      std::swap(m_outerSize, other.m_outerSize);
      std::swap(m_innerNonZeros, other.m_innerNonZeros);
      m_data.swap(other.m_data);
    }
    /** Sets *this to the identity matrix (square matrices only). */
    inline void setIdentity()
    {
      eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
      this->m_data.resize(rows());
      // ... (index and outer buffers are filled with 0..n, values with ones)
      std::free(m_innerNonZeros);
      m_innerNonZeros = 0;
    }

    inline SparseMatrix& operator=(const SparseMatrix& other)
    {
      if (other.isRValue())
      {
        swap(other.const_cast_derived());
      }
      else if(this!=&other)
      {
        #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
          EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
        #endif
        initAssignment(other);
        if(other.isCompressed())
        {
          internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
          m_data = other.m_data;
        }
        else
        {
          Base::operator=(other);
        }
      }
      return *this;
    }
#ifndef EIGEN_PARSED_BY_DOXYGEN
    template<typename OtherDerived>
    inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
    { return Base::operator=(other.derived()); }

    template<typename Lhs, typename Rhs>
    inline SparseMatrix& operator=(const Product<Lhs,Rhs,AliasFreeProduct>& other);
#endif // EIGEN_PARSED_BY_DOXYGEN

    template<typename OtherDerived>
    EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
    friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
    {
      EIGEN_DBG_SPARSE(
        s << "Nonzero entries:\n";
        if(m.isCompressed())
        {
          for (Index i=0; i<m.nonZeros(); ++i)
            s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
        }
        else
        {
          for (Index i=0; i<m.outerSize(); ++i)
          {
            Index p = m.m_outerIndex[i];
            Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
            Index k=p;
            for (; k<pe; ++k) {
              s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
            }
            for (; k<m.m_outerIndex[i+1]; ++k) {
              s << "(_,_) ";
            }
          }
        }
        s << std::endl;
        s << std::endl;
        s << "Outer pointers:\n";
        for (Index i=0; i<m.outerSize(); ++i) {
          s << m.m_outerIndex[i] << " ";
        }
        s << " $" << std::endl;
        if(!m.isCompressed())
        {
          s << "Inner non zeros:\n";
          for (Index i=0; i<m.outerSize(); ++i) {
            s << m.m_innerNonZeros[i] << " ";
          }
          s << " $" << std::endl;
        }
        s << std::endl;
      );
      s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
      return s;
    }
    inline ~SparseMatrix()
    {
      std::free(m_outerIndex);
      std::free(m_innerNonZeros);
    }

#   ifdef EIGEN_SPARSEMATRIX_PLUGIN
#     include EIGEN_SPARSEMATRIX_PLUGIN
#   endif

  protected:
    template<typename Other>
    void initAssignment(const Other& other)
    {
      resize(other.rows(), other.cols());
      if(m_innerNonZeros)
      {
        std::free(m_innerNonZeros);
        m_innerNonZeros = 0;
      }
    }
    /** \internal \sa insert(Index,Index) */
    EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);

    /** \internal a vector object equal to 0 everywhere but \c v at position \c i */
    class SingletonVector
    {
        StorageIndex m_index;
        StorageIndex m_value;
      public:
        typedef StorageIndex value_type;
        SingletonVector(Index i, Index v)
          : m_index(convert_index(i)), m_value(convert_index(v))
        {}

        StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }
    };

    /** \internal \sa insert(Index,Index) */
    EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);

  public:
    /** \internal \sa insert(Index,Index) */
    EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
    {
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      eigen_assert(!isCompressed());
      eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));

      Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
      m_data.index(p) = convert_index(inner);
      return (m_data.value(p) = Scalar(0));
    }

  private:
    struct IndexPosPair {
      IndexPosPair(Index a_i, Index a_p) : i(a_i), p(a_p) {}
      Index i;
      Index p;
    };
    /** \internal Assigns \a diagXpr to the diagonal of \c *this.
      * If *this is overwritten or empty, the diagonal is handled as a dense vector;
      * otherwise existing coefficients are updated in place and the remaining
      * insertions are deferred to a second pass over a fresh buffer. */
    template<typename DiagXpr, typename Func>
    void assignDiagonal(const DiagXpr diagXpr, const Func& assignFunc)
    {
      Index n = diagXpr.size();

      const bool overwrite = internal::is_same<Func, internal::assign_op<Scalar,Scalar> >::value;
      if(overwrite)
      {
        if((this->rows()!=n) || (this->cols()!=n))
          this->resize(n, n);
      }

      if(m_data.size()==0 || overwrite)
      {
        // treat the diagonal as a dense vector of size n stored at positions (i,i)
        typedef Array<StorageIndex,Dynamic,1> ArrayXI;
        this->makeCompressed();
        this->resizeNonZeros(n);
        Eigen::Map<ArrayXI>(this->innerIndexPtr(), n).setLinSpaced(0,StorageIndex(n)-1);
        Eigen::Map<ArrayXI>(this->outerIndexPtr(), n+1).setLinSpaced(0,StorageIndex(n));
        Eigen::Map<Array<Scalar,Dynamic,1> > values(this->valuePtr(), n);
        values.setZero();
        internal::call_assignment_no_alias(values, diagXpr, assignFunc);
      }
      else
      {
        bool isComp = isCompressed();
        internal::evaluator<DiagXpr> diaEval(diagXpr);
        std::vector<IndexPosPair> newEntries;
        // pass 1: update in place what we can, record the deferred insertions
        for(Index i = 0; i<n; ++i)
        {
          internal::LowerBoundIndex lb = this->lower_bound(i,i);
          Index p = lb.value;
          if(lb.found)
          {
            // the entry already exists: update it
            assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
          }
          else if((!isComp) && m_innerNonZeros[i] < (m_outerIndex[i+1]-m_outerIndex[i]))
          {
            // uncompressed mode with local room: shift the tail and insert in place
            m_data.moveChunk(p, p+1, m_outerIndex[i]+m_innerNonZeros[i]-p);
            m_innerNonZeros[i]++;
            m_data.value(p) = Scalar(0);
            m_data.index(p) = StorageIndex(i);
            assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
          }
          else
          {
            // no room: defer the insertion to the second pass
            newEntries.push_back(IndexPosPair(i,p));
          }
        }
        // pass 2: copy into a larger buffer while inserting the deferred entries
        Index n_entries = Index(newEntries.size());
        if(n_entries>0)
        {
          Storage newData(m_data.size()+n_entries);
          Index prev_p = 0;
          Index prev_i = 0;
          for(Index k=0; k<n_entries;++k)
          {
            Index i = newEntries[k].i;
            Index p = newEntries[k].p;
            internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+p, newData.valuePtr()+prev_p+k);
            internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+p, newData.indexPtr()+prev_p+k);
            for(Index j=prev_i;j<i;++j)
              m_outerIndex[j+1] += k;
            if(!isComp)
              m_innerNonZeros[i]++;
            prev_p = p;
            prev_i = i;
            newData.value(p+k) = Scalar(0);
            newData.index(p+k) = StorageIndex(i);
            assignFunc.assignCoeff(newData.value(p+k), diaEval.coeff(i));
          }
          // copy the remaining tail and shift the outer indices
          internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+m_data.size(), newData.valuePtr()+prev_p+n_entries);
          internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+m_data.size(), newData.indexPtr()+prev_p+n_entries);
          for(Index j=prev_i+1;j<=m_outerSize;++j)
            m_outerIndex[j] += n_entries;
          m_data.swap(newData);
        }
      }
    }
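    // Usage sketch (illustrative comment, not part of the original header):
    // assignDiagonal() is the internal backend of expressions such as the
    // following, where the vector name d is an assumption:
    //
    //   Eigen::VectorXd d = Eigen::VectorXd::Ones(A.rows());
    //   A.diagonal() = d;    // overwrites the diagonal, inserting missing entries
    //   A.diagonal() += d;   // in-place update, with deferred insertions if needed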
  private:
    static void check_template_parameters()
    {
      EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE)
      EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS)
    }

    struct default_prunning_func {
      default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
      inline bool operator() (const Index&, const Index&, const Scalar& value) const
      {
        return !internal::isMuchSmallerThan(value, reference, epsilon);
      }
      Scalar reference;
      RealScalar epsilon;
    };
};
namespace internal {

template<typename InputIterator, typename SparseMatrixType, typename DupFunctor>
void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func)
{
  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
  typedef typename SparseMatrixType::Scalar Scalar;
  typedef typename SparseMatrixType::StorageIndex StorageIndex;
  // work on a matrix with the opposite storage order, so that the final
  // assignment performs a sorting, transposed copy
  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());

  if(begin!=end)
  {
    // pass 1: count the number of entries per inner vector
    typename SparseMatrixType::IndexVector wi(trMat.outerSize());
    wi.setZero();
    for(InputIterator it(begin); it!=end; ++it)
    {
      eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
      wi(IsRowMajor ? it->col() : it->row())++;
    }

    // pass 2: insert all the elements into trMat
    trMat.reserve(wi);
    for(InputIterator it(begin); it!=end; ++it)
      trMat.insertBackUncompressed(it->row(),it->col()) = it->value();

    // pass 3: merge duplicated entries
    trMat.collapseDuplicates(dup_func);
  }

  // pass 4: sorted, transposed copy into the destination
  mat = trMat;
}

} // namespace internal
/** Fills \c *this with the triplets defined by the iterator range \a begin - \a end;
  * duplicated entries are summed. */
template<typename Scalar, int Options_, typename StorageIndex_>
template<typename InputIterators>
void SparseMatrix<Scalar,Options_,StorageIndex_>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
{
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,Options_,StorageIndex_> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
}
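// Usage sketch (illustrative, not part of the original header): the triplet
// filling pattern this overload implements; names and sizes are assumptions.
//
//   std::vector<Eigen::Triplet<double> > triplets;
//   triplets.push_back(Eigen::Triplet<double>(0, 1,  3.0));
//   triplets.push_back(Eigen::Triplet<double>(2, 0, -1.5));
//   triplets.push_back(Eigen::Triplet<double>(2, 0,  0.5));  // duplicate: summed
//   Eigen::SparseMatrix<double> A(3, 3);
//   A.setFromTriplets(triplets.begin(), triplets.end());     // A.coeff(2,0) == -1.0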
/** Same as setFromTriplets(const InputIterators&, const InputIterators&), except that
  * duplicated entries are combined with the functor \a dup_func. */
template<typename Scalar, int Options_, typename StorageIndex_>
template<typename InputIterators, typename DupFunctor>
void SparseMatrix<Scalar,Options_,StorageIndex_>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
{
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,Options_,StorageIndex_>, DupFunctor>(begin, end, *this, dup_func);
}
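// Usage sketch (illustrative, not part of the original header): duplicated
// triplets can be combined with a custom functor instead of being summed, for
// example keeping the maximum (the lambda below is an assumption):
//
//   A.setFromTriplets(triplets.begin(), triplets.end(),
//                     [](const double& a, const double& b) { return std::max(a, b); });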
/** \internal Merges duplicated entries; assumes the matrix is in uncompressed mode. */
template<typename Scalar, int Options_, typename StorageIndex_>
template<typename DupFunctor>
void SparseMatrix<Scalar,Options_,StorageIndex_>::collapseDuplicates(DupFunctor dup_func)
{
  eigen_assert(!isCompressed());
  // wi(inner) caches the position of the first occurrence of "inner" in the current inner vector
  IndexVector wi(innerSize());
  wi.fill(-1);
  StorageIndex count = 0;
  for(Index j=0; j<outerSize(); ++j)
  {
    StorageIndex start = count;
    Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
    for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
    {
      Index i = m_data.index(k);
      if(wi(i)>=start)
      {
        // this entry was already met: accumulate it with dup_func
        m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
      }
      else
      {
        m_data.value(count) = m_data.value(k);
        m_data.index(count) = m_data.index(k);
        wi(i) = count;
        ++count;
      }
    }
    m_outerIndex[j] = start;
  }
  m_outerIndex[m_outerSize] = count;

  // turn the matrix into compressed form
  std::free(m_innerNonZeros);
  m_innerNonZeros = 0;
  m_data.resize(m_outerIndex[m_outerSize]);
}
template<typename Scalar, int Options_, typename StorageIndex_>
template<typename OtherDerived>
EIGEN_DONT_INLINE SparseMatrix<Scalar,Options_,StorageIndex_>& SparseMatrix<Scalar,Options_,StorageIndex_>::operator=(const SparseMatrixBase<OtherDerived>& other)
{
  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)

  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
    EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
  #endif

  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
  if (needToTranspose)
  {
    #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
      EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
    #endif
    // two-pass algorithm: 1) count the entries per destination inner vector,
    // 2) perform the actual copy; the source is evaluated once if needed
    typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
    typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
    typedef internal::evaluator<_OtherCopy> OtherCopyEval;
    OtherCopy otherCopy(other.derived());
    OtherCopyEval otherCopyEval(otherCopy);

    SparseMatrix dest(other.rows(),other.cols());

    // pass 1: count
    for (Index j=0; j<otherCopy.outerSize(); ++j)
      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
        ++dest.m_outerIndex[it.index()];

    // prefix sum
    StorageIndex count = 0;
    IndexVector positions(dest.outerSize());
    for (Index j=0; j<dest.outerSize(); ++j)
    {
      StorageIndex tmp = dest.m_outerIndex[j];
      dest.m_outerIndex[j] = count;
      positions[j] = count;
      count += tmp;
    }
    dest.m_outerIndex[dest.outerSize()] = count;

    // pass 2: allocate and copy
    dest.m_data.resize(count);
    for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
    {
      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
      {
        Index pos = positions[it.index()]++;
        dest.m_data.index(pos) = j;
        dest.m_data.value(pos) = it.value();
      }
    }
    this->swap(dest);
    return *this;
  }
  else
  {
    if(other.isRValue())
    {
      initAssignment(other.derived());
    }
    // there is no special optimization
    return Base::operator=(other.derived());
  }
}
template<typename Scalar_, int Options_, typename StorageIndex_>
typename SparseMatrix<Scalar_,Options_,StorageIndex_>::Scalar& SparseMatrix<Scalar_,Options_,StorageIndex_>::insert(Index row, Index col)
{
  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

  if(isCompressed())
  {
    if(nonZeros()==0)
    {
      // reserve space if not already done
      if(m_data.allocatedSize()==0)
        m_data.reserve(2*m_innerSize);

      // turn the matrix into non-compressed mode
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      if(!m_innerNonZeros) internal::throw_std_bad_alloc();

      std::fill(m_innerNonZeros, m_innerNonZeros + m_outerSize, StorageIndex(0));

      // pack all inner vectors to the end of the pre-allocated space
      // and allocate the entire free space to the first inner vector
      StorageIndex end = convert_index(m_data.allocatedSize());
      for(Index j=1; j<=m_outerSize; ++j)
        m_outerIndex[j] = end;
    }
    else
    {
      // turn the matrix into non-compressed mode
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      if(!m_innerNonZeros) internal::throw_std_bad_alloc();
      for(Index j=0; j<m_outerSize; ++j)
        m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
    }
  }

  // check whether we can do a fast "push back" insertion
  Index data_end = m_data.allocatedSize();

  // first case: we are filling a new inner vector which is packed at the end;
  // all remaining inner vectors are assumed empty and packed to the end too
  if(m_outerIndex[outer]==data_end)
  {
    eigen_internal_assert(m_innerNonZeros[outer]==0);

    // pack previous empty inner vectors to the end of the used space and
    // allocate the entire free space to the current inner vector
    StorageIndex p = convert_index(m_data.size());
    Index j = outer;
    while(j>=0 && m_innerNonZeros[j]==0)
      m_outerIndex[j--] = p;

    // push back the new element
    ++m_innerNonZeros[outer];
    m_data.append(Scalar(0), inner);

    // check for reallocation
    if(data_end != m_data.allocatedSize())
    {
      // m_data has been reallocated: move the remaining empty inner vectors
      // back to the end of the new free space
      eigen_internal_assert(data_end < m_data.allocatedSize());
      StorageIndex new_end = convert_index(m_data.allocatedSize());
      for(Index k=outer+1; k<=m_outerSize; ++k)
        if(m_outerIndex[k]==data_end)
          m_outerIndex[k] = new_end;
    }
    return m_data.value(p);
  }

  // second case: the next inner vector is packed to the end and the current
  // inner vector ends at the used space
  if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
  {
    eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);

    // push back the new element
    ++m_innerNonZeros[outer];
    m_data.resize(m_data.size()+1);

    // check for reallocation
    if(data_end != m_data.allocatedSize())
    {
      eigen_internal_assert(data_end < m_data.allocatedSize());
      StorageIndex new_end = convert_index(m_data.allocatedSize());
      for(Index k=outer+1; k<=m_outerSize; ++k)
        if(m_outerIndex[k]==data_end)
          m_outerIndex[k] = new_end;
    }

    // and insert it at the right position (sorted insertion)
    Index startId = m_outerIndex[outer];
    Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
    while ( (p > startId) && (m_data.index(p-1) > inner) )
    {
      m_data.index(p) = m_data.index(p-1);
      m_data.value(p) = m_data.value(p-1);
      --p;
    }

    m_data.index(p) = convert_index(inner);
    return (m_data.value(p) = Scalar(0));
  }

  if(m_data.size() != m_data.allocatedSize())
  {
    // make sure the matrix is compatible with random un-compressed insertion
    m_data.resize(m_data.allocatedSize());
    this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2));
  }

  return insertUncompressed(row,col);
}
template<typename Scalar_, int Options_, typename StorageIndex_>
EIGEN_DONT_INLINE typename SparseMatrix<Scalar_,Options_,StorageIndex_>::Scalar& SparseMatrix<Scalar_,Options_,StorageIndex_>::insertUncompressed(Index row, Index col)
{
  eigen_assert(!isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const StorageIndex inner = convert_index(IsRowMajor ? col : row);

  Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
  StorageIndex innerNNZ = m_innerNonZeros[outer];
  if(innerNNZ>=room)
  {
    // this inner vector is full: reallocate the whole buffer
    reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
  }

  Index startId = m_outerIndex[outer];
  Index p = startId + m_innerNonZeros[outer];
  while ( (p > startId) && (m_data.index(p-1) > inner) )
  {
    m_data.index(p) = m_data.index(p-1);
    m_data.value(p) = m_data.value(p-1);
    --p;
  }
  eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");

  m_innerNonZeros[outer]++;

  m_data.index(p) = inner;
  return (m_data.value(p) = Scalar(0));
}
template<typename Scalar_, int Options_, typename StorageIndex_>
EIGEN_DONT_INLINE typename SparseMatrix<Scalar_,Options_,StorageIndex_>::Scalar& SparseMatrix<Scalar_,Options_,StorageIndex_>::insertCompressed(Index row, Index col)
{
  eigen_assert(isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

  Index previousOuter = outer;
  if (m_outerIndex[outer+1]==0)
  {
    // we start a new inner vector
    while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
    {
      m_outerIndex[previousOuter] = convert_index(m_data.size());
      --previousOuter;
    }
    m_outerIndex[outer+1] = m_outerIndex[outer];
  }

  // handle the tricky case where the outer index array starts with
  // [ 0 0 0 0 0 1 ...] and we are inserting in, e.g., the 2nd inner vector
  bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
                && (std::size_t(m_outerIndex[outer+1]) == m_data.size());

  std::size_t startId = m_outerIndex[outer];
  std::size_t p = m_outerIndex[outer+1];
  ++m_outerIndex[outer+1];

  double reallocRatio = 1;
  if (m_data.allocatedSize()<=m_data.size())
  {
    // if there is no preallocated memory, reserve a minimum of 32 elements
    if (m_data.size()==0)
    {
      m_data.reserve(32);
    }
    else
    {
      // reallocate with a growth ratio estimated from the current filling ratio
      // (doubles avoid integer overflow), bounded to [1.5, 8]
      double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
      reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
      reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
    }
  }
  m_data.resize(m_data.size()+1,reallocRatio);

  if (!isLastVec)
  {
    if (previousOuter==-1)
    {
      // wrong guess: correct the outer offsets
      for (Index k=0; k<=(outer+1); ++k)
        m_outerIndex[k] = 0;
      Index k=outer+1;
      while(m_outerIndex[k]==0)
        m_outerIndex[k++] = 1;
      while (k<=m_outerSize && m_outerIndex[k]!=0)
        m_outerIndex[k++]++;
      p = 0;
      --k;
      k = m_outerIndex[k]-1;
      while (k>0)
      {
        m_data.index(k) = m_data.index(k-1);
        m_data.value(k) = m_data.value(k-1);
        k--;
      }
    }
    else
    {
      // not inserting into the last inner vector: update the outer indices
      Index j = outer+2;
      while (j<=m_outerSize && m_outerIndex[j]!=0)
        m_outerIndex[j++]++;
      --j;
      // shift the data of the last vectors
      Index k = m_outerIndex[j]-1;
      while (k>=Index(p))
      {
        m_data.index(k) = m_data.index(k-1);
        m_data.value(k) = m_data.value(k-1);
        k--;
      }
    }
  }

  while ( (p > startId) && (m_data.index(p-1) > inner) )
  {
    m_data.index(p) = m_data.index(p-1);
    m_data.value(p) = m_data.value(p-1);
    --p;
  }

  m_data.index(p) = inner;
  return (m_data.value(p) = Scalar(0));
}
namespace internal {

template<typename Scalar_, int Options_, typename StorageIndex_>
struct evaluator<SparseMatrix<Scalar_,Options_,StorageIndex_> >
  : evaluator<SparseCompressedBase<SparseMatrix<Scalar_,Options_,StorageIndex_> > >
{
  typedef evaluator<SparseCompressedBase<SparseMatrix<Scalar_,Options_,StorageIndex_> > > Base;
  typedef SparseMatrix<Scalar_,Options_,StorageIndex_> SparseMatrixType;
  evaluator() : Base() {}
  explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
};

} // namespace internal

#endif // EIGEN_SPARSEMATRIX_H