Eigen 3.4.90 (git rev a4098ac676528a83cfb73d4d26ce1b42ec05f47c)
SparseVector.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSEVECTOR_H
#define EIGEN_SPARSEVECTOR_H

#include "./InternalHeaderCheck.h"

namespace Eigen {
/** \class SparseVector
  * \ingroup SparseCore_Module
  * \brief a sparse vector class
  *
  * \tparam Scalar_ the scalar type, i.e. the type of the coefficients
  *
  * This class can be extended with the help of the plugin mechanism described on the page
  * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_SPARSEVECTOR_PLUGIN.
  */

namespace internal {
template<typename Scalar_, int Options_, typename StorageIndex_>
struct traits<SparseVector<Scalar_, Options_, StorageIndex_> >
{
  typedef Scalar_ Scalar;
  typedef StorageIndex_ StorageIndex;
  typedef Sparse StorageKind;
  typedef MatrixXpr XprKind;
  enum {
    IsColVector = (Options_ & RowMajorBit) ? 0 : 1,

    RowsAtCompileTime = IsColVector ? Dynamic : 1,
    ColsAtCompileTime = IsColVector ? 1 : Dynamic,
    MaxRowsAtCompileTime = RowsAtCompileTime,
    MaxColsAtCompileTime = ColsAtCompileTime,
    Flags = Options_ | NestByRefBit | LvalueBit | (IsColVector ? 0 : RowMajorBit) | CompressedAccessBit,
    SupportedAccessPatterns = InnerRandomAccessPattern
  };
};

// Sparse-Vector-Assignment kinds:
//  - SVA_Inner:         the source is a vector along its inner dimension, a single inner iteration suffices;
//  - SVA_Outer:         the source is a vector along its outer dimension (inner size 1), one inner iterator per outer index;
//  - SVA_RuntimeSwitch: the source is a general matrix expression, the choice is made at runtime.
enum {
  SVA_RuntimeSwitch,
  SVA_Inner,
  SVA_Outer
};

template< typename Dest, typename Src,
          int AssignmentKind = !bool(Src::IsVectorAtCompileTime) ? SVA_RuntimeSwitch
                             : Src::InnerSizeAtCompileTime==1 ? SVA_Outer
                             : SVA_Inner>
struct sparse_vector_assign_selector;

}
template<typename Scalar_, int Options_, typename StorageIndex_>
class SparseVector
  : public SparseCompressedBase<SparseVector<Scalar_, Options_, StorageIndex_> >
{
    typedef SparseCompressedBase<SparseVector> Base;
    using Base::convert_index;
  public:
    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseVector)
    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=)
    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=)

    typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
    enum { IsColVector = internal::traits<SparseVector>::IsColVector };

    enum {
      Options = Options_
    };

    EIGEN_STRONG_INLINE Index rows() const { return IsColVector ? m_size : 1; }
    EIGEN_STRONG_INLINE Index cols() const { return IsColVector ? 1 : m_size; }
    EIGEN_STRONG_INLINE Index innerSize() const { return m_size; }
    EIGEN_STRONG_INLINE Index outerSize() const { return 1; }

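    // Example (illustrative sketch): a SparseVector is one-dimensional, so one of
    // rows()/cols() is always 1, depending on the storage order selected via Options_:
    //   SparseVector<double> v(10);           // column vector: rows()==10, cols()==1
    //   SparseVector<double, RowMajor> w(10); // row vector:    rows()==1,  cols()==10
    // In both cases innerSize()==10 and outerSize()==1.
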
    EIGEN_STRONG_INLINE const Scalar* valuePtr() const { return m_data.valuePtr(); }
    EIGEN_STRONG_INLINE Scalar* valuePtr() { return m_data.valuePtr(); }

    EIGEN_STRONG_INLINE const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
    EIGEN_STRONG_INLINE StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }

    // A sparse vector is a single compressed inner vector: there is no outer index
    // array and no inner-nonzero array, hence the null pointers below.
    inline const StorageIndex* outerIndexPtr() const { return 0; }
    inline StorageIndex* outerIndexPtr() { return 0; }
    inline const StorageIndex* innerNonZeroPtr() const { return 0; }
    inline StorageIndex* innerNonZeroPtr() { return 0; }

    /** \internal */
    inline Storage& data() { return m_data; }
    /** \internal */
    inline const Storage& data() const { return m_data; }

    inline Scalar coeff(Index row, Index col) const
    {
      eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));
      return coeff(IsColVector ? row : col);
    }
    inline Scalar coeff(Index i) const
    {
      eigen_assert(i>=0 && i<m_size);
      return m_data.at(StorageIndex(i));
    }

    inline Scalar& coeffRef(Index row, Index col)
    {
      eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));
      return coeffRef(IsColVector ? row : col);
    }

    /** \returns a reference to the coefficient value at given index \a i
      * This operation involves a binary search. If the coefficient does not
      * exist yet, a sorted insertion into the value sequence is performed, which
      * might be costly if the number of nonzeros above \a i is large. */
    inline Scalar& coeffRef(Index i)
    {
      eigen_assert(i>=0 && i<m_size);

      return m_data.atWithInsertion(StorageIndex(i));
    }

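    // Example (illustrative sketch): coeffRef() inserts an entry initialized to zero
    // when the index is not present yet, so scattered random writes can be costly:
    //   SparseVector<double> v(100);
    //   v.coeffRef(42) = 1.5;   // inserts the entry (42, 1.5)
    //   v.coeffRef(42) += 0.5;  // updates the existing entry to 2.0
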
  public:

    typedef typename Base::InnerIterator InnerIterator;
    typedef typename Base::ReverseInnerIterator ReverseInnerIterator;

    inline void setZero() { m_data.clear(); }

    /** \returns the number of non zero coefficients */
    inline Index nonZeros() const { return m_data.size(); }

    inline void startVec(Index outer)
    {
      EIGEN_UNUSED_VARIABLE(outer);
      eigen_assert(outer==0);
    }

    inline Scalar& insertBackByOuterInner(Index outer, Index inner)
    {
      EIGEN_UNUSED_VARIABLE(outer);
      eigen_assert(outer==0);
      return insertBack(inner);
    }
    // Appends a zero-initialized entry at index i; assumes i is larger than any stored index.
    inline Scalar& insertBack(Index i)
    {
      m_data.append(0, i);
      return m_data.value(m_data.size()-1);
    }

    Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
    {
      EIGEN_UNUSED_VARIABLE(outer);
      eigen_assert(outer==0);
      return insertBackUnordered(inner);
    }
    inline Scalar& insertBackUnordered(Index i)
    {
      m_data.append(0, i);
      return m_data.value(m_data.size()-1);
    }

    inline Scalar& insert(Index row, Index col)
    {
      eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));

      Index inner = IsColVector ? row : col;
      Index outer = IsColVector ? col : row;
      EIGEN_ONLY_USED_FOR_DEBUG(outer);
      eigen_assert(outer==0);
      return insert(inner);
    }
    // Inserts a zero-initialized entry at index i, shifting larger indices to keep the
    // storage sorted, and returns a reference to its value.
    Scalar& insert(Index i)
    {
      eigen_assert(i>=0 && i<m_size);

      Index startId = 0;
      Index p = Index(m_data.size()) - 1;
      // TODO smart realloc
      m_data.resize(p+2,1);

      while ( (p >= startId) && (m_data.index(p) > i) )
      {
        m_data.index(p+1) = m_data.index(p);
        m_data.value(p+1) = m_data.value(p);
        --p;
      }
      m_data.index(p+1) = convert_index(i);
      m_data.value(p+1) = 0;
      return m_data.value(p+1);
    }

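    // Example (illustrative sketch): a typical filling pattern is to reserve storage and
    // then use insert(); entries may arrive in any order, insert() keeps the storage
    // sorted (insertBack() is cheaper but requires increasing indices):
    //   SparseVector<double> v(1000);
    //   v.reserve(3);
    //   v.insert(10)  = 1.0;
    //   v.insert(500) = 2.0;
    //   v.insert(3)   = 3.0;   // existing entries are shifted to keep sorted order
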
    /** Preallocates \a reserveSize non zeros */
    inline void reserve(Index reserveSize) { m_data.reserve(reserveSize); }


    inline void finalize() {}

    /** Suppresses all nonzeros which are much smaller than \a reference under the tolerance \a epsilon */
    Index prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision()) {
      return prune([&](const Scalar& val){ return !internal::isMuchSmallerThan(val, reference, epsilon); });
    }

    /** Prunes the entries of the vector based on a predicate: an entry is kept if
      * \a keep_predicate returns true for its value.
      * \returns the new number of structural non-zeros. */
    template<class F>
    Index prune(F&& keep_predicate)
    {
      Index k = 0;
      Index n = m_data.size();
      for (Index i = 0; i < n; ++i)
      {
        if (keep_predicate(m_data.value(i)))
        {
          m_data.value(k) = std::move(m_data.value(i));
          m_data.index(k) = m_data.index(i);
          ++k;
        }
      }
      m_data.resize(k);
      return k;
    }

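    // Example (illustrative sketch): drop all entries whose magnitude is below a threshold.
    //   SparseVector<double> v(10);
    //   v.insert(1) = 1e-12;
    //   v.insert(5) = 3.0;
    //   v.prune([](const double& x) { return std::abs(x) > 1e-9; }); // keeps only (5, 3.0)
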
    /** Resizes the sparse vector to \a rows x \a cols
      *
      * This method is provided for compatibility with matrices.
      * For a column vector, \a cols must be equal to 1.
      * For a row vector, \a rows must be equal to 1.
      *
      * \sa resize(Index) */
    void resize(Index rows, Index cols)
    {
      eigen_assert((IsColVector ? cols : rows)==1 && "Outer dimension must equal 1");
      resize(IsColVector ? rows : cols);
    }

    /** Resizes the sparse vector to \a newSize.
      * This method deletes all entries, thus leaving an empty sparse vector.
      *
      * \sa conservativeResize(), setZero() */
    void resize(Index newSize)
    {
      m_size = newSize;
      m_data.clear();
    }

    /** Resizes the sparse vector to \a newSize, while leaving old values untouched.
      *
      * If the size of the vector is decreased, the storage of the out-of-bounds
      * coefficients is kept and reserved. Call .data().squeeze() to free extra memory.
      *
      * \sa reserve(), setZero() */
    void conservativeResize(Index newSize)
    {
      if (newSize < m_size)
      {
        Index i = 0;
        while (i<m_data.size() && m_data.index(i)<newSize) ++i;
        m_data.resize(i);
      }
      m_size = newSize;
    }

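    // Example (illustrative sketch): resize() clears all entries, while
    // conservativeResize() keeps the entries that still fit into the new size.
    //   SparseVector<double> v(10);
    //   v.insert(2) = 1.0;
    //   v.insert(8) = 2.0;
    //   v.conservativeResize(5);  // keeps (2, 1.0), drops (8, 2.0)
    //   v.resize(5);              // removes all remaining entries
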
    void resizeNonZeros(Index size) { m_data.resize(size); }

    inline SparseVector() : m_size(0) { resize(0); }

    explicit inline SparseVector(Index size) : m_size(0) { resize(size); }

    inline SparseVector(Index rows, Index cols) : m_size(0) { resize(rows,cols); }

    template<typename OtherDerived>
    inline SparseVector(const SparseMatrixBase<OtherDerived>& other)
      : m_size(0)
    {
      #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
        EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
      #endif
      *this = other.derived();
    }

    inline SparseVector(const SparseVector& other)
      : Base(other), m_size(0)
    {
      *this = other.derived();
    }

    /** Swaps the values of \c *this and \a other.
      * Overloaded for performance: this version performs a \em shallow swap by swapping pointers and attributes only.
      * \sa SparseMatrixBase::swap() */
    inline void swap(SparseVector& other)
    {
      std::swap(m_size, other.m_size);
      m_data.swap(other.m_data);
    }

    template<int OtherOptions>
    inline void swap(SparseMatrix<Scalar,OtherOptions,StorageIndex>& other)
    {
      eigen_assert(other.outerSize()==1);
      std::swap(m_size, other.m_innerSize);
      m_data.swap(other.m_data);
    }

    inline SparseVector& operator=(const SparseVector& other)
    {
      if (other.isRValue())
      {
        swap(other.const_cast_derived());
      }
      else
      {
        resize(other.size());
        m_data = other.m_data;
      }
      return *this;
    }

    template<typename OtherDerived>
    inline SparseVector& operator=(const SparseMatrixBase<OtherDerived>& other)
    {
      SparseVector tmp(other.size());
      internal::sparse_vector_assign_selector<SparseVector,OtherDerived>::run(tmp,other.derived());
      this->swap(tmp);
      return *this;
    }

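    // Example (illustrative sketch): any sparse expression with compatible dimensions
    // can be assigned to a SparseVector, e.g. a column extracted from a sparse matrix:
    //   SparseMatrix<double> A(5, 5);
    //   A.insert(1, 2) = 7.0;
    //   SparseVector<double> v = A.col(2);  // v contains the single entry (1, 7.0)
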
    #ifndef EIGEN_PARSED_BY_DOXYGEN
    template<typename Lhs, typename Rhs>
    inline SparseVector& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
    {
      return Base::operator=(product);
    }
    #endif

    friend std::ostream & operator << (std::ostream & s, const SparseVector& m)
    {
      for (Index i=0; i<m.nonZeros(); ++i)
        s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
      s << std::endl;
      return s;
    }

    /** Destructor */
    inline ~SparseVector() {}

    /** Overloaded for performance */
    Scalar sum() const;

  public:

    /** \internal \deprecated use setZero() and reserve() */
    EIGEN_DEPRECATED void startFill(Index reserve)
    {
      setZero();
      m_data.reserve(reserve);
    }

    /** \internal \deprecated use insertBack(Index,Index) */
    EIGEN_DEPRECATED Scalar& fill(Index r, Index c)
    {
      eigen_assert(r==0 || c==0);
      return fill(IsColVector ? r : c);
    }

    /** \internal \deprecated use insertBack(Index) */
    EIGEN_DEPRECATED Scalar& fill(Index i)
    {
      m_data.append(0, i);
      return m_data.value(m_data.size()-1);
    }

    /** \internal \deprecated use insert(Index,Index) */
    EIGEN_DEPRECATED Scalar& fillrand(Index r, Index c)
    {
      eigen_assert(r==0 || c==0);
      return fillrand(IsColVector ? r : c);
    }

    /** \internal \deprecated use insert(Index) */
    EIGEN_DEPRECATED Scalar& fillrand(Index i)
    {
      return insert(i);
    }

    /** \internal \deprecated use finalize() */
    EIGEN_DEPRECATED void endFill() {}

    // These two functions were here in the 3.1 release, so let's keep them in case some code relies on them.
    /** \internal \deprecated use data() */
    EIGEN_DEPRECATED Storage& _data() { return m_data; }
    /** \internal \deprecated use data() */
    EIGEN_DEPRECATED const Storage& _data() const { return m_data; }

#   ifdef EIGEN_SPARSEVECTOR_PLUGIN
#     include EIGEN_SPARSEVECTOR_PLUGIN
#   endif

  protected:
    EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE)
    EIGEN_STATIC_ASSERT((Options_&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS)

    Storage m_data;
    Index m_size;
};

namespace internal {

template<typename Scalar_, int Options_, typename Index_>
struct evaluator<SparseVector<Scalar_,Options_,Index_> >
  : evaluator_base<SparseVector<Scalar_,Options_,Index_> >
{
  typedef SparseVector<Scalar_,Options_,Index_> SparseVectorType;
  typedef evaluator_base<SparseVectorType> Base;
  typedef typename SparseVectorType::InnerIterator InnerIterator;
  typedef typename SparseVectorType::ReverseInnerIterator ReverseInnerIterator;

  enum {
    CoeffReadCost = NumTraits<Scalar_>::ReadCost,
    Flags = SparseVectorType::Flags
  };

  evaluator() : Base() {}

  explicit evaluator(const SparseVectorType &mat) : m_matrix(&mat)
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  inline Index nonZerosEstimate() const {
    return m_matrix->nonZeros();
  }

  operator SparseVectorType&() { return m_matrix->const_cast_derived(); }
  operator const SparseVectorType&() const { return *m_matrix; }

  const SparseVectorType *m_matrix;
};

template< typename Dest, typename Src>
struct sparse_vector_assign_selector<Dest,Src,SVA_Inner> {
  static void run(Dest& dst, const Src& src) {
    eigen_internal_assert(src.innerSize()==src.size());
    typedef internal::evaluator<Src> SrcEvaluatorType;
    SrcEvaluatorType srcEval(src);
    for(typename SrcEvaluatorType::InnerIterator it(srcEval, 0); it; ++it)
      dst.insert(it.index()) = it.value();
  }
};

template< typename Dest, typename Src>
struct sparse_vector_assign_selector<Dest,Src,SVA_Outer> {
  static void run(Dest& dst, const Src& src) {
    eigen_internal_assert(src.outerSize()==src.size());
    typedef internal::evaluator<Src> SrcEvaluatorType;
    SrcEvaluatorType srcEval(src);
    for(Index i=0; i<src.size(); ++i)
    {
      typename SrcEvaluatorType::InnerIterator it(srcEval, i);
      if(it)
        dst.insert(i) = it.value();
    }
  }
};

template< typename Dest, typename Src>
struct sparse_vector_assign_selector<Dest,Src,SVA_RuntimeSwitch> {
  static void run(Dest& dst, const Src& src) {
    if(src.outerSize()==1) sparse_vector_assign_selector<Dest,Src,SVA_Inner>::run(dst, src);
    else sparse_vector_assign_selector<Dest,Src,SVA_Outer>::run(dst, src);
  }
};

}

} // end namespace Eigen

#endif // EIGEN_SPARSEVECTOR_H