This bugzilla service is closed. All entries have been migrated to https://gitlab.com/libeigen/eigen

a/Eigen/src/Core/ArrayBase.h (-2 / +2 lines)
Lines 171-200 template<typename Derived> class ArrayBa
   *
   * \returns a reference to \c *this
   */
 template<typename Derived>
 template<typename OtherDerived>
 EIGEN_STRONG_INLINE Derived &
 ArrayBase<Derived>::operator-=(const ArrayBase<OtherDerived> &other)
 {
-  call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar>());
+  call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>());
   return derived();
 }
 
 /** replaces \c *this by \c *this + \a other.
   *
   * \returns a reference to \c *this
   */
 template<typename Derived>
 template<typename OtherDerived>
 EIGEN_STRONG_INLINE Derived &
 ArrayBase<Derived>::operator+=(const ArrayBase<OtherDerived>& other)
 {
-  call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar>());
+  call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>());
   return derived();
 }
 
 /** replaces \c *this by \c *this * \a other coefficient wise.
   *
   * \returns a reference to \c *this
   */
 template<typename Derived>
a/Eigen/src/Core/AssignEvaluator.h (-6 / +6 lines)
Lines 682-698 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE vo
   Kernel kernel(dstEvaluator, srcEvaluator, func, dst.const_cast_derived());
   
   dense_assignment_loop<Kernel>::run(kernel);
 }
 
 template<typename DstXprType, typename SrcXprType>
 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_dense_assignment_loop(const DstXprType& dst, const SrcXprType& src)
 {
-  call_dense_assignment_loop(dst, src, internal::assign_op<typename DstXprType::Scalar>());
+  call_dense_assignment_loop(dst, src, internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>());
 }
 
 /***************************************************************************
 * Part 6 : Generic assignment
 ***************************************************************************/
 
 // Based on the respective shapes of the destination and source,
 // the class AssignmentKind determine the kind of assignment mechanism.
Lines 717-739 struct Assignment;
 // Indeed, I (Gael) think that this concept of "assume-aliasing" was a mistake, and it makes thing quite complicated.
 // So this intermediate function removes everything related to "assume-aliasing" such that Assignment
 // does not has to bother about these annoying details.
 
 template<typename Dst, typename Src>
 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
 void call_assignment(Dst& dst, const Src& src)
 {
-  call_assignment(dst, src, internal::assign_op<typename Dst::Scalar>());
+  call_assignment(dst, src, internal::assign_op<typename Dst::Scalar,typename Src::Scalar>());
 }
 template<typename Dst, typename Src>
 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
 void call_assignment(const Dst& dst, const Src& src)
 {
-  call_assignment(dst, src, internal::assign_op<typename Dst::Scalar>());
+  call_assignment(dst, src, internal::assign_op<typename Dst::Scalar,typename Src::Scalar>());
 }
                      
 // Deal with "assume-aliasing"
 template<typename Dst, typename Src, typename Func>
 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
 void call_assignment(Dst& dst, const Src& src, const Func& func, typename enable_if< evaluator_assume_aliasing<Src>::value, void*>::type = 0)
 {
   typename plain_matrix_type<Src>::type tmp(src);
Lines 782-798 void call_assignment_no_alias(Dst& dst,
   EIGEN_CHECK_BINARY_COMPATIBILIY(Func,typename ActualDstTypeCleaned::Scalar,typename Src::Scalar);
   
   Assignment<ActualDstTypeCleaned,Src,Func>::run(actualDst, src, func);
 }
 template<typename Dst, typename Src>
 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
 void call_assignment_no_alias(Dst& dst, const Src& src)
 {
-  call_assignment_no_alias(dst, src, internal::assign_op<typename Dst::Scalar>());
+  call_assignment_no_alias(dst, src, internal::assign_op<typename Dst::Scalar,typename Src::Scalar>());
 }
 
 template<typename Dst, typename Src, typename Func>
 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
 void call_assignment_no_alias_no_transpose(Dst& dst, const Src& src, const Func& func)
 {
   Index dstRows = src.rows();
   Index dstCols = src.cols();
Lines 804-820 void call_assignment_no_alias_no_transpo
   EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Dst,Src)
   
   Assignment<Dst,Src,Func>::run(dst, src, func);
 }
 template<typename Dst, typename Src>
 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
 void call_assignment_no_alias_no_transpose(Dst& dst, const Src& src)
 {
-  call_assignment_no_alias_no_transpose(dst, src, internal::assign_op<typename Dst::Scalar>());
+  call_assignment_no_alias_no_transpose(dst, src, internal::assign_op<typename Dst::Scalar,typename Src::Scalar>());
 }
 
 // forward declaration
 template<typename Dst, typename Src> void check_for_aliasing(const Dst &dst, const Src &src);
 
 // Generic Dense to Dense assignment
 template< typename DstXprType, typename SrcXprType, typename Functor, typename Scalar>
 struct Assignment<DstXprType, SrcXprType, Functor, Dense2Dense, Scalar>
Lines 833-849 struct Assignment<DstXprType, SrcXprType
 };
 
 // Generic assignment through evalTo.
 // TODO: not sure we have to keep that one, but it helps porting current code to new evaluator mechanism.
 template< typename DstXprType, typename SrcXprType, typename Functor, typename Scalar>
 struct Assignment<DstXprType, SrcXprType, Functor, EigenBase2EigenBase, Scalar>
 {
   EIGEN_DEVICE_FUNC
-  static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar> &/*func*/)
+  static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
   {
     eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());
     src.evalTo(dst);
   }
 };
 
 } // namespace internal
 
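Note: the change applied throughout this file (and mechanically repeated in the rest of the patch) is that every assignment functor now carries the destination and the source scalar separately, e.g. assign_op<DstScalar,SrcScalar> instead of assign_op<Scalar>. As a rough illustration only — not part of the patch, and assuming an Eigen version with the ScalarBinaryOpTraits mechanism (3.3 or later) — this is the user-level distinction the two parameters encode:

// illustration.cpp -- hypothetical example, not part of the patch
#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd d = Eigen::MatrixXd::Zero(2, 2);  // destination scalar: double
  Eigen::MatrixXf f = Eigen::MatrixXf::Ones(2, 2);  // source scalar: float

  // Accepted: the source is cast so that both scalar types agree.
  d += f.cast<double>();

  // A direct "d += f;" is only accepted if the (double,float) pair is declared
  // compatible, which is what EIGEN_CHECK_BINARY_COMPATIBILIY checks via
  // ScalarBinaryOpTraits; otherwise it triggers a static assertion.
  return 0;
}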
a/Eigen/src/Core/CwiseBinaryOp.h (-2 / +2 lines)
Lines 155-183 public:
   *
   * \returns a reference to \c *this
   */
 template<typename Derived>
 template<typename OtherDerived>
 EIGEN_STRONG_INLINE Derived &
 MatrixBase<Derived>::operator-=(const MatrixBase<OtherDerived> &other)
 {
-  call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar>());
+  call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>());
   return derived();
 }
 
 /** replaces \c *this by \c *this + \a other.
   *
   * \returns a reference to \c *this
   */
 template<typename Derived>
 template<typename OtherDerived>
 EIGEN_STRONG_INLINE Derived &
 MatrixBase<Derived>::operator+=(const MatrixBase<OtherDerived>& other)
 {
-  call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar>());
+  call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>());
   return derived();
 }
 
 } // end namespace Eigen
 
 #endif // EIGEN_CWISE_BINARY_OP_H
 
a/Eigen/src/Core/DiagonalMatrix.h (-3 / +3 lines)
Lines 315-340 template<> struct storage_kind_to_shape<
 struct Diagonal2Dense {};
 
 template<> struct AssignmentKind<DenseShape,DiagonalShape> { typedef Diagonal2Dense Kind; };
 
 // Diagonal matrix to Dense assignment
 template< typename DstXprType, typename SrcXprType, typename Functor, typename Scalar>
 struct Assignment<DstXprType, SrcXprType, Functor, Diagonal2Dense, Scalar>
 {
-  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar> &/*func*/)
+  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
   {
     dst.setZero();
     dst.diagonal() = src.diagonal();
   }
   
-  static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar> &/*func*/)
+  static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
   { dst.diagonal() += src.diagonal(); }
   
-  static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar> &/*func*/)
+  static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
   { dst.diagonal() -= src.diagonal(); }
 };
 
 } // namespace internal
 
 } // end namespace Eigen
 
 #endif // EIGEN_DIAGONALMATRIX_H
a/Eigen/src/Core/Dot.h (-5 / +7 lines)
Lines 23-54 template<typename T, typename U,
                 && U::IsVectorAtCompileTime
                 && ((int(T::RowsAtCompileTime) == 1 && int(U::ColsAtCompileTime) == 1)
                       |  // FIXME | instead of || to please GCC 4.4.0 stupid warning "suggest parentheses around &&".
                          // revert to || as soon as not needed anymore.
                     (int(T::ColsAtCompileTime) == 1 && int(U::RowsAtCompileTime) == 1))
 >
 struct dot_nocheck
 {
-  typedef typename scalar_product_traits<typename traits<T>::Scalar,typename traits<U>::Scalar>::ReturnType ResScalar;
+  typedef scalar_conj_product_op<typename traits<T>::Scalar,typename traits<U>::Scalar> conj_prod;
+  typedef typename conj_prod::result_type ResScalar;
   EIGEN_DEVICE_FUNC
   static inline ResScalar run(const MatrixBase<T>& a, const MatrixBase<U>& b)
   {
-    return a.template binaryExpr<scalar_conj_product_op<typename traits<T>::Scalar,typename traits<U>::Scalar> >(b).sum();
+    return a.template binaryExpr<conj_prod>(b).sum();
   }
 };
 
 template<typename T, typename U>
 struct dot_nocheck<T, U, true>
 {
-  typedef typename scalar_product_traits<typename traits<T>::Scalar,typename traits<U>::Scalar>::ReturnType ResScalar;
+  typedef scalar_conj_product_op<typename traits<T>::Scalar,typename traits<U>::Scalar> conj_prod;
+  typedef typename conj_prod::result_type ResScalar;
   EIGEN_DEVICE_FUNC
   static inline ResScalar run(const MatrixBase<T>& a, const MatrixBase<U>& b)
   {
-    return a.transpose().template binaryExpr<scalar_conj_product_op<typename traits<T>::Scalar,typename traits<U>::Scalar> >(b).sum();
+    return a.transpose().template binaryExpr<conj_prod>(b).sum();
   }
 };
 
 } // end namespace internal
 
 /** \returns the dot product of *this with other.
   *
   * \only_for_vectors
Lines 57-73 struct dot_nocheck<T, U, true>
   * (sesquilinear) dot product, conjugate-linear in the first variable and linear in the
   * second variable.
   *
   * \sa squaredNorm(), norm()
   */
 template<typename Derived>
 template<typename OtherDerived>
 EIGEN_DEVICE_FUNC
-typename internal::scalar_product_traits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType
+typename ScalarBinaryOpTraits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType
 MatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
 {
   EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
   EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
   EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
   typedef internal::scalar_conj_product_op<Scalar,typename OtherDerived::Scalar> func;
   EIGEN_CHECK_BINARY_COMPATIBILIY(func,Scalar,typename OtherDerived::Scalar);
 
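Note: here the return type of dot() switches from internal::scalar_product_traits to the public ScalarBinaryOpTraits. A small sketch of the trait in use, assuming the definitions that eventually shipped with Eigen 3.3 (which may differ in detail from this intermediate patch):

// sketch.cpp -- assumes the Eigen 3.3 ScalarBinaryOpTraits definitions
#include <complex>
#include <type_traits>
#include <Eigen/Core>

// Mixed real/complex operands promote to the complex type.
typedef Eigen::ScalarBinaryOpTraits<double, std::complex<double> >::ReturnType Promoted;
static_assert(std::is_same<Promoted, std::complex<double> >::value,
              "real op complex yields complex");

// Identical scalars are returned unchanged.
typedef Eigen::ScalarBinaryOpTraits<float, float>::ReturnType Same;
static_assert(std::is_same<Same, float>::value, "float op float yields float");

int main() { return 0; }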
a/Eigen/src/Core/EigenBase.h (-2 / +2 lines)
Lines 133-155 Derived& DenseBase<Derived>::operator=(c
   call_assignment(derived(), other.derived());
   return derived();
 }
 
 template<typename Derived>
 template<typename OtherDerived>
 Derived& DenseBase<Derived>::operator+=(const EigenBase<OtherDerived> &other)
 {
-  call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar>());
+  call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>());
   return derived();
 }
 
 template<typename Derived>
 template<typename OtherDerived>
 Derived& DenseBase<Derived>::operator-=(const EigenBase<OtherDerived> &other)
 {
-  call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar>());
+  call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>());
   return derived();
 }
 
 } // end namespace Eigen
 
 #endif // EIGEN_EIGENBASE_H
a/Eigen/src/Core/MatrixBase.h (-2 / +2 lines)
Lines 188-204 template<typename Derived> class MatrixB
 
     template<typename DiagonalDerived>
     EIGEN_DEVICE_FUNC
     const Product<Derived, DiagonalDerived, LazyProduct>
     operator*(const DiagonalBase<DiagonalDerived> &diagonal) const;
 
     template<typename OtherDerived>
     EIGEN_DEVICE_FUNC
-    typename internal::scalar_product_traits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType
+    typename ScalarBinaryOpTraits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType
     dot(const MatrixBase<OtherDerived>& other) const;
 
     EIGEN_DEVICE_FUNC RealScalar squaredNorm() const;
     EIGEN_DEVICE_FUNC RealScalar norm() const;
     RealScalar stableNorm() const;
     RealScalar blueNorm() const;
     RealScalar hypotNorm() const;
     EIGEN_DEVICE_FUNC const PlainObject normalized() const;
Lines 376-392 template<typename Derived> class MatrixB
     inline JacobiSVD<PlainObject> jacobiSvd(unsigned int computationOptions = 0) const;
     inline BDCSVD<PlainObject>    bdcSvd(unsigned int computationOptions = 0) const;
 
 /////////// Geometry module ///////////
 
     #ifndef EIGEN_PARSED_BY_DOXYGEN
     /// \internal helper struct to form the return type of the cross product
     template<typename OtherDerived> struct cross_product_return_type {
-      typedef typename internal::scalar_product_traits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType Scalar;
+      typedef typename ScalarBinaryOpTraits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType Scalar;
       typedef Matrix<Scalar,MatrixBase::RowsAtCompileTime,MatrixBase::ColsAtCompileTime> type;
     };
     #endif // EIGEN_PARSED_BY_DOXYGEN
     template<typename OtherDerived>
     EIGEN_DEVICE_FUNC
 #ifndef EIGEN_PARSED_BY_DOXYGEN
     inline typename cross_product_return_type<OtherDerived>::type
 #else
a/Eigen/src/Core/NoAlias.h (-3 / +3 lines)
Lines 34-66 class NoAlias
     typedef typename ExpressionType::Scalar Scalar;
     
     explicit NoAlias(ExpressionType& expression) : m_expression(expression) {}
     
     template<typename OtherDerived>
     EIGEN_DEVICE_FUNC
     EIGEN_STRONG_INLINE ExpressionType& operator=(const StorageBase<OtherDerived>& other)
     {
-      call_assignment_no_alias(m_expression, other.derived(), internal::assign_op<Scalar>());
+      call_assignment_no_alias(m_expression, other.derived(), internal::assign_op<Scalar,typename OtherDerived::Scalar>());
       return m_expression;
     }
     
     template<typename OtherDerived>
     EIGEN_DEVICE_FUNC
     EIGEN_STRONG_INLINE ExpressionType& operator+=(const StorageBase<OtherDerived>& other)
     {
-      call_assignment_no_alias(m_expression, other.derived(), internal::add_assign_op<Scalar>());
+      call_assignment_no_alias(m_expression, other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>());
       return m_expression;
     }
     
     template<typename OtherDerived>
     EIGEN_DEVICE_FUNC
     EIGEN_STRONG_INLINE ExpressionType& operator-=(const StorageBase<OtherDerived>& other)
     {
-      call_assignment_no_alias(m_expression, other.derived(), internal::sub_assign_op<Scalar>());
+      call_assignment_no_alias(m_expression, other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>());
       return m_expression;
     }
 
     EIGEN_DEVICE_FUNC
     ExpressionType& expression() const
     {
       return m_expression;
     }
a/Eigen/src/Core/PlainObjectBase.h (-1 / +1 lines)
Lines 713-729 class PlainObjectBase : public internal:
     EIGEN_DEVICE_FUNC 
     EIGEN_STRONG_INLINE Derived& _set_noalias(const DenseBase<OtherDerived>& other)
     {
       // I don't think we need this resize call since the lazyAssign will anyways resize
       // and lazyAssign will be called by the assign selector.
       //_resize_to_match(other);
       // the 'false' below means to enforce lazy evaluation. We don't use lazyAssign() because
       // it wouldn't allow to copy a row-vector into a column-vector.
-      internal::call_assignment_no_alias(this->derived(), other.derived(), internal::assign_op<Scalar>());
+      internal::call_assignment_no_alias(this->derived(), other.derived(), internal::assign_op<Scalar,typename OtherDerived::Scalar>());
       return this->derived();
     }
 
     template<typename T0, typename T1>
     EIGEN_DEVICE_FUNC
     EIGEN_STRONG_INLINE void _init2(Index rows, Index cols, typename internal::enable_if<Base::SizeAtCompileTime!=2,T0>::type* = 0)
     {
       EIGEN_STATIC_ASSERT(bool(NumTraits<T0>::IsInteger) &&
a/Eigen/src/Core/Product.h (-1 / +2 lines)
Lines 13-33
 namespace Eigen {
 
 template<typename Lhs, typename Rhs, int Option, typename StorageKind> class ProductImpl;
 
 namespace internal {
 
 // Determine the scalar of Product<Lhs, Rhs>. This is normally the same as Lhs::Scalar times
 // Rhs::Scalar, but product with permutation matrices inherit the scalar of the other factor.
+// TODO: this could be removed once ScalarBinaryOpTraits handles void.
 template<typename Lhs, typename Rhs, typename LhsShape = typename evaluator_traits<Lhs>::Shape, 
          typename RhsShape = typename evaluator_traits<Rhs>::Shape >
 struct product_result_scalar
 {
-  typedef typename scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType Scalar;
+  typedef typename ScalarBinaryOpTraits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType Scalar;
 };
 
 template<typename Lhs, typename Rhs, typename RhsShape>
 struct product_result_scalar<Lhs, Rhs, PermutationShape, RhsShape>
 {
   typedef typename Rhs::Scalar Scalar;
 };
 
a/Eigen/src/Core/ProductEvaluators.h (-27 / +28 lines)
Lines 119-168 struct product_evaluator<Product<Lhs, Rh
   }
   
 protected:  
   PlainObject m_result;
 };
 
 // Dense = Product
 template< typename DstXprType, typename Lhs, typename Rhs, int Options, typename Scalar>
-struct Assignment<DstXprType, Product<Lhs,Rhs,Options>, internal::assign_op<Scalar>, Dense2Dense,
+struct Assignment<DstXprType, Product<Lhs,Rhs,Options>, internal::assign_op<Scalar,typename Product<Lhs,Rhs,Options>::Scalar>, Dense2Dense,
   typename enable_if<(Options==DefaultProduct || Options==AliasFreeProduct),Scalar>::type>
 {
   typedef Product<Lhs,Rhs,Options> SrcXprType;
   static EIGEN_STRONG_INLINE
-  void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &)
+  void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename SrcXprType::Scalar> &)
   {
     // FIXME shall we handle nested_eval here?
     generic_product_impl<Lhs, Rhs>::evalTo(dst, src.lhs(), src.rhs());
   }
 };
 
 // Dense += Product
 template< typename DstXprType, typename Lhs, typename Rhs, int Options, typename Scalar>
-struct Assignment<DstXprType, Product<Lhs,Rhs,Options>, internal::add_assign_op<Scalar>, Dense2Dense,
+struct Assignment<DstXprType, Product<Lhs,Rhs,Options>, internal::add_assign_op<Scalar,typename Product<Lhs,Rhs,Options>::Scalar>, Dense2Dense,
   typename enable_if<(Options==DefaultProduct || Options==AliasFreeProduct),Scalar>::type>
 {
   typedef Product<Lhs,Rhs,Options> SrcXprType;
   static EIGEN_STRONG_INLINE
-  void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<Scalar> &)
+  void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<Scalar,typename SrcXprType::Scalar> &)
   {
     // FIXME shall we handle nested_eval here?
     generic_product_impl<Lhs, Rhs>::addTo(dst, src.lhs(), src.rhs());
   }
 };
 
 // Dense -= Product
 template< typename DstXprType, typename Lhs, typename Rhs, int Options, typename Scalar>
-struct Assignment<DstXprType, Product<Lhs,Rhs,Options>, internal::sub_assign_op<Scalar>, Dense2Dense,
+struct Assignment<DstXprType, Product<Lhs,Rhs,Options>, internal::sub_assign_op<Scalar,typename Product<Lhs,Rhs,Options>::Scalar>, Dense2Dense,
   typename enable_if<(Options==DefaultProduct || Options==AliasFreeProduct),Scalar>::type>
 {
   typedef Product<Lhs,Rhs,Options> SrcXprType;
   static EIGEN_STRONG_INLINE
-  void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<Scalar> &)
+  void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<Scalar,typename SrcXprType::Scalar> &)
   {
     // FIXME shall we handle nested_eval here?
     generic_product_impl<Lhs, Rhs>::subTo(dst, src.lhs(), src.rhs());
   }
 };
 
 
 // Dense ?= scalar * Product
Lines 182-228 struct Assignment<DstXprType, CwiseUnary
 };
 
 //----------------------------------------
 // Catch "Dense ?= xpr + Product<>" expression to save one temporary
 // FIXME we could probably enable these rules for any product, i.e., not only Dense and DefaultProduct
 // TODO enable it for "Dense ?= xpr - Product<>" as well.
 
 template<typename OtherXpr, typename Lhs, typename Rhs>
-struct evaluator_assume_aliasing<CwiseBinaryOp<internal::scalar_sum_op<typename OtherXpr::Scalar>, const OtherXpr,
+struct evaluator_assume_aliasing<CwiseBinaryOp<internal::scalar_sum_op<typename OtherXpr::Scalar,typename Product<Lhs,Rhs,DefaultProduct>::Scalar>, const OtherXpr,
                                                const Product<Lhs,Rhs,DefaultProduct> >, DenseShape > {
   static const bool value = true;
 };
 
-template<typename DstXprType, typename OtherXpr, typename ProductType, typename Scalar, typename Func1, typename Func2>
+template<typename DstXprType, typename OtherXpr, typename ProductType, typename Func1, typename Func2>
 struct assignment_from_xpr_plus_product
 {
-  typedef CwiseBinaryOp<internal::scalar_sum_op<Scalar>, const OtherXpr, const ProductType> SrcXprType;
+  typedef CwiseBinaryOp<internal::scalar_sum_op<typename OtherXpr::Scalar,typename ProductType::Scalar>, const OtherXpr, const ProductType> SrcXprType;
+  template<typename InitialFunc>
   static EIGEN_STRONG_INLINE
-  void run(DstXprType &dst, const SrcXprType &src, const Func1& func)
+  void run(DstXprType &dst, const SrcXprType &src, const InitialFunc& /*func*/)
   {
-    call_assignment_no_alias(dst, src.lhs(), func);
+    call_assignment_no_alias(dst, src.lhs(), Func1());
     call_assignment_no_alias(dst, src.rhs(), Func2());
   }
 };
 
-template< typename DstXprType, typename OtherXpr, typename Lhs, typename Rhs, typename Scalar>
-struct Assignment<DstXprType, CwiseBinaryOp<internal::scalar_sum_op<Scalar>, const OtherXpr,
-                                           const Product<Lhs,Rhs,DefaultProduct> >, internal::assign_op<Scalar>, Dense2Dense>
-  : assignment_from_xpr_plus_product<DstXprType, OtherXpr, Product<Lhs,Rhs,DefaultProduct>, Scalar, internal::assign_op<Scalar>, internal::add_assign_op<Scalar> >
+template< typename DstXprType, typename OtherXpr, typename Lhs, typename Rhs, typename DstScalar, typename SrcScalar, typename OtherScalar,typename ProdScalar>
+struct Assignment<DstXprType, CwiseBinaryOp<internal::scalar_sum_op<OtherScalar,ProdScalar>, const OtherXpr,
+                                           const Product<Lhs,Rhs,DefaultProduct> >, internal::assign_op<DstScalar,SrcScalar>, Dense2Dense>
+  : assignment_from_xpr_plus_product<DstXprType, OtherXpr, Product<Lhs,Rhs,DefaultProduct>, internal::assign_op<DstScalar,OtherScalar>, internal::add_assign_op<DstScalar,ProdScalar> >
 {};
-template< typename DstXprType, typename OtherXpr, typename Lhs, typename Rhs, typename Scalar>
-struct Assignment<DstXprType, CwiseBinaryOp<internal::scalar_sum_op<Scalar>, const OtherXpr,
-                                           const Product<Lhs,Rhs,DefaultProduct> >, internal::add_assign_op<Scalar>, Dense2Dense>
-  : assignment_from_xpr_plus_product<DstXprType, OtherXpr, Product<Lhs,Rhs,DefaultProduct>, Scalar, internal::add_assign_op<Scalar>, internal::add_assign_op<Scalar> >
+template< typename DstXprType, typename OtherXpr, typename Lhs, typename Rhs, typename DstScalar, typename SrcScalar, typename OtherScalar,typename ProdScalar>
+struct Assignment<DstXprType, CwiseBinaryOp<internal::scalar_sum_op<OtherScalar,ProdScalar>, const OtherXpr,
+                                           const Product<Lhs,Rhs,DefaultProduct> >, internal::add_assign_op<DstScalar,SrcScalar>, Dense2Dense>
+  : assignment_from_xpr_plus_product<DstXprType, OtherXpr, Product<Lhs,Rhs,DefaultProduct>, internal::add_assign_op<DstScalar,OtherScalar>, internal::add_assign_op<DstScalar,ProdScalar> >
 {};
-template< typename DstXprType, typename OtherXpr, typename Lhs, typename Rhs, typename Scalar>
-struct Assignment<DstXprType, CwiseBinaryOp<internal::scalar_sum_op<Scalar>, const OtherXpr,
-                                           const Product<Lhs,Rhs,DefaultProduct> >, internal::sub_assign_op<Scalar>, Dense2Dense>
-  : assignment_from_xpr_plus_product<DstXprType, OtherXpr, Product<Lhs,Rhs,DefaultProduct>, Scalar, internal::sub_assign_op<Scalar>, internal::sub_assign_op<Scalar> >
+template< typename DstXprType, typename OtherXpr, typename Lhs, typename Rhs, typename DstScalar, typename SrcScalar, typename OtherScalar,typename ProdScalar>
+struct Assignment<DstXprType, CwiseBinaryOp<internal::scalar_sum_op<OtherScalar,ProdScalar>, const OtherXpr,
+                                           const Product<Lhs,Rhs,DefaultProduct> >, internal::sub_assign_op<DstScalar,SrcScalar>, Dense2Dense>
+  : assignment_from_xpr_plus_product<DstXprType, OtherXpr, Product<Lhs,Rhs,DefaultProduct>, internal::sub_assign_op<DstScalar,OtherScalar>, internal::sub_assign_op<DstScalar,ProdScalar> >
 {};
 //----------------------------------------
 
 template<typename Lhs, typename Rhs>
 struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,InnerProduct>
 {
   template<typename Dst>
   static inline void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
Lines 364-394 struct generic_product_impl<Lhs,Rhs,Dens
 {
   typedef typename Product<Lhs,Rhs>::Scalar Scalar;
   
   template<typename Dst>
   static EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
   {
     // Same as: dst.noalias() = lhs.lazyProduct(rhs);
     // but easier on the compiler side
-    call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::assign_op<Scalar>());
+    call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::assign_op<typename Dst::Scalar,Scalar>());
   }
   
   template<typename Dst>
   static EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
   {
     // dst.noalias() += lhs.lazyProduct(rhs);
-    call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::add_assign_op<Scalar>());
+    call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::add_assign_op<typename Dst::Scalar,Scalar>());
   }
   
   template<typename Dst>
   static EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
   {
     // dst.noalias() -= lhs.lazyProduct(rhs);
-    call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::sub_assign_op<Scalar>());
+    call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::sub_assign_op<typename Dst::Scalar,Scalar>());
   }
   
 //   template<typename Dst>
 //   static inline void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
 //   { dst.noalias() += alpha * lhs.lazyProduct(rhs); }
 };
 
 // This specialization enforces the use of a coefficient-based evaluation strategy
Lines 730-746 struct generic_product_impl<Lhs,Rhs,Dens
 /***************************************************************************
 * Diagonal products
 ***************************************************************************/
   
 template<typename MatrixType, typename DiagonalType, typename Derived, int ProductOrder>
 struct diagonal_product_evaluator_base
   : evaluator_base<Derived>
 {
-   typedef typename scalar_product_traits<typename MatrixType::Scalar, typename DiagonalType::Scalar>::ReturnType Scalar;
+   typedef typename ScalarBinaryOpTraits<typename MatrixType::Scalar, typename DiagonalType::Scalar>::ReturnType Scalar;
 public:
   enum {
     CoeffReadCost = NumTraits<Scalar>::MulCost + evaluator<MatrixType>::CoeffReadCost + evaluator<DiagonalType>::CoeffReadCost,
     
     MatrixFlags = evaluator<MatrixType>::Flags,
     DiagFlags = evaluator<DiagonalType>::Flags,
     _StorageOrder = MatrixFlags & RowMajorBit ? RowMajor : ColMajor,
     _ScalarAccessOnDiag =  !((int(_StorageOrder) == ColMajor && int(ProductOrder) == OnTheLeft)
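Note: the comment in the second hunk above states the intent — expressions of the form "Dense ?= xpr + Product<>" are caught so the product is accumulated straight into the destination instead of materialising the sum in a temporary. Illustration only (not part of the patch) of the expression pattern these specializations target:

// pattern.cpp -- illustration of the "Dense ?= xpr + Product<>" rule
#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(3, 3);
  Eigen::MatrixXd B = Eigen::MatrixXd::Random(3, 3);
  Eigen::MatrixXd x = Eigen::MatrixXd::Random(3, 3);
  Eigen::MatrixXd dst(3, 3);

  // Matches assignment_from_xpr_plus_product: per its run(), this is evaluated
  // roughly as "dst = x; dst += A*B;", so the product result lands directly in dst.
  dst.noalias() = x + A * B;
  return 0;
}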
a/Eigen/src/Core/Redux.h (-2 / +2 lines)
Lines 445-476 DenseBase<Derived>::maxCoeff() const
   * \sa trace(), prod(), mean()
   */
 template<typename Derived>
 EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
 DenseBase<Derived>::sum() const
 {
   if(SizeAtCompileTime==0 || (SizeAtCompileTime==Dynamic && size()==0))
     return Scalar(0);
-  return derived().redux(Eigen::internal::scalar_sum_op<Scalar>());
+  return derived().redux(Eigen::internal::scalar_sum_op<Scalar,Scalar>());
 }
 
 /** \returns the mean of all coefficients of *this
 *
 * \sa trace(), prod(), sum()
 */
 template<typename Derived>
 EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
 DenseBase<Derived>::mean() const
 {
 #ifdef __INTEL_COMPILER
   #pragma warning push
   #pragma warning ( disable : 2259 )
 #endif
-  return Scalar(derived().redux(Eigen::internal::scalar_sum_op<Scalar>())) / Scalar(this->size());
+  return Scalar(derived().redux(Eigen::internal::scalar_sum_op<Scalar,Scalar>())) / Scalar(this->size());
 #ifdef __INTEL_COMPILER
   #pragma warning pop
 #endif
 }
 
 /** \returns the product of all coefficients of *this
   *
   * Example: \include MatrixBase_prod.cpp
a/Eigen/src/Core/Ref.h (-1 / +1 lines)
Lines 257-273 template<typename TPlainObjectType, int
     EIGEN_DEVICE_FUNC void construct(const Expression& expr,internal::true_type)
     {
       Base::construct(expr);
     }
 
     template<typename Expression>
     EIGEN_DEVICE_FUNC void construct(const Expression& expr, internal::false_type)
     {
-      internal::call_assignment_no_alias(m_object,expr,internal::assign_op<Scalar>());
+      internal::call_assignment_no_alias(m_object,expr,internal::assign_op<Scalar,Scalar>());
       Base::construct(m_object);
     }
 
   protected:
     TPlainObjectType m_object;
 };
 
 } // end namespace Eigen
a/Eigen/src/Core/SelfCwiseBinaryOp.h (-4 / +6 lines)
Lines 7-49
 // Public License v. 2.0. If a copy of the MPL was not distributed
 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 #ifndef EIGEN_SELFCWISEBINARYOP_H
 #define EIGEN_SELFCWISEBINARYOP_H
 
 namespace Eigen { 
 
+// TODO generalize the scalar type of 'other'
+
 template<typename Derived>
 EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator*=(const Scalar& other)
 {
   typedef typename Derived::PlainObject PlainObject;
-  internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::mul_assign_op<Scalar>());
+  internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::mul_assign_op<Scalar,Scalar>());
   return derived();
 }
 
 template<typename Derived>
 EIGEN_STRONG_INLINE Derived& ArrayBase<Derived>::operator+=(const Scalar& other)
 {
   typedef typename Derived::PlainObject PlainObject;
-  internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::add_assign_op<Scalar>());
+  internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::add_assign_op<Scalar,Scalar>());
   return derived();
 }
 
 template<typename Derived>
 EIGEN_STRONG_INLINE Derived& ArrayBase<Derived>::operator-=(const Scalar& other)
 {
   typedef typename Derived::PlainObject PlainObject;
-  internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::sub_assign_op<Scalar>());
+  internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::sub_assign_op<Scalar,Scalar>());
   return derived();
 }
 
 template<typename Derived>
 EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator/=(const Scalar& other)
 {
   typedef typename Derived::PlainObject PlainObject;
-  internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::div_assign_op<Scalar>());
+  internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::div_assign_op<Scalar,Scalar>());
   return derived();
 }
 
 } // end namespace Eigen
 
 #endif // EIGEN_SELFCWISEBINARYOP_H
(-)a/Eigen/src/Core/Solve.h (-6 / +7 lines)
Lines 129-171 struct evaluator<Solve<Decomposition,Rhs Link Here
129
  
129
  
130
protected:  
130
protected:  
131
  PlainObject m_result;
131
  PlainObject m_result;
132
};
132
};
133
133
134
// Specialization for "dst = dec.solve(rhs)"
134
// Specialization for "dst = dec.solve(rhs)"
135
// NOTE we need to specialize it for Dense2Dense to avoid ambiguous specialization error and a Sparse2Sparse specialization must exist somewhere
135
// NOTE we need to specialize it for Dense2Dense to avoid ambiguous specialization error and a Sparse2Sparse specialization must exist somewhere
136
template<typename DstXprType, typename DecType, typename RhsType, typename Scalar>
136
template<typename DstXprType, typename DecType, typename RhsType, typename Scalar>
137
struct Assignment<DstXprType, Solve<DecType,RhsType>, internal::assign_op<Scalar>, Dense2Dense, Scalar>
137
struct Assignment<DstXprType, Solve<DecType,RhsType>, internal::assign_op<Scalar,Scalar>, Dense2Dense, Scalar>
138
{
138
{
139
  typedef Solve<DecType,RhsType> SrcXprType;
139
  typedef Solve<DecType,RhsType> SrcXprType;
140
  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &)
140
  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
141
  {
141
  {
142
    // FIXME shall we resize dst here?
142
    // FIXME shall we resize dst here?
143
    src.dec()._solve_impl(src.rhs(), dst);
143
    src.dec()._solve_impl(src.rhs(), dst);
144
  }
144
  }
145
};
145
};
146
146
147
// Specialization for "dst = dec.transpose().solve(rhs)"
147
// Specialization for "dst = dec.transpose().solve(rhs)"
148
template<typename DstXprType, typename DecType, typename RhsType, typename Scalar>
148
template<typename DstXprType, typename DecType, typename RhsType, typename Scalar>
149
struct Assignment<DstXprType, Solve<Transpose<const DecType>,RhsType>, internal::assign_op<Scalar>, Dense2Dense, Scalar>
149
struct Assignment<DstXprType, Solve<Transpose<const DecType>,RhsType>, internal::assign_op<Scalar,Scalar>, Dense2Dense, Scalar>
150
{
150
{
151
  typedef Solve<Transpose<const DecType>,RhsType> SrcXprType;
151
  typedef Solve<Transpose<const DecType>,RhsType> SrcXprType;
152
  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &)
152
  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
153
  {
153
  {
154
    src.dec().nestedExpression().template _solve_impl_transposed<false>(src.rhs(), dst);
154
    src.dec().nestedExpression().template _solve_impl_transposed<false>(src.rhs(), dst);
155
  }
155
  }
156
};
156
};
157
157
158
// Specialization for "dst = dec.adjoint().solve(rhs)"
158
// Specialization for "dst = dec.adjoint().solve(rhs)"
159
template<typename DstXprType, typename DecType, typename RhsType, typename Scalar>
159
template<typename DstXprType, typename DecType, typename RhsType, typename Scalar>
160
struct Assignment<DstXprType, Solve<CwiseUnaryOp<internal::scalar_conjugate_op<typename DecType::Scalar>, const Transpose<const DecType> >,RhsType>, internal::assign_op<Scalar>, Dense2Dense, Scalar>
160
struct Assignment<DstXprType, Solve<CwiseUnaryOp<internal::scalar_conjugate_op<typename DecType::Scalar>, const Transpose<const DecType> >,RhsType>,
161
                  internal::assign_op<Scalar,Scalar>, Dense2Dense, Scalar>
161
{
162
{
162
  typedef Solve<CwiseUnaryOp<internal::scalar_conjugate_op<typename DecType::Scalar>, const Transpose<const DecType> >,RhsType> SrcXprType;
163
  typedef Solve<CwiseUnaryOp<internal::scalar_conjugate_op<typename DecType::Scalar>, const Transpose<const DecType> >,RhsType> SrcXprType;
163
  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &)
164
  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
164
  {
165
  {
165
    src.dec().nestedExpression().nestedExpression().template _solve_impl_transposed<true>(src.rhs(), dst);
166
    src.dec().nestedExpression().nestedExpression().template _solve_impl_transposed<true>(src.rhs(), dst);
166
  }
167
  }
167
};
168
};
168
169
169
} // end namespace internal
170
} // end namespace internal
170
171
171
} // end namespace Eigen
172
} // end namespace Eigen
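The three Solve specializations above now key on the two-parameter internal::assign_op<Scalar,Scalar>, so the functor type itself records both the destination and the source scalar, and the Assignment specialization is selected from it. A minimal standalone sketch of that dispatch pattern (the names below are illustrative stand-ins, not Eigen's real internals):

#include <iostream>

// Hypothetical stand-ins for internal::assign_op and internal::Assignment; not Eigen's real definitions.
template<typename DstScalar, typename SrcScalar> struct assign_op {};

template<typename Dst, typename Src, typename Functor>
struct Assignment;  // primary template, intentionally left undefined

// Specialization selected when the functor is assign_op<Scalar,Scalar>,
// mirroring the "dst = dec.solve(rhs)" case in the hunk above.
template<typename Dst, typename Src, typename Scalar>
struct Assignment<Dst, Src, assign_op<Scalar, Scalar> > {
  static void run(Dst& dst, const Src& src, const assign_op<Scalar, Scalar>&) {
    dst = src;  // the real specialization instead forwards to src.dec()._solve_impl(src.rhs(), dst)
  }
};

int main() {
  double dst = 0, src = 3.14;
  Assignment<double, double, assign_op<double, double> >::run(dst, src, assign_op<double, double>());
  std::cout << dst << "\n";  // prints 3.14
}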
(-)a/Eigen/src/Core/TriangularMatrix.h (-10 / +10 lines)
Lines 362-385 template<typename _MatrixType, unsigned Link Here
362
      * \sa DenseCoeffsBase::innerStride() */
362
      * \sa DenseCoeffsBase::innerStride() */
363
    EIGEN_DEVICE_FUNC
363
    EIGEN_DEVICE_FUNC
364
    inline Index innerStride() const { return derived().nestedExpression().innerStride(); }
364
    inline Index innerStride() const { return derived().nestedExpression().innerStride(); }
365
365
366
    /** \sa MatrixBase::operator+=() */
366
    /** \sa MatrixBase::operator+=() */
367
    template<typename Other>
367
    template<typename Other>
368
    EIGEN_DEVICE_FUNC
368
    EIGEN_DEVICE_FUNC
369
    TriangularViewType&  operator+=(const DenseBase<Other>& other) {
369
    TriangularViewType&  operator+=(const DenseBase<Other>& other) {
370
      internal::call_assignment_no_alias(derived(), other.derived(), internal::add_assign_op<Scalar>());
370
      internal::call_assignment_no_alias(derived(), other.derived(), internal::add_assign_op<Scalar,typename Other::Scalar>());
371
      return derived();
371
      return derived();
372
    }
372
    }
373
    /** \sa MatrixBase::operator-=() */
373
    /** \sa MatrixBase::operator-=() */
374
    template<typename Other>
374
    template<typename Other>
375
    EIGEN_DEVICE_FUNC
375
    EIGEN_DEVICE_FUNC
376
    TriangularViewType&  operator-=(const DenseBase<Other>& other) {
376
    TriangularViewType&  operator-=(const DenseBase<Other>& other) {
377
      internal::call_assignment_no_alias(derived(), other.derived(), internal::sub_assign_op<Scalar>());
377
      internal::call_assignment_no_alias(derived(), other.derived(), internal::sub_assign_op<Scalar,typename Other::Scalar>());
378
      return derived();
378
      return derived();
379
    }
379
    }
380
    
380
    
381
    /** \sa MatrixBase::operator*=() */
381
    /** \sa MatrixBase::operator*=() */
382
    EIGEN_DEVICE_FUNC
382
    EIGEN_DEVICE_FUNC
383
    TriangularViewType&  operator*=(const typename internal::traits<MatrixType>::Scalar& other) { return *this = derived().nestedExpression() * other; }
383
    TriangularViewType&  operator*=(const typename internal::traits<MatrixType>::Scalar& other) { return *this = derived().nestedExpression() * other; }
384
    /** \sa DenseBase::operator/=() */
384
    /** \sa DenseBase::operator/=() */
385
    EIGEN_DEVICE_FUNC
385
    EIGEN_DEVICE_FUNC
Lines 547-563 template<typename _MatrixType, unsigned Link Here
547
***************************************************************************/
547
***************************************************************************/
548
548
549
// FIXME should we keep that possibility
549
// FIXME should we keep that possibility
550
template<typename MatrixType, unsigned int Mode>
550
template<typename MatrixType, unsigned int Mode>
551
template<typename OtherDerived>
551
template<typename OtherDerived>
552
inline TriangularView<MatrixType, Mode>&
552
inline TriangularView<MatrixType, Mode>&
553
TriangularViewImpl<MatrixType, Mode, Dense>::operator=(const MatrixBase<OtherDerived>& other)
553
TriangularViewImpl<MatrixType, Mode, Dense>::operator=(const MatrixBase<OtherDerived>& other)
554
{
554
{
555
  internal::call_assignment_no_alias(derived(), other.derived(), internal::assign_op<Scalar>());
555
  internal::call_assignment_no_alias(derived(), other.derived(), internal::assign_op<Scalar,typename OtherDerived::Scalar>());
556
  return derived();
556
  return derived();
557
}
557
}
558
558
559
// FIXME should we keep that possibility
559
// FIXME should we keep that possibility
560
template<typename MatrixType, unsigned int Mode>
560
template<typename MatrixType, unsigned int Mode>
561
template<typename OtherDerived>
561
template<typename OtherDerived>
562
void TriangularViewImpl<MatrixType, Mode, Dense>::lazyAssign(const MatrixBase<OtherDerived>& other)
562
void TriangularViewImpl<MatrixType, Mode, Dense>::lazyAssign(const MatrixBase<OtherDerived>& other)
563
{
563
{
Lines 799-815 void call_triangular_assignment_loop(con Link Here
799
  
799
  
800
  triangular_assignment_loop<Kernel, Mode, unroll ? int(DstXprType::SizeAtCompileTime) : Dynamic, SetOpposite>::run(kernel);
800
  triangular_assignment_loop<Kernel, Mode, unroll ? int(DstXprType::SizeAtCompileTime) : Dynamic, SetOpposite>::run(kernel);
801
}
801
}
802
802
803
template<int Mode, bool SetOpposite, typename DstXprType, typename SrcXprType>
803
template<int Mode, bool SetOpposite, typename DstXprType, typename SrcXprType>
804
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
804
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
805
void call_triangular_assignment_loop(const DstXprType& dst, const SrcXprType& src)
805
void call_triangular_assignment_loop(const DstXprType& dst, const SrcXprType& src)
806
{
806
{
807
  call_triangular_assignment_loop<Mode,SetOpposite>(dst, src, internal::assign_op<typename DstXprType::Scalar>());
807
  call_triangular_assignment_loop<Mode,SetOpposite>(dst, src, internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>());
808
}
808
}
809
809
810
template<> struct AssignmentKind<TriangularShape,TriangularShape> { typedef Triangular2Triangular Kind; };
810
template<> struct AssignmentKind<TriangularShape,TriangularShape> { typedef Triangular2Triangular Kind; };
811
template<> struct AssignmentKind<DenseShape,TriangularShape>      { typedef Triangular2Dense      Kind; };
811
template<> struct AssignmentKind<DenseShape,TriangularShape>      { typedef Triangular2Dense      Kind; };
812
template<> struct AssignmentKind<TriangularShape,DenseShape>      { typedef Dense2Triangular      Kind; };
812
template<> struct AssignmentKind<TriangularShape,DenseShape>      { typedef Dense2Triangular      Kind; };
813
813
814
814
815
template< typename DstXprType, typename SrcXprType, typename Functor, typename Scalar>
815
template< typename DstXprType, typename SrcXprType, typename Functor, typename Scalar>
Lines 928-970 void TriangularBase<Derived>::evalToLazy Link Here
928
  other.derived().resize(this->rows(), this->cols());
928
  other.derived().resize(this->rows(), this->cols());
929
  internal::call_triangular_assignment_loop<Derived::Mode,(Derived::Mode&SelfAdjoint)==0 /* SetOpposite */>(other.derived(), derived().nestedExpression());
929
  internal::call_triangular_assignment_loop<Derived::Mode,(Derived::Mode&SelfAdjoint)==0 /* SetOpposite */>(other.derived(), derived().nestedExpression());
930
}
930
}
931
931
932
namespace internal {
932
namespace internal {
933
  
933
  
934
// Triangular = Product
934
// Triangular = Product
935
template< typename DstXprType, typename Lhs, typename Rhs, typename Scalar>
935
template< typename DstXprType, typename Lhs, typename Rhs, typename Scalar>
936
struct Assignment<DstXprType, Product<Lhs,Rhs,DefaultProduct>, internal::assign_op<Scalar>, Dense2Triangular, Scalar>
936
struct Assignment<DstXprType, Product<Lhs,Rhs,DefaultProduct>, internal::assign_op<Scalar,typename Product<Lhs,Rhs,DefaultProduct>::Scalar>, Dense2Triangular, Scalar>
937
{
937
{
938
  typedef Product<Lhs,Rhs,DefaultProduct> SrcXprType;
938
  typedef Product<Lhs,Rhs,DefaultProduct> SrcXprType;
939
  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &)
939
  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename SrcXprType::Scalar> &)
940
  {
940
  {
941
    dst.setZero();
941
    dst.setZero();
942
    dst._assignProduct(src, 1);
942
    dst._assignProduct(src, 1);
943
  }
943
  }
944
};
944
};
945
945
946
// Triangular += Product
946
// Triangular += Product
947
template< typename DstXprType, typename Lhs, typename Rhs, typename Scalar>
947
template< typename DstXprType, typename Lhs, typename Rhs, typename Scalar>
948
struct Assignment<DstXprType, Product<Lhs,Rhs,DefaultProduct>, internal::add_assign_op<Scalar>, Dense2Triangular, Scalar>
948
struct Assignment<DstXprType, Product<Lhs,Rhs,DefaultProduct>, internal::add_assign_op<Scalar,typename Product<Lhs,Rhs,DefaultProduct>::Scalar>, Dense2Triangular, Scalar>
949
{
949
{
950
  typedef Product<Lhs,Rhs,DefaultProduct> SrcXprType;
950
  typedef Product<Lhs,Rhs,DefaultProduct> SrcXprType;
951
  static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<Scalar> &)
951
  static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<Scalar,typename SrcXprType::Scalar> &)
952
  {
952
  {
953
    dst._assignProduct(src, 1);
953
    dst._assignProduct(src, 1);
954
  }
954
  }
955
};
955
};
956
956
957
// Triangular -= Product
957
// Triangular -= Product
958
template< typename DstXprType, typename Lhs, typename Rhs, typename Scalar>
958
template< typename DstXprType, typename Lhs, typename Rhs, typename Scalar>
959
struct Assignment<DstXprType, Product<Lhs,Rhs,DefaultProduct>, internal::sub_assign_op<Scalar>, Dense2Triangular, Scalar>
959
struct Assignment<DstXprType, Product<Lhs,Rhs,DefaultProduct>, internal::sub_assign_op<Scalar,typename Product<Lhs,Rhs,DefaultProduct>::Scalar>, Dense2Triangular, Scalar>
960
{
960
{
961
  typedef Product<Lhs,Rhs,DefaultProduct> SrcXprType;
961
  typedef Product<Lhs,Rhs,DefaultProduct> SrcXprType;
962
  static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<Scalar> &)
962
  static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<Scalar,typename SrcXprType::Scalar> &)
963
  {
963
  {
964
    dst._assignProduct(src, -1);
964
    dst._assignProduct(src, -1);
965
  }
965
  }
966
};
966
};
967
967
968
} // end namespace internal
968
} // end namespace internal
969
969
970
} // end namespace Eigen
970
} // end namespace Eigen
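User-level code for these triangular-view operators is unchanged by the extra functor argument; only the internal add_assign_op/sub_assign_op/assign_op instantiations gain a second scalar type. A small usage sketch against the public API (shapes and values chosen arbitrarily):

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix3d M = Eigen::Matrix3d::Random();
  Eigen::Matrix3d U = Eigen::Matrix3d::Zero();

  // Dense -> triangular assignment; internally this goes through
  // call_assignment_no_alias with assign_op<Scalar,Scalar>.
  U.triangularView<Eigen::Upper>() = M;

  // Accumulation into the view; routed through add_assign_op / sub_assign_op.
  U.triangularView<Eigen::Upper>() += M;
  U.triangularView<Eigen::Upper>() -= M;

  std::cout << U << "\n";  // upper triangle equals M's upper triangle, the rest stays zero
}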
(-)a/Eigen/src/Core/VectorwiseOp.h (-2 / +2 lines)
Lines 535-564 template<typename ExpressionType, int Di Link Here
535
      EIGEN_STATIC_ASSERT_ARRAYXPR(ExpressionType)
535
      EIGEN_STATIC_ASSERT_ARRAYXPR(ExpressionType)
536
      EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)
536
      EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)
537
      m_matrix /= extendedTo(other.derived());
537
      m_matrix /= extendedTo(other.derived());
538
      return const_cast<ExpressionType&>(m_matrix);
538
      return const_cast<ExpressionType&>(m_matrix);
539
    }
539
    }
540
540
541
    /** Returns the expression of the sum of the vector \a other to each subvector of \c *this */
541
    /** Returns the expression of the sum of the vector \a other to each subvector of \c *this */
542
    template<typename OtherDerived> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
542
    template<typename OtherDerived> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
543
    CwiseBinaryOp<internal::scalar_sum_op<Scalar>,
543
    CwiseBinaryOp<internal::scalar_sum_op<Scalar,typename OtherDerived::Scalar>,
544
                  const ExpressionTypeNestedCleaned,
544
                  const ExpressionTypeNestedCleaned,
545
                  const typename ExtendedType<OtherDerived>::Type>
545
                  const typename ExtendedType<OtherDerived>::Type>
546
    operator+(const DenseBase<OtherDerived>& other) const
546
    operator+(const DenseBase<OtherDerived>& other) const
547
    {
547
    {
548
      EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
548
      EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
549
      EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)
549
      EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)
550
      return m_matrix + extendedTo(other.derived());
550
      return m_matrix + extendedTo(other.derived());
551
    }
551
    }
552
552
553
    /** Returns the expression of the difference between each subvector of \c *this and the vector \a other */
553
    /** Returns the expression of the difference between each subvector of \c *this and the vector \a other */
554
    template<typename OtherDerived>
554
    template<typename OtherDerived>
555
    EIGEN_DEVICE_FUNC
555
    EIGEN_DEVICE_FUNC
556
    CwiseBinaryOp<internal::scalar_difference_op<Scalar>,
556
    CwiseBinaryOp<internal::scalar_difference_op<Scalar,typename OtherDerived::Scalar>,
557
                  const ExpressionTypeNestedCleaned,
557
                  const ExpressionTypeNestedCleaned,
558
                  const typename ExtendedType<OtherDerived>::Type>
558
                  const typename ExtendedType<OtherDerived>::Type>
559
    operator-(const DenseBase<OtherDerived>& other) const
559
    operator-(const DenseBase<OtherDerived>& other) const
560
    {
560
    {
561
      EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
561
      EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
562
      EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)
562
      EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)
563
      return m_matrix - extendedTo(other.derived());
563
      return m_matrix - extendedTo(other.derived());
564
    }
564
    }
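The broadcasting operators above now spell out both scalar types in the returned CwiseBinaryOp; calling code stays the same. A usage sketch (array types and sizes chosen arbitrarily):

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::ArrayXXd A = Eigen::ArrayXXd::Random(4, 3);
  Eigen::ArrayXd  v = Eigen::ArrayXd::Ones(4);

  // Each column of A gets v added (resp. subtracted); the expression is a
  // CwiseBinaryOp over scalar_sum_op<double,double> (resp. scalar_difference_op<double,double>).
  Eigen::ArrayXXd plus  = A.colwise() + v;
  Eigen::ArrayXXd minus = A.colwise() - v;

  std::cout << (plus - minus) << "\n";  // every entry is 2, i.e. 2*v broadcast column-wise
}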
(-)a/Eigen/src/Core/functors/AssignmentFunctors.h (-26 / +24 lines)
Lines 13-84 Link Here
13
namespace Eigen {
13
namespace Eigen {
14
14
15
namespace internal {
15
namespace internal {
16
  
16
  
17
/** \internal
17
/** \internal
18
  * \brief Template functor for scalar/packet assignment
18
  * \brief Template functor for scalar/packet assignment
19
  *
19
  *
20
  */
20
  */
21
template<typename Scalar> struct assign_op {
21
template<typename DstScalar,typename SrcScalar> struct assign_op {
22
22
23
  EIGEN_EMPTY_STRUCT_CTOR(assign_op)
23
  EIGEN_EMPTY_STRUCT_CTOR(assign_op)
24
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(Scalar& a, const Scalar& b) const { a = b; }
24
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(DstScalar& a, const SrcScalar& b) const { a = b; }
25
  
25
  
26
  template<int Alignment, typename Packet>
26
  template<int Alignment, typename Packet>
27
  EIGEN_STRONG_INLINE void assignPacket(Scalar* a, const Packet& b) const
27
  EIGEN_STRONG_INLINE void assignPacket(DstScalar* a, const Packet& b) const
28
  { internal::pstoret<Scalar,Packet,Alignment>(a,b); }
28
  { internal::pstoret<DstScalar,Packet,Alignment>(a,b); }
29
};
29
};
30
template<typename Scalar>
30
template<typename DstScalar,typename SrcScalar>
31
struct functor_traits<assign_op<Scalar> > {
31
struct functor_traits<assign_op<DstScalar,SrcScalar> > {
32
  enum {
32
  enum {
33
    Cost = NumTraits<Scalar>::ReadCost,
33
    Cost = NumTraits<DstScalar>::ReadCost,
34
    PacketAccess = packet_traits<Scalar>::Vectorizable
34
    PacketAccess = is_same<DstScalar,SrcScalar>::value && packet_traits<DstScalar>::Vectorizable && packet_traits<SrcScalar>::Vectorizable
35
  };
35
  };
36
};
36
};
37
37
38
/** \internal
38
/** \internal
39
  * \brief Template functor for scalar/packet assignment with addition
39
  * \brief Template functor for scalar/packet assignment with addition
40
  *
40
  *
41
  */
41
  */
42
template<typename Scalar> struct add_assign_op {
42
template<typename DstScalar,typename SrcScalar> struct add_assign_op {
43
43
44
  EIGEN_EMPTY_STRUCT_CTOR(add_assign_op)
44
  EIGEN_EMPTY_STRUCT_CTOR(add_assign_op)
45
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(Scalar& a, const Scalar& b) const { a += b; }
45
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(DstScalar& a, const SrcScalar& b) const { a += b; }
46
  
46
  
47
  template<int Alignment, typename Packet>
47
  template<int Alignment, typename Packet>
48
  EIGEN_STRONG_INLINE void assignPacket(Scalar* a, const Packet& b) const
48
  EIGEN_STRONG_INLINE void assignPacket(DstScalar* a, const Packet& b) const
49
  { internal::pstoret<Scalar,Packet,Alignment>(a,internal::padd(internal::ploadt<Packet,Alignment>(a),b)); }
49
  { internal::pstoret<DstScalar,Packet,Alignment>(a,internal::padd(internal::ploadt<Packet,Alignment>(a),b)); }
50
};
50
};
51
template<typename Scalar>
51
template<typename DstScalar,typename SrcScalar>
52
struct functor_traits<add_assign_op<Scalar> > {
52
struct functor_traits<add_assign_op<DstScalar,SrcScalar> > {
53
  enum {
53
  enum {
54
    Cost = NumTraits<Scalar>::ReadCost + NumTraits<Scalar>::AddCost,
54
    Cost = NumTraits<DstScalar>::ReadCost + NumTraits<DstScalar>::AddCost,
55
    PacketAccess = packet_traits<Scalar>::HasAdd
55
    PacketAccess = is_same<DstScalar,SrcScalar>::value && packet_traits<DstScalar>::HasAdd
56
  };
56
  };
57
};
57
};
58
58
59
/** \internal
59
/** \internal
60
  * \brief Template functor for scalar/packet assignment with subtraction
60
  * \brief Template functor for scalar/packet assignment with subtraction
61
  *
61
  *
62
  */
62
  */
63
template<typename Scalar> struct sub_assign_op {
63
template<typename DstScalar,typename SrcScalar> struct sub_assign_op {
64
64
65
  EIGEN_EMPTY_STRUCT_CTOR(sub_assign_op)
65
  EIGEN_EMPTY_STRUCT_CTOR(sub_assign_op)
66
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(Scalar& a, const Scalar& b) const { a -= b; }
66
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(DstScalar& a, const SrcScalar& b) const { a -= b; }
67
  
67
  
68
  template<int Alignment, typename Packet>
68
  template<int Alignment, typename Packet>
69
  EIGEN_STRONG_INLINE void assignPacket(Scalar* a, const Packet& b) const
69
  EIGEN_STRONG_INLINE void assignPacket(DstScalar* a, const Packet& b) const
70
  { internal::pstoret<Scalar,Packet,Alignment>(a,internal::psub(internal::ploadt<Packet,Alignment>(a),b)); }
70
  { internal::pstoret<DstScalar,Packet,Alignment>(a,internal::psub(internal::ploadt<Packet,Alignment>(a),b)); }
71
};
71
};
72
template<typename Scalar>
72
template<typename DstScalar,typename SrcScalar>
73
struct functor_traits<sub_assign_op<Scalar> > {
73
struct functor_traits<sub_assign_op<DstScalar,SrcScalar> > {
74
  enum {
74
  enum {
75
    Cost = NumTraits<Scalar>::ReadCost + NumTraits<Scalar>::AddCost,
75
    Cost = NumTraits<DstScalar>::ReadCost + NumTraits<DstScalar>::AddCost,
76
    PacketAccess = packet_traits<Scalar>::HasSub
76
    PacketAccess = is_same<DstScalar,SrcScalar>::value && packet_traits<DstScalar>::HasSub
77
  };
77
  };
78
};
78
};
79
79
80
/** \internal
80
/** \internal
81
  * \brief Template functor for scalar/packet assignment with multiplication
81
  * \brief Template functor for scalar/packet assignment with multiplication
82
  *
82
  *
83
  */
83
  */
84
template<typename DstScalar, typename SrcScalar=DstScalar>
84
template<typename DstScalar, typename SrcScalar=DstScalar>
Lines 93-109 struct mul_assign_op { Link Here
93
};
93
};
94
template<typename DstScalar, typename SrcScalar>
94
template<typename DstScalar, typename SrcScalar>
95
struct functor_traits<mul_assign_op<DstScalar,SrcScalar> > {
95
struct functor_traits<mul_assign_op<DstScalar,SrcScalar> > {
96
  enum {
96
  enum {
97
    Cost = NumTraits<DstScalar>::ReadCost + NumTraits<DstScalar>::MulCost,
97
    Cost = NumTraits<DstScalar>::ReadCost + NumTraits<DstScalar>::MulCost,
98
    PacketAccess = is_same<DstScalar,SrcScalar>::value && packet_traits<DstScalar>::HasMul
98
    PacketAccess = is_same<DstScalar,SrcScalar>::value && packet_traits<DstScalar>::HasMul
99
  };
99
  };
100
};
100
};
101
template<typename DstScalar,typename SrcScalar> struct functor_is_product_like<mul_assign_op<DstScalar,SrcScalar> > { enum { ret = 1 }; };
102
101
103
/** \internal
102
/** \internal
104
  * \brief Template functor for scalar/packet assignment with division
103
  * \brief Template functor for scalar/packet assignment with division
105
  *
104
  *
106
  */
105
  */
107
template<typename DstScalar, typename SrcScalar=DstScalar> struct div_assign_op {
106
template<typename DstScalar, typename SrcScalar=DstScalar> struct div_assign_op {
108
107
109
  EIGEN_EMPTY_STRUCT_CTOR(div_assign_op)
108
  EIGEN_EMPTY_STRUCT_CTOR(div_assign_op)
Lines 115-131 template<typename DstScalar, typename Sr Link Here
115
};
114
};
116
template<typename DstScalar, typename SrcScalar>
115
template<typename DstScalar, typename SrcScalar>
117
struct functor_traits<div_assign_op<DstScalar,SrcScalar> > {
116
struct functor_traits<div_assign_op<DstScalar,SrcScalar> > {
118
  enum {
117
  enum {
119
    Cost = NumTraits<DstScalar>::ReadCost + NumTraits<DstScalar>::MulCost,
118
    Cost = NumTraits<DstScalar>::ReadCost + NumTraits<DstScalar>::MulCost,
120
    PacketAccess = is_same<DstScalar,SrcScalar>::value && packet_traits<DstScalar>::HasDiv
119
    PacketAccess = is_same<DstScalar,SrcScalar>::value && packet_traits<DstScalar>::HasDiv
121
  };
120
  };
122
};
121
};
123
template<typename DstScalar,typename SrcScalar> struct functor_is_product_like<div_assign_op<DstScalar,SrcScalar> > { enum { ret = 1 }; };
124
122
125
/** \internal
123
/** \internal
126
  * \brief Template functor for scalar/packet assignment with swapping
124
  * \brief Template functor for scalar/packet assignment with swapping
127
  *
125
  *
128
  * It works as follows. For a non-vectorized evaluation loop, we have:
126
  * It works as follows. For a non-vectorized evaluation loop, we have:
129
  *   for(i) func(A.coeffRef(i), B.coeff(i));
127
  *   for(i) func(A.coeffRef(i), B.coeff(i));
130
  * where B is a SwapWrapper expression. The trick is to make SwapWrapper::coeff behave like a non-const coeffRef.
128
  * where B is a SwapWrapper expression. The trick is to make SwapWrapper::coeff behave like a non-const coeffRef.
131
  * Actually, SwapWrapper might not even be needed since even if B is a plain expression, since it has to be writable
129
  * Actually, SwapWrapper might not even be needed since even if B is a plain expression, since it has to be writable
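Taken together, the pattern for the assignment functors in this file after the change is: each functor carries a (DstScalar, SrcScalar) pair, the scalar path mixes types freely, and functor_traits only claims packet access when the two types coincide. A compilable sketch of that shape, independent of the Eigen internals (the names mimic the ones above but are deliberately simplified):

#include <iostream>
#include <type_traits>

template<typename DstScalar, typename SrcScalar>
struct add_assign_op {
  // Scalar path: mixed types are fine, the usual implicit conversions apply.
  void assignCoeff(DstScalar& a, const SrcScalar& b) const { a += b; }
};

template<typename Functor> struct functor_traits;

template<typename DstScalar, typename SrcScalar>
struct functor_traits<add_assign_op<DstScalar, SrcScalar> > {
  enum {
    // Vectorization is only claimed for the homogeneous case, as in the hunk above.
    PacketAccess = std::is_same<DstScalar, SrcScalar>::value
  };
};

int main() {
  double acc = 1.0;
  float  inc = 0.5f;
  add_assign_op<double, float>().assignCoeff(acc, inc);  // mixed scalar path
  std::cout << acc << " "
            << functor_traits<add_assign_op<double, float>  >::PacketAccess << " "    // 0
            << functor_traits<add_assign_op<double, double> >::PacketAccess << "\n";  // 1
}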
(-)a/Eigen/src/Core/functors/BinaryFunctors.h (-39 / +25 lines)
Lines 16-101 namespace internal { Link Here
16
16
17
//---------- associative binary functors ----------
17
//---------- associative binary functors ----------
18
18
19
/** \internal
19
/** \internal
20
  * \brief Template functor to compute the sum of two scalars
20
  * \brief Template functor to compute the sum of two scalars
21
  *
21
  *
22
  * \sa class CwiseBinaryOp, MatrixBase::operator+, class VectorwiseOp, DenseBase::sum()
22
  * \sa class CwiseBinaryOp, MatrixBase::operator+, class VectorwiseOp, DenseBase::sum()
23
  */
23
  */
24
template<typename Scalar> struct scalar_sum_op {
24
template<typename LhsScalar,typename RhsScalar> struct scalar_sum_op {
25
//   typedef Scalar result_type;
25
  typedef typename ScalarBinaryOpTraits<LhsScalar,RhsScalar,scalar_sum_op>::ReturnType result_type;
26
  EIGEN_EMPTY_STRUCT_CTOR(scalar_sum_op)
26
  EIGEN_EMPTY_STRUCT_CTOR(scalar_sum_op)
27
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return a + b; }
27
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a + b; }
28
  template<typename Packet>
28
  template<typename Packet>
29
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
29
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
30
  { return internal::padd(a,b); }
30
  { return internal::padd(a,b); }
31
  template<typename Packet>
31
  template<typename Packet>
32
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar predux(const Packet& a) const
32
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type predux(const Packet& a) const
33
  { return internal::predux(a); }
33
  { return internal::predux(a); }
34
};
34
};
35
template<typename Scalar>
35
template<typename LhsScalar,typename RhsScalar>
36
struct functor_traits<scalar_sum_op<Scalar> > {
36
struct functor_traits<scalar_sum_op<LhsScalar,RhsScalar> > {
37
  enum {
37
  enum {
38
    Cost = NumTraits<Scalar>::AddCost,
38
    Cost = (NumTraits<LhsScalar>::AddCost+NumTraits<RhsScalar>::AddCost)/2, // rough estimate!
39
    PacketAccess = packet_traits<Scalar>::HasAdd
39
    PacketAccess = is_same<LhsScalar,RhsScalar>::value && packet_traits<LhsScalar>::HasAdd && packet_traits<RhsScalar>::HasAdd
40
    // TODO vectorize mixed sum
40
  };
41
  };
41
};
42
};
42
43
43
/** \internal
44
/** \internal
44
  * \brief Template specialization to deprecate the summation of boolean expressions.
45
  * \brief Template specialization to deprecate the summation of boolean expressions.
45
  * This is required to solve Bug 426.
46
  * This is required to solve Bug 426.
46
  * \sa DenseBase::count(), DenseBase::any(), ArrayBase::cast(), MatrixBase::cast()
47
  * \sa DenseBase::count(), DenseBase::any(), ArrayBase::cast(), MatrixBase::cast()
47
  */
48
  */
48
template<> struct scalar_sum_op<bool> : scalar_sum_op<int> {
49
template<> struct scalar_sum_op<bool,bool> : scalar_sum_op<int,int> {
49
  EIGEN_DEPRECATED
50
  EIGEN_DEPRECATED
50
  scalar_sum_op() {}
51
  scalar_sum_op() {}
51
};
52
};
52
53
53
54
54
/** \internal
55
/** \internal
55
  * \brief Template functor to compute the product of two scalars
56
  * \brief Template functor to compute the product of two scalars
56
  *
57
  *
57
  * \sa class CwiseBinaryOp, Cwise::operator*(), class VectorwiseOp, MatrixBase::redux()
58
  * \sa class CwiseBinaryOp, Cwise::operator*(), class VectorwiseOp, MatrixBase::redux()
58
  */
59
  */
59
template<typename LhsScalar,typename RhsScalar> struct scalar_product_op {
60
template<typename LhsScalar,typename RhsScalar> struct scalar_product_op {
60
  enum {
61
  typedef typename ScalarBinaryOpTraits<LhsScalar,RhsScalar,scalar_product_op>::ReturnType result_type;
61
    // TODO vectorize mixed product
62
    Vectorizable = is_same<LhsScalar,RhsScalar>::value && packet_traits<LhsScalar>::HasMul && packet_traits<RhsScalar>::HasMul
63
  };
64
  typedef typename scalar_product_traits<LhsScalar,RhsScalar>::ReturnType result_type;
65
  EIGEN_EMPTY_STRUCT_CTOR(scalar_product_op)
62
  EIGEN_EMPTY_STRUCT_CTOR(scalar_product_op)
66
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a * b; }
63
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a * b; }
67
  template<typename Packet>
64
  template<typename Packet>
68
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
65
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
69
  { return internal::pmul(a,b); }
66
  { return internal::pmul(a,b); }
70
  template<typename Packet>
67
  template<typename Packet>
71
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type predux(const Packet& a) const
68
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type predux(const Packet& a) const
72
  { return internal::predux_mul(a); }
69
  { return internal::predux_mul(a); }
73
};
70
};
74
template<typename LhsScalar,typename RhsScalar>
71
template<typename LhsScalar,typename RhsScalar>
75
struct functor_traits<scalar_product_op<LhsScalar,RhsScalar> > {
72
struct functor_traits<scalar_product_op<LhsScalar,RhsScalar> > {
76
  enum {
73
  enum {
77
    Cost = (NumTraits<LhsScalar>::MulCost + NumTraits<RhsScalar>::MulCost)/2, // rough estimate!
74
    Cost = (NumTraits<LhsScalar>::MulCost + NumTraits<RhsScalar>::MulCost)/2, // rough estimate!
78
    PacketAccess = scalar_product_op<LhsScalar,RhsScalar>::Vectorizable
75
    PacketAccess = is_same<LhsScalar,RhsScalar>::value && packet_traits<LhsScalar>::HasMul && packet_traits<RhsScalar>::HasMul
76
    // TODO vectorize mixed product
79
  };
77
  };
80
};
78
};
81
79
82
/** \internal
80
/** \internal
83
  * \brief Template functor to compute the conjugate product of two scalars
81
  * \brief Template functor to compute the conjugate product of two scalars
84
  *
82
  *
85
  * This is a shortcut for conj(x) * y, which is needed for optimization purposes; in Eigen2 support mode, this becomes x * conj(y)
83
  * This is a shortcut for conj(x) * y, which is needed for optimization purposes; in Eigen2 support mode, this becomes x * conj(y)
86
  */
84
  */
87
template<typename LhsScalar,typename RhsScalar> struct scalar_conj_product_op {
85
template<typename LhsScalar,typename RhsScalar> struct scalar_conj_product_op {
88
86
89
  enum {
87
  enum {
90
    Conj = NumTraits<LhsScalar>::IsComplex
88
    Conj = NumTraits<LhsScalar>::IsComplex
91
  };
89
  };
92
  
90
  
93
  typedef typename scalar_product_traits<LhsScalar,RhsScalar>::ReturnType result_type;
91
  typedef typename ScalarBinaryOpTraits<LhsScalar,RhsScalar,scalar_conj_product_op>::ReturnType result_type;
94
  
92
  
95
  EIGEN_EMPTY_STRUCT_CTOR(scalar_conj_product_op)
93
  EIGEN_EMPTY_STRUCT_CTOR(scalar_conj_product_op)
96
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const
94
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const
97
  { return conj_helper<LhsScalar,RhsScalar,Conj,false>().pmul(a,b); }
95
  { return conj_helper<LhsScalar,RhsScalar,Conj,false>().pmul(a,b); }
98
  
96
  
99
  template<typename Packet>
97
  template<typename Packet>
100
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
98
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
101
  { return conj_helper<Packet,Packet,Conj,false>().pmul(a,b); }
99
  { return conj_helper<Packet,Packet,Conj,false>().pmul(a,b); }
Lines 264-316 struct functor_traits<scalar_binary_pow_ Link Here
264
262
265
//---------- non associative binary functors ----------
263
//---------- non associative binary functors ----------
266
264
267
/** \internal
265
/** \internal
268
  * \brief Template functor to compute the difference of two scalars
266
  * \brief Template functor to compute the difference of two scalars
269
  *
267
  *
270
  * \sa class CwiseBinaryOp, MatrixBase::operator-
268
  * \sa class CwiseBinaryOp, MatrixBase::operator-
271
  */
269
  */
272
template<typename Scalar> struct scalar_difference_op {
270
template<typename LhsScalar,typename RhsScalar> struct scalar_difference_op {
271
  typedef typename ScalarBinaryOpTraits<LhsScalar,RhsScalar,scalar_difference_op>::ReturnType result_type;
273
  EIGEN_EMPTY_STRUCT_CTOR(scalar_difference_op)
272
  EIGEN_EMPTY_STRUCT_CTOR(scalar_difference_op)
274
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return a - b; }
273
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a - b; }
275
  template<typename Packet>
274
  template<typename Packet>
276
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
275
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
277
  { return internal::psub(a,b); }
276
  { return internal::psub(a,b); }
278
};
277
};
279
template<typename Scalar>
278
template<typename LhsScalar,typename RhsScalar>
280
struct functor_traits<scalar_difference_op<Scalar> > {
279
struct functor_traits<scalar_difference_op<LhsScalar,RhsScalar> > {
281
  enum {
280
  enum {
282
    Cost = NumTraits<Scalar>::AddCost,
281
    Cost = (NumTraits<LhsScalar>::AddCost+NumTraits<RhsScalar>::AddCost)/2,
283
    PacketAccess = packet_traits<Scalar>::HasSub
282
    PacketAccess = is_same<LhsScalar,RhsScalar>::value && packet_traits<LhsScalar>::HasSub && packet_traits<RhsScalar>::HasSub
284
  };
283
  };
285
};
284
};
286
285
287
/** \internal
286
/** \internal
288
  * \brief Template functor to compute the quotient of two scalars
287
  * \brief Template functor to compute the quotient of two scalars
289
  *
288
  *
290
  * \sa class CwiseBinaryOp, Cwise::operator/()
289
  * \sa class CwiseBinaryOp, Cwise::operator/()
291
  */
290
  */
292
template<typename LhsScalar,typename RhsScalar> struct scalar_quotient_op {
291
template<typename LhsScalar,typename RhsScalar> struct scalar_quotient_op {
293
  enum {
292
  typedef typename ScalarBinaryOpTraits<LhsScalar,RhsScalar,scalar_quotient_op>::ReturnType result_type;
294
    // TODO vectorize mixed product
295
    Vectorizable = is_same<LhsScalar,RhsScalar>::value && packet_traits<LhsScalar>::HasDiv && packet_traits<RhsScalar>::HasDiv
296
  };
297
  typedef typename scalar_product_traits<LhsScalar,RhsScalar>::ReturnType result_type;
298
  EIGEN_EMPTY_STRUCT_CTOR(scalar_quotient_op)
293
  EIGEN_EMPTY_STRUCT_CTOR(scalar_quotient_op)
299
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a / b; }
294
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a / b; }
300
  template<typename Packet>
295
  template<typename Packet>
301
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
296
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
302
  { return internal::pdiv(a,b); }
297
  { return internal::pdiv(a,b); }
303
};
298
};
304
template<typename LhsScalar,typename RhsScalar>
299
template<typename LhsScalar,typename RhsScalar>
305
struct functor_traits<scalar_quotient_op<LhsScalar,RhsScalar> > {
300
struct functor_traits<scalar_quotient_op<LhsScalar,RhsScalar> > {
306
  typedef typename scalar_quotient_op<LhsScalar,RhsScalar>::result_type result_type;
301
  typedef typename scalar_quotient_op<LhsScalar,RhsScalar>::result_type result_type;
307
  enum {
302
  enum {
308
    PacketAccess = scalar_quotient_op<LhsScalar,RhsScalar>::Vectorizable,
303
    PacketAccess = is_same<LhsScalar,RhsScalar>::value && packet_traits<LhsScalar>::HasDiv && packet_traits<RhsScalar>::HasDiv,
309
    Cost = NumTraits<result_type>::template Div<PacketAccess>::Cost
304
    Cost = NumTraits<result_type>::template Div<PacketAccess>::Cost
310
  };
305
  };
311
};
306
};
312
307
313
308
314
309
315
/** \internal
310
/** \internal
316
  * \brief Template functor to compute the and of two booleans
311
  * \brief Template functor to compute the and of two booleans
Lines 441-457 struct scalar_multiple_op { Link Here
441
  typename add_const_on_value_type<typename NumTraits<Scalar>::Nested>::type m_other;
436
  typename add_const_on_value_type<typename NumTraits<Scalar>::Nested>::type m_other;
442
};
437
};
443
template<typename Scalar>
438
template<typename Scalar>
444
struct functor_traits<scalar_multiple_op<Scalar> >
439
struct functor_traits<scalar_multiple_op<Scalar> >
445
{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasMul }; };
440
{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasMul }; };
446
441
447
template<typename Scalar1, typename Scalar2>
442
template<typename Scalar1, typename Scalar2>
448
struct scalar_multiple2_op {
443
struct scalar_multiple2_op {
449
  typedef typename scalar_product_traits<Scalar1,Scalar2>::ReturnType result_type;
444
  typedef typename ScalarBinaryOpTraits<Scalar1,Scalar2>::ReturnType result_type;
450
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE scalar_multiple2_op(const scalar_multiple2_op& other) : m_other(other.m_other) { }
445
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE scalar_multiple2_op(const scalar_multiple2_op& other) : m_other(other.m_other) { }
451
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE scalar_multiple2_op(const Scalar2& other) : m_other(other) { }
446
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE scalar_multiple2_op(const Scalar2& other) : m_other(other) { }
452
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const Scalar1& a) const { return a * m_other; }
447
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const Scalar1& a) const { return a * m_other; }
453
  typename add_const_on_value_type<typename NumTraits<Scalar2>::Nested>::type m_other;
448
  typename add_const_on_value_type<typename NumTraits<Scalar2>::Nested>::type m_other;
454
};
449
};
455
template<typename Scalar1,typename Scalar2>
450
template<typename Scalar1,typename Scalar2>
456
struct functor_traits<scalar_multiple2_op<Scalar1,Scalar2> >
451
struct functor_traits<scalar_multiple2_op<Scalar1,Scalar2> >
457
{ enum { Cost = NumTraits<Scalar1>::MulCost, PacketAccess = false }; };
452
{ enum { Cost = NumTraits<Scalar1>::MulCost, PacketAccess = false }; };
Lines 476-510 struct scalar_quotient1_op { Link Here
476
  typename add_const_on_value_type<typename NumTraits<Scalar>::Nested>::type m_other;
471
  typename add_const_on_value_type<typename NumTraits<Scalar>::Nested>::type m_other;
477
};
472
};
478
template<typename Scalar>
473
template<typename Scalar>
479
struct functor_traits<scalar_quotient1_op<Scalar> >
474
struct functor_traits<scalar_quotient1_op<Scalar> >
480
{ enum { Cost = 2 * NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasDiv }; };
475
{ enum { Cost = 2 * NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasDiv }; };
481
476
482
template<typename Scalar1, typename Scalar2>
477
template<typename Scalar1, typename Scalar2>
483
struct scalar_quotient2_op {
478
struct scalar_quotient2_op {
484
  typedef typename scalar_product_traits<Scalar1,Scalar2>::ReturnType result_type;
479
  typedef typename ScalarBinaryOpTraits<Scalar1,Scalar2>::ReturnType result_type;
485
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE scalar_quotient2_op(const scalar_quotient2_op& other) : m_other(other.m_other) { }
480
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE scalar_quotient2_op(const scalar_quotient2_op& other) : m_other(other.m_other) { }
486
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE scalar_quotient2_op(const Scalar2& other) : m_other(other) { }
481
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE scalar_quotient2_op(const Scalar2& other) : m_other(other) { }
487
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const Scalar1& a) const { return a / m_other; }
482
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const Scalar1& a) const { return a / m_other; }
488
  typename add_const_on_value_type<typename NumTraits<Scalar2>::Nested>::type m_other;
483
  typename add_const_on_value_type<typename NumTraits<Scalar2>::Nested>::type m_other;
489
};
484
};
490
template<typename Scalar1,typename Scalar2>
485
template<typename Scalar1,typename Scalar2>
491
struct functor_traits<scalar_quotient2_op<Scalar1,Scalar2> >
486
struct functor_traits<scalar_quotient2_op<Scalar1,Scalar2> >
492
{ enum { Cost = 2 * NumTraits<Scalar1>::MulCost, PacketAccess = false }; };
487
{ enum { Cost = 2 * NumTraits<Scalar1>::MulCost, PacketAccess = false }; };
493
488
494
// In Eigen, any binary op (Product, CwiseBinaryOp) require the Lhs and Rhs to have the same scalar type, except for multiplication
495
// where the mixing of different types is handled by scalar_product_traits
496
// In particular, real * complex<real> is allowed.
497
// FIXME move this to functor_traits adding a functor_default
498
template<typename Functor> struct functor_is_product_like { enum { ret = 0 }; };
499
template<typename LhsScalar,typename RhsScalar> struct functor_is_product_like<scalar_product_op<LhsScalar,RhsScalar> > { enum { ret = 1 }; };
500
template<typename LhsScalar,typename RhsScalar> struct functor_is_product_like<scalar_conj_product_op<LhsScalar,RhsScalar> > { enum { ret = 1 }; };
501
template<typename LhsScalar,typename RhsScalar> struct functor_is_product_like<scalar_quotient_op<LhsScalar,RhsScalar> > { enum { ret = 1 }; };
502
503
489
504
/** \internal
490
/** \internal
505
  * \brief Template functor to add a scalar to a fixed other one
491
  * \brief Template functor to add a scalar to a fixed other one
506
  * \sa class CwiseUnaryOp, Array::operator+
492
  * \sa class CwiseUnaryOp, Array::operator+
507
  */
493
  */
508
/* If you wonder why doing the pset1() in packetOp() is an optimization check scalar_multiple_op */
494
/* If you wonder why doing the pset1() in packetOp() is an optimization check scalar_multiple_op */
509
template<typename Scalar>
495
template<typename Scalar>
510
struct scalar_add_op {
496
struct scalar_add_op {
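The recurring edit in this file is that result_type is now taken from ScalarBinaryOpTraits instead of scalar_product_traits, and that the trait is consulted for sums, differences and quotients as well, not only products. Assuming the trait is reachable as Eigen::ScalarBinaryOpTraits with its default third template argument once the patch is applied, the intended type promotion can be checked as follows (a hedged sketch for verification, not part of the patch; requires C++11 for static_assert):

#include <Eigen/Core>
#include <complex>
#include <type_traits>

int main() {
  typedef Eigen::ScalarBinaryOpTraits<double, std::complex<double> >::ReturnType R1;
  typedef Eigen::ScalarBinaryOpTraits<float, float>::ReturnType R2;

  // Real/complex mixes promote to the complex type; homogeneous pairs are unchanged.
  static_assert(std::is_same<R1, std::complex<double> >::value, "double op complex<double> -> complex<double>");
  static_assert(std::is_same<R2, float>::value, "float op float -> float");
  return 0;
}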
(-)a/Eigen/src/Core/products/GeneralBlockPanelKernel.h (-2 / +2 lines)
Lines 358-374 inline void computeProductBlockingSizes( Link Here
358
 *  real*cplx : load lhs as (a0,a0,a1,a1), and mul as usual
358
 *  real*cplx : load lhs as (a0,a0,a1,a1), and mul as usual
359
 */
359
 */
360
template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs, bool _ConjRhs>
360
template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs, bool _ConjRhs>
361
class gebp_traits
361
class gebp_traits
362
{
362
{
363
public:
363
public:
364
  typedef _LhsScalar LhsScalar;
364
  typedef _LhsScalar LhsScalar;
365
  typedef _RhsScalar RhsScalar;
365
  typedef _RhsScalar RhsScalar;
366
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
366
  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
367
367
368
  enum {
368
  enum {
369
    ConjLhs = _ConjLhs,
369
    ConjLhs = _ConjLhs,
370
    ConjRhs = _ConjRhs,
370
    ConjRhs = _ConjRhs,
371
    Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
371
    Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
372
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
372
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
373
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
373
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
374
    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
374
    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
Lines 473-489 protected: Link Here
473
};
473
};
474
474
475
template<typename RealScalar, bool _ConjLhs>
475
template<typename RealScalar, bool _ConjLhs>
476
class gebp_traits<std::complex<RealScalar>, RealScalar, _ConjLhs, false>
476
class gebp_traits<std::complex<RealScalar>, RealScalar, _ConjLhs, false>
477
{
477
{
478
public:
478
public:
479
  typedef std::complex<RealScalar> LhsScalar;
479
  typedef std::complex<RealScalar> LhsScalar;
480
  typedef RealScalar RhsScalar;
480
  typedef RealScalar RhsScalar;
481
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
481
  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
482
482
483
  enum {
483
  enum {
484
    ConjLhs = _ConjLhs,
484
    ConjLhs = _ConjLhs,
485
    ConjRhs = false,
485
    ConjRhs = false,
486
    Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
486
    Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
487
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
487
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
488
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
488
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
489
    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
489
    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
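As the real*cplx note at the top of this hunk indicates, the gebp kernel already handles mixed real/complex operands; the only change here is that ResScalar is now deduced through ScalarBinaryOpTraits. At the user level this is the familiar mixed product; a small sketch (the mixed-type support itself is pre-existing per the comments above, only the result-type deduction changes):

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd  A = Eigen::MatrixXd::Random(3, 3);   // real lhs
  Eigen::MatrixXcd B = Eigen::MatrixXcd::Random(3, 2);  // complex rhs

  // The result scalar is deduced as std::complex<double>, through the same
  // trait that gebp_traits uses above for its ResScalar typedef.
  Eigen::MatrixXcd C = A * B;

  std::cout << C.rows() << "x" << C.cols() << "\n";  // 3x2
}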
(-)a/Eigen/src/Core/products/GeneralMatrixMatrix.h (-2 / +2 lines)
Lines 20-36 template<typename _LhsScalar, typename _ Link Here
20
template<
20
template<
21
  typename Index,
21
  typename Index,
22
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
22
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
23
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
23
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
24
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
24
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
25
{
25
{
26
  typedef gebp_traits<RhsScalar,LhsScalar> Traits;
26
  typedef gebp_traits<RhsScalar,LhsScalar> Traits;
27
  
27
  
28
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
28
  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
29
  static EIGEN_STRONG_INLINE void run(
29
  static EIGEN_STRONG_INLINE void run(
30
    Index rows, Index cols, Index depth,
30
    Index rows, Index cols, Index depth,
31
    const LhsScalar* lhs, Index lhsStride,
31
    const LhsScalar* lhs, Index lhsStride,
32
    const RhsScalar* rhs, Index rhsStride,
32
    const RhsScalar* rhs, Index rhsStride,
33
    ResScalar* res, Index resStride,
33
    ResScalar* res, Index resStride,
34
    ResScalar alpha,
34
    ResScalar alpha,
35
    level3_blocking<RhsScalar,LhsScalar>& blocking,
35
    level3_blocking<RhsScalar,LhsScalar>& blocking,
36
    GemmParallelInfo<Index>* info = 0)
36
    GemmParallelInfo<Index>* info = 0)
Lines 50-66 template< Link Here
50
  typename Index,
50
  typename Index,
51
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
51
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
52
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
52
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
53
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
53
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
54
{
54
{
55
55
56
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
56
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
57
  
57
  
58
typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
58
typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
59
static void run(Index rows, Index cols, Index depth,
59
static void run(Index rows, Index cols, Index depth,
60
  const LhsScalar* _lhs, Index lhsStride,
60
  const LhsScalar* _lhs, Index lhsStride,
61
  const RhsScalar* _rhs, Index rhsStride,
61
  const RhsScalar* _rhs, Index rhsStride,
62
  ResScalar* _res, Index resStride,
62
  ResScalar* _res, Index resStride,
63
  ResScalar alpha,
63
  ResScalar alpha,
64
  level3_blocking<LhsScalar,RhsScalar>& blocking,
64
  level3_blocking<LhsScalar,RhsScalar>& blocking,
65
  GemmParallelInfo<Index>* info = 0)
65
  GemmParallelInfo<Index>* info = 0)
66
{
66
{
(-)a/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h (-2 / +2 lines)
Lines 35-68 template <typename Index, Link Here
35
                              int ResStorageOrder, int  UpLo, int Version = Specialized>
35
                              int ResStorageOrder, int  UpLo, int Version = Specialized>
36
struct general_matrix_matrix_triangular_product;
36
struct general_matrix_matrix_triangular_product;
37
37
38
// as usual if the result is row major => we transpose the product
38
// as usual if the result is row major => we transpose the product
39
template <typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
39
template <typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
40
                          typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs, int  UpLo, int Version>
40
                          typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs, int  UpLo, int Version>
41
struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor,UpLo,Version>
41
struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor,UpLo,Version>
42
{
42
{
43
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
43
  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
44
  static EIGEN_STRONG_INLINE void run(Index size, Index depth,const LhsScalar* lhs, Index lhsStride,
44
  static EIGEN_STRONG_INLINE void run(Index size, Index depth,const LhsScalar* lhs, Index lhsStride,
45
                                      const RhsScalar* rhs, Index rhsStride, ResScalar* res, Index resStride,
45
                                      const RhsScalar* rhs, Index rhsStride, ResScalar* res, Index resStride,
46
                                      const ResScalar& alpha, level3_blocking<RhsScalar,LhsScalar>& blocking)
46
                                      const ResScalar& alpha, level3_blocking<RhsScalar,LhsScalar>& blocking)
47
  {
47
  {
48
    general_matrix_matrix_triangular_product<Index,
48
    general_matrix_matrix_triangular_product<Index,
49
        RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
49
        RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
50
        LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
50
        LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
51
        ColMajor, UpLo==Lower?Upper:Lower>
51
        ColMajor, UpLo==Lower?Upper:Lower>
52
      ::run(size,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking);
52
      ::run(size,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking);
53
  }
53
  }
54
};
54
};
55
55
56
template <typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
56
template <typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
57
                          typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs, int  UpLo, int Version>
57
                          typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs, int  UpLo, int Version>
58
struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor,UpLo,Version>
58
struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor,UpLo,Version>
59
{
59
{
60
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
60
  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
61
  static EIGEN_STRONG_INLINE void run(Index size, Index depth,const LhsScalar* _lhs, Index lhsStride,
61
  static EIGEN_STRONG_INLINE void run(Index size, Index depth,const LhsScalar* _lhs, Index lhsStride,
62
                                      const RhsScalar* _rhs, Index rhsStride, ResScalar* _res, Index resStride,
62
                                      const RhsScalar* _rhs, Index rhsStride, ResScalar* _res, Index resStride,
63
                                      const ResScalar& alpha, level3_blocking<LhsScalar,RhsScalar>& blocking)
63
                                      const ResScalar& alpha, level3_blocking<LhsScalar,RhsScalar>& blocking)
64
  {
64
  {
65
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;
65
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;
66
66
67
    typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;
67
    typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;
68
    typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;
68
    typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;
(-)a/Eigen/src/Core/products/GeneralMatrixVector.h (-2 / +2 lines)
Lines 53-69 namespace internal { Link Here
53
 * One might also wonder why in the EvenAligned case we perform unaligned loads instead of using the aligned-loads plus re-alignment
53
 * One might also wonder why in the EvenAligned case we perform unaligned loads instead of using the aligned-loads plus re-alignment
54
 * strategy as in the FirstAligned case. The reason is that we observed that unaligned loads on a 8 byte boundary are not too slow
54
 * strategy as in the FirstAligned case. The reason is that we observed that unaligned loads on a 8 byte boundary are not too slow
55
 * compared to unaligned loads on a 4 byte boundary.
55
 * compared to unaligned loads on a 4 byte boundary.
56
 *
56
 *
57
 */
57
 */
58
template<typename Index, typename LhsScalar, typename LhsMapper, bool ConjugateLhs, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version>
58
template<typename Index, typename LhsScalar, typename LhsMapper, bool ConjugateLhs, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version>
59
struct general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>
59
struct general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>
60
{
60
{
61
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
61
  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
62
62
63
enum {
63
enum {
64
  Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable
64
  Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable
65
              && int(packet_traits<LhsScalar>::size)==int(packet_traits<RhsScalar>::size),
65
              && int(packet_traits<LhsScalar>::size)==int(packet_traits<RhsScalar>::size),
66
  LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
66
  LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
67
  RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
67
  RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
68
  ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1
68
  ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1
69
};
69
};
Lines 329-345 EIGEN_DONT_INLINE void general_matrix_ve Link Here
329
 *
329
 *
330
 * Mixing type logic:
330
 * Mixing type logic:
331
 *  - alpha is always a complex (or converted to a complex)
331
 *  - alpha is always a complex (or converted to a complex)
332
 *  - no vectorization
332
 *  - no vectorization
333
 */
333
 */
334
template<typename Index, typename LhsScalar, typename LhsMapper, bool ConjugateLhs, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version>
334
template<typename Index, typename LhsScalar, typename LhsMapper, bool ConjugateLhs, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version>
335
struct general_matrix_vector_product<Index,LhsScalar,LhsMapper,RowMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>
335
struct general_matrix_vector_product<Index,LhsScalar,LhsMapper,RowMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>
336
{
336
{
337
typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
337
typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
338
338
339
enum {
339
enum {
340
  Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable
340
  Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable
341
              && int(packet_traits<LhsScalar>::size)==int(packet_traits<RhsScalar>::size),
341
              && int(packet_traits<LhsScalar>::size)==int(packet_traits<RhsScalar>::size),
342
  LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
342
  LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
343
  RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
343
  RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
344
  ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1
344
  ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1
345
};
345
};
(-)a/Eigen/src/Core/products/TriangularMatrixVector.h (-2 / +2 lines)
Lines 15-31 namespace Eigen {
 namespace internal {
 
 template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs, int StorageOrder, int Version=Specialized>
 struct triangular_matrix_vector_product;
 
 template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs, int Version>
 struct triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,ColMajor,Version>
 {
-  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
+  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
   enum {
     IsLower = ((Mode&Lower)==Lower),
     HasUnitDiag = (Mode & UnitDiag)==UnitDiag,
     HasZeroDiag = (Mode & ZeroDiag)==ZeroDiag
   };
   static EIGEN_DONT_INLINE  void run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride,
                                      const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const RhsScalar& alpha);
 };
Lines 86-102 EIGEN_DONT_INLINE void triangular_matrix
           RhsMapper(&rhs.coeffRef(size), rhsIncr),
           _res, resIncr, alpha);
     }
   }
 
 template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs,int Version>
 struct triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,RowMajor,Version>
 {
-  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
+  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
   enum {
     IsLower = ((Mode&Lower)==Lower),
     HasUnitDiag = (Mode & UnitDiag)==UnitDiag,
     HasZeroDiag = (Mode & ZeroDiag)==ZeroDiag
   };
   static EIGEN_DONT_INLINE void run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride,
                                     const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const ResScalar& alpha);
 };
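For reference, the code path touched here is the one taken by triangularView products; a small usage sketch (nothing in it is specific to this patch beyond how ResScalar is deduced):

#include <Eigen/Dense>

int main()
{
  Eigen::Matrix3d A = Eigen::Matrix3d::Random();
  Eigen::Vector3d b(1.0, 2.0, 3.0);
  // Triangular matrix * vector dispatches to triangular_matrix_vector_product;
  // its ResScalar now comes from ScalarBinaryOpTraits<LhsScalar,RhsScalar>.
  Eigen::Vector3d y = A.triangularView<Eigen::Lower>() * b;
  return y.size() == 3 ? 0 : 1;
}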
(-)a/Eigen/src/Core/util/ForwardDeclarations.h (-2 / +3 lines)
Lines 125-140 template<typename MatrixType, unsigned i
 template<typename MatrixType> class SparseView;
 template<typename ExpressionType> class WithFormat;
 template<typename MatrixType> struct CommaInitializer;
 template<typename Derived> class ReturnByValue;
 template<typename ExpressionType> class ArrayWrapper;
 template<typename ExpressionType> class MatrixWrapper;
 template<typename Derived> class SolverBase;
 template<typename XprType> class InnerIterator;
+template<typename ScalarA, typename ScalarB, typename BinaryOp=void> struct ScalarBinaryOpTraits;
 
 namespace internal {
 template<typename DecompositionType> struct kernel_retval_base;
 template<typename DecompositionType> struct kernel_retval;
 template<typename DecompositionType> struct image_retval_base;
 template<typename DecompositionType> struct image_retval;
 } // end namespace internal
 
Lines 169-186 struct ProductReturnType;
 template<typename Lhs, typename Rhs> struct LazyProductReturnType;
 
 namespace internal {
 
 // Provides scalar/packet-wise product and product with accumulation
 // with optional conjugation of the arguments.
 template<typename LhsScalar, typename RhsScalar, bool ConjLhs=false, bool ConjRhs=false> struct conj_helper;
 
-template<typename Scalar> struct scalar_sum_op;
+template<typename LhsScalar,typename RhsScalar> struct scalar_sum_op;
-template<typename Scalar> struct scalar_difference_op;
+template<typename LhsScalar,typename RhsScalar> struct scalar_difference_op;
 template<typename LhsScalar,typename RhsScalar> struct scalar_conj_product_op;
 template<typename Scalar> struct scalar_opposite_op;
 template<typename Scalar> struct scalar_conjugate_op;
 template<typename Scalar> struct scalar_real_op;
 template<typename Scalar> struct scalar_imag_op;
 template<typename Scalar> struct scalar_abs_op;
 template<typename Scalar> struct scalar_abs2_op;
 template<typename Scalar> struct scalar_sqrt_op;
(-)a/Eigen/src/Core/util/Macros.h (-2 / +2 lines)
Lines 880-898 namespace Eigen {
   template<typename OtherDerived> \
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseBinaryOp<FUNCTOR<Scalar>, const Derived, const OtherDerived> \
   (METHOD)(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \
   { \
     return CwiseBinaryOp<FUNCTOR<Scalar>, const Derived, const OtherDerived>(derived(), other.derived()); \
   }
 
 // the expression type of a cwise product
-#define EIGEN_CWISE_PRODUCT_RETURN_TYPE(LHS,RHS) \
+#define EIGEN_CWISE_BINARY_RETURN_TYPE(LHS,RHS,OPNAME) \
     CwiseBinaryOp< \
-      internal::scalar_product_op< \
+      EIGEN_CAT(EIGEN_CAT(internal::scalar_,OPNAME),_op)< \
           typename internal::traits<LHS>::Scalar, \
           typename internal::traits<RHS>::Scalar \
       >, \
       const LHS, \
       const RHS \
     >
 
 #ifdef EIGEN_EXCEPTIONS
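The renamed macro builds the CwiseBinaryOp return type for any coefficient-wise OPNAME, not just products. From the user side this is invisible; a hedged sketch of an expression that expands through it, assuming array products still map to internal::scalar_product_op:

#include <Eigen/Dense>

int main()
{
  Eigen::ArrayXf a = Eigen::ArrayXf::LinSpaced(3, 1.f, 3.f);
  Eigen::ArrayXf b = Eigen::ArrayXf::Constant(3, 2.f);
  // a * b is a CwiseBinaryOp whose functor is internal::scalar_product_op<float,float>,
  // i.e. the type spelled by EIGEN_CWISE_BINARY_RETURN_TYPE(ArrayXf,ArrayXf,product).
  Eigen::ArrayXf c = a * b;
  return c(2) == 6.f ? 0 : 1;
}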
(-)a/Eigen/src/Core/util/Meta.h (-27 / +37 lines)
Lines 370-412 struct meta_least_common_multiple<A,B,K,
 };
 
 /** \internal determines whether the product of two numeric types is allowed and what the return type is */
 template<typename T, typename U> struct scalar_product_traits
 {
   enum { Defined = 0 };
 };
 
-template<typename T> struct scalar_product_traits<T,T>
-{
-  enum {
-    // Cost = NumTraits<T>::MulCost,
-    Defined = 1
-  };
-  typedef T ReturnType;
-};
-
-template<typename T> struct scalar_product_traits<T,std::complex<T> >
-{
-  enum {
-    // Cost = 2*NumTraits<T>::MulCost,
-    Defined = 1
-  };
-  typedef std::complex<T> ReturnType;
-};
-
-template<typename T> struct scalar_product_traits<std::complex<T>, T>
-{
-  enum {
-    // Cost = 2*NumTraits<T>::MulCost,
-    Defined = 1
-  };
-  typedef std::complex<T> ReturnType;
-};
-
 // FIXME quick workaround around current limitation of result_of
 // template<typename Scalar, typename ArgType0, typename ArgType1>
 // struct result_of<scalar_product_op<Scalar>(ArgType0,ArgType1)> {
 // typedef typename scalar_product_traits<typename remove_all<ArgType0>::type, typename remove_all<ArgType1>::type>::ReturnType type;
 // };
 
 } // end namespace internal
 
Lines 429-439 using std::numeric_limits;
 template<typename T>
 T div_ceil(const T &a, const T &b)
 {
   return (a+b-1) / b;
 }
 
 } // end namespace numext
 
+
+/** \class ScalarBinaryOpTraits
+  * \ingroup Core_Module
+  *
+  * \brief Determines whether the given binary operation of two numeric types is allowed and what the scalar return type is.
+  *
+  * \sa CwiseBinaryOp
+  */
+template<typename ScalarA, typename ScalarB, typename BinaryOp>
+struct ScalarBinaryOpTraits
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+  // for backward compatibility, use the hints given by the (deprecated) internal::scalar_product_traits class.
+  : internal::scalar_product_traits<ScalarA,ScalarB>
+#endif // EIGEN_PARSED_BY_DOXYGEN
+{};
+
+template<typename T, typename BinaryOp>
+struct ScalarBinaryOpTraits<T,T,BinaryOp>
+{
+  enum { Defined = 1 };
+  typedef T ReturnType;
+};
+
+template<typename T, typename BinaryOp>
+struct ScalarBinaryOpTraits<T,std::complex<T>,BinaryOp>
+{
+  enum { Defined = 1 };
+  typedef std::complex<T> ReturnType;
+};
+
+template<typename T, typename BinaryOp>
+struct ScalarBinaryOpTraits<std::complex<T>, T,BinaryOp>
+{
+  enum { Defined = 1 };
+  typedef std::complex<T> ReturnType;
+};
+
 } // end namespace Eigen
 
 #endif // EIGEN_META_H
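The new ScalarBinaryOpTraits mirrors the three scalar_product_traits specializations it replaces, and the generic case still falls back to the deprecated traits for backward compatibility, as the doc comment above says. A compile-time sketch of both behaviours against the version of the traits added by this patch (MyScalar and its specialization are hypothetical, for illustration only; C++11 static_assert assumed):

#include <Eigen/Core>
#include <complex>
#include <type_traits>

// Hypothetical user scalar type, only to illustrate the backward-compatibility path.
struct MyScalar { double v; };

namespace Eigen { namespace internal {
// A pre-existing specialization of the (deprecated) scalar_product_traits...
template<> struct scalar_product_traits<MyScalar, double> {
  enum { Defined = 1 };
  typedef double ReturnType;
};
} }

int main()
{
  using std::complex;
  // The specializations above: T,T -> T and T,complex<T> -> complex<T>.
  static_assert(std::is_same<Eigen::ScalarBinaryOpTraits<double,double>::ReturnType,
                             double>::value, "homogeneous operands");
  static_assert(std::is_same<Eigen::ScalarBinaryOpTraits<float,complex<float> >::ReturnType,
                             complex<float> >::value, "mixed real/complex");
  // ...is still honored, because the generic ScalarBinaryOpTraits inherits from it.
  static_assert(std::is_same<Eigen::ScalarBinaryOpTraits<MyScalar,double>::ReturnType,
                             double>::value, "deprecated traits still consulted");
  return 0;
}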
(-)a/Eigen/src/Core/util/XprHelper.h (-7 / +3 lines)
Lines 644-667 std::string demangle_flags(int f)
   if(f&NoPreferredStorageOrderBit)  res += " | NoPreferredStorageOrderBit";
   
   return res;
 }
 #endif
 
 } // end namespace internal
 
-// we require Lhs and Rhs to have the same scalar type. Currently there is no example of a binary functor
+// We require Lhs and Rhs to have "compatible" scalar types.
-// that would take two operands of different types. If there were such an example, then this check should be
-// moved to the BinaryOp functors, on a per-case basis. This would however require a change in the BinaryOp functors, as
-// currently they take only one typename Scalar template parameter.
 // It is tempting to always allow mixing different types but remember that this is often impossible in the vectorized paths.
 // So allowing mixing different types gives very unexpected errors when enabling vectorization, when the user tries to
 // add together a float matrix and a double matrix.
+// Treat "void" as a special case. Needed for permutation products. TODO: this should be handled by ScalarBinaryOpTraits
 #define EIGEN_CHECK_BINARY_COMPATIBILIY(BINOP,LHS,RHS) \
-  EIGEN_STATIC_ASSERT((internal::functor_is_product_like<BINOP>::ret \
-                        ? int(internal::scalar_product_traits<LHS, RHS>::Defined) \
-                        : int(internal::is_same_or_void<LHS, RHS>::value)), \
+  EIGEN_STATIC_ASSERT(int(internal::is_same_or_void<LHS, RHS>::value) || int(ScalarBinaryOpTraits<LHS, RHS,BINOP>::Defined), \
     YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
     
 } // end namespace Eigen
 
 #endif // EIGEN_XPRHELPER_H
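The reworked EIGEN_CHECK_BINARY_COMPATIBILIY still rejects float/double mixing, since ScalarBinaryOpTraits<float,double> is not Defined by default; an explicit cast remains the intended fix. A minimal sketch:

#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXf F = Eigen::MatrixXf::Ones(2,2);
  Eigen::MatrixXd D = Eigen::MatrixXd::Ones(2,2);
  // F + D would trip the static assertion
  // YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_...
  // because ScalarBinaryOpTraits<float,double,BINOP> is not Defined by default.
  Eigen::MatrixXd S = F.cast<double>() + D;
  return S(0,0) == 2.0 ? 0 : 1;
}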
(-)a/Eigen/src/Geometry/AlignedBox.h (-3 / +3 lines)
Lines 107-142 EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTO
   inline VectorType& (min)() { return m_min; }
   /** \returns the maximal corner */
   inline const VectorType& (max)() const { return m_max; }
   /** \returns a non const reference to the maximal corner */
   inline VectorType& (max)() { return m_max; }
 
   /** \returns the center of the box */
   inline const CwiseUnaryOp<internal::scalar_quotient1_op<Scalar>,
-                            const CwiseBinaryOp<internal::scalar_sum_op<Scalar>, const VectorType, const VectorType> >
+                            const CwiseBinaryOp<internal::scalar_sum_op<Scalar,Scalar>, const VectorType, const VectorType> >
   center() const
   { return (m_min+m_max)/2; }
 
   /** \returns the lengths of the sides of the bounding box.
     * Note that this function does not get the same
     * result for integral or floating scalar types: see
     */
-  inline const CwiseBinaryOp< internal::scalar_difference_op<Scalar>, const VectorType, const VectorType> sizes() const
+  inline const CwiseBinaryOp< internal::scalar_difference_op<Scalar,Scalar>, const VectorType, const VectorType> sizes() const
   { return m_max - m_min; }
 
   /** \returns the volume of the bounding box */
   inline Scalar volume() const
   { return sizes().prod(); }
 
   /** \returns an expression for the bounding box diagonal vector
     * if the length of the diagonal is needed: diagonal().norm()
     * will provide it.
     */
-  inline CwiseBinaryOp< internal::scalar_difference_op<Scalar>, const VectorType, const VectorType> diagonal() const
+  inline CwiseBinaryOp< internal::scalar_difference_op<Scalar,Scalar>, const VectorType, const VectorType> diagonal() const
   { return sizes(); }
 
   /** \returns the vertex of the bounding box at the corner defined by
     * the corner-id corner. It works only for a 1D, 2D or 3D bounding box.
     * For 1D bounding boxes corners are named by 2 enum constants:
     * BottomLeft and BottomRight.
     * For 2D bounding boxes, corners are named by 4 enum constants:
     * BottomLeft, BottomRight, TopLeft, TopRight.
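Only the functor template arguments change here; center(), sizes() and diagonal() behave exactly as before. A quick usage sketch:

#include <Eigen/Geometry>

int main()
{
  Eigen::AlignedBox3f box(Eigen::Vector3f(0,0,0), Eigen::Vector3f(2,4,6));
  Eigen::Vector3f c = box.center();   // (m_min+m_max)/2 == (1,2,3)
  Eigen::Vector3f s = box.sizes();    // m_max-m_min     == (2,4,6)
  return (c.y() == 2.f && s.z() == 6.f) ? 0 : 1;
}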
(-)a/Eigen/src/Geometry/Homogeneous.h (-6 / +6 lines)
Lines 324-355 struct unary_evaluator<Homogeneous<ArgTy
   }
 
 protected:
   PlainObject m_temp;
 };
 
 // dense = homogeneous
 template< typename DstXprType, typename ArgType, typename Scalar>
-struct Assignment<DstXprType, Homogeneous<ArgType,Vertical>, internal::assign_op<Scalar>, Dense2Dense, Scalar>
+struct Assignment<DstXprType, Homogeneous<ArgType,Vertical>, internal::assign_op<Scalar,typename ArgType::Scalar>, Dense2Dense, Scalar>
 {
   typedef Homogeneous<ArgType,Vertical> SrcXprType;
-  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &)
+  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename ArgType::Scalar> &)
   {
     dst.template topRows<ArgType::RowsAtCompileTime>(src.nestedExpression().rows()) = src.nestedExpression();
     dst.row(dst.rows()-1).setOnes();
   }
 };
 
 // dense = homogeneous
 template< typename DstXprType, typename ArgType, typename Scalar>
-struct Assignment<DstXprType, Homogeneous<ArgType,Horizontal>, internal::assign_op<Scalar>, Dense2Dense, Scalar>
+struct Assignment<DstXprType, Homogeneous<ArgType,Horizontal>, internal::assign_op<Scalar,typename ArgType::Scalar>, Dense2Dense, Scalar>
 {
   typedef Homogeneous<ArgType,Horizontal> SrcXprType;
-  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &)
+  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename ArgType::Scalar> &)
   {
     dst.template leftCols<ArgType::ColsAtCompileTime>(src.nestedExpression().cols()) = src.nestedExpression();
     dst.col(dst.cols()-1).setOnes();
   }
 };
 
 template<typename LhsArg, typename Rhs, int ProductTag>
 struct generic_product_impl<Homogeneous<LhsArg,Horizontal>, Rhs, HomogeneousShape, DenseShape, ProductTag>
Lines 368-384 struct homogeneous_right_product_refacto
     Dim  = Lhs::ColsAtCompileTime,
     Rows = Lhs::RowsAtCompileTime
   };
   typedef typename Rhs::template ConstNRowsBlockXpr<Dim>::Type          LinearBlockConst;
   typedef typename remove_const<LinearBlockConst>::type                 LinearBlock;
   typedef typename Rhs::ConstRowXpr                                     ConstantColumn;
   typedef Replicate<const ConstantColumn,Rows,1>                        ConstantBlock;
   typedef Product<Lhs,LinearBlock,LazyProduct>                          LinearProduct;
-  typedef CwiseBinaryOp<internal::scalar_sum_op<typename Lhs::Scalar>, const LinearProduct, const ConstantBlock> Xpr;
+  typedef CwiseBinaryOp<internal::scalar_sum_op<typename Lhs::Scalar,typename Rhs::Scalar>, const LinearProduct, const ConstantBlock> Xpr;
 };
 
 template<typename Lhs, typename Rhs, int ProductTag>
 struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, HomogeneousShape, DenseShape>
  : public evaluator<typename homogeneous_right_product_refactoring_helper<typename Lhs::NestedExpression,Rhs>::Xpr>
 {
   typedef Product<Lhs, Rhs, LazyProduct> XprType;
   typedef homogeneous_right_product_refactoring_helper<typename Lhs::NestedExpression,Rhs> helper;
Lines 409-425 struct homogeneous_left_product_refactor
     Dim = Rhs::RowsAtCompileTime,
     Cols = Rhs::ColsAtCompileTime
   };
   typedef typename Lhs::template ConstNColsBlockXpr<Dim>::Type          LinearBlockConst;
   typedef typename remove_const<LinearBlockConst>::type                 LinearBlock;
   typedef typename Lhs::ConstColXpr                                     ConstantColumn;
   typedef Replicate<const ConstantColumn,1,Cols>                        ConstantBlock;
   typedef Product<LinearBlock,Rhs,LazyProduct>                          LinearProduct;
-  typedef CwiseBinaryOp<internal::scalar_sum_op<typename Lhs::Scalar>, const LinearProduct, const ConstantBlock> Xpr;
+  typedef CwiseBinaryOp<internal::scalar_sum_op<typename Lhs::Scalar,typename Rhs::Scalar>, const LinearProduct, const ConstantBlock> Xpr;
 };
 
 template<typename Lhs, typename Rhs, int ProductTag>
 struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape, HomogeneousShape>
  : public evaluator<typename homogeneous_left_product_refactoring_helper<Lhs,typename Rhs::NestedExpression>::Xpr>
 {
   typedef Product<Lhs, Rhs, LazyProduct> XprType;
   typedef homogeneous_left_product_refactoring_helper<Lhs,typename Rhs::NestedExpression> helper;
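The "dense = homogeneous" Assignment specializations above are what back MatrixBase::homogeneous(); a short round-trip sketch:

#include <Eigen/Geometry>

int main()
{
  Eigen::Vector3f v(1.f, 2.f, 3.f);
  // dense = homogeneous: copies v into the top rows and sets the last row to 1.
  Eigen::Vector4f h = v.homogeneous();      // (1,2,3,1)
  Eigen::Vector3f back = h.hnormalized();   // divide by the last coefficient
  return (h.w() == 1.f && back.isApprox(v)) ? 0 : 1;
}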
(-)a/Eigen/src/Householder/HouseholderSequence.h (-1 / +1 lines)
Lines 103-119 struct hseq_side_dependent_impl<VectorsT
   {
     Index start = k+1+h.m_shift;
     return Block<const VectorsType,1,Dynamic>(h.m_vectors, k, start, 1, h.rows()-start).transpose();
   }
 };
 
 template<typename OtherScalarType, typename MatrixType> struct matrix_type_times_scalar_type
 {
-  typedef typename scalar_product_traits<OtherScalarType, typename MatrixType::Scalar>::ReturnType
+  typedef typename ScalarBinaryOpTraits<OtherScalarType, typename MatrixType::Scalar>::ReturnType
     ResultScalar;
   typedef Matrix<ResultScalar, MatrixType::RowsAtCompileTime, MatrixType::ColsAtCompileTime,
                  0, MatrixType::MaxRowsAtCompileTime, MatrixType::MaxColsAtCompileTime> Type;
 };
 
 } // end namespace internal
 
 template<typename VectorsType, typename CoeffsType, int Side> class HouseholderSequence
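matrix_type_times_scalar_type now asks ScalarBinaryOpTraits for the scalar of "Householder sequence applied to a matrix". A usage sketch of that code path (the particular QR factorization is incidental):

#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(4,3);
  Eigen::HouseholderQR<Eigen::MatrixXd> qr(A);
  // Applying the Householder sequence Q^T to A goes through
  // matrix_type_times_scalar_type to pick the result scalar/matrix type.
  Eigen::MatrixXd QtA = qr.householderQ().transpose() * A;
  return QtA.rows() == 4 ? 0 : 1;
}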
(-)a/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h (-2 / +2 lines)
Lines 86-105 struct evaluator<SolveWithGuess<Decompos
   
 protected:  
   PlainObject m_result;
 };
 
 // Specialization for "dst = dec.solveWithGuess(rhs)"
 // NOTE we need to specialize it for Dense2Dense to avoid ambiguous specialization error and a Sparse2Sparse specialization must exist somewhere
 template<typename DstXprType, typename DecType, typename RhsType, typename GuessType, typename Scalar>
-struct Assignment<DstXprType, SolveWithGuess<DecType,RhsType,GuessType>, internal::assign_op<Scalar>, Dense2Dense, Scalar>
+struct Assignment<DstXprType, SolveWithGuess<DecType,RhsType,GuessType>, internal::assign_op<Scalar,Scalar>, Dense2Dense, Scalar>
 {
   typedef SolveWithGuess<DecType,RhsType,GuessType> SrcXprType;
-  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &)
+  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
   {
     // FIXME shall we resize dst here?
     dst = src.guess();
     src.dec()._solve_with_guess_impl(src.rhs(), dst/*, src.guess()*/);
   }
 };
 
 } // end namepsace internal
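The specialization above is exercised by "dst = dec.solveWithGuess(rhs, guess)". A minimal sketch, assuming the iterative-solver module is available as usual:

#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>

int main()
{
  typedef Eigen::SparseMatrix<double> SpMat;
  SpMat A(2,2);
  A.insert(0,0) = 4.0;                 // simple SPD (diagonal) system
  A.insert(1,1) = 3.0;
  A.makeCompressed();
  Eigen::VectorXd b(2); b << 8.0, 9.0;
  Eigen::ConjugateGradient<SpMat> cg(A);
  Eigen::VectorXd guess = Eigen::VectorXd::Zero(2);
  // "dst = dec.solveWithGuess(rhs, guess)" goes through the Assignment shown above.
  Eigen::VectorXd x = cg.solveWithGuess(b, guess);
  return (A*x).isApprox(b) ? 0 : 1;
}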
(-)a/Eigen/src/LU/FullPivLU.h (-3 / +3 lines)
Lines 834-855 void FullPivLU<_MatrixType>::_solve_impl
 }
 
 #endif
 
 namespace internal {
 
 
 /***** Implementation of inverse() *****************************************************/
-template<typename DstXprType, typename MatrixType, typename Scalar>
+template<typename DstXprType, typename MatrixType>
-struct Assignment<DstXprType, Inverse<FullPivLU<MatrixType> >, internal::assign_op<Scalar>, Dense2Dense, Scalar>
+struct Assignment<DstXprType, Inverse<FullPivLU<MatrixType> >, internal::assign_op<typename DstXprType::Scalar,typename FullPivLU<MatrixType>::Scalar>, Dense2Dense>
 {
   typedef FullPivLU<MatrixType> LuType;
   typedef Inverse<LuType> SrcXprType;
-  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &)
+  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename MatrixType::Scalar> &)
   {
     dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols()));
   }
 };
 } // end namespace internal
 
 /******* MatrixBase methods *****************************************************************/
 
(-)a/Eigen/src/LU/InverseImpl.h (-3 / +3 lines)
Lines 281-301 struct compute_inverse_and_det_with_chec
 *** MatrixBase methods ***
 *************************/
 
 } // end namespace internal
 
 namespace internal {
 
 // Specialization for "dense = dense_xpr.inverse()"
-template<typename DstXprType, typename XprType, typename Scalar>
+template<typename DstXprType, typename XprType>
-struct Assignment<DstXprType, Inverse<XprType>, internal::assign_op<Scalar>, Dense2Dense, Scalar>
+struct Assignment<DstXprType, Inverse<XprType>, internal::assign_op<typename DstXprType::Scalar,typename XprType::Scalar>, Dense2Dense>
 {
   typedef Inverse<XprType> SrcXprType;
-  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &)
+  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename XprType::Scalar> &)
   {
     // FIXME shall we resize dst here?
     const int Size = EIGEN_PLAIN_ENUM_MIN(XprType::ColsAtCompileTime,DstXprType::ColsAtCompileTime);
     EIGEN_ONLY_USED_FOR_DEBUG(Size);
     eigen_assert(( (Size<=1) || (Size>4) || (extract_data(src.nestedExpression())!=extract_data(dst)))
               && "Aliasing problem detected in inverse(), you need to do inverse().eval() here.");
 
     typedef typename internal::nested_eval<XprType,XprType::ColsAtCompileTime>::type  ActualXprType;
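For orientation, the "dense = dense_xpr.inverse()" specialization above is the entry point behind the ordinary inverse() call; a small sketch of the user-level behaviour it implements:

#include <Eigen/Dense>

int main()
{
  Eigen::Matrix3d M;
  M << 2, 0, 0,
       0, 3, 0,
       0, 0, 4;
  // "dst = xpr.inverse()" is routed through the Assignment specialization above
  // (which also triggers the aliasing assert when dst and xpr share data).
  Eigen::Matrix3d Minv = M.inverse();
  return (M * Minv).isIdentity(1e-12) ? 0 : 1;
}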
(-)a/Eigen/src/LU/PartialPivLU.h (-2 / +2 lines)
Lines 521-541 MatrixType PartialPivLU<MatrixType>::rec
 }
 
 /***** Implementation details *****************************************************/
 
 namespace internal {
 
 /***** Implementation of inverse() *****************************************************/
 template<typename DstXprType, typename MatrixType, typename Scalar>
-struct Assignment<DstXprType, Inverse<PartialPivLU<MatrixType> >, internal::assign_op<Scalar>, Dense2Dense, Scalar>
+struct Assignment<DstXprType, Inverse<PartialPivLU<MatrixType> >, internal::assign_op<Scalar,Scalar>, Dense2Dense, Scalar>
 {
   typedef PartialPivLU<MatrixType> LuType;
   typedef Inverse<LuType> SrcXprType;
-  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &)
+  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
   {
     dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols()));
   }
 };
 } // end namespace internal
 
 /******** MatrixBase methods *******/
 
(-)a/Eigen/src/QR/ColPivHouseholderQR.h (-2 / +2 lines)
Lines 593-613 void ColPivHouseholderQR<_MatrixType>::_
   for(Index i = 0; i < nonzero_pivots; ++i) dst.row(m_colsPermutation.indices().coeff(i)) = c.row(i);
   for(Index i = nonzero_pivots; i < cols(); ++i) dst.row(m_colsPermutation.indices().coeff(i)).setZero();
 }
 #endif
 
 namespace internal {
 
 template<typename DstXprType, typename MatrixType, typename Scalar>
-struct Assignment<DstXprType, Inverse<ColPivHouseholderQR<MatrixType> >, internal::assign_op<Scalar>, Dense2Dense, Scalar>
+struct Assignment<DstXprType, Inverse<ColPivHouseholderQR<MatrixType> >, internal::assign_op<Scalar,Scalar>, Dense2Dense, Scalar>
 {
   typedef ColPivHouseholderQR<MatrixType> QrType;
   typedef Inverse<QrType> SrcXprType;
-  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &)
+  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
   {
     dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols()));
   }
 };
 
 } // end namespace internal
 
 /** \returns the matrix Q as a sequence of householder transformations.
(-)a/Eigen/src/QR/CompleteOrthogonalDecomposition.h (-2 / +2 lines)
Lines 505-525 void CompleteOrthogonalDecomposition<_Ma
   // Undo permutation to get x = P^{-1} * y.
   dst = colsPermutation() * dst;
 }
 #endif
 
 namespace internal {
 
 template<typename DstXprType, typename MatrixType, typename Scalar>
-struct Assignment<DstXprType, Inverse<CompleteOrthogonalDecomposition<MatrixType> >, internal::assign_op<Scalar>, Dense2Dense, Scalar>
+struct Assignment<DstXprType, Inverse<CompleteOrthogonalDecomposition<MatrixType> >, internal::assign_op<Scalar,Scalar>, Dense2Dense, Scalar>
 {
   typedef CompleteOrthogonalDecomposition<MatrixType> CodType;
   typedef Inverse<CodType> SrcXprType;
-  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &)
+  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
   {
     dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.rows()));
   }
 };
 
 } // end namespace internal
 
 /** \returns the matrix Q as a sequence of householder transformations */
(-)a/Eigen/src/QR/FullPivHouseholderQR.h (-2 / +2 lines)
Lines 555-575 void FullPivHouseholderQR<_MatrixType>::
   for(Index i = 0; i < l_rank; ++i) dst.row(m_cols_permutation.indices().coeff(i)) = c.row(i);
   for(Index i = l_rank; i < cols(); ++i) dst.row(m_cols_permutation.indices().coeff(i)).setZero();
 }
 #endif
 
 namespace internal {
   
 template<typename DstXprType, typename MatrixType, typename Scalar>
-struct Assignment<DstXprType, Inverse<FullPivHouseholderQR<MatrixType> >, internal::assign_op<Scalar>, Dense2Dense, Scalar>
+struct Assignment<DstXprType, Inverse<FullPivHouseholderQR<MatrixType> >, internal::assign_op<Scalar,Scalar>, Dense2Dense, Scalar>
 {
   typedef FullPivHouseholderQR<MatrixType> QrType;
   typedef Inverse<QrType> SrcXprType;
-  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &)
+  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
   {    
     dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols()));
   }
 };
 
 /** \ingroup QR_Module
   *
   * \brief Expression type for return value of FullPivHouseholderQR::matrixQ()
(-)a/Eigen/src/SparseCore/SparseAssign.h (-10 / +10 lines)
Lines 29-46 Derived& SparseMatrixBase<Derived>::oper
   return derived();
 }
 
 template<typename Derived>
 template<typename OtherDerived>
 inline Derived& SparseMatrixBase<Derived>::operator=(const SparseMatrixBase<OtherDerived>& other)
 {
   // by default sparse evaluation do not alias, so we can safely bypass the generic call_assignment routine
-  internal::Assignment<Derived,OtherDerived,internal::assign_op<Scalar> >
+  internal::Assignment<Derived,OtherDerived,internal::assign_op<Scalar,typename OtherDerived::Scalar> >
-          ::run(derived(), other.derived(), internal::assign_op<Scalar>());
+          ::run(derived(), other.derived(), internal::assign_op<Scalar,typename OtherDerived::Scalar>());
   return derived();
 }
 
 template<typename Derived>
 inline Derived& SparseMatrixBase<Derived>::operator=(const Derived& other)
 {
   internal::call_assignment_no_alias(derived(), other.derived());
   return derived();
Lines 122-205 void assign_sparse_to_sparse(DstXprType
     dst = temp.markAsRValue();
   }
 }
 
 // Generic Sparse to Sparse assignment
 template< typename DstXprType, typename SrcXprType, typename Functor, typename Scalar>
 struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Sparse, Scalar>
 {
-  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar> &/*func*/)
+  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
   {
     assign_sparse_to_sparse(dst.derived(), src.derived());
   }
 };
 
 // Generic Sparse to Dense assignment
 template< typename DstXprType, typename SrcXprType, typename Functor, typename Scalar>
 struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Dense, Scalar>
 {
   static void run(DstXprType &dst, const SrcXprType &src, const Functor &func)
   {
     eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());
 
-    if(internal::is_same<Functor,internal::assign_op<Scalar> >::value)
+    if(internal::is_same<Functor,internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> >::value)
       dst.setZero();
     
     internal::evaluator<SrcXprType> srcEval(src);
     internal::evaluator<DstXprType> dstEval(dst);
     const Index outerEvaluationSize = (internal::evaluator<SrcXprType>::Flags&RowMajorBit) ? src.rows() : src.cols();
     for (Index j=0; j<outerEvaluationSize; ++j)
       for (typename internal::evaluator<SrcXprType>::InnerIterator i(srcEval,j); i; ++i)
         func.assignCoeff(dstEval.coeffRef(i.row(),i.col()), i.value());
   }
 };
 
 // Specialization for "dst = dec.solve(rhs)"
 // NOTE we need to specialize it for Sparse2Sparse to avoid ambiguous specialization error
 template<typename DstXprType, typename DecType, typename RhsType, typename Scalar>
-struct Assignment<DstXprType, Solve<DecType,RhsType>, internal::assign_op<Scalar>, Sparse2Sparse, Scalar>
+struct Assignment<DstXprType, Solve<DecType,RhsType>, internal::assign_op<Scalar,Scalar>, Sparse2Sparse, Scalar>
 {
   typedef Solve<DecType,RhsType> SrcXprType;
-  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &)
+  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
   {
     src.dec()._solve_impl(src.rhs(), dst);
   }
 };
 
 struct Diagonal2Sparse {};
 
 template<> struct AssignmentKind<SparseShape,DiagonalShape> { typedef Diagonal2Sparse Kind; };
 
 template< typename DstXprType, typename SrcXprType, typename Functor, typename Scalar>
 struct Assignment<DstXprType, SrcXprType, Functor, Diagonal2Sparse, Scalar>
 {
   typedef typename DstXprType::StorageIndex StorageIndex;
   typedef Array<StorageIndex,Dynamic,1> ArrayXI;
   typedef Array<Scalar,Dynamic,1> ArrayXS;
   template<int Options>
-  static void run(SparseMatrix<Scalar,Options,StorageIndex> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar> &/*func*/)
+  static void run(SparseMatrix<Scalar,Options,StorageIndex> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
   {
     Index size = src.diagonal().size();
     dst.makeCompressed();
     dst.resizeNonZeros(size);
     Map<ArrayXI>(dst.innerIndexPtr(), size).setLinSpaced(0,StorageIndex(size)-1);
     Map<ArrayXI>(dst.outerIndexPtr(), size+1).setLinSpaced(0,StorageIndex(size));
     Map<ArrayXS>(dst.valuePtr(), size) = src.diagonal();
   }
   
   template<typename DstDerived>
-  static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar> &/*func*/)
+  static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
   {
     dst.diagonal() = src.diagonal();
   }
   
-  static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar> &/*func*/)
+  static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
   { dst.diagonal() += src.diagonal(); }
   
-  static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar> &/*func*/)
+  static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
   { dst.diagonal() -= src.diagonal(); }
 };
 } // end namespace internal
 
 } // end namespace Eigen
 
 #endif // EIGEN_SPARSEASSIGN_H
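The generic Sparse2Dense path above is what runs for a plain "dense = sparse" assignment: with a plain assign_op it zeroes the destination, then writes only the stored coefficients. A minimal sketch:

#include <Eigen/Sparse>
#include <Eigen/Dense>

int main()
{
  Eigen::SparseMatrix<double> S(2,2);
  S.insert(0,1) = 3.0;
  // Sparse -> dense assignment via the Sparse2Dense Assignment specialization.
  Eigen::MatrixXd D(2,2);
  D.setConstant(7.0);
  D = S;   // D is first zeroed, then the stored (0,1) entry is copied
  return (D(0,1) == 3.0 && D(1,1) == 0.0) ? 0 : 1;
}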
(-)a/Eigen/src/SparseCore/SparseCwiseBinaryOp.h (-10 / +10 lines)
Lines 574-632 SparseMatrixBase<Derived>::operator+=(co
 {
   return derived() = derived() + other.derived();
 }
 
 template<typename Derived>
 template<typename OtherDerived>
 Derived& SparseMatrixBase<Derived>::operator+=(const DiagonalBase<OtherDerived>& other)
 {
-  call_assignment_no_alias(derived(), other.derived(), internal::add_assign_op<Scalar>());
+  call_assignment_no_alias(derived(), other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>());
   return derived();
 }
 
 template<typename Derived>
 template<typename OtherDerived>
 Derived& SparseMatrixBase<Derived>::operator-=(const DiagonalBase<OtherDerived>& other)
 {
-  call_assignment_no_alias(derived(), other.derived(), internal::sub_assign_op<Scalar>());
+  call_assignment_no_alias(derived(), other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>());
   return derived();
 }
     
 template<typename Derived>
 template<typename OtherDerived>
 EIGEN_STRONG_INLINE const typename SparseMatrixBase<Derived>::template CwiseProductDenseReturnType<OtherDerived>::Type
 SparseMatrixBase<Derived>::cwiseProduct(const MatrixBase<OtherDerived> &other) const
 {
   return typename CwiseProductDenseReturnType<OtherDerived>::Type(derived(), other.derived());
 }
 
 template<typename DenseDerived, typename SparseDerived>
-EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_sum_op<typename DenseDerived::Scalar>, const DenseDerived, const SparseDerived>
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_sum_op<typename DenseDerived::Scalar,typename SparseDerived::Scalar>, const DenseDerived, const SparseDerived>
 operator+(const MatrixBase<DenseDerived> &a, const SparseMatrixBase<SparseDerived> &b)
 {
-  return CwiseBinaryOp<internal::scalar_sum_op<typename DenseDerived::Scalar>, const DenseDerived, const SparseDerived>(a.derived(), b.derived());
+  return CwiseBinaryOp<internal::scalar_sum_op<typename DenseDerived::Scalar,typename SparseDerived::Scalar>, const DenseDerived, const SparseDerived>(a.derived(), b.derived());
 }
 
 template<typename SparseDerived, typename DenseDerived>
-EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_sum_op<typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_sum_op<typename SparseDerived::Scalar,typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>
 operator+(const SparseMatrixBase<SparseDerived> &a, const MatrixBase<DenseDerived> &b)
 {
-  return CwiseBinaryOp<internal::scalar_sum_op<typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>(a.derived(), b.derived());
+  return CwiseBinaryOp<internal::scalar_sum_op<typename SparseDerived::Scalar,typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>(a.derived(), b.derived());
 }
 
 template<typename DenseDerived, typename SparseDerived>
-EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_difference_op<typename DenseDerived::Scalar>, const DenseDerived, const SparseDerived>
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_difference_op<typename DenseDerived::Scalar,typename SparseDerived::Scalar>, const DenseDerived, const SparseDerived>
 operator-(const MatrixBase<DenseDerived> &a, const SparseMatrixBase<SparseDerived> &b)
 {
-  return CwiseBinaryOp<internal::scalar_difference_op<typename DenseDerived::Scalar>, const DenseDerived, const SparseDerived>(a.derived(), b.derived());
+  return CwiseBinaryOp<internal::scalar_difference_op<typename DenseDerived::Scalar,typename SparseDerived::Scalar>, const DenseDerived, const SparseDerived>(a.derived(), b.derived());
 }
 
 template<typename SparseDerived, typename DenseDerived>
-EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_difference_op<typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_difference_op<typename SparseDerived::Scalar,typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>
 operator-(const SparseMatrixBase<SparseDerived> &a, const MatrixBase<DenseDerived> &b)
 {
-  return CwiseBinaryOp<internal::scalar_difference_op<typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>(a.derived(), b.derived());
+  return CwiseBinaryOp<internal::scalar_difference_op<typename SparseDerived::Scalar,typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>(a.derived(), b.derived());
 }
 
 } // end namespace Eigen
 
 #endif // EIGEN_SPARSE_CWISE_BINARY_OP_H
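The mixed dense/sparse operator+ and operator- overloads above now name both operand scalar types in the functor; their behaviour is unchanged. A short sketch of the two orderings:

#include <Eigen/Sparse>
#include <Eigen/Dense>

int main()
{
  Eigen::SparseMatrix<double> S(2,2);
  S.insert(0,0) = 1.0;
  S.insert(1,1) = 2.0;
  Eigen::MatrixXd D = Eigen::MatrixXd::Ones(2,2);
  // dense + sparse and sparse + dense both build a CwiseBinaryOp with
  // scalar_sum_op<double,double> and evaluate to a dense result.
  Eigen::MatrixXd R1 = D + S;
  Eigen::MatrixXd R2 = S + D;
  return (R1.isApprox(R2) && R1(0,0) == 2.0) ? 0 : 1;
}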
(-)a/Eigen/src/SparseCore/SparseDenseProduct.h (-2 / +2 lines)
Lines 69-85 struct sparse_time_dense_product_impl<Sp Link Here
69
    res.coeffRef(i,col) += alpha * tmp;
69
    res.coeffRef(i,col) += alpha * tmp;
70
  }
70
  }
71
  
71
  
72
};
72
};
73
73
74
// FIXME: what is the purpose of the following specialization? Is it for the BlockedSparse format?
74
// FIXME: what is the purpose of the following specialization? Is it for the BlockedSparse format?
75
// -> let's disable it for now as it is conflicting with generic scalar*matrix and matrix*scalar operators
75
// -> let's disable it for now as it is conflicting with generic scalar*matrix and matrix*scalar operators
76
// template<typename T1, typename T2/*, int _Options, typename _StrideType*/>
76
// template<typename T1, typename T2/*, int _Options, typename _StrideType*/>
77
// struct scalar_product_traits<T1, Ref<T2/*, _Options, _StrideType*/> >
77
// struct ScalarBinaryOpTraits<T1, Ref<T2/*, _Options, _StrideType*/> >
78
// {
78
// {
79
//   enum {
79
//   enum {
80
//     Defined = 1
80
//     Defined = 1
81
//   };
81
//   };
82
//   typedef typename CwiseUnaryOp<scalar_multiple2_op<T1, typename T2::Scalar>, T2>::PlainObject ReturnType;
82
//   typedef typename CwiseUnaryOp<scalar_multiple2_op<T1, typename T2::Scalar>, T2>::PlainObject ReturnType;
83
// };
83
// };
84
84
85
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
85
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
Lines 92-108 struct sparse_time_dense_product_impl<Sp
92
  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
92
  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
93
  {
93
  {
94
    evaluator<Lhs> lhsEval(lhs);
94
    evaluator<Lhs> lhsEval(lhs);
95
    for(Index c=0; c<rhs.cols(); ++c)
95
    for(Index c=0; c<rhs.cols(); ++c)
96
    {
96
    {
97
      for(Index j=0; j<lhs.outerSize(); ++j)
97
      for(Index j=0; j<lhs.outerSize(); ++j)
98
      {
98
      {
99
//        typename Res::Scalar rhs_j = alpha * rhs.coeff(j,c);
99
//        typename Res::Scalar rhs_j = alpha * rhs.coeff(j,c);
100
        typename internal::scalar_product_traits<AlphaType, typename Rhs::Scalar>::ReturnType rhs_j(alpha * rhs.coeff(j,c));
100
        typename ScalarBinaryOpTraits<AlphaType, typename Rhs::Scalar>::ReturnType rhs_j(alpha * rhs.coeff(j,c));
101
        for(LhsInnerIterator it(lhsEval,j); it ;++it)
101
        for(LhsInnerIterator it(lhsEval,j); it ;++it)
102
          res.coeffRef(it.index(),c) += it.value() * rhs_j;
102
          res.coeffRef(it.index(),c) += it.value() * rhs_j;
103
      }
103
      }
104
    }
104
    }
105
  }
105
  }
106
};
106
};
107
107
108
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
108
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
(-)a/Eigen/src/SparseCore/SparseMatrix.h (-2 / +2 lines)
Lines 435-451 class SparseMatrix
435
    //---
435
    //---
436
436
437
    template<typename InputIterators>
437
    template<typename InputIterators>
438
    void setFromTriplets(const InputIterators& begin, const InputIterators& end);
438
    void setFromTriplets(const InputIterators& begin, const InputIterators& end);
439
439
440
    template<typename InputIterators,typename DupFunctor>
440
    template<typename InputIterators,typename DupFunctor>
441
    void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
441
    void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
442
442
443
    void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar>()); }
443
    void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar,Scalar>()); }
444
444
445
    template<typename DupFunctor>
445
    template<typename DupFunctor>
446
    void collapseDuplicates(DupFunctor dup_func = DupFunctor());
446
    void collapseDuplicates(DupFunctor dup_func = DupFunctor());
447
447
448
    //---
448
    //---
449
    
449
    
450
    /** \internal
450
    /** \internal
451
      * same as insert(Index,Index) except that the indices are given relative to the storage order */
451
      * same as insert(Index,Index) except that the indices are given relative to the storage order */
Lines 974-990 void set_from_triplets(const InputIterat
974
  * \warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define
974
  * \warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define
975
  * an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather
975
  * an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather
976
  * be explicitely stored into a std::vector for instance.
976
  * be explicitely stored into a std::vector for instance.
977
  */
977
  */
978
template<typename Scalar, int _Options, typename _Index>
978
template<typename Scalar, int _Options, typename _Index>
979
template<typename InputIterators>
979
template<typename InputIterators>
980
void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
980
void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
981
{
981
{
982
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_Index> >(begin, end, *this, internal::scalar_sum_op<Scalar>());
982
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_Index> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
983
}
983
}
984
984
985
/** The same as setFromTriplets but when duplicates are met the functor \a dup_func is applied:
985
/** The same as setFromTriplets but when duplicates are met the functor \a dup_func is applied:
986
  * \code
986
  * \code
987
  * value = dup_func(OldValue, NewValue)
987
  * value = dup_func(OldValue, NewValue)
988
  * \endcode 
988
  * \endcode 
989
  * Here is a C++11 example keeping the latest entry only:
989
  * Here is a C++11 example keeping the latest entry only:
990
  * \code
990
  * \code
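
Sketch of the two setFromTriplets() overloads declared above (illustration only, values made up). The first sums duplicate entries through scalar_sum_op<Scalar,Scalar>; the second resolves them with a user functor, here keeping the most recent value as in the C++11 example mentioned in the doc comment.

    #include <Eigen/Sparse>
    #include <vector>

    int main()
    {
      typedef Eigen::Triplet<double> T;
      std::vector<T> triplets;
      triplets.push_back(T(0, 0, 1.0));
      triplets.push_back(T(0, 0, 5.0));   // duplicate coordinate (0,0)
      triplets.push_back(T(1, 2, 3.0));

      Eigen::SparseMatrix<double> A(3, 3);

      // duplicates summed: A.coeff(0,0) == 6
      A.setFromTriplets(triplets.begin(), triplets.end());

      // duplicates resolved by dup_func, keeping the newest value: A.coeff(0,0) == 5
      A.setFromTriplets(triplets.begin(), triplets.end(),
                        [](const double&, const double& b) { return b; });
      return 0;
    }
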
(-)a/Eigen/src/SparseCore/SparseMatrixBase.h (-1 / +1 lines)
Lines 251-267 template<typename Derived> class SparseM
251
    Derived& operator+=(const DiagonalBase<OtherDerived>& other);
251
    Derived& operator+=(const DiagonalBase<OtherDerived>& other);
252
    template<typename OtherDerived>
252
    template<typename OtherDerived>
253
    Derived& operator-=(const DiagonalBase<OtherDerived>& other);
253
    Derived& operator-=(const DiagonalBase<OtherDerived>& other);
254
254
255
    Derived& operator*=(const Scalar& other);
255
    Derived& operator*=(const Scalar& other);
256
    Derived& operator/=(const Scalar& other);
256
    Derived& operator/=(const Scalar& other);
257
257
258
    template<typename OtherDerived> struct CwiseProductDenseReturnType {
258
    template<typename OtherDerived> struct CwiseProductDenseReturnType {
259
      typedef CwiseBinaryOp<internal::scalar_product_op<typename internal::scalar_product_traits<
259
      typedef CwiseBinaryOp<internal::scalar_product_op<typename ScalarBinaryOpTraits<
260
                                                          typename internal::traits<Derived>::Scalar,
260
                                                          typename internal::traits<Derived>::Scalar,
261
                                                          typename internal::traits<OtherDerived>::Scalar
261
                                                          typename internal::traits<OtherDerived>::Scalar
262
                                                        >::ReturnType>,
262
                                                        >::ReturnType>,
263
                            const Derived,
263
                            const Derived,
264
                            const OtherDerived
264
                            const OtherDerived
265
                          > Type;
265
                          > Type;
266
    };
266
    };
267
267
(-)a/Eigen/src/SparseCore/SparseProduct.h (-6 / +6 lines)
Lines 94-135 struct generic_product_impl<Lhs, Rhs, Sp
94
// sparse-triangular * sparse
94
// sparse-triangular * sparse
95
template<typename Lhs, typename Rhs, int ProductType>
95
template<typename Lhs, typename Rhs, int ProductType>
96
struct generic_product_impl<Lhs, Rhs, SparseTriangularShape, SparseShape, ProductType>
96
struct generic_product_impl<Lhs, Rhs, SparseTriangularShape, SparseShape, ProductType>
97
 : public generic_product_impl<Lhs, Rhs, SparseShape, SparseShape, ProductType>
97
 : public generic_product_impl<Lhs, Rhs, SparseShape, SparseShape, ProductType>
98
{};
98
{};
99
99
100
// dense = sparse-product (can be sparse*sparse, sparse*perm, etc.)
100
// dense = sparse-product (can be sparse*sparse, sparse*perm, etc.)
101
template< typename DstXprType, typename Lhs, typename Rhs>
101
template< typename DstXprType, typename Lhs, typename Rhs>
102
struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::assign_op<typename DstXprType::Scalar>, Sparse2Dense>
102
struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::assign_op<typename DstXprType::Scalar,typename Product<Lhs,Rhs,AliasFreeProduct>::Scalar>, Sparse2Dense>
103
{
103
{
104
  typedef Product<Lhs,Rhs,AliasFreeProduct> SrcXprType;
104
  typedef Product<Lhs,Rhs,AliasFreeProduct> SrcXprType;
105
  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar> &)
105
  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &)
106
  {
106
  {
107
    generic_product_impl<Lhs, Rhs>::evalTo(dst,src.lhs(),src.rhs());
107
    generic_product_impl<Lhs, Rhs>::evalTo(dst,src.lhs(),src.rhs());
108
  }
108
  }
109
};
109
};
110
110
111
// dense += sparse-product (can be sparse*sparse, sparse*perm, etc.)
111
// dense += sparse-product (can be sparse*sparse, sparse*perm, etc.)
112
template< typename DstXprType, typename Lhs, typename Rhs>
112
template< typename DstXprType, typename Lhs, typename Rhs>
113
struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::add_assign_op<typename DstXprType::Scalar>, Sparse2Dense>
113
struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::add_assign_op<typename DstXprType::Scalar,typename Product<Lhs,Rhs,AliasFreeProduct>::Scalar>, Sparse2Dense>
114
{
114
{
115
  typedef Product<Lhs,Rhs,AliasFreeProduct> SrcXprType;
115
  typedef Product<Lhs,Rhs,AliasFreeProduct> SrcXprType;
116
  static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar> &)
116
  static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &)
117
  {
117
  {
118
    generic_product_impl<Lhs, Rhs>::addTo(dst,src.lhs(),src.rhs());
118
    generic_product_impl<Lhs, Rhs>::addTo(dst,src.lhs(),src.rhs());
119
  }
119
  }
120
};
120
};
121
121
122
// dense -= sparse-product (can be sparse*sparse, sparse*perm, etc.)
122
// dense -= sparse-product (can be sparse*sparse, sparse*perm, etc.)
123
template< typename DstXprType, typename Lhs, typename Rhs>
123
template< typename DstXprType, typename Lhs, typename Rhs>
124
struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::sub_assign_op<typename DstXprType::Scalar>, Sparse2Dense>
124
struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::sub_assign_op<typename DstXprType::Scalar,typename Product<Lhs,Rhs,AliasFreeProduct>::Scalar>, Sparse2Dense>
125
{
125
{
126
  typedef Product<Lhs,Rhs,AliasFreeProduct> SrcXprType;
126
  typedef Product<Lhs,Rhs,AliasFreeProduct> SrcXprType;
127
  static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar> &)
127
  static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &)
128
  {
128
  {
129
    generic_product_impl<Lhs, Rhs>::subTo(dst,src.lhs(),src.rhs());
129
    generic_product_impl<Lhs, Rhs>::subTo(dst,src.lhs(),src.rhs());
130
  }
130
  }
131
};
131
};
132
132
133
template<typename Lhs, typename Rhs, int Options>
133
template<typename Lhs, typename Rhs, int Options>
134
struct unary_evaluator<SparseView<Product<Lhs, Rhs, Options> >, IteratorBased>
134
struct unary_evaluator<SparseView<Product<Lhs, Rhs, Options> >, IteratorBased>
135
 : public evaluator<typename Product<Lhs, Rhs, DefaultProduct>::PlainObject>
135
 : public evaluator<typename Product<Lhs, Rhs, DefaultProduct>::PlainObject>
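
The three Assignment specializations above correspond to the following user-level statements (a minimal sketch, not part of the patch; values made up):

    #include <Eigen/Dense>
    #include <Eigen/Sparse>

    int main()
    {
      Eigen::SparseMatrix<double> A(4, 4), B(4, 4);
      A.insert(0, 1) = 2.0;
      B.insert(1, 3) = 3.0;

      Eigen::MatrixXd D = Eigen::MatrixXd::Zero(4, 4);

      D  = A * B;   // assign_op path:     dense  = sparse product
      D += A * B;   // add_assign_op path: dense += sparse product
      D -= A * B;   // sub_assign_op path: dense -= sparse product
      return 0;
    }
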
(-)a/Eigen/src/SparseCore/SparseSelfAdjointView.h (-5 / +5 lines)
Lines 218-240 struct SparseSelfAdjoint2Sparse {};
218
template<> struct AssignmentKind<SparseShape,SparseSelfAdjointShape> { typedef SparseSelfAdjoint2Sparse Kind; };
218
template<> struct AssignmentKind<SparseShape,SparseSelfAdjointShape> { typedef SparseSelfAdjoint2Sparse Kind; };
219
template<> struct AssignmentKind<SparseSelfAdjointShape,SparseShape> { typedef Sparse2Sparse Kind; };
219
template<> struct AssignmentKind<SparseSelfAdjointShape,SparseShape> { typedef Sparse2Sparse Kind; };
220
220
221
template< typename DstXprType, typename SrcXprType, typename Functor, typename Scalar>
221
template< typename DstXprType, typename SrcXprType, typename Functor, typename Scalar>
222
struct Assignment<DstXprType, SrcXprType, Functor, SparseSelfAdjoint2Sparse, Scalar>
222
struct Assignment<DstXprType, SrcXprType, Functor, SparseSelfAdjoint2Sparse, Scalar>
223
{
223
{
224
  typedef typename DstXprType::StorageIndex StorageIndex;
224
  typedef typename DstXprType::StorageIndex StorageIndex;
225
  template<typename DestScalar,int StorageOrder>
225
  template<typename DestScalar,int StorageOrder>
226
  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar> &/*func*/)
226
  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
227
  {
227
  {
228
    internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), dst);
228
    internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), dst);
229
  }
229
  }
230
  
230
  
231
  template<typename DestScalar>
231
  template<typename DestScalar>
232
  static void run(DynamicSparseMatrix<DestScalar,ColMajor,StorageIndex>& dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar> &/*func*/)
232
  static void run(DynamicSparseMatrix<DestScalar,ColMajor,StorageIndex>& dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
233
  {
233
  {
234
    // TODO directly evaluate into dst;
234
    // TODO directly evaluate into dst;
235
    SparseMatrix<DestScalar,ColMajor,StorageIndex> tmp(dst.rows(),dst.cols());
235
    SparseMatrix<DestScalar,ColMajor,StorageIndex> tmp(dst.rows(),dst.cols());
236
    internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), tmp);
236
    internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), tmp);
237
    dst = tmp;
237
    dst = tmp;
238
  }
238
  }
239
};
239
};
240
240
Lines 581-611 class SparseSymmetricPermutationProduct
581
    MatrixTypeNested m_matrix;
581
    MatrixTypeNested m_matrix;
582
    const Perm& m_perm;
582
    const Perm& m_perm;
583
583
584
};
584
};
585
585
586
namespace internal {
586
namespace internal {
587
  
587
  
588
template<typename DstXprType, typename MatrixType, int Mode, typename Scalar>
588
template<typename DstXprType, typename MatrixType, int Mode, typename Scalar>
589
struct Assignment<DstXprType, SparseSymmetricPermutationProduct<MatrixType,Mode>, internal::assign_op<Scalar>, Sparse2Sparse>
589
struct Assignment<DstXprType, SparseSymmetricPermutationProduct<MatrixType,Mode>, internal::assign_op<Scalar,typename MatrixType::Scalar>, Sparse2Sparse>
590
{
590
{
591
  typedef SparseSymmetricPermutationProduct<MatrixType,Mode> SrcXprType;
591
  typedef SparseSymmetricPermutationProduct<MatrixType,Mode> SrcXprType;
592
  typedef typename DstXprType::StorageIndex DstIndex;
592
  typedef typename DstXprType::StorageIndex DstIndex;
593
  template<int Options>
593
  template<int Options>
594
  static void run(SparseMatrix<Scalar,Options,DstIndex> &dst, const SrcXprType &src, const internal::assign_op<Scalar> &)
594
  static void run(SparseMatrix<Scalar,Options,DstIndex> &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &)
595
  {
595
  {
596
    // internal::permute_symm_to_fullsymm<Mode>(m_matrix,_dest,m_perm.indices().data());
596
    // internal::permute_symm_to_fullsymm<Mode>(m_matrix,_dest,m_perm.indices().data());
597
    SparseMatrix<Scalar,(Options&RowMajor)==RowMajor ? ColMajor : RowMajor, DstIndex> tmp;
597
    SparseMatrix<Scalar,(Options&RowMajor)==RowMajor ? ColMajor : RowMajor, DstIndex> tmp;
598
    internal::permute_symm_to_fullsymm<Mode>(src.matrix(),tmp,src.perm().indices().data());
598
    internal::permute_symm_to_fullsymm<Mode>(src.matrix(),tmp,src.perm().indices().data());
599
    dst = tmp;
599
    dst = tmp;
600
  }
600
  }
601
  
601
  
602
  template<typename DestType,unsigned int DestMode>
602
  template<typename DestType,unsigned int DestMode>
603
  static void run(SparseSelfAdjointView<DestType,DestMode>& dst, const SrcXprType &src, const internal::assign_op<Scalar> &)
603
  static void run(SparseSelfAdjointView<DestType,DestMode>& dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &)
604
  {
604
  {
605
    internal::permute_symm_to_symm<Mode,DestMode>(src.matrix(),dst.matrix(),src.perm().indices().data());
605
    internal::permute_symm_to_symm<Mode,DestMode>(src.matrix(),dst.matrix(),src.perm().indices().data());
606
  }
606
  }
607
};
607
};
608
608
609
} // end namespace internal
609
} // end namespace internal
610
610
611
} // end namespace Eigen
611
} // end namespace Eigen
(-)a/Eigen/src/SparseQR/SparseQR.h (-4 / +4 lines)
Lines 700-737 template<typename SparseQRType>
700
struct evaluator_traits<SparseQRMatrixQReturnType<SparseQRType> >
700
struct evaluator_traits<SparseQRMatrixQReturnType<SparseQRType> >
701
{
701
{
702
  typedef typename SparseQRType::MatrixType MatrixType;
702
  typedef typename SparseQRType::MatrixType MatrixType;
703
  typedef typename storage_kind_to_evaluator_kind<typename MatrixType::StorageKind>::Kind Kind;
703
  typedef typename storage_kind_to_evaluator_kind<typename MatrixType::StorageKind>::Kind Kind;
704
  typedef SparseShape Shape;
704
  typedef SparseShape Shape;
705
};
705
};
706
706
707
template< typename DstXprType, typename SparseQRType>
707
template< typename DstXprType, typename SparseQRType>
708
struct Assignment<DstXprType, SparseQRMatrixQReturnType<SparseQRType>, internal::assign_op<typename DstXprType::Scalar>, Sparse2Sparse>
708
struct Assignment<DstXprType, SparseQRMatrixQReturnType<SparseQRType>, internal::assign_op<typename DstXprType::Scalar,typename DstXprType::Scalar>, Sparse2Sparse>
709
{
709
{
710
  typedef SparseQRMatrixQReturnType<SparseQRType> SrcXprType;
710
  typedef SparseQRMatrixQReturnType<SparseQRType> SrcXprType;
711
  typedef typename DstXprType::Scalar Scalar;
711
  typedef typename DstXprType::Scalar Scalar;
712
  typedef typename DstXprType::StorageIndex StorageIndex;
712
  typedef typename DstXprType::StorageIndex StorageIndex;
713
  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &/*func*/)
713
  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &/*func*/)
714
  {
714
  {
715
    typename DstXprType::PlainObject idMat(src.m_qr.rows(), src.m_qr.rows());
715
    typename DstXprType::PlainObject idMat(src.m_qr.rows(), src.m_qr.rows());
716
    idMat.setIdentity();
716
    idMat.setIdentity();
717
    // Sort the sparse householder reflectors if needed
717
    // Sort the sparse householder reflectors if needed
718
    const_cast<SparseQRType *>(&src.m_qr)->_sort_matrix_Q();
718
    const_cast<SparseQRType *>(&src.m_qr)->_sort_matrix_Q();
719
    dst = SparseQR_QProduct<SparseQRType, DstXprType>(src.m_qr, idMat, false);
719
    dst = SparseQR_QProduct<SparseQRType, DstXprType>(src.m_qr, idMat, false);
720
  }
720
  }
721
};
721
};
722
722
723
template< typename DstXprType, typename SparseQRType>
723
template< typename DstXprType, typename SparseQRType>
724
struct Assignment<DstXprType, SparseQRMatrixQReturnType<SparseQRType>, internal::assign_op<typename DstXprType::Scalar>, Sparse2Dense>
724
struct Assignment<DstXprType, SparseQRMatrixQReturnType<SparseQRType>, internal::assign_op<typename DstXprType::Scalar,typename DstXprType::Scalar>, Sparse2Dense>
725
{
725
{
726
  typedef SparseQRMatrixQReturnType<SparseQRType> SrcXprType;
726
  typedef SparseQRMatrixQReturnType<SparseQRType> SrcXprType;
727
  typedef typename DstXprType::Scalar Scalar;
727
  typedef typename DstXprType::Scalar Scalar;
728
  typedef typename DstXprType::StorageIndex StorageIndex;
728
  typedef typename DstXprType::StorageIndex StorageIndex;
729
  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &/*func*/)
729
  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &/*func*/)
730
  {
730
  {
731
    dst = src.m_qr.matrixQ() * DstXprType::Identity(src.m_qr.rows(), src.m_qr.rows());
731
    dst = src.m_qr.matrixQ() * DstXprType::Identity(src.m_qr.rows(), src.m_qr.rows());
732
  }
732
  }
733
};
733
};
734
734
735
} // end namespace internal
735
} // end namespace internal
736
736
737
} // end namespace Eigen
737
} // end namespace Eigen
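
The two specializations above back the usual extraction of the Q factor into either a sparse or a dense matrix. A minimal sketch, assuming the patch is applied (the identity input is just a placeholder):

    #include <Eigen/Dense>
    #include <Eigen/Sparse>

    int main()
    {
      Eigen::SparseMatrix<double> A(5, 5);
      A.setIdentity();

      Eigen::SparseQR<Eigen::SparseMatrix<double>, Eigen::COLAMDOrdering<int> > qr(A);

      Eigen::SparseMatrix<double> Qsparse;
      Eigen::MatrixXd             Qdense;
      Qsparse = qr.matrixQ();   // Sparse2Sparse specialization above
      Qdense  = qr.matrixQ();   // Sparse2Dense specialization above
      return 0;
    }
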
(-)a/Eigen/src/plugins/ArrayCwiseBinaryOps.h (-2 / +2 lines)
Lines 1-18
1
/** \returns an expression of the coefficient wise product of \c *this and \a other
1
/** \returns an expression of the coefficient wise product of \c *this and \a other
2
  *
2
  *
3
  * \sa MatrixBase::cwiseProduct
3
  * \sa MatrixBase::cwiseProduct
4
  */
4
  */
5
template<typename OtherDerived>
5
template<typename OtherDerived>
6
EIGEN_DEVICE_FUNC
6
EIGEN_DEVICE_FUNC
7
EIGEN_STRONG_INLINE const EIGEN_CWISE_PRODUCT_RETURN_TYPE(Derived,OtherDerived)
7
EIGEN_STRONG_INLINE const EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,product)
8
operator*(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
8
operator*(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
9
{
9
{
10
  return EIGEN_CWISE_PRODUCT_RETURN_TYPE(Derived,OtherDerived)(derived(), other.derived());
10
  return EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,product)(derived(), other.derived());
11
}
11
}
12
12
13
/** \returns an expression of the coefficient wise quotient of \c *this and \a other
13
/** \returns an expression of the coefficient wise quotient of \c *this and \a other
14
  *
14
  *
15
  * \sa MatrixBase::cwiseQuotient
15
  * \sa MatrixBase::cwiseQuotient
16
  */
16
  */
17
template<typename OtherDerived>
17
template<typename OtherDerived>
18
EIGEN_DEVICE_FUNC
18
EIGEN_DEVICE_FUNC
(-)a/Eigen/src/plugins/CommonCwiseBinaryOps.h (-3 / +15 lines)
Lines 1-35
1
// This file is part of Eigen, a lightweight C++ template library
1
// This file is part of Eigen, a lightweight C++ template library
2
// for linear algebra.
2
// for linear algebra.
3
//
3
//
4
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
4
// Copyright (C) 2008-2016 Gael Guennebaud <gael.guennebaud@inria.fr>
5
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
5
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
6
//
6
//
7
// This Source Code Form is subject to the terms of the Mozilla
7
// This Source Code Form is subject to the terms of the Mozilla
8
// Public License v. 2.0. If a copy of the MPL was not distributed
8
// Public License v. 2.0. If a copy of the MPL was not distributed
9
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10
10
11
// This file is a base class plugin containing common coefficient wise functions.
11
// This file is a base class plugin containing common coefficient wise functions.
12
12
13
/** \returns an expression of the difference of \c *this and \a other
13
/** \returns an expression of the difference of \c *this and \a other
14
  *
14
  *
15
  * \note If you want to substract a given scalar from all coefficients, see Cwise::operator-().
15
  * \note If you want to substract a given scalar from all coefficients, see Cwise::operator-().
16
  *
16
  *
17
  * \sa class CwiseBinaryOp, operator-=()
17
  * \sa class CwiseBinaryOp, operator-=()
18
  */
18
  */
19
EIGEN_MAKE_CWISE_BINARY_OP(operator-,internal::scalar_difference_op)
19
template<typename OtherDerived>
20
EIGEN_DEVICE_FUNC
21
EIGEN_STRONG_INLINE const EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,difference)
22
operator-(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
23
{
24
  return EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,difference)(derived(), other.derived());
25
}
20
26
21
/** \returns an expression of the sum of \c *this and \a other
27
/** \returns an expression of the sum of \c *this and \a other
22
  *
28
  *
23
  * \note If you want to add a given scalar to all coefficients, see Cwise::operator+().
29
  * \note If you want to add a given scalar to all coefficients, see Cwise::operator+().
24
  *
30
  *
25
  * \sa class CwiseBinaryOp, operator+=()
31
  * \sa class CwiseBinaryOp, operator+=()
26
  */
32
  */
27
EIGEN_MAKE_CWISE_BINARY_OP(operator+,internal::scalar_sum_op)
33
template<typename OtherDerived>
34
EIGEN_DEVICE_FUNC
35
EIGEN_STRONG_INLINE const EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,sum)
36
operator+(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
37
{
38
  return EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,sum)(derived(), other.derived());
39
}
28
40
29
/** \returns an expression of a custom coefficient-wise operator \a func of *this and \a other
41
/** \returns an expression of a custom coefficient-wise operator \a func of *this and \a other
30
  *
42
  *
31
  * The template parameter \a CustomBinaryOp is the type of the functor
43
  * The template parameter \a CustomBinaryOp is the type of the functor
32
  * of the custom operator (see class CwiseBinaryOp for an example)
44
  * of the custom operator (see class CwiseBinaryOp for an example)
33
  *
45
  *
34
  * Here is an example illustrating the use of custom functors:
46
  * Here is an example illustrating the use of custom functors:
35
  * \include class_CwiseBinaryOp.cpp
47
  * \include class_CwiseBinaryOp.cpp
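
With operator+ and operator- spelled out above via the two-scalar EIGEN_CWISE_BINARY_RETURN_TYPE (and the matching functor changes elsewhere in the patch), operands with different but compatible scalar types can be combined directly; the new checks in test/mixingtypes.cpp below exercise exactly this. A small sketch, assuming the patch is applied:

    #include <Eigen/Dense>

    int main()
    {
      Eigen::ArrayXXd  r = Eigen::ArrayXXd::Random(2, 2);
      Eigen::ArrayXXcd c = Eigen::ArrayXXcd::Random(2, 2);

      // double and std::complex<double> operands mix; the result scalar is
      // std::complex<double>.
      Eigen::ArrayXXcd sum  = r + c;
      Eigen::ArrayXXcd diff = c - r;
      return 0;
    }
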
(-)a/Eigen/src/plugins/CommonCwiseUnaryOps.h (-3 / +3 lines)
Lines 68-84 inline const ScalarQuotient1ReturnType
68
operator/(const Scalar& scalar) const
68
operator/(const Scalar& scalar) const
69
{
69
{
70
  return ScalarQuotient1ReturnType(derived(), internal::scalar_quotient1_op<Scalar>(scalar));
70
  return ScalarQuotient1ReturnType(derived(), internal::scalar_quotient1_op<Scalar>(scalar));
71
}
71
}
72
72
73
/** Overloaded for efficiently multipling with compatible scalar types */
73
/** Overloaded for efficiently multipling with compatible scalar types */
74
template <typename T>
74
template <typename T>
75
EIGEN_DEVICE_FUNC inline
75
EIGEN_DEVICE_FUNC inline
76
typename internal::enable_if<internal::scalar_product_traits<T,Scalar>::Defined,
76
typename internal::enable_if<ScalarBinaryOpTraits<T,Scalar>::Defined,
77
                             const CwiseUnaryOp<internal::scalar_multiple2_op<Scalar,T>, const Derived> >::type
77
                             const CwiseUnaryOp<internal::scalar_multiple2_op<Scalar,T>, const Derived> >::type
78
operator*(const T& scalar) const
78
operator*(const T& scalar) const
79
{
79
{
80
#ifdef EIGEN_SPECIAL_SCALAR_MULTIPLE_PLUGIN
80
#ifdef EIGEN_SPECIAL_SCALAR_MULTIPLE_PLUGIN
81
  EIGEN_SPECIAL_SCALAR_MULTIPLE_PLUGIN
81
  EIGEN_SPECIAL_SCALAR_MULTIPLE_PLUGIN
82
#endif
82
#endif
83
  return CwiseUnaryOp<internal::scalar_multiple2_op<Scalar,T>, const Derived>(
83
  return CwiseUnaryOp<internal::scalar_multiple2_op<Scalar,T>, const Derived>(
84
            derived(), internal::scalar_multiple2_op<Scalar,T>(scalar) );
84
            derived(), internal::scalar_multiple2_op<Scalar,T>(scalar) );
Lines 86-115 operator*(const T& scalar) const
86
86
87
EIGEN_DEVICE_FUNC
87
EIGEN_DEVICE_FUNC
88
inline friend const ScalarMultipleReturnType
88
inline friend const ScalarMultipleReturnType
89
operator*(const Scalar& scalar, const StorageBaseType& matrix)
89
operator*(const Scalar& scalar, const StorageBaseType& matrix)
90
{ return matrix*scalar; }
90
{ return matrix*scalar; }
91
91
92
template <typename T>
92
template <typename T>
93
EIGEN_DEVICE_FUNC inline friend
93
EIGEN_DEVICE_FUNC inline friend
94
typename internal::enable_if<internal::scalar_product_traits<Scalar,T>::Defined,
94
typename internal::enable_if<ScalarBinaryOpTraits<Scalar,T>::Defined,
95
                             const CwiseUnaryOp<internal::scalar_multiple2_op<Scalar,T>, const Derived> >::type
95
                             const CwiseUnaryOp<internal::scalar_multiple2_op<Scalar,T>, const Derived> >::type
96
operator*(const T& scalar, const StorageBaseType& matrix)
96
operator*(const T& scalar, const StorageBaseType& matrix)
97
{
97
{
98
#ifdef EIGEN_SPECIAL_SCALAR_MULTIPLE_PLUGIN
98
#ifdef EIGEN_SPECIAL_SCALAR_MULTIPLE_PLUGIN
99
  EIGEN_SPECIAL_SCALAR_MULTIPLE_PLUGIN
99
  EIGEN_SPECIAL_SCALAR_MULTIPLE_PLUGIN
100
#endif
100
#endif
101
  return CwiseUnaryOp<internal::scalar_multiple2_op<Scalar,T>, const Derived>(
101
  return CwiseUnaryOp<internal::scalar_multiple2_op<Scalar,T>, const Derived>(
102
            matrix.derived(), internal::scalar_multiple2_op<Scalar,T>(scalar) );
102
            matrix.derived(), internal::scalar_multiple2_op<Scalar,T>(scalar) );
103
}
103
}
104
104
105
template <typename T>
105
template <typename T>
106
EIGEN_DEVICE_FUNC inline
106
EIGEN_DEVICE_FUNC inline
107
typename internal::enable_if<internal::scalar_product_traits<Scalar,T>::Defined,
107
typename internal::enable_if<ScalarBinaryOpTraits<Scalar,T>::Defined,
108
                             const CwiseUnaryOp<internal::scalar_quotient2_op<Scalar,T>, const Derived> >::type
108
                             const CwiseUnaryOp<internal::scalar_quotient2_op<Scalar,T>, const Derived> >::type
109
operator/(const T& scalar) const
109
operator/(const T& scalar) const
110
{
110
{
111
#ifdef EIGEN_SPECIAL_SCALAR_MULTIPLE_PLUGIN
111
#ifdef EIGEN_SPECIAL_SCALAR_MULTIPLE_PLUGIN
112
  EIGEN_SPECIAL_SCALAR_MULTIPLE_PLUGIN
112
  EIGEN_SPECIAL_SCALAR_MULTIPLE_PLUGIN
113
#endif
113
#endif
114
  return CwiseUnaryOp<internal::scalar_quotient2_op<Scalar,T>, const Derived>(
114
  return CwiseUnaryOp<internal::scalar_quotient2_op<Scalar,T>, const Derived>(
115
            derived(), internal::scalar_quotient2_op<Scalar,T>(scalar) );
115
            derived(), internal::scalar_quotient2_op<Scalar,T>(scalar) );
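
The overloads above now gate on ScalarBinaryOpTraits<...>::Defined instead of scalar_product_traits. At the call site this keeps real-scalar times complex-matrix expressions working without an explicit cast, mirroring the scalar-product checks in test/mixingtypes.cpp below (sketch only):

    #include <Eigen/Dense>

    int main()
    {
      Eigen::VectorXcd v = Eigen::VectorXcd::Random(5);
      double s = 2.0;

      Eigen::VectorXcd a = s * v;   // scalar * matrix overload above
      Eigen::VectorXcd b = v * s;   // matrix * scalar overload above
      Eigen::VectorXcd c = v / s;   // matrix / scalar overload above
      return 0;
    }
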
(-)a/Eigen/src/plugins/MatrixCwiseBinaryOps.h (-2 / +2 lines)
Lines 14-33
14
  *
14
  *
15
  * Example: \include MatrixBase_cwiseProduct.cpp
15
  * Example: \include MatrixBase_cwiseProduct.cpp
16
  * Output: \verbinclude MatrixBase_cwiseProduct.out
16
  * Output: \verbinclude MatrixBase_cwiseProduct.out
17
  *
17
  *
18
  * \sa class CwiseBinaryOp, cwiseAbs2
18
  * \sa class CwiseBinaryOp, cwiseAbs2
19
  */
19
  */
20
template<typename OtherDerived>
20
template<typename OtherDerived>
21
EIGEN_DEVICE_FUNC
21
EIGEN_DEVICE_FUNC
22
EIGEN_STRONG_INLINE const EIGEN_CWISE_PRODUCT_RETURN_TYPE(Derived,OtherDerived)
22
EIGEN_STRONG_INLINE const EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,product)
23
cwiseProduct(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
23
cwiseProduct(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
24
{
24
{
25
  return EIGEN_CWISE_PRODUCT_RETURN_TYPE(Derived,OtherDerived)(derived(), other.derived());
25
  return EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,product)(derived(), other.derived());
26
}
26
}
27
27
28
/** \returns an expression of the coefficient-wise == operator of *this and \a other
28
/** \returns an expression of the coefficient-wise == operator of *this and \a other
29
  *
29
  *
30
  * \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
30
  * \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
31
  * In order to check for equality between two vectors or matrices with floating-point coefficients, it is
31
  * In order to check for equality between two vectors or matrices with floating-point coefficients, it is
32
  * generally a far better idea to use a fuzzy comparison as provided by isApprox() and
32
  * generally a far better idea to use a fuzzy comparison as provided by isApprox() and
33
  * isMuchSmallerThan().
33
  * isMuchSmallerThan().
(-)a/blas/PackedTriangularMatrixVector.h (-2 / +2 lines)
Lines 13-29
13
namespace internal {
13
namespace internal {
14
14
15
template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs, int StorageOrder>
15
template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs, int StorageOrder>
16
struct packed_triangular_matrix_vector_product;
16
struct packed_triangular_matrix_vector_product;
17
17
18
template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs>
18
template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs>
19
struct packed_triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,ColMajor>
19
struct packed_triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,ColMajor>
20
{
20
{
21
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
21
  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
22
  enum {
22
  enum {
23
    IsLower     = (Mode & Lower)   ==Lower,
23
    IsLower     = (Mode & Lower)   ==Lower,
24
    HasUnitDiag = (Mode & UnitDiag)==UnitDiag,
24
    HasUnitDiag = (Mode & UnitDiag)==UnitDiag,
25
    HasZeroDiag = (Mode & ZeroDiag)==ZeroDiag
25
    HasZeroDiag = (Mode & ZeroDiag)==ZeroDiag
26
  };
26
  };
27
  static void run(Index size, const LhsScalar* lhs, const RhsScalar* rhs, ResScalar* res, ResScalar alpha)
27
  static void run(Index size, const LhsScalar* lhs, const RhsScalar* rhs, ResScalar* res, ResScalar alpha)
28
  {
28
  {
29
    internal::conj_if<ConjRhs> cj;
29
    internal::conj_if<ConjRhs> cj;
Lines 42-58 struct packed_triangular_matrix_vector_p
42
      lhs += IsLower ? size-i: i+1;
42
      lhs += IsLower ? size-i: i+1;
43
    }
43
    }
44
  };
44
  };
45
};
45
};
46
46
47
template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs>
47
template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs>
48
struct packed_triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,RowMajor>
48
struct packed_triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,RowMajor>
49
{
49
{
50
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
50
  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
51
  enum {
51
  enum {
52
    IsLower     = (Mode & Lower)   ==Lower,
52
    IsLower     = (Mode & Lower)   ==Lower,
53
    HasUnitDiag = (Mode & UnitDiag)==UnitDiag,
53
    HasUnitDiag = (Mode & UnitDiag)==UnitDiag,
54
    HasZeroDiag = (Mode & ZeroDiag)==ZeroDiag
54
    HasZeroDiag = (Mode & ZeroDiag)==ZeroDiag
55
  };
55
  };
56
  static void run(Index size, const LhsScalar* lhs, const RhsScalar* rhs, ResScalar* res, ResScalar alpha)
56
  static void run(Index size, const LhsScalar* lhs, const RhsScalar* rhs, ResScalar* res, ResScalar alpha)
57
  {
57
  {
58
    internal::conj_if<ConjRhs> cj;
58
    internal::conj_if<ConjRhs> cj;
(-)a/test/array.cpp (-1 / +1 lines)
Lines 67-83 template<typename ArrayType> void array(
67
  // reductions
67
  // reductions
68
  VERIFY_IS_APPROX(m1.abs().colwise().sum().sum(), m1.abs().sum());
68
  VERIFY_IS_APPROX(m1.abs().colwise().sum().sum(), m1.abs().sum());
69
  VERIFY_IS_APPROX(m1.abs().rowwise().sum().sum(), m1.abs().sum());
69
  VERIFY_IS_APPROX(m1.abs().rowwise().sum().sum(), m1.abs().sum());
70
  using std::abs;
70
  using std::abs;
71
  VERIFY_IS_MUCH_SMALLER_THAN(abs(m1.colwise().sum().sum() - m1.sum()), m1.abs().sum());
71
  VERIFY_IS_MUCH_SMALLER_THAN(abs(m1.colwise().sum().sum() - m1.sum()), m1.abs().sum());
72
  VERIFY_IS_MUCH_SMALLER_THAN(abs(m1.rowwise().sum().sum() - m1.sum()), m1.abs().sum());
72
  VERIFY_IS_MUCH_SMALLER_THAN(abs(m1.rowwise().sum().sum() - m1.sum()), m1.abs().sum());
73
  if (!internal::isMuchSmallerThan(abs(m1.sum() - (m1+m2).sum()), m1.abs().sum(), test_precision<Scalar>()))
73
  if (!internal::isMuchSmallerThan(abs(m1.sum() - (m1+m2).sum()), m1.abs().sum(), test_precision<Scalar>()))
74
      VERIFY_IS_NOT_APPROX(((m1+m2).rowwise().sum()).sum(), m1.sum());
74
      VERIFY_IS_NOT_APPROX(((m1+m2).rowwise().sum()).sum(), m1.sum());
75
  VERIFY_IS_APPROX(m1.colwise().sum(), m1.colwise().redux(internal::scalar_sum_op<Scalar>()));
75
  VERIFY_IS_APPROX(m1.colwise().sum(), m1.colwise().redux(internal::scalar_sum_op<Scalar,Scalar>()));
76
76
77
  // vector-wise ops
77
  // vector-wise ops
78
  m3 = m1;
78
  m3 = m1;
79
  VERIFY_IS_APPROX(m3.colwise() += cv1, m1.colwise() + cv1);
79
  VERIFY_IS_APPROX(m3.colwise() += cv1, m1.colwise() + cv1);
80
  m3 = m1;
80
  m3 = m1;
81
  VERIFY_IS_APPROX(m3.colwise() -= cv1, m1.colwise() - cv1);
81
  VERIFY_IS_APPROX(m3.colwise() -= cv1, m1.colwise() - cv1);
82
  m3 = m1;
82
  m3 = m1;
83
  VERIFY_IS_APPROX(m3.rowwise() += rv1, m1.rowwise() + rv1);
83
  VERIFY_IS_APPROX(m3.rowwise() += rv1, m1.rowwise() + rv1);
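
The only change above is that the sum functor now names both argument scalar types. A reduced stand-alone version of that check (sketch only):

    #include <Eigen/Dense>

    int main()
    {
      Eigen::MatrixXd m = Eigen::MatrixXd::Random(4, 3);

      // column sums via the high-level API and via redux() with the
      // two-parameter sum functor, as in the updated test line above
      Eigen::RowVectorXd a = m.colwise().sum();
      Eigen::RowVectorXd b = m.colwise().redux(Eigen::internal::scalar_sum_op<double, double>());
      return 0;
    }
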
(-)a/test/mixingtypes.cpp (-8 / +24 lines)
Lines 37-76 template<int SizeAtCompileType> void mix
37
  typedef Matrix<std::complex<double>, SizeAtCompileType, SizeAtCompileType> Mat_cd;
37
  typedef Matrix<std::complex<double>, SizeAtCompileType, SizeAtCompileType> Mat_cd;
38
  typedef Matrix<float, SizeAtCompileType, 1> Vec_f;
38
  typedef Matrix<float, SizeAtCompileType, 1> Vec_f;
39
  typedef Matrix<double, SizeAtCompileType, 1> Vec_d;
39
  typedef Matrix<double, SizeAtCompileType, 1> Vec_d;
40
  typedef Matrix<std::complex<float>, SizeAtCompileType, 1> Vec_cf;
40
  typedef Matrix<std::complex<float>, SizeAtCompileType, 1> Vec_cf;
41
  typedef Matrix<std::complex<double>, SizeAtCompileType, 1> Vec_cd;
41
  typedef Matrix<std::complex<double>, SizeAtCompileType, 1> Vec_cd;
42
42
43
  Mat_f mf    = Mat_f::Random(size,size);
43
  Mat_f mf    = Mat_f::Random(size,size);
44
  Mat_d md    = mf.template cast<double>();
44
  Mat_d md    = mf.template cast<double>();
45
  //Mat_d rd    = md;
45
  Mat_cf mcf  = Mat_cf::Random(size,size);
46
  Mat_cf mcf  = Mat_cf::Random(size,size);
46
  Mat_cd mcd  = mcf.template cast<complex<double> >();
47
  Mat_cd mcd  = mcf.template cast<complex<double> >();
47
  Mat_cd rcd = mcd;
48
  Mat_cd rcd = mcd;
48
  Vec_f vf    = Vec_f::Random(size,1);
49
  Vec_f vf    = Vec_f::Random(size,1);
49
  Vec_d vd    = vf.template cast<double>();
50
  Vec_d vd    = vf.template cast<double>();
50
  Vec_cf vcf  = Vec_cf::Random(size,1);
51
  Vec_cf vcf  = Vec_cf::Random(size,1);
51
  Vec_cd vcd  = vcf.template cast<complex<double> >();
52
  Vec_cd vcd  = vcf.template cast<complex<double> >();
52
  float           sf  = internal::random<float>();
53
  float           sf  = internal::random<float>();
53
  double          sd  = internal::random<double>();
54
  double          sd  = internal::random<double>();
54
  complex<float>  scf = internal::random<complex<float> >();
55
  complex<float>  scf = internal::random<complex<float> >();
55
  complex<double> scd = internal::random<complex<double> >();
56
  complex<double> scd = internal::random<complex<double> >();
56
57
57
58
58
  mf+mf;
59
  mf+mf;
59
  VERIFY_RAISES_ASSERT(mf+md);
60
60
#if !EIGEN_HAS_STD_RESULT_OF
61
//   VERIFY_RAISES_ASSERT(mf+md); // does not even compile
61
  // this one does not even compile with C++11
62
  VERIFY_RAISES_ASSERT(mf+mcf);
63
#endif
64
62
65
#ifdef EIGEN_DONT_VECTORIZE
63
#ifdef EIGEN_DONT_VECTORIZE
66
  VERIFY_RAISES_ASSERT(vf=vd);
64
  VERIFY_RAISES_ASSERT(vf=vd);
67
  VERIFY_RAISES_ASSERT(vf+=vd);
65
  VERIFY_RAISES_ASSERT(vf+=vd);
68
  VERIFY_RAISES_ASSERT(mcd=md);
69
#endif
66
#endif
70
  
67
  
71
  // check scalar products
68
  // check scalar products
72
  VERIFY_IS_APPROX(vcf * sf , vcf * complex<float>(sf));
69
  VERIFY_IS_APPROX(vcf * sf , vcf * complex<float>(sf));
73
  VERIFY_IS_APPROX(sd * vcd, complex<double>(sd) * vcd);
70
  VERIFY_IS_APPROX(sd * vcd, complex<double>(sd) * vcd);
74
  VERIFY_IS_APPROX(vf * scf , vf.template cast<complex<float> >() * scf);
71
  VERIFY_IS_APPROX(vf * scf , vf.template cast<complex<float> >() * scf);
75
  VERIFY_IS_APPROX(scd * vd, scd * vd.template cast<complex<double> >());
72
  VERIFY_IS_APPROX(scd * vd, scd * vd.template cast<complex<double> >());
76
73
Lines 181-206 template<int SizeAtCompileType> void mix
181
  VERIFY_IS_APPROX(Mat_cd(rcd.template triangularView<Upper>() = sd * md * mcd),
178
  VERIFY_IS_APPROX(Mat_cd(rcd.template triangularView<Upper>() = sd * md * mcd),
182
                   Mat_cd((sd * md.template cast<CD>().eval() * mcd).template triangularView<Upper>()));
179
                   Mat_cd((sd * md.template cast<CD>().eval() * mcd).template triangularView<Upper>()));
183
  VERIFY_IS_APPROX(Mat_cd(rcd.template triangularView<Upper>() = scd * mcd * md),
180
  VERIFY_IS_APPROX(Mat_cd(rcd.template triangularView<Upper>() = scd * mcd * md),
184
                   Mat_cd((scd * mcd * md.template cast<CD>().eval()).template triangularView<Upper>()));
181
                   Mat_cd((scd * mcd * md.template cast<CD>().eval()).template triangularView<Upper>()));
185
  VERIFY_IS_APPROX(Mat_cd(rcd.template triangularView<Upper>() = scd * md * mcd),
182
  VERIFY_IS_APPROX(Mat_cd(rcd.template triangularView<Upper>() = scd * md * mcd),
186
                   Mat_cd((scd * md.template cast<CD>().eval() * mcd).template triangularView<Upper>()));
183
                   Mat_cd((scd * md.template cast<CD>().eval() * mcd).template triangularView<Upper>()));
187
184
188
185
189
  VERIFY_IS_APPROX( md.array() * mcd.array(), md.template cast<CD>().eval().array() * mcd.array() );
186
190
  VERIFY_IS_APPROX( mcd.array() * md.array(), mcd.array() * md.template cast<CD>().eval().array() );
187
  VERIFY_IS_APPROX( md.array()  * mcd.array(), md.template cast<CD>().eval().array() * mcd.array() );
188
  VERIFY_IS_APPROX( mcd.array() * md.array(),  mcd.array() * md.template cast<CD>().eval().array() );
189
190
  VERIFY_IS_APPROX( md.array()  + mcd.array(), md.template cast<CD>().eval().array() + mcd.array() );
191
  VERIFY_IS_APPROX( mcd.array() + md.array(),  mcd.array() + md.template cast<CD>().eval().array() );
192
193
  VERIFY_IS_APPROX( md.array()  - mcd.array(), md.template cast<CD>().eval().array() - mcd.array() );
194
  VERIFY_IS_APPROX( mcd.array() - md.array(),  mcd.array() - md.template cast<CD>().eval().array() );
191
195
192
//   VERIFY_IS_APPROX( md.array() / mcd.array(), md.template cast<CD>().eval().array() / mcd.array() );
196
//   VERIFY_IS_APPROX( md.array() / mcd.array(), md.template cast<CD>().eval().array() / mcd.array() );
193
  VERIFY_IS_APPROX( mcd.array() / md.array(), mcd.array() / md.template cast<CD>().eval().array() );
197
  VERIFY_IS_APPROX( mcd.array() / md.array(), mcd.array() / md.template cast<CD>().eval().array() );
194
198
195
  rcd = mcd;
199
  rcd = mcd;
200
  VERIFY_IS_APPROX( rcd = md, md.template cast<CD>().eval() );
201
  rcd = mcd;
202
  VERIFY_IS_APPROX( rcd += md, mcd + md.template cast<CD>().eval() );
203
  rcd = mcd;
204
  VERIFY_IS_APPROX( rcd -= md, mcd - md.template cast<CD>().eval() );
205
  rcd = mcd;
196
  VERIFY_IS_APPROX( rcd.array() *= md.array(), mcd.array() * md.template cast<CD>().eval().array() );
206
  VERIFY_IS_APPROX( rcd.array() *= md.array(), mcd.array() * md.template cast<CD>().eval().array() );
197
  rcd = mcd;
207
  rcd = mcd;
198
  VERIFY_IS_APPROX( rcd.array() /= md.array(), mcd.array() / md.template cast<CD>().eval().array() );
208
  VERIFY_IS_APPROX( rcd.array() /= md.array(), mcd.array() / md.template cast<CD>().eval().array() );
209
210
  rcd = mcd;
211
  VERIFY_IS_APPROX( rcd += md + mcd*md, mcd + (md.template cast<CD>().eval()) + mcd*(md.template cast<CD>().eval()));
212
213
  rcd = mcd;
214
  VERIFY_IS_APPROX( rcd += mcd + md*md, mcd + mcd + ((md*md).template cast<CD>().eval()) );
199
}
215
}
200
216
201
void test_mixingtypes()
217
void test_mixingtypes()
202
{
218
{
203
  for(int i = 0; i < g_repeat; i++) {
219
  for(int i = 0; i < g_repeat; i++) {
204
    CALL_SUBTEST_1(mixingtypes<3>());
220
    CALL_SUBTEST_1(mixingtypes<3>());
205
    CALL_SUBTEST_2(mixingtypes<4>());
221
    CALL_SUBTEST_2(mixingtypes<4>());
206
    CALL_SUBTEST_3(mixingtypes<Dynamic>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE)));
222
    CALL_SUBTEST_3(mixingtypes<Dynamic>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE)));
(-)a/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h (-6 / +6 lines)
Lines 496-541 struct make_coherent_impl<Matrix<A_Scala
496
    else if((B_Rows==Dynamic || B_Cols==Dynamic) && (b.size()==0))
496
    else if((B_Rows==Dynamic || B_Cols==Dynamic) && (b.size()==0))
497
    {
497
    {
498
      b.resize(a.size());
498
      b.resize(a.size());
499
      b.setZero();
499
      b.setZero();
500
    }
500
    }
501
  }
501
  }
502
};
502
};
503
503
504
} // end namespace internal
505
504
template<typename A_Scalar, int A_Rows, int A_Cols, int A_Options, int A_MaxRows, int A_MaxCols>
506
template<typename A_Scalar, int A_Rows, int A_Cols, int A_Options, int A_MaxRows, int A_MaxCols>
505
struct scalar_product_traits<Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols>,A_Scalar>
507
struct ScalarBinaryOpTraits<Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols>,A_Scalar>
506
{
508
{
507
  enum { Defined = 1 };
509
  enum { Defined = 1 };
508
  typedef Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols> ReturnType;
510
  typedef Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols> ReturnType;
509
};
511
};
510
512
511
template<typename A_Scalar, int A_Rows, int A_Cols, int A_Options, int A_MaxRows, int A_MaxCols>
513
template<typename A_Scalar, int A_Rows, int A_Cols, int A_Options, int A_MaxRows, int A_MaxCols>
512
struct scalar_product_traits<A_Scalar, Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols> >
514
struct ScalarBinaryOpTraits<A_Scalar, Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols> >
513
{
515
{
514
  enum { Defined = 1 };
516
  enum { Defined = 1 };
515
  typedef Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols> ReturnType;
517
  typedef Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols> ReturnType;
516
};
518
};
517
519
518
template<typename DerType>
520
template<typename DerType>
519
struct scalar_product_traits<AutoDiffScalar<DerType>,typename DerType::Scalar>
521
struct ScalarBinaryOpTraits<AutoDiffScalar<DerType>,typename DerType::Scalar>
520
{
522
{
521
  enum { Defined = 1 };
523
  enum { Defined = 1 };
522
  typedef AutoDiffScalar<DerType> ReturnType;
524
  typedef AutoDiffScalar<DerType> ReturnType;
523
};
525
};
524
526
525
template<typename DerType>
527
template<typename DerType>
526
struct scalar_product_traits<typename DerType::Scalar,AutoDiffScalar<DerType> >
528
struct ScalarBinaryOpTraits<typename DerType::Scalar,AutoDiffScalar<DerType> >
527
{
529
{
528
  enum { Defined = 1 };
530
  enum { Defined = 1 };
529
  typedef AutoDiffScalar<DerType> ReturnType;
531
  typedef AutoDiffScalar<DerType> ReturnType;
530
};
532
};
531
533
532
} // end namespace internal
533
534
#define EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(FUNC,CODE) \
534
#define EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(FUNC,CODE) \
535
  template<typename DerType> \
535
  template<typename DerType> \
536
  inline const Eigen::AutoDiffScalar<Eigen::CwiseUnaryOp<Eigen::internal::scalar_multiple_op<typename Eigen::internal::traits<typename Eigen::internal::remove_all<DerType>::type>::Scalar>, const typename Eigen::internal::remove_all<DerType>::type> > \
536
  inline const Eigen::AutoDiffScalar<Eigen::CwiseUnaryOp<Eigen::internal::scalar_multiple_op<typename Eigen::internal::traits<typename Eigen::internal::remove_all<DerType>::type>::Scalar>, const typename Eigen::internal::remove_all<DerType>::type> > \
537
  FUNC(const Eigen::AutoDiffScalar<DerType>& x) { \
537
  FUNC(const Eigen::AutoDiffScalar<DerType>& x) { \
538
    using namespace Eigen; \
538
    using namespace Eigen; \
539
    typedef typename Eigen::internal::traits<typename Eigen::internal::remove_all<DerType>::type>::Scalar Scalar; \
539
    typedef typename Eigen::internal::traits<typename Eigen::internal::remove_all<DerType>::type>::Scalar Scalar; \
540
    typedef AutoDiffScalar<CwiseUnaryOp<Eigen::internal::scalar_multiple_op<Scalar>, const typename Eigen::internal::remove_all<DerType>::type> > ReturnType; \
540
    typedef AutoDiffScalar<CwiseUnaryOp<Eigen::internal::scalar_multiple_op<Scalar>, const typename Eigen::internal::remove_all<DerType>::type> > ReturnType; \
541
    CODE; \
541
    CODE; \
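
The renamed specializations above all follow the same pattern: mark the scalar pair as Defined and name the promoted ReturnType. A hypothetical user-defined scalar would opt in the same way; MyScalar and its operators below are assumptions for illustration only, not part of the patch, and the two-parameter form assumes ScalarBinaryOpTraits as it is used in this revision.

    #include <Eigen/Core>

    struct MyScalar { double v; };
    inline MyScalar operator*(const MyScalar& a, double b) { MyScalar r = { a.v * b }; return r; }
    inline MyScalar operator*(double a, const MyScalar& b) { MyScalar r = { a * b.v }; return r; }

    namespace Eigen {
      template<> struct ScalarBinaryOpTraits<MyScalar, double>
      {
        enum { Defined = 1 };
        typedef MyScalar ReturnType;
      };
      template<> struct ScalarBinaryOpTraits<double, MyScalar>
      {
        enum { Defined = 1 };
        typedef MyScalar ReturnType;
      };
    }

    int main()
    {
      MyScalar x = { 2.0 };
      MyScalar y = x * 3.0;   // plain scalar arithmetic, independent of Eigen
      (void)y;
      return 0;
    }
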
(-)a/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h (-2 / +2 lines)
Lines 198-214 void KroneckerProductSparse<Lhs,Rhs>::ev
198
198
199
namespace internal {
199
namespace internal {
200
200
201
template<typename _Lhs, typename _Rhs>
201
template<typename _Lhs, typename _Rhs>
202
struct traits<KroneckerProduct<_Lhs,_Rhs> >
202
struct traits<KroneckerProduct<_Lhs,_Rhs> >
203
{
203
{
204
  typedef typename remove_all<_Lhs>::type Lhs;
204
  typedef typename remove_all<_Lhs>::type Lhs;
205
  typedef typename remove_all<_Rhs>::type Rhs;
205
  typedef typename remove_all<_Rhs>::type Rhs;
206
  typedef typename scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType Scalar;
206
  typedef typename ScalarBinaryOpTraits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType Scalar;
207
  typedef typename promote_index_type<typename Lhs::StorageIndex, typename Rhs::StorageIndex>::type StorageIndex;
207
  typedef typename promote_index_type<typename Lhs::StorageIndex, typename Rhs::StorageIndex>::type StorageIndex;
208
208
209
  enum {
209
  enum {
210
    Rows = size_at_compile_time<traits<Lhs>::RowsAtCompileTime, traits<Rhs>::RowsAtCompileTime>::ret,
210
    Rows = size_at_compile_time<traits<Lhs>::RowsAtCompileTime, traits<Rhs>::RowsAtCompileTime>::ret,
211
    Cols = size_at_compile_time<traits<Lhs>::ColsAtCompileTime, traits<Rhs>::ColsAtCompileTime>::ret,
211
    Cols = size_at_compile_time<traits<Lhs>::ColsAtCompileTime, traits<Rhs>::ColsAtCompileTime>::ret,
212
    MaxRows = size_at_compile_time<traits<Lhs>::MaxRowsAtCompileTime, traits<Rhs>::MaxRowsAtCompileTime>::ret,
212
    MaxRows = size_at_compile_time<traits<Lhs>::MaxRowsAtCompileTime, traits<Rhs>::MaxRowsAtCompileTime>::ret,
213
    MaxCols = size_at_compile_time<traits<Lhs>::MaxColsAtCompileTime, traits<Rhs>::MaxColsAtCompileTime>::ret
213
    MaxCols = size_at_compile_time<traits<Lhs>::MaxColsAtCompileTime, traits<Rhs>::MaxColsAtCompileTime>::ret
214
  };
214
  };
Lines 217-233 struct traits<KroneckerProduct<_Lhs,_Rhs
217
};
217
};
218
218
219
template<typename _Lhs, typename _Rhs>
219
template<typename _Lhs, typename _Rhs>
220
struct traits<KroneckerProductSparse<_Lhs,_Rhs> >
220
struct traits<KroneckerProductSparse<_Lhs,_Rhs> >
221
{
221
{
222
  typedef MatrixXpr XprKind;
222
  typedef MatrixXpr XprKind;
223
  typedef typename remove_all<_Lhs>::type Lhs;
223
  typedef typename remove_all<_Lhs>::type Lhs;
224
  typedef typename remove_all<_Rhs>::type Rhs;
224
  typedef typename remove_all<_Rhs>::type Rhs;
225
  typedef typename scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType Scalar;
225
  typedef typename ScalarBinaryOpTraits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType Scalar;
226
  typedef typename cwise_promote_storage_type<typename traits<Lhs>::StorageKind, typename traits<Rhs>::StorageKind, scalar_product_op<typename Lhs::Scalar, typename Rhs::Scalar> >::ret StorageKind;
226
  typedef typename cwise_promote_storage_type<typename traits<Lhs>::StorageKind, typename traits<Rhs>::StorageKind, scalar_product_op<typename Lhs::Scalar, typename Rhs::Scalar> >::ret StorageKind;
227
  typedef typename promote_index_type<typename Lhs::StorageIndex, typename Rhs::StorageIndex>::type StorageIndex;
227
  typedef typename promote_index_type<typename Lhs::StorageIndex, typename Rhs::StorageIndex>::type StorageIndex;
228
228
229
  enum {
229
  enum {
230
    LhsFlags = Lhs::Flags,
230
    LhsFlags = Lhs::Flags,
231
    RhsFlags = Rhs::Flags,
231
    RhsFlags = Rhs::Flags,
232
232
233
    RowsAtCompileTime = size_at_compile_time<traits<Lhs>::RowsAtCompileTime, traits<Rhs>::RowsAtCompileTime>::ret,
233
    RowsAtCompileTime = size_at_compile_time<traits<Lhs>::RowsAtCompileTime, traits<Rhs>::RowsAtCompileTime>::ret,
