Bugzilla – Attachment 713 Details for
Bug 279
Be more flexible regarding mixing types
Home
|
New
|
Browse
|
Search
|
[?]
|
Reports
|
Requests
|
Help
|
Log In
[x]
|
Forgot Password
Login:
[x]
This Bugzilla service is closed. All entries have been migrated to
https://gitlab.com/libeigen/eigen
[patch]
Work in progress patch
relax_binary_op.diff (text/plain), 140.22 KB, created by
Gael Guennebaud
on 2016-06-03 16:40:37 UTC
(
hide
)
Description:
Work in progress patch
Filename:
MIME Type:
Creator:
Gael Guennebaud
Created:
2016-06-03 16:40:37 UTC
Size:
140.22 KB
patch
obsolete
>diff --git a/Eigen/src/Core/ArrayBase.h b/Eigen/src/Core/ArrayBase.h >--- a/Eigen/src/Core/ArrayBase.h >+++ b/Eigen/src/Core/ArrayBase.h >@@ -171,30 +171,30 @@ template<typename Derived> class ArrayBa > * > * \returns a reference to \c *this > */ > template<typename Derived> > template<typename OtherDerived> > EIGEN_STRONG_INLINE Derived & > ArrayBase<Derived>::operator-=(const ArrayBase<OtherDerived> &other) > { >- call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar>()); >+ call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>()); > return derived(); > } > > /** replaces \c *this by \c *this + \a other. > * > * \returns a reference to \c *this > */ > template<typename Derived> > template<typename OtherDerived> > EIGEN_STRONG_INLINE Derived & > ArrayBase<Derived>::operator+=(const ArrayBase<OtherDerived>& other) > { >- call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar>()); >+ call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>()); > return derived(); > } > > /** replaces \c *this by \c *this * \a other coefficient wise. 
> * > * \returns a reference to \c *this > */ > template<typename Derived> >diff --git a/Eigen/src/Core/AssignEvaluator.h b/Eigen/src/Core/AssignEvaluator.h >--- a/Eigen/src/Core/AssignEvaluator.h >+++ b/Eigen/src/Core/AssignEvaluator.h >@@ -682,17 +682,17 @@ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE vo > Kernel kernel(dstEvaluator, srcEvaluator, func, dst.const_cast_derived()); > > dense_assignment_loop<Kernel>::run(kernel); > } > > template<typename DstXprType, typename SrcXprType> > EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_dense_assignment_loop(const DstXprType& dst, const SrcXprType& src) > { >- call_dense_assignment_loop(dst, src, internal::assign_op<typename DstXprType::Scalar>()); >+ call_dense_assignment_loop(dst, src, internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>()); > } > > /*************************************************************************** > * Part 6 : Generic assignment > ***************************************************************************/ > > // Based on the respective shapes of the destination and source, > // the class AssignmentKind determine the kind of assignment mechanism. >@@ -717,23 +717,23 @@ struct Assignment; > // Indeed, I (Gael) think that this concept of "assume-aliasing" was a mistake, and it makes thing quite complicated. > // So this intermediate function removes everything related to "assume-aliasing" such that Assignment > // does not has to bother about these annoying details. 
> > template<typename Dst, typename Src> > EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE > void call_assignment(Dst& dst, const Src& src) > { >- call_assignment(dst, src, internal::assign_op<typename Dst::Scalar>()); >+ call_assignment(dst, src, internal::assign_op<typename Dst::Scalar,typename Src::Scalar>()); > } > template<typename Dst, typename Src> > EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE > void call_assignment(const Dst& dst, const Src& src) > { >- call_assignment(dst, src, internal::assign_op<typename Dst::Scalar>()); >+ call_assignment(dst, src, internal::assign_op<typename Dst::Scalar,typename Src::Scalar>()); > } > > // Deal with "assume-aliasing" > template<typename Dst, typename Src, typename Func> > EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE > void call_assignment(Dst& dst, const Src& src, const Func& func, typename enable_if< evaluator_assume_aliasing<Src>::value, void*>::type = 0) > { > typename plain_matrix_type<Src>::type tmp(src); >@@ -782,17 +782,17 @@ void call_assignment_no_alias(Dst& dst, > EIGEN_CHECK_BINARY_COMPATIBILIY(Func,typename ActualDstTypeCleaned::Scalar,typename Src::Scalar); > > Assignment<ActualDstTypeCleaned,Src,Func>::run(actualDst, src, func); > } > template<typename Dst, typename Src> > EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE > void call_assignment_no_alias(Dst& dst, const Src& src) > { >- call_assignment_no_alias(dst, src, internal::assign_op<typename Dst::Scalar>()); >+ call_assignment_no_alias(dst, src, internal::assign_op<typename Dst::Scalar,typename Src::Scalar>()); > } > > template<typename Dst, typename Src, typename Func> > EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE > void call_assignment_no_alias_no_transpose(Dst& dst, const Src& src, const Func& func) > { > Index dstRows = src.rows(); > Index dstCols = src.cols(); >@@ -804,17 +804,17 @@ void call_assignment_no_alias_no_transpo > EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Dst,Src) > > Assignment<Dst,Src,Func>::run(dst, src, func); > } > template<typename Dst, typename Src> > EIGEN_DEVICE_FUNC 
EIGEN_STRONG_INLINE > void call_assignment_no_alias_no_transpose(Dst& dst, const Src& src) > { >- call_assignment_no_alias_no_transpose(dst, src, internal::assign_op<typename Dst::Scalar>()); >+ call_assignment_no_alias_no_transpose(dst, src, internal::assign_op<typename Dst::Scalar,typename Src::Scalar>()); > } > > // forward declaration > template<typename Dst, typename Src> void check_for_aliasing(const Dst &dst, const Src &src); > > // Generic Dense to Dense assignment > template< typename DstXprType, typename SrcXprType, typename Functor, typename Scalar> > struct Assignment<DstXprType, SrcXprType, Functor, Dense2Dense, Scalar> >@@ -833,17 +833,17 @@ struct Assignment<DstXprType, SrcXprType > }; > > // Generic assignment through evalTo. > // TODO: not sure we have to keep that one, but it helps porting current code to new evaluator mechanism. > template< typename DstXprType, typename SrcXprType, typename Functor, typename Scalar> > struct Assignment<DstXprType, SrcXprType, Functor, EigenBase2EigenBase, Scalar> > { > EIGEN_DEVICE_FUNC >- static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar> &/*func*/) >+ static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/) > { > eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); > src.evalTo(dst); > } > }; > > } // namespace internal > >diff --git a/Eigen/src/Core/CwiseBinaryOp.h b/Eigen/src/Core/CwiseBinaryOp.h >--- a/Eigen/src/Core/CwiseBinaryOp.h >+++ b/Eigen/src/Core/CwiseBinaryOp.h >@@ -155,29 +155,29 @@ public: > * > * \returns a reference to \c *this > */ > template<typename Derived> > template<typename OtherDerived> > EIGEN_STRONG_INLINE Derived & > MatrixBase<Derived>::operator-=(const MatrixBase<OtherDerived> &other) > { >- call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar>()); >+ 
call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>()); > return derived(); > } > > /** replaces \c *this by \c *this + \a other. > * > * \returns a reference to \c *this > */ > template<typename Derived> > template<typename OtherDerived> > EIGEN_STRONG_INLINE Derived & > MatrixBase<Derived>::operator+=(const MatrixBase<OtherDerived>& other) > { >- call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar>()); >+ call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>()); > return derived(); > } > > } // end namespace Eigen > > #endif // EIGEN_CWISE_BINARY_OP_H > >diff --git a/Eigen/src/Core/DiagonalMatrix.h b/Eigen/src/Core/DiagonalMatrix.h >--- a/Eigen/src/Core/DiagonalMatrix.h >+++ b/Eigen/src/Core/DiagonalMatrix.h >@@ -315,26 +315,26 @@ template<> struct storage_kind_to_shape< > struct Diagonal2Dense {}; > > template<> struct AssignmentKind<DenseShape,DiagonalShape> { typedef Diagonal2Dense Kind; }; > > // Diagonal matrix to Dense assignment > template< typename DstXprType, typename SrcXprType, typename Functor, typename Scalar> > struct Assignment<DstXprType, SrcXprType, Functor, Diagonal2Dense, Scalar> > { >- static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar> &/*func*/) >+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/) > { > dst.setZero(); > dst.diagonal() = src.diagonal(); > } > >- static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar> &/*func*/) >+ static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/) > { dst.diagonal() += src.diagonal(); } > >- static void run(DstXprType &dst, const SrcXprType &src, const 
internal::sub_assign_op<typename DstXprType::Scalar> &/*func*/) >+ static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/) > { dst.diagonal() -= src.diagonal(); } > }; > > } // namespace internal > > } // end namespace Eigen > > #endif // EIGEN_DIAGONALMATRIX_H >diff --git a/Eigen/src/Core/Dot.h b/Eigen/src/Core/Dot.h >--- a/Eigen/src/Core/Dot.h >+++ b/Eigen/src/Core/Dot.h >@@ -23,32 +23,34 @@ template<typename T, typename U, > && U::IsVectorAtCompileTime > && ((int(T::RowsAtCompileTime) == 1 && int(U::ColsAtCompileTime) == 1) > | // FIXME | instead of || to please GCC 4.4.0 stupid warning "suggest parentheses around &&". > // revert to || as soon as not needed anymore. > (int(T::ColsAtCompileTime) == 1 && int(U::RowsAtCompileTime) == 1)) > > > struct dot_nocheck > { >- typedef typename scalar_product_traits<typename traits<T>::Scalar,typename traits<U>::Scalar>::ReturnType ResScalar; >+ typedef scalar_conj_product_op<typename traits<T>::Scalar,typename traits<U>::Scalar> conj_prod; >+ typedef typename conj_prod::result_type ResScalar; > EIGEN_DEVICE_FUNC > static inline ResScalar run(const MatrixBase<T>& a, const MatrixBase<U>& b) > { >- return a.template binaryExpr<scalar_conj_product_op<typename traits<T>::Scalar,typename traits<U>::Scalar> >(b).sum(); >+ return a.template binaryExpr<conj_prod>(b).sum(); > } > }; > > template<typename T, typename U> > struct dot_nocheck<T, U, true> > { >- typedef typename scalar_product_traits<typename traits<T>::Scalar,typename traits<U>::Scalar>::ReturnType ResScalar; >+ typedef scalar_conj_product_op<typename traits<T>::Scalar,typename traits<U>::Scalar> conj_prod; >+ typedef typename conj_prod::result_type ResScalar; > EIGEN_DEVICE_FUNC > static inline ResScalar run(const MatrixBase<T>& a, const MatrixBase<U>& b) > { >- return a.transpose().template binaryExpr<scalar_conj_product_op<typename traits<T>::Scalar,typename 
traits<U>::Scalar> >(b).sum(); >+ return a.transpose().template binaryExpr<conj_prod>(b).sum(); > } > }; > > } // end namespace internal > > /** \returns the dot product of *this with other. > * > * \only_for_vectors >@@ -57,17 +59,17 @@ struct dot_nocheck<T, U, true> > * (sesquilinear) dot product, conjugate-linear in the first variable and linear in the > * second variable. > * > * \sa squaredNorm(), norm() > */ > template<typename Derived> > template<typename OtherDerived> > EIGEN_DEVICE_FUNC >-typename internal::scalar_product_traits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType >+typename ScalarBinaryOpTraits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType > MatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const > { > EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) > EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) > EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived) > typedef internal::scalar_conj_product_op<Scalar,typename OtherDerived::Scalar> func; > EIGEN_CHECK_BINARY_COMPATIBILIY(func,Scalar,typename OtherDerived::Scalar); > >diff --git a/Eigen/src/Core/EigenBase.h b/Eigen/src/Core/EigenBase.h >--- a/Eigen/src/Core/EigenBase.h >+++ b/Eigen/src/Core/EigenBase.h >@@ -133,23 +133,23 @@ Derived& DenseBase<Derived>::operator=(c > call_assignment(derived(), other.derived()); > return derived(); > } > > template<typename Derived> > template<typename OtherDerived> > Derived& DenseBase<Derived>::operator+=(const EigenBase<OtherDerived> &other) > { >- call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar>()); >+ call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>()); > return derived(); > } > > template<typename Derived> > template<typename OtherDerived> > Derived& DenseBase<Derived>::operator-=(const EigenBase<OtherDerived> &other) > { >- call_assignment(derived(), 
other.derived(), internal::sub_assign_op<Scalar>()); >+ call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>()); > return derived(); > } > > } // end namespace Eigen > > #endif // EIGEN_EIGENBASE_H >diff --git a/Eigen/src/Core/MatrixBase.h b/Eigen/src/Core/MatrixBase.h >--- a/Eigen/src/Core/MatrixBase.h >+++ b/Eigen/src/Core/MatrixBase.h >@@ -188,17 +188,17 @@ template<typename Derived> class MatrixB > > template<typename DiagonalDerived> > EIGEN_DEVICE_FUNC > const Product<Derived, DiagonalDerived, LazyProduct> > operator*(const DiagonalBase<DiagonalDerived> &diagonal) const; > > template<typename OtherDerived> > EIGEN_DEVICE_FUNC >- typename internal::scalar_product_traits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType >+ typename ScalarBinaryOpTraits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType > dot(const MatrixBase<OtherDerived>& other) const; > > EIGEN_DEVICE_FUNC RealScalar squaredNorm() const; > EIGEN_DEVICE_FUNC RealScalar norm() const; > RealScalar stableNorm() const; > RealScalar blueNorm() const; > RealScalar hypotNorm() const; > EIGEN_DEVICE_FUNC const PlainObject normalized() const; >@@ -376,17 +376,17 @@ template<typename Derived> class MatrixB > inline JacobiSVD<PlainObject> jacobiSvd(unsigned int computationOptions = 0) const; > inline BDCSVD<PlainObject> bdcSvd(unsigned int computationOptions = 0) const; > > /////////// Geometry module /////////// > > #ifndef EIGEN_PARSED_BY_DOXYGEN > /// \internal helper struct to form the return type of the cross product > template<typename OtherDerived> struct cross_product_return_type { >- typedef typename internal::scalar_product_traits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType Scalar; >+ typedef typename ScalarBinaryOpTraits<typename internal::traits<Derived>::Scalar,typename 
internal::traits<OtherDerived>::Scalar>::ReturnType Scalar; > typedef Matrix<Scalar,MatrixBase::RowsAtCompileTime,MatrixBase::ColsAtCompileTime> type; > }; > #endif // EIGEN_PARSED_BY_DOXYGEN > template<typename OtherDerived> > EIGEN_DEVICE_FUNC > #ifndef EIGEN_PARSED_BY_DOXYGEN > inline typename cross_product_return_type<OtherDerived>::type > #else >diff --git a/Eigen/src/Core/NoAlias.h b/Eigen/src/Core/NoAlias.h >--- a/Eigen/src/Core/NoAlias.h >+++ b/Eigen/src/Core/NoAlias.h >@@ -34,33 +34,33 @@ class NoAlias > typedef typename ExpressionType::Scalar Scalar; > > explicit NoAlias(ExpressionType& expression) : m_expression(expression) {} > > template<typename OtherDerived> > EIGEN_DEVICE_FUNC > EIGEN_STRONG_INLINE ExpressionType& operator=(const StorageBase<OtherDerived>& other) > { >- call_assignment_no_alias(m_expression, other.derived(), internal::assign_op<Scalar>()); >+ call_assignment_no_alias(m_expression, other.derived(), internal::assign_op<Scalar,typename OtherDerived::Scalar>()); > return m_expression; > } > > template<typename OtherDerived> > EIGEN_DEVICE_FUNC > EIGEN_STRONG_INLINE ExpressionType& operator+=(const StorageBase<OtherDerived>& other) > { >- call_assignment_no_alias(m_expression, other.derived(), internal::add_assign_op<Scalar>()); >+ call_assignment_no_alias(m_expression, other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>()); > return m_expression; > } > > template<typename OtherDerived> > EIGEN_DEVICE_FUNC > EIGEN_STRONG_INLINE ExpressionType& operator-=(const StorageBase<OtherDerived>& other) > { >- call_assignment_no_alias(m_expression, other.derived(), internal::sub_assign_op<Scalar>()); >+ call_assignment_no_alias(m_expression, other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>()); > return m_expression; > } > > EIGEN_DEVICE_FUNC > ExpressionType& expression() const > { > return m_expression; > } >diff --git a/Eigen/src/Core/PlainObjectBase.h b/Eigen/src/Core/PlainObjectBase.h 
>--- a/Eigen/src/Core/PlainObjectBase.h >+++ b/Eigen/src/Core/PlainObjectBase.h >@@ -713,17 +713,17 @@ class PlainObjectBase : public internal: > EIGEN_DEVICE_FUNC > EIGEN_STRONG_INLINE Derived& _set_noalias(const DenseBase<OtherDerived>& other) > { > // I don't think we need this resize call since the lazyAssign will anyways resize > // and lazyAssign will be called by the assign selector. > //_resize_to_match(other); > // the 'false' below means to enforce lazy evaluation. We don't use lazyAssign() because > // it wouldn't allow to copy a row-vector into a column-vector. >- internal::call_assignment_no_alias(this->derived(), other.derived(), internal::assign_op<Scalar>()); >+ internal::call_assignment_no_alias(this->derived(), other.derived(), internal::assign_op<Scalar,typename OtherDerived::Scalar>()); > return this->derived(); > } > > template<typename T0, typename T1> > EIGEN_DEVICE_FUNC > EIGEN_STRONG_INLINE void _init2(Index rows, Index cols, typename internal::enable_if<Base::SizeAtCompileTime!=2,T0>::type* = 0) > { > EIGEN_STATIC_ASSERT(bool(NumTraits<T0>::IsInteger) && >diff --git a/Eigen/src/Core/Product.h b/Eigen/src/Core/Product.h >--- a/Eigen/src/Core/Product.h >+++ b/Eigen/src/Core/Product.h >@@ -13,21 +13,22 @@ > namespace Eigen { > > template<typename Lhs, typename Rhs, int Option, typename StorageKind> class ProductImpl; > > namespace internal { > > // Determine the scalar of Product<Lhs, Rhs>. This is normally the same as Lhs::Scalar times > // Rhs::Scalar, but product with permutation matrices inherit the scalar of the other factor. >+// TODO: this could be removed once ScalarBinaryOpTraits handles void. 
> template<typename Lhs, typename Rhs, typename LhsShape = typename evaluator_traits<Lhs>::Shape, > typename RhsShape = typename evaluator_traits<Rhs>::Shape > > struct product_result_scalar > { >- typedef typename scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType Scalar; >+ typedef typename ScalarBinaryOpTraits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType Scalar; > }; > > template<typename Lhs, typename Rhs, typename RhsShape> > struct product_result_scalar<Lhs, Rhs, PermutationShape, RhsShape> > { > typedef typename Rhs::Scalar Scalar; > }; > >diff --git a/Eigen/src/Core/ProductEvaluators.h b/Eigen/src/Core/ProductEvaluators.h >--- a/Eigen/src/Core/ProductEvaluators.h >+++ b/Eigen/src/Core/ProductEvaluators.h >@@ -119,50 +119,50 @@ struct product_evaluator<Product<Lhs, Rh > } > > protected: > PlainObject m_result; > }; > > // Dense = Product > template< typename DstXprType, typename Lhs, typename Rhs, int Options, typename Scalar> >-struct Assignment<DstXprType, Product<Lhs,Rhs,Options>, internal::assign_op<Scalar>, Dense2Dense, >+struct Assignment<DstXprType, Product<Lhs,Rhs,Options>, internal::assign_op<Scalar,typename Product<Lhs,Rhs,Options>::Scalar>, Dense2Dense, > typename enable_if<(Options==DefaultProduct || Options==AliasFreeProduct),Scalar>::type> > { > typedef Product<Lhs,Rhs,Options> SrcXprType; > static EIGEN_STRONG_INLINE >- void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &) >+ void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename SrcXprType::Scalar> &) > { > // FIXME shall we handle nested_eval here? 
> generic_product_impl<Lhs, Rhs>::evalTo(dst, src.lhs(), src.rhs()); > } > }; > > // Dense += Product > template< typename DstXprType, typename Lhs, typename Rhs, int Options, typename Scalar> >-struct Assignment<DstXprType, Product<Lhs,Rhs,Options>, internal::add_assign_op<Scalar>, Dense2Dense, >+struct Assignment<DstXprType, Product<Lhs,Rhs,Options>, internal::add_assign_op<Scalar,typename Product<Lhs,Rhs,Options>::Scalar>, Dense2Dense, > typename enable_if<(Options==DefaultProduct || Options==AliasFreeProduct),Scalar>::type> > { > typedef Product<Lhs,Rhs,Options> SrcXprType; > static EIGEN_STRONG_INLINE >- void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<Scalar> &) >+ void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<Scalar,typename SrcXprType::Scalar> &) > { > // FIXME shall we handle nested_eval here? > generic_product_impl<Lhs, Rhs>::addTo(dst, src.lhs(), src.rhs()); > } > }; > > // Dense -= Product > template< typename DstXprType, typename Lhs, typename Rhs, int Options, typename Scalar> >-struct Assignment<DstXprType, Product<Lhs,Rhs,Options>, internal::sub_assign_op<Scalar>, Dense2Dense, >+struct Assignment<DstXprType, Product<Lhs,Rhs,Options>, internal::sub_assign_op<Scalar,typename Product<Lhs,Rhs,Options>::Scalar>, Dense2Dense, > typename enable_if<(Options==DefaultProduct || Options==AliasFreeProduct),Scalar>::type> > { > typedef Product<Lhs,Rhs,Options> SrcXprType; > static EIGEN_STRONG_INLINE >- void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<Scalar> &) >+ void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<Scalar,typename SrcXprType::Scalar> &) > { > // FIXME shall we handle nested_eval here? 
> generic_product_impl<Lhs, Rhs>::subTo(dst, src.lhs(), src.rhs()); > } > }; > > > // Dense ?= scalar * Product >@@ -182,47 +182,48 @@ struct Assignment<DstXprType, CwiseUnary > }; > > //---------------------------------------- > // Catch "Dense ?= xpr + Product<>" expression to save one temporary > // FIXME we could probably enable these rules for any product, i.e., not only Dense and DefaultProduct > // TODO enable it for "Dense ?= xpr - Product<>" as well. > > template<typename OtherXpr, typename Lhs, typename Rhs> >-struct evaluator_assume_aliasing<CwiseBinaryOp<internal::scalar_sum_op<typename OtherXpr::Scalar>, const OtherXpr, >+struct evaluator_assume_aliasing<CwiseBinaryOp<internal::scalar_sum_op<typename OtherXpr::Scalar,typename Product<Lhs,Rhs,DefaultProduct>::Scalar>, const OtherXpr, > const Product<Lhs,Rhs,DefaultProduct> >, DenseShape > { > static const bool value = true; > }; > >-template<typename DstXprType, typename OtherXpr, typename ProductType, typename Scalar, typename Func1, typename Func2> >+template<typename DstXprType, typename OtherXpr, typename ProductType, typename Func1, typename Func2> > struct assignment_from_xpr_plus_product > { >- typedef CwiseBinaryOp<internal::scalar_sum_op<Scalar>, const OtherXpr, const ProductType> SrcXprType; >+ typedef CwiseBinaryOp<internal::scalar_sum_op<typename OtherXpr::Scalar,typename ProductType::Scalar>, const OtherXpr, const ProductType> SrcXprType; >+ template<typename InitialFunc> > static EIGEN_STRONG_INLINE >- void run(DstXprType &dst, const SrcXprType &src, const Func1& func) >+ void run(DstXprType &dst, const SrcXprType &src, const InitialFunc& /*func*/) > { >- call_assignment_no_alias(dst, src.lhs(), func); >+ call_assignment_no_alias(dst, src.lhs(), Func1()); > call_assignment_no_alias(dst, src.rhs(), Func2()); > } > }; > >-template< typename DstXprType, typename OtherXpr, typename Lhs, typename Rhs, typename Scalar> >-struct Assignment<DstXprType, 
CwiseBinaryOp<internal::scalar_sum_op<Scalar>, const OtherXpr, >- const Product<Lhs,Rhs,DefaultProduct> >, internal::assign_op<Scalar>, Dense2Dense> >- : assignment_from_xpr_plus_product<DstXprType, OtherXpr, Product<Lhs,Rhs,DefaultProduct>, Scalar, internal::assign_op<Scalar>, internal::add_assign_op<Scalar> > >+template< typename DstXprType, typename OtherXpr, typename Lhs, typename Rhs, typename DstScalar, typename SrcScalar, typename OtherScalar,typename ProdScalar> >+struct Assignment<DstXprType, CwiseBinaryOp<internal::scalar_sum_op<OtherScalar,ProdScalar>, const OtherXpr, >+ const Product<Lhs,Rhs,DefaultProduct> >, internal::assign_op<DstScalar,SrcScalar>, Dense2Dense> >+ : assignment_from_xpr_plus_product<DstXprType, OtherXpr, Product<Lhs,Rhs,DefaultProduct>, internal::assign_op<DstScalar,OtherScalar>, internal::add_assign_op<DstScalar,ProdScalar> > > {}; >-template< typename DstXprType, typename OtherXpr, typename Lhs, typename Rhs, typename Scalar> >-struct Assignment<DstXprType, CwiseBinaryOp<internal::scalar_sum_op<Scalar>, const OtherXpr, >- const Product<Lhs,Rhs,DefaultProduct> >, internal::add_assign_op<Scalar>, Dense2Dense> >- : assignment_from_xpr_plus_product<DstXprType, OtherXpr, Product<Lhs,Rhs,DefaultProduct>, Scalar, internal::add_assign_op<Scalar>, internal::add_assign_op<Scalar> > >+template< typename DstXprType, typename OtherXpr, typename Lhs, typename Rhs, typename DstScalar, typename SrcScalar, typename OtherScalar,typename ProdScalar> >+struct Assignment<DstXprType, CwiseBinaryOp<internal::scalar_sum_op<OtherScalar,ProdScalar>, const OtherXpr, >+ const Product<Lhs,Rhs,DefaultProduct> >, internal::add_assign_op<DstScalar,SrcScalar>, Dense2Dense> >+ : assignment_from_xpr_plus_product<DstXprType, OtherXpr, Product<Lhs,Rhs,DefaultProduct>, internal::add_assign_op<DstScalar,OtherScalar>, internal::add_assign_op<DstScalar,ProdScalar> > > {}; >-template< typename DstXprType, typename OtherXpr, typename Lhs, typename Rhs, typename Scalar> 
>-struct Assignment<DstXprType, CwiseBinaryOp<internal::scalar_sum_op<Scalar>, const OtherXpr, >- const Product<Lhs,Rhs,DefaultProduct> >, internal::sub_assign_op<Scalar>, Dense2Dense> >- : assignment_from_xpr_plus_product<DstXprType, OtherXpr, Product<Lhs,Rhs,DefaultProduct>, Scalar, internal::sub_assign_op<Scalar>, internal::sub_assign_op<Scalar> > >+template< typename DstXprType, typename OtherXpr, typename Lhs, typename Rhs, typename DstScalar, typename SrcScalar, typename OtherScalar,typename ProdScalar> >+struct Assignment<DstXprType, CwiseBinaryOp<internal::scalar_sum_op<OtherScalar,ProdScalar>, const OtherXpr, >+ const Product<Lhs,Rhs,DefaultProduct> >, internal::sub_assign_op<DstScalar,SrcScalar>, Dense2Dense> >+ : assignment_from_xpr_plus_product<DstXprType, OtherXpr, Product<Lhs,Rhs,DefaultProduct>, internal::sub_assign_op<DstScalar,OtherScalar>, internal::sub_assign_op<DstScalar,ProdScalar> > > {}; > //---------------------------------------- > > template<typename Lhs, typename Rhs> > struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,InnerProduct> > { > template<typename Dst> > static inline void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) >@@ -364,31 +365,31 @@ struct generic_product_impl<Lhs,Rhs,Dens > { > typedef typename Product<Lhs,Rhs>::Scalar Scalar; > > template<typename Dst> > static EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) > { > // Same as: dst.noalias() = lhs.lazyProduct(rhs); > // but easier on the compiler side >- call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::assign_op<Scalar>()); >+ call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::assign_op<typename Dst::Scalar,Scalar>()); > } > > template<typename Dst> > static EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) > { > // dst.noalias() += lhs.lazyProduct(rhs); >- call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::add_assign_op<Scalar>()); >+ call_assignment_no_alias(dst, 
lhs.lazyProduct(rhs), internal::add_assign_op<typename Dst::Scalar,Scalar>()); > } > > template<typename Dst> > static EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) > { > // dst.noalias() -= lhs.lazyProduct(rhs); >- call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::sub_assign_op<Scalar>()); >+ call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::sub_assign_op<typename Dst::Scalar,Scalar>()); > } > > // template<typename Dst> > // static inline void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) > // { dst.noalias() += alpha * lhs.lazyProduct(rhs); } > }; > > // This specialization enforces the use of a coefficient-based evaluation strategy >@@ -730,17 +731,17 @@ struct generic_product_impl<Lhs,Rhs,Dens > /*************************************************************************** > * Diagonal products > ***************************************************************************/ > > template<typename MatrixType, typename DiagonalType, typename Derived, int ProductOrder> > struct diagonal_product_evaluator_base > : evaluator_base<Derived> > { >- typedef typename scalar_product_traits<typename MatrixType::Scalar, typename DiagonalType::Scalar>::ReturnType Scalar; >+ typedef typename ScalarBinaryOpTraits<typename MatrixType::Scalar, typename DiagonalType::Scalar>::ReturnType Scalar; > public: > enum { > CoeffReadCost = NumTraits<Scalar>::MulCost + evaluator<MatrixType>::CoeffReadCost + evaluator<DiagonalType>::CoeffReadCost, > > MatrixFlags = evaluator<MatrixType>::Flags, > DiagFlags = evaluator<DiagonalType>::Flags, > _StorageOrder = MatrixFlags & RowMajorBit ? 
RowMajor : ColMajor, > _ScalarAccessOnDiag = !((int(_StorageOrder) == ColMajor && int(ProductOrder) == OnTheLeft) >diff --git a/Eigen/src/Core/Redux.h b/Eigen/src/Core/Redux.h >--- a/Eigen/src/Core/Redux.h >+++ b/Eigen/src/Core/Redux.h >@@ -445,32 +445,32 @@ DenseBase<Derived>::maxCoeff() const > * \sa trace(), prod(), mean() > */ > template<typename Derived> > EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar > DenseBase<Derived>::sum() const > { > if(SizeAtCompileTime==0 || (SizeAtCompileTime==Dynamic && size()==0)) > return Scalar(0); >- return derived().redux(Eigen::internal::scalar_sum_op<Scalar>()); >+ return derived().redux(Eigen::internal::scalar_sum_op<Scalar,Scalar>()); > } > > /** \returns the mean of all coefficients of *this > * > * \sa trace(), prod(), sum() > */ > template<typename Derived> > EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar > DenseBase<Derived>::mean() const > { > #ifdef __INTEL_COMPILER > #pragma warning push > #pragma warning ( disable : 2259 ) > #endif >- return Scalar(derived().redux(Eigen::internal::scalar_sum_op<Scalar>())) / Scalar(this->size()); >+ return Scalar(derived().redux(Eigen::internal::scalar_sum_op<Scalar,Scalar>())) / Scalar(this->size()); > #ifdef __INTEL_COMPILER > #pragma warning pop > #endif > } > > /** \returns the product of all coefficients of *this > * > * Example: \include MatrixBase_prod.cpp >diff --git a/Eigen/src/Core/Ref.h b/Eigen/src/Core/Ref.h >--- a/Eigen/src/Core/Ref.h >+++ b/Eigen/src/Core/Ref.h >@@ -257,17 +257,17 @@ template<typename TPlainObjectType, int > EIGEN_DEVICE_FUNC void construct(const Expression& expr,internal::true_type) > { > Base::construct(expr); > } > > template<typename Expression> > EIGEN_DEVICE_FUNC void construct(const Expression& expr, internal::false_type) > { >- internal::call_assignment_no_alias(m_object,expr,internal::assign_op<Scalar>()); >+ internal::call_assignment_no_alias(m_object,expr,internal::assign_op<Scalar,Scalar>()); > 
Base::construct(m_object); > } > > protected: > TPlainObjectType m_object; > }; > > } // end namespace Eigen >diff --git a/Eigen/src/Core/SelfCwiseBinaryOp.h b/Eigen/src/Core/SelfCwiseBinaryOp.h >--- a/Eigen/src/Core/SelfCwiseBinaryOp.h >+++ b/Eigen/src/Core/SelfCwiseBinaryOp.h >@@ -7,43 +7,45 @@ > // Public License v. 2.0. If a copy of the MPL was not distributed > // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. > > #ifndef EIGEN_SELFCWISEBINARYOP_H > #define EIGEN_SELFCWISEBINARYOP_H > > namespace Eigen { > >+// TODO generalize the scalar type of 'other' >+ > template<typename Derived> > EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator*=(const Scalar& other) > { > typedef typename Derived::PlainObject PlainObject; >- internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::mul_assign_op<Scalar>()); >+ internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::mul_assign_op<Scalar,Scalar>()); > return derived(); > } > > template<typename Derived> > EIGEN_STRONG_INLINE Derived& ArrayBase<Derived>::operator+=(const Scalar& other) > { > typedef typename Derived::PlainObject PlainObject; >- internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::add_assign_op<Scalar>()); >+ internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::add_assign_op<Scalar,Scalar>()); > return derived(); > } > > template<typename Derived> > EIGEN_STRONG_INLINE Derived& ArrayBase<Derived>::operator-=(const Scalar& other) > { > typedef typename Derived::PlainObject PlainObject; >- internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::sub_assign_op<Scalar>()); >+ internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::sub_assign_op<Scalar,Scalar>()); > return derived(); > } > > template<typename Derived> > 
EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator/=(const Scalar& other) > { > typedef typename Derived::PlainObject PlainObject; >- internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::div_assign_op<Scalar>()); >+ internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::div_assign_op<Scalar,Scalar>()); > return derived(); > } > > } // end namespace Eigen > > #endif // EIGEN_SELFCWISEBINARYOP_H >diff --git a/Eigen/src/Core/Solve.h b/Eigen/src/Core/Solve.h >--- a/Eigen/src/Core/Solve.h >+++ b/Eigen/src/Core/Solve.h >@@ -129,43 +129,44 @@ struct evaluator<Solve<Decomposition,Rhs > > protected: > PlainObject m_result; > }; > > // Specialization for "dst = dec.solve(rhs)" > // NOTE we need to specialize it for Dense2Dense to avoid ambiguous specialization error and a Sparse2Sparse specialization must exist somewhere > template<typename DstXprType, typename DecType, typename RhsType, typename Scalar> >-struct Assignment<DstXprType, Solve<DecType,RhsType>, internal::assign_op<Scalar>, Dense2Dense, Scalar> >+struct Assignment<DstXprType, Solve<DecType,RhsType>, internal::assign_op<Scalar,Scalar>, Dense2Dense, Scalar> > { > typedef Solve<DecType,RhsType> SrcXprType; >- static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &) >+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &) > { > // FIXME shall we resize dst here? 
> src.dec()._solve_impl(src.rhs(), dst); > } > }; > > // Specialization for "dst = dec.transpose().solve(rhs)" > template<typename DstXprType, typename DecType, typename RhsType, typename Scalar> >-struct Assignment<DstXprType, Solve<Transpose<const DecType>,RhsType>, internal::assign_op<Scalar>, Dense2Dense, Scalar> >+struct Assignment<DstXprType, Solve<Transpose<const DecType>,RhsType>, internal::assign_op<Scalar,Scalar>, Dense2Dense, Scalar> > { > typedef Solve<Transpose<const DecType>,RhsType> SrcXprType; >- static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &) >+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &) > { > src.dec().nestedExpression().template _solve_impl_transposed<false>(src.rhs(), dst); > } > }; > > // Specialization for "dst = dec.adjoint().solve(rhs)" > template<typename DstXprType, typename DecType, typename RhsType, typename Scalar> >-struct Assignment<DstXprType, Solve<CwiseUnaryOp<internal::scalar_conjugate_op<typename DecType::Scalar>, const Transpose<const DecType> >,RhsType>, internal::assign_op<Scalar>, Dense2Dense, Scalar> >+struct Assignment<DstXprType, Solve<CwiseUnaryOp<internal::scalar_conjugate_op<typename DecType::Scalar>, const Transpose<const DecType> >,RhsType>, >+ internal::assign_op<Scalar,Scalar>, Dense2Dense, Scalar> > { > typedef Solve<CwiseUnaryOp<internal::scalar_conjugate_op<typename DecType::Scalar>, const Transpose<const DecType> >,RhsType> SrcXprType; >- static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &) >+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &) > { > src.dec().nestedExpression().nestedExpression().template _solve_impl_transposed<true>(src.rhs(), dst); > } > }; > > } // end namepsace internal > > } // end namespace Eigen >diff --git a/Eigen/src/Core/TriangularMatrix.h b/Eigen/src/Core/TriangularMatrix.h >--- 
a/Eigen/src/Core/TriangularMatrix.h >+++ b/Eigen/src/Core/TriangularMatrix.h >@@ -362,24 +362,24 @@ template<typename _MatrixType, unsigned > * \sa DenseCoeffsBase::innerStride() */ > EIGEN_DEVICE_FUNC > inline Index innerStride() const { return derived().nestedExpression().innerStride(); } > > /** \sa MatrixBase::operator+=() */ > template<typename Other> > EIGEN_DEVICE_FUNC > TriangularViewType& operator+=(const DenseBase<Other>& other) { >- internal::call_assignment_no_alias(derived(), other.derived(), internal::add_assign_op<Scalar>()); >+ internal::call_assignment_no_alias(derived(), other.derived(), internal::add_assign_op<Scalar,typename Other::Scalar>()); > return derived(); > } > /** \sa MatrixBase::operator-=() */ > template<typename Other> > EIGEN_DEVICE_FUNC > TriangularViewType& operator-=(const DenseBase<Other>& other) { >- internal::call_assignment_no_alias(derived(), other.derived(), internal::sub_assign_op<Scalar>()); >+ internal::call_assignment_no_alias(derived(), other.derived(), internal::sub_assign_op<Scalar,typename Other::Scalar>()); > return derived(); > } > > /** \sa MatrixBase::operator*=() */ > EIGEN_DEVICE_FUNC > TriangularViewType& operator*=(const typename internal::traits<MatrixType>::Scalar& other) { return *this = derived().nestedExpression() * other; } > /** \sa DenseBase::operator/=() */ > EIGEN_DEVICE_FUNC >@@ -547,17 +547,17 @@ template<typename _MatrixType, unsigned > ***************************************************************************/ > > // FIXME should we keep that possibility > template<typename MatrixType, unsigned int Mode> > template<typename OtherDerived> > inline TriangularView<MatrixType, Mode>& > TriangularViewImpl<MatrixType, Mode, Dense>::operator=(const MatrixBase<OtherDerived>& other) > { >- internal::call_assignment_no_alias(derived(), other.derived(), internal::assign_op<Scalar>()); >+ internal::call_assignment_no_alias(derived(), other.derived(), internal::assign_op<Scalar,typename 
OtherDerived::Scalar>()); > return derived(); > } > > // FIXME should we keep that possibility > template<typename MatrixType, unsigned int Mode> > template<typename OtherDerived> > void TriangularViewImpl<MatrixType, Mode, Dense>::lazyAssign(const MatrixBase<OtherDerived>& other) > { >@@ -799,17 +799,17 @@ void call_triangular_assignment_loop(con > > triangular_assignment_loop<Kernel, Mode, unroll ? int(DstXprType::SizeAtCompileTime) : Dynamic, SetOpposite>::run(kernel); > } > > template<int Mode, bool SetOpposite, typename DstXprType, typename SrcXprType> > EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE > void call_triangular_assignment_loop(const DstXprType& dst, const SrcXprType& src) > { >- call_triangular_assignment_loop<Mode,SetOpposite>(dst, src, internal::assign_op<typename DstXprType::Scalar>()); >+ call_triangular_assignment_loop<Mode,SetOpposite>(dst, src, internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>()); > } > > template<> struct AssignmentKind<TriangularShape,TriangularShape> { typedef Triangular2Triangular Kind; }; > template<> struct AssignmentKind<DenseShape,TriangularShape> { typedef Triangular2Dense Kind; }; > template<> struct AssignmentKind<TriangularShape,DenseShape> { typedef Dense2Triangular Kind; }; > > > template< typename DstXprType, typename SrcXprType, typename Functor, typename Scalar> >@@ -928,43 +928,43 @@ void TriangularBase<Derived>::evalToLazy > other.derived().resize(this->rows(), this->cols()); > internal::call_triangular_assignment_loop<Derived::Mode,(Derived::Mode&SelfAdjoint)==0 /* SetOpposite */>(other.derived(), derived().nestedExpression()); > } > > namespace internal { > > // Triangular = Product > template< typename DstXprType, typename Lhs, typename Rhs, typename Scalar> >-struct Assignment<DstXprType, Product<Lhs,Rhs,DefaultProduct>, internal::assign_op<Scalar>, Dense2Triangular, Scalar> >+struct Assignment<DstXprType, Product<Lhs,Rhs,DefaultProduct>, internal::assign_op<Scalar,typename 
Product<Lhs,Rhs,DefaultProduct>::Scalar>, Dense2Triangular, Scalar> > { > typedef Product<Lhs,Rhs,DefaultProduct> SrcXprType; >- static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &) >+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename SrcXprType::Scalar> &) > { > dst.setZero(); > dst._assignProduct(src, 1); > } > }; > > // Triangular += Product > template< typename DstXprType, typename Lhs, typename Rhs, typename Scalar> >-struct Assignment<DstXprType, Product<Lhs,Rhs,DefaultProduct>, internal::add_assign_op<Scalar>, Dense2Triangular, Scalar> >+struct Assignment<DstXprType, Product<Lhs,Rhs,DefaultProduct>, internal::add_assign_op<Scalar,typename Product<Lhs,Rhs,DefaultProduct>::Scalar>, Dense2Triangular, Scalar> > { > typedef Product<Lhs,Rhs,DefaultProduct> SrcXprType; >- static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<Scalar> &) >+ static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<Scalar,typename SrcXprType::Scalar> &) > { > dst._assignProduct(src, 1); > } > }; > > // Triangular -= Product > template< typename DstXprType, typename Lhs, typename Rhs, typename Scalar> >-struct Assignment<DstXprType, Product<Lhs,Rhs,DefaultProduct>, internal::sub_assign_op<Scalar>, Dense2Triangular, Scalar> >+struct Assignment<DstXprType, Product<Lhs,Rhs,DefaultProduct>, internal::sub_assign_op<Scalar,typename Product<Lhs,Rhs,DefaultProduct>::Scalar>, Dense2Triangular, Scalar> > { > typedef Product<Lhs,Rhs,DefaultProduct> SrcXprType; >- static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<Scalar> &) >+ static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<Scalar,typename SrcXprType::Scalar> &) > { > dst._assignProduct(src, -1); > } > }; > > } // end namespace internal > > } // end namespace Eigen >diff --git a/Eigen/src/Core/VectorwiseOp.h 
b/Eigen/src/Core/VectorwiseOp.h >--- a/Eigen/src/Core/VectorwiseOp.h >+++ b/Eigen/src/Core/VectorwiseOp.h >@@ -535,30 +535,30 @@ template<typename ExpressionType, int Di > EIGEN_STATIC_ASSERT_ARRAYXPR(ExpressionType) > EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived) > m_matrix /= extendedTo(other.derived()); > return const_cast<ExpressionType&>(m_matrix); > } > > /** Returns the expression of the sum of the vector \a other to each subvector of \c *this */ > template<typename OtherDerived> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC >- CwiseBinaryOp<internal::scalar_sum_op<Scalar>, >+ CwiseBinaryOp<internal::scalar_sum_op<Scalar,typename OtherDerived::Scalar>, > const ExpressionTypeNestedCleaned, > const typename ExtendedType<OtherDerived>::Type> > operator+(const DenseBase<OtherDerived>& other) const > { > EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) > EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived) > return m_matrix + extendedTo(other.derived()); > } > > /** Returns the expression of the difference between each subvector of \c *this and the vector \a other */ > template<typename OtherDerived> > EIGEN_DEVICE_FUNC >- CwiseBinaryOp<internal::scalar_difference_op<Scalar>, >+ CwiseBinaryOp<internal::scalar_difference_op<Scalar,typename OtherDerived::Scalar>, > const ExpressionTypeNestedCleaned, > const typename ExtendedType<OtherDerived>::Type> > operator-(const DenseBase<OtherDerived>& other) const > { > EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) > EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived) > return m_matrix - extendedTo(other.derived()); > } >diff --git a/Eigen/src/Core/functors/AssignmentFunctors.h b/Eigen/src/Core/functors/AssignmentFunctors.h >--- a/Eigen/src/Core/functors/AssignmentFunctors.h >+++ b/Eigen/src/Core/functors/AssignmentFunctors.h >@@ -13,72 +13,72 @@ > namespace Eigen { > > namespace internal { > > /** \internal > * \brief Template functor for scalar/packet assignment > * > */ >-template<typename 
Scalar> struct assign_op { >+template<typename DstScalar,typename SrcScalar> struct assign_op { > > EIGEN_EMPTY_STRUCT_CTOR(assign_op) >- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(Scalar& a, const Scalar& b) const { a = b; } >+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(DstScalar& a, const SrcScalar& b) const { a = b; } > > template<int Alignment, typename Packet> >- EIGEN_STRONG_INLINE void assignPacket(Scalar* a, const Packet& b) const >- { internal::pstoret<Scalar,Packet,Alignment>(a,b); } >+ EIGEN_STRONG_INLINE void assignPacket(DstScalar* a, const Packet& b) const >+ { internal::pstoret<DstScalar,Packet,Alignment>(a,b); } > }; >-template<typename Scalar> >-struct functor_traits<assign_op<Scalar> > { >+template<typename DstScalar,typename SrcScalar> >+struct functor_traits<assign_op<DstScalar,SrcScalar> > { > enum { >- Cost = NumTraits<Scalar>::ReadCost, >- PacketAccess = packet_traits<Scalar>::Vectorizable >+ Cost = NumTraits<DstScalar>::ReadCost, >+ PacketAccess = is_same<DstScalar,SrcScalar>::value && packet_traits<DstScalar>::Vectorizable && packet_traits<SrcScalar>::Vectorizable > }; > }; > > /** \internal > * \brief Template functor for scalar/packet assignment with addition > * > */ >-template<typename Scalar> struct add_assign_op { >+template<typename DstScalar,typename SrcScalar> struct add_assign_op { > > EIGEN_EMPTY_STRUCT_CTOR(add_assign_op) >- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(Scalar& a, const Scalar& b) const { a += b; } >+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(DstScalar& a, const SrcScalar& b) const { a += b; } > > template<int Alignment, typename Packet> >- EIGEN_STRONG_INLINE void assignPacket(Scalar* a, const Packet& b) const >- { internal::pstoret<Scalar,Packet,Alignment>(a,internal::padd(internal::ploadt<Packet,Alignment>(a),b)); } >+ EIGEN_STRONG_INLINE void assignPacket(DstScalar* a, const Packet& b) const >+ { 
internal::pstoret<DstScalar,Packet,Alignment>(a,internal::padd(internal::ploadt<Packet,Alignment>(a),b)); } > }; >-template<typename Scalar> >-struct functor_traits<add_assign_op<Scalar> > { >+template<typename DstScalar,typename SrcScalar> >+struct functor_traits<add_assign_op<DstScalar,SrcScalar> > { > enum { >- Cost = NumTraits<Scalar>::ReadCost + NumTraits<Scalar>::AddCost, >- PacketAccess = packet_traits<Scalar>::HasAdd >+ Cost = NumTraits<DstScalar>::ReadCost + NumTraits<DstScalar>::AddCost, >+ PacketAccess = is_same<DstScalar,SrcScalar>::value && packet_traits<DstScalar>::HasAdd > }; > }; > > /** \internal > * \brief Template functor for scalar/packet assignment with subtraction > * > */ >-template<typename Scalar> struct sub_assign_op { >+template<typename DstScalar,typename SrcScalar> struct sub_assign_op { > > EIGEN_EMPTY_STRUCT_CTOR(sub_assign_op) >- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(Scalar& a, const Scalar& b) const { a -= b; } >+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(DstScalar& a, const SrcScalar& b) const { a -= b; } > > template<int Alignment, typename Packet> >- EIGEN_STRONG_INLINE void assignPacket(Scalar* a, const Packet& b) const >- { internal::pstoret<Scalar,Packet,Alignment>(a,internal::psub(internal::ploadt<Packet,Alignment>(a),b)); } >+ EIGEN_STRONG_INLINE void assignPacket(DstScalar* a, const Packet& b) const >+ { internal::pstoret<DstScalar,Packet,Alignment>(a,internal::psub(internal::ploadt<Packet,Alignment>(a),b)); } > }; >-template<typename Scalar> >-struct functor_traits<sub_assign_op<Scalar> > { >+template<typename DstScalar,typename SrcScalar> >+struct functor_traits<sub_assign_op<DstScalar,SrcScalar> > { > enum { >- Cost = NumTraits<Scalar>::ReadCost + NumTraits<Scalar>::AddCost, >- PacketAccess = packet_traits<Scalar>::HasSub >+ Cost = NumTraits<DstScalar>::ReadCost + NumTraits<DstScalar>::AddCost, >+ PacketAccess = is_same<DstScalar,SrcScalar>::value && packet_traits<DstScalar>::HasSub > }; > 
}; > > /** \internal > * \brief Template functor for scalar/packet assignment with multiplication > * > */ > template<typename DstScalar, typename SrcScalar=DstScalar> >@@ -93,17 +93,16 @@ struct mul_assign_op { > }; > template<typename DstScalar, typename SrcScalar> > struct functor_traits<mul_assign_op<DstScalar,SrcScalar> > { > enum { > Cost = NumTraits<DstScalar>::ReadCost + NumTraits<DstScalar>::MulCost, > PacketAccess = is_same<DstScalar,SrcScalar>::value && packet_traits<DstScalar>::HasMul > }; > }; >-template<typename DstScalar,typename SrcScalar> struct functor_is_product_like<mul_assign_op<DstScalar,SrcScalar> > { enum { ret = 1 }; }; > > /** \internal > * \brief Template functor for scalar/packet assignment with diviving > * > */ > template<typename DstScalar, typename SrcScalar=DstScalar> struct div_assign_op { > > EIGEN_EMPTY_STRUCT_CTOR(div_assign_op) >@@ -115,17 +114,16 @@ template<typename DstScalar, typename Sr > }; > template<typename DstScalar, typename SrcScalar> > struct functor_traits<div_assign_op<DstScalar,SrcScalar> > { > enum { > Cost = NumTraits<DstScalar>::ReadCost + NumTraits<DstScalar>::MulCost, > PacketAccess = is_same<DstScalar,SrcScalar>::value && packet_traits<DstScalar>::HasDiv > }; > }; >-template<typename DstScalar,typename SrcScalar> struct functor_is_product_like<div_assign_op<DstScalar,SrcScalar> > { enum { ret = 1 }; }; > > /** \internal > * \brief Template functor for scalar/packet assignment with swapping > * > * It works as follow. For a non-vectorized evaluation loop, we have: > * for(i) func(A.coeffRef(i), B.coeff(i)); > * where B is a SwapWrapper expression. The trick is to make SwapWrapper::coeff behaves like a non-const coeffRef. 
> * Actually, SwapWrapper might not even be needed since even if B is a plain expression, since it has to be writable >diff --git a/Eigen/src/Core/functors/BinaryFunctors.h b/Eigen/src/Core/functors/BinaryFunctors.h >--- a/Eigen/src/Core/functors/BinaryFunctors.h >+++ b/Eigen/src/Core/functors/BinaryFunctors.h >@@ -16,86 +16,84 @@ namespace internal { > > //---------- associative binary functors ---------- > > /** \internal > * \brief Template functor to compute the sum of two scalars > * > * \sa class CwiseBinaryOp, MatrixBase::operator+, class VectorwiseOp, DenseBase::sum() > */ >-template<typename Scalar> struct scalar_sum_op { >-// typedef Scalar result_type; >+template<typename LhsScalar,typename RhsScalar> struct scalar_sum_op { >+ typedef typename ScalarBinaryOpTraits<LhsScalar,RhsScalar,scalar_sum_op>::ReturnType result_type; > EIGEN_EMPTY_STRUCT_CTOR(scalar_sum_op) >- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return a + b; } >+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a + b; } > template<typename Packet> > EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const > { return internal::padd(a,b); } > template<typename Packet> >- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar predux(const Packet& a) const >+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type predux(const Packet& a) const > { return internal::predux(a); } > }; >-template<typename Scalar> >-struct functor_traits<scalar_sum_op<Scalar> > { >+template<typename LhsScalar,typename RhsScalar> >+struct functor_traits<scalar_sum_op<LhsScalar,RhsScalar> > { > enum { >- Cost = NumTraits<Scalar>::AddCost, >- PacketAccess = packet_traits<Scalar>::HasAdd >+ Cost = (NumTraits<LhsScalar>::AddCost+NumTraits<RhsScalar>::AddCost)/2, // rough estimate! 
>+ PacketAccess = is_same<LhsScalar,RhsScalar>::value && packet_traits<LhsScalar>::HasAdd && packet_traits<RhsScalar>::HasAdd >+ // TODO vectorize mixed sum > }; > }; > > /** \internal > * \brief Template specialization to deprecate the summation of boolean expressions. > * This is required to solve Bug 426. > * \sa DenseBase::count(), DenseBase::any(), ArrayBase::cast(), MatrixBase::cast() > */ >-template<> struct scalar_sum_op<bool> : scalar_sum_op<int> { >+template<> struct scalar_sum_op<bool,bool> : scalar_sum_op<int,int> { > EIGEN_DEPRECATED > scalar_sum_op() {} > }; > > > /** \internal > * \brief Template functor to compute the product of two scalars > * > * \sa class CwiseBinaryOp, Cwise::operator*(), class VectorwiseOp, MatrixBase::redux() > */ > template<typename LhsScalar,typename RhsScalar> struct scalar_product_op { >- enum { >- // TODO vectorize mixed product >- Vectorizable = is_same<LhsScalar,RhsScalar>::value && packet_traits<LhsScalar>::HasMul && packet_traits<RhsScalar>::HasMul >- }; >- typedef typename scalar_product_traits<LhsScalar,RhsScalar>::ReturnType result_type; >+ typedef typename ScalarBinaryOpTraits<LhsScalar,RhsScalar,scalar_product_op>::ReturnType result_type; > EIGEN_EMPTY_STRUCT_CTOR(scalar_product_op) > EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a * b; } > template<typename Packet> > EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const > { return internal::pmul(a,b); } > template<typename Packet> > EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type predux(const Packet& a) const > { return internal::predux_mul(a); } > }; > template<typename LhsScalar,typename RhsScalar> > struct functor_traits<scalar_product_op<LhsScalar,RhsScalar> > { > enum { > Cost = (NumTraits<LhsScalar>::MulCost + NumTraits<RhsScalar>::MulCost)/2, // rough estimate! 
>- PacketAccess = scalar_product_op<LhsScalar,RhsScalar>::Vectorizable >+ PacketAccess = is_same<LhsScalar,RhsScalar>::value && packet_traits<LhsScalar>::HasMul && packet_traits<RhsScalar>::HasMul >+ // TODO vectorize mixed product > }; > }; > > /** \internal > * \brief Template functor to compute the conjugate product of two scalars > * > * This is a short cut for conj(x) * y which is needed for optimization purpose; in Eigen2 support mode, this becomes x * conj(y) > */ > template<typename LhsScalar,typename RhsScalar> struct scalar_conj_product_op { > > enum { > Conj = NumTraits<LhsScalar>::IsComplex > }; > >- typedef typename scalar_product_traits<LhsScalar,RhsScalar>::ReturnType result_type; >+ typedef typename ScalarBinaryOpTraits<LhsScalar,RhsScalar,scalar_conj_product_op>::ReturnType result_type; > > EIGEN_EMPTY_STRUCT_CTOR(scalar_conj_product_op) > EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const > { return conj_helper<LhsScalar,RhsScalar,Conj,false>().pmul(a,b); } > > template<typename Packet> > EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const > { return conj_helper<Packet,Packet,Conj,false>().pmul(a,b); } >@@ -264,53 +262,50 @@ struct functor_traits<scalar_binary_pow_ > > //---------- non associative binary functors ---------- > > /** \internal > * \brief Template functor to compute the difference of two scalars > * > * \sa class CwiseBinaryOp, MatrixBase::operator- > */ >-template<typename Scalar> struct scalar_difference_op { >+template<typename LhsScalar,typename RhsScalar> struct scalar_difference_op { >+ typedef typename ScalarBinaryOpTraits<LhsScalar,RhsScalar,scalar_difference_op>::ReturnType result_type; > EIGEN_EMPTY_STRUCT_CTOR(scalar_difference_op) >- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return a - b; } >+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type 
operator() (const LhsScalar& a, const RhsScalar& b) const { return a - b; } > template<typename Packet> > EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const > { return internal::psub(a,b); } > }; >-template<typename Scalar> >-struct functor_traits<scalar_difference_op<Scalar> > { >+template<typename LhsScalar,typename RhsScalar> >+struct functor_traits<scalar_difference_op<LhsScalar,RhsScalar> > { > enum { >- Cost = NumTraits<Scalar>::AddCost, >- PacketAccess = packet_traits<Scalar>::HasSub >+ Cost = (NumTraits<LhsScalar>::AddCost+NumTraits<RhsScalar>::AddCost)/2, >+ PacketAccess = is_same<LhsScalar,RhsScalar>::value && packet_traits<LhsScalar>::HasSub && packet_traits<RhsScalar>::HasSub > }; > }; > > /** \internal > * \brief Template functor to compute the quotient of two scalars > * > * \sa class CwiseBinaryOp, Cwise::operator/() > */ > template<typename LhsScalar,typename RhsScalar> struct scalar_quotient_op { >- enum { >- // TODO vectorize mixed product >- Vectorizable = is_same<LhsScalar,RhsScalar>::value && packet_traits<LhsScalar>::HasDiv && packet_traits<RhsScalar>::HasDiv >- }; >- typedef typename scalar_product_traits<LhsScalar,RhsScalar>::ReturnType result_type; >+ typedef typename ScalarBinaryOpTraits<LhsScalar,RhsScalar,scalar_quotient_op>::ReturnType result_type; > EIGEN_EMPTY_STRUCT_CTOR(scalar_quotient_op) > EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a / b; } > template<typename Packet> > EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const > { return internal::pdiv(a,b); } > }; > template<typename LhsScalar,typename RhsScalar> > struct functor_traits<scalar_quotient_op<LhsScalar,RhsScalar> > { > typedef typename scalar_quotient_op<LhsScalar,RhsScalar>::result_type result_type; > enum { >- PacketAccess = scalar_quotient_op<LhsScalar,RhsScalar>::Vectorizable, >+ PacketAccess = 
is_same<LhsScalar,RhsScalar>::value && packet_traits<LhsScalar>::HasDiv && packet_traits<RhsScalar>::HasDiv, > Cost = NumTraits<result_type>::template Div<PacketAccess>::Cost > }; > }; > > > > /** \internal > * \brief Template functor to compute the and of two booleans >@@ -441,17 +436,17 @@ struct scalar_multiple_op { > typename add_const_on_value_type<typename NumTraits<Scalar>::Nested>::type m_other; > }; > template<typename Scalar> > struct functor_traits<scalar_multiple_op<Scalar> > > { enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasMul }; }; > > template<typename Scalar1, typename Scalar2> > struct scalar_multiple2_op { >- typedef typename scalar_product_traits<Scalar1,Scalar2>::ReturnType result_type; >+ typedef typename ScalarBinaryOpTraits<Scalar1,Scalar2>::ReturnType result_type; > EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE scalar_multiple2_op(const scalar_multiple2_op& other) : m_other(other.m_other) { } > EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE scalar_multiple2_op(const Scalar2& other) : m_other(other) { } > EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const Scalar1& a) const { return a * m_other; } > typename add_const_on_value_type<typename NumTraits<Scalar2>::Nested>::type m_other; > }; > template<typename Scalar1,typename Scalar2> > struct functor_traits<scalar_multiple2_op<Scalar1,Scalar2> > > { enum { Cost = NumTraits<Scalar1>::MulCost, PacketAccess = false }; }; >@@ -476,35 +471,26 @@ struct scalar_quotient1_op { > typename add_const_on_value_type<typename NumTraits<Scalar>::Nested>::type m_other; > }; > template<typename Scalar> > struct functor_traits<scalar_quotient1_op<Scalar> > > { enum { Cost = 2 * NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasDiv }; }; > > template<typename Scalar1, typename Scalar2> > struct scalar_quotient2_op { >- typedef typename scalar_product_traits<Scalar1,Scalar2>::ReturnType result_type; >+ typedef typename 
ScalarBinaryOpTraits<Scalar1,Scalar2>::ReturnType result_type; > EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE scalar_quotient2_op(const scalar_quotient2_op& other) : m_other(other.m_other) { } > EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE scalar_quotient2_op(const Scalar2& other) : m_other(other) { } > EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const Scalar1& a) const { return a / m_other; } > typename add_const_on_value_type<typename NumTraits<Scalar2>::Nested>::type m_other; > }; > template<typename Scalar1,typename Scalar2> > struct functor_traits<scalar_quotient2_op<Scalar1,Scalar2> > > { enum { Cost = 2 * NumTraits<Scalar1>::MulCost, PacketAccess = false }; }; > >-// In Eigen, any binary op (Product, CwiseBinaryOp) require the Lhs and Rhs to have the same scalar type, except for multiplication >-// where the mixing of different types is handled by scalar_product_traits >-// In particular, real * complex<real> is allowed. >-// FIXME move this to functor_traits adding a functor_default >-template<typename Functor> struct functor_is_product_like { enum { ret = 0 }; }; >-template<typename LhsScalar,typename RhsScalar> struct functor_is_product_like<scalar_product_op<LhsScalar,RhsScalar> > { enum { ret = 1 }; }; >-template<typename LhsScalar,typename RhsScalar> struct functor_is_product_like<scalar_conj_product_op<LhsScalar,RhsScalar> > { enum { ret = 1 }; }; >-template<typename LhsScalar,typename RhsScalar> struct functor_is_product_like<scalar_quotient_op<LhsScalar,RhsScalar> > { enum { ret = 1 }; }; >- > > /** \internal > * \brief Template functor to add a scalar to a fixed other one > * \sa class CwiseUnaryOp, Array::operator+ > */ > /* If you wonder why doing the pset1() in packetOp() is an optimization check scalar_multiple_op */ > template<typename Scalar> > struct scalar_add_op { >diff --git a/Eigen/src/Core/products/GeneralBlockPanelKernel.h b/Eigen/src/Core/products/GeneralBlockPanelKernel.h >--- a/Eigen/src/Core/products/GeneralBlockPanelKernel.h 
>+++ b/Eigen/src/Core/products/GeneralBlockPanelKernel.h >@@ -358,17 +358,17 @@ inline void computeProductBlockingSizes( > * real*cplx : load lhs as (a0,a0,a1,a1), and mul as usual > */ > template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs, bool _ConjRhs> > class gebp_traits > { > public: > typedef _LhsScalar LhsScalar; > typedef _RhsScalar RhsScalar; >- typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar; >+ typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar; > > enum { > ConjLhs = _ConjLhs, > ConjRhs = _ConjRhs, > Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable, > LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1, > RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1, > ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1, >@@ -473,17 +473,17 @@ protected: > }; > > template<typename RealScalar, bool _ConjLhs> > class gebp_traits<std::complex<RealScalar>, RealScalar, _ConjLhs, false> > { > public: > typedef std::complex<RealScalar> LhsScalar; > typedef RealScalar RhsScalar; >- typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar; >+ typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar; > > enum { > ConjLhs = _ConjLhs, > ConjRhs = false, > Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable, > LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1, > RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1, > ResPacketSize = Vectorizable ? 
packet_traits<ResScalar>::size : 1, >diff --git a/Eigen/src/Core/products/GeneralMatrixMatrix.h b/Eigen/src/Core/products/GeneralMatrixMatrix.h >--- a/Eigen/src/Core/products/GeneralMatrixMatrix.h >+++ b/Eigen/src/Core/products/GeneralMatrixMatrix.h >@@ -20,17 +20,17 @@ template<typename _LhsScalar, typename _ > template< > typename Index, > typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, > typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs> > struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor> > { > typedef gebp_traits<RhsScalar,LhsScalar> Traits; > >- typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar; >+ typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar; > static EIGEN_STRONG_INLINE void run( > Index rows, Index cols, Index depth, > const LhsScalar* lhs, Index lhsStride, > const RhsScalar* rhs, Index rhsStride, > ResScalar* res, Index resStride, > ResScalar alpha, > level3_blocking<RhsScalar,LhsScalar>& blocking, > GemmParallelInfo<Index>* info = 0) >@@ -50,17 +50,17 @@ template< > typename Index, > typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, > typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs> > struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor> > { > > typedef gebp_traits<LhsScalar,RhsScalar> Traits; > >-typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar; >+typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar; > static void run(Index rows, Index cols, Index depth, > const LhsScalar* _lhs, Index lhsStride, > const RhsScalar* _rhs, Index rhsStride, > ResScalar* _res, Index resStride, > ResScalar alpha, > level3_blocking<LhsScalar,RhsScalar>& blocking, > GemmParallelInfo<Index>* info = 0) > { >diff --git 
a/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h b/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h >--- a/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h >+++ b/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h >@@ -35,34 +35,34 @@ template <typename Index, > int ResStorageOrder, int UpLo, int Version = Specialized> > struct general_matrix_matrix_triangular_product; > > // as usual if the result is row major => we transpose the product > template <typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, > typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs, int UpLo, int Version> > struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor,UpLo,Version> > { >- typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar; >+ typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar; > static EIGEN_STRONG_INLINE void run(Index size, Index depth,const LhsScalar* lhs, Index lhsStride, > const RhsScalar* rhs, Index rhsStride, ResScalar* res, Index resStride, > const ResScalar& alpha, level3_blocking<RhsScalar,LhsScalar>& blocking) > { > general_matrix_matrix_triangular_product<Index, > RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs, > LhsScalar, LhsStorageOrder==RowMajor ? 
ColMajor : RowMajor, ConjugateLhs, > ColMajor, UpLo==Lower?Upper:Lower> > ::run(size,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking); > } > }; > > template <typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, > typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs, int UpLo, int Version> > struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor,UpLo,Version> > { >- typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar; >+ typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar; > static EIGEN_STRONG_INLINE void run(Index size, Index depth,const LhsScalar* _lhs, Index lhsStride, > const RhsScalar* _rhs, Index rhsStride, ResScalar* _res, Index resStride, > const ResScalar& alpha, level3_blocking<LhsScalar,RhsScalar>& blocking) > { > typedef gebp_traits<LhsScalar,RhsScalar> Traits; > > typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper; > typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper; >diff --git a/Eigen/src/Core/products/GeneralMatrixVector.h b/Eigen/src/Core/products/GeneralMatrixVector.h >--- a/Eigen/src/Core/products/GeneralMatrixVector.h >+++ b/Eigen/src/Core/products/GeneralMatrixVector.h >@@ -53,17 +53,17 @@ namespace internal { > * One might also wonder why in the EvenAligned case we perform unaligned loads instead of using the aligned-loads plus re-alignment > * strategy as in the FirstAligned case. The reason is that we observed that unaligned loads on a 8 byte boundary are not too slow > * compared to unaligned loads on a 4 byte boundary. 
> * > */ > template<typename Index, typename LhsScalar, typename LhsMapper, bool ConjugateLhs, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version> > struct general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version> > { >- typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar; >+ typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar; > > enum { > Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable > && int(packet_traits<LhsScalar>::size)==int(packet_traits<RhsScalar>::size), > LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1, > RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1, > ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1 > }; >@@ -329,17 +329,17 @@ EIGEN_DONT_INLINE void general_matrix_ve > * > * Mixing type logic: > * - alpha is always a complex (or converted to a complex) > * - no vectorization > */ > template<typename Index, typename LhsScalar, typename LhsMapper, bool ConjugateLhs, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version> > struct general_matrix_vector_product<Index,LhsScalar,LhsMapper,RowMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version> > { >-typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar; >+typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar; > > enum { > Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable > && int(packet_traits<LhsScalar>::size)==int(packet_traits<RhsScalar>::size), > LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1, > RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1, > ResPacketSize = Vectorizable ? 
packet_traits<ResScalar>::size : 1 > }; >diff --git a/Eigen/src/Core/products/TriangularMatrixVector.h b/Eigen/src/Core/products/TriangularMatrixVector.h >--- a/Eigen/src/Core/products/TriangularMatrixVector.h >+++ b/Eigen/src/Core/products/TriangularMatrixVector.h >@@ -15,17 +15,17 @@ namespace Eigen { > namespace internal { > > template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs, int StorageOrder, int Version=Specialized> > struct triangular_matrix_vector_product; > > template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs, int Version> > struct triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,ColMajor,Version> > { >- typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar; >+ typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar; > enum { > IsLower = ((Mode&Lower)==Lower), > HasUnitDiag = (Mode & UnitDiag)==UnitDiag, > HasZeroDiag = (Mode & ZeroDiag)==ZeroDiag > }; > static EIGEN_DONT_INLINE void run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride, > const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const RhsScalar& alpha); > }; >@@ -86,17 +86,17 @@ EIGEN_DONT_INLINE void triangular_matrix > RhsMapper(&rhs.coeffRef(size), rhsIncr), > _res, resIncr, alpha); > } > } > > template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs,int Version> > struct triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,RowMajor,Version> > { >- typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar; >+ typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar; > enum { > IsLower = ((Mode&Lower)==Lower), > HasUnitDiag = (Mode & UnitDiag)==UnitDiag, > HasZeroDiag = (Mode & ZeroDiag)==ZeroDiag > }; > static EIGEN_DONT_INLINE void run(Index _rows, 
Index _cols, const LhsScalar* _lhs, Index lhsStride, > const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const ResScalar& alpha); > }; >diff --git a/Eigen/src/Core/util/ForwardDeclarations.h b/Eigen/src/Core/util/ForwardDeclarations.h >--- a/Eigen/src/Core/util/ForwardDeclarations.h >+++ b/Eigen/src/Core/util/ForwardDeclarations.h >@@ -125,16 +125,17 @@ template<typename MatrixType, unsigned i > template<typename MatrixType> class SparseView; > template<typename ExpressionType> class WithFormat; > template<typename MatrixType> struct CommaInitializer; > template<typename Derived> class ReturnByValue; > template<typename ExpressionType> class ArrayWrapper; > template<typename ExpressionType> class MatrixWrapper; > template<typename Derived> class SolverBase; > template<typename XprType> class InnerIterator; >+template<typename ScalarA, typename ScalarB, typename BinaryOp=void> struct ScalarBinaryOpTraits; > > namespace internal { > template<typename DecompositionType> struct kernel_retval_base; > template<typename DecompositionType> struct kernel_retval; > template<typename DecompositionType> struct image_retval_base; > template<typename DecompositionType> struct image_retval; > } // end namespace internal > >@@ -169,18 +170,18 @@ struct ProductReturnType; > template<typename Lhs, typename Rhs> struct LazyProductReturnType; > > namespace internal { > > // Provides scalar/packet-wise product and product with accumulation > // with optional conjugation of the arguments. 
> template<typename LhsScalar, typename RhsScalar, bool ConjLhs=false, bool ConjRhs=false> struct conj_helper; > >-template<typename Scalar> struct scalar_sum_op; >-template<typename Scalar> struct scalar_difference_op; >+template<typename LhsScalar,typename RhsScalar> struct scalar_sum_op; >+template<typename LhsScalar,typename RhsScalar> struct scalar_difference_op; > template<typename LhsScalar,typename RhsScalar> struct scalar_conj_product_op; > template<typename Scalar> struct scalar_opposite_op; > template<typename Scalar> struct scalar_conjugate_op; > template<typename Scalar> struct scalar_real_op; > template<typename Scalar> struct scalar_imag_op; > template<typename Scalar> struct scalar_abs_op; > template<typename Scalar> struct scalar_abs2_op; > template<typename Scalar> struct scalar_sqrt_op; >diff --git a/Eigen/src/Core/util/Macros.h b/Eigen/src/Core/util/Macros.h >--- a/Eigen/src/Core/util/Macros.h >+++ b/Eigen/src/Core/util/Macros.h >@@ -880,19 +880,19 @@ namespace Eigen { > template<typename OtherDerived> \ > EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseBinaryOp<FUNCTOR<Scalar>, const Derived, const OtherDerived> \ > (METHOD)(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \ > { \ > return CwiseBinaryOp<FUNCTOR<Scalar>, const Derived, const OtherDerived>(derived(), other.derived()); \ > } > > // the expression type of a cwise product >-#define EIGEN_CWISE_PRODUCT_RETURN_TYPE(LHS,RHS) \ >+#define EIGEN_CWISE_BINARY_RETURN_TYPE(LHS,RHS,OPNAME) \ > CwiseBinaryOp< \ >- internal::scalar_product_op< \ >+ EIGEN_CAT(EIGEN_CAT(internal::scalar_,OPNAME),_op)< \ > typename internal::traits<LHS>::Scalar, \ > typename internal::traits<RHS>::Scalar \ > >, \ > const LHS, \ > const RHS \ > > > > #ifdef EIGEN_EXCEPTIONS >diff --git a/Eigen/src/Core/util/Meta.h b/Eigen/src/Core/util/Meta.h >--- a/Eigen/src/Core/util/Meta.h >+++ b/Eigen/src/Core/util/Meta.h >@@ -370,43 +370,16 @@ struct meta_least_common_multiple<A,B,K, > }; > > /** \internal 
determines whether the product of two numeric types is allowed and what the return type is */ > template<typename T, typename U> struct scalar_product_traits > { > enum { Defined = 0 }; > }; > >-template<typename T> struct scalar_product_traits<T,T> >-{ >- enum { >- // Cost = NumTraits<T>::MulCost, >- Defined = 1 >- }; >- typedef T ReturnType; >-}; >- >-template<typename T> struct scalar_product_traits<T,std::complex<T> > >-{ >- enum { >- // Cost = 2*NumTraits<T>::MulCost, >- Defined = 1 >- }; >- typedef std::complex<T> ReturnType; >-}; >- >-template<typename T> struct scalar_product_traits<std::complex<T>, T> >-{ >- enum { >- // Cost = 2*NumTraits<T>::MulCost, >- Defined = 1 >- }; >- typedef std::complex<T> ReturnType; >-}; >- > // FIXME quick workaround around current limitation of result_of > // template<typename Scalar, typename ArgType0, typename ArgType1> > // struct result_of<scalar_product_op<Scalar>(ArgType0,ArgType1)> { > // typedef typename scalar_product_traits<typename remove_all<ArgType0>::type, typename remove_all<ArgType1>::type>::ReturnType type; > // }; > > } // end namespace internal > >@@ -429,11 +402,48 @@ using std::numeric_limits; > template<typename T> > T div_ceil(const T &a, const T &b) > { > return (a+b-1) / b; > } > > } // end namespace numext > >+ >+/** \class ScalarBinaryOpTraits >+ * \ingroup Core_Module >+ * >+ * \brief Determines whether the given binary operation of two numeric types is allowed and what the scalar return type is. >+ * >+ * \sa CwiseBinaryOp >+ */ >+template<typename ScalarA, typename ScalarB, typename BinaryOp> >+struct ScalarBinaryOpTraits >+#ifndef EIGEN_PARSED_BY_DOXYGEN >+ // for backward compatibility, use the hints given by the (deprecated) internal::scalar_product_traits class. 
>+ : internal::scalar_product_traits<ScalarA,ScalarB> >+#endif // EIGEN_PARSED_BY_DOXYGEN >+{}; >+ >+template<typename T, typename BinaryOp> >+struct ScalarBinaryOpTraits<T,T,BinaryOp> >+{ >+ enum { Defined = 1 }; >+ typedef T ReturnType; >+}; >+ >+template<typename T, typename BinaryOp> >+struct ScalarBinaryOpTraits<T,std::complex<T>,BinaryOp> >+{ >+ enum { Defined = 1 }; >+ typedef std::complex<T> ReturnType; >+}; >+ >+template<typename T, typename BinaryOp> >+struct ScalarBinaryOpTraits<std::complex<T>, T,BinaryOp> >+{ >+ enum { Defined = 1 }; >+ typedef std::complex<T> ReturnType; >+}; >+ > } // end namespace Eigen > > #endif // EIGEN_META_H >diff --git a/Eigen/src/Core/util/XprHelper.h b/Eigen/src/Core/util/XprHelper.h >--- a/Eigen/src/Core/util/XprHelper.h >+++ b/Eigen/src/Core/util/XprHelper.h >@@ -644,24 +644,20 @@ std::string demangle_flags(int f) > if(f&NoPreferredStorageOrderBit) res += " | NoPreferredStorageOrderBit"; > > return res; > } > #endif > > } // end namespace internal > >-// we require Lhs and Rhs to have the same scalar type. Currently there is no example of a binary functor >-// that would take two operands of different types. If there were such an example, then this check should be >-// moved to the BinaryOp functors, on a per-case basis. This would however require a change in the BinaryOp functors, as >-// currently they take only one typename Scalar template parameter. >+// We require Lhs and Rhs to have "compatible" scalar types. > // It is tempting to always allow mixing different types but remember that this is often impossible in the vectorized paths. > // So allowing mixing different types gives very unexpected errors when enabling vectorization, when the user tries to > // add together a float matrix and a double matrix. >+// Treat "void" as a special case. Needed for permutation products. 
TODO: this should be handled by ScalarBinaryOpTraits > #define EIGEN_CHECK_BINARY_COMPATIBILIY(BINOP,LHS,RHS) \ >- EIGEN_STATIC_ASSERT((internal::functor_is_product_like<BINOP>::ret \ >- ? int(internal::scalar_product_traits<LHS, RHS>::Defined) \ >- : int(internal::is_same_or_void<LHS, RHS>::value)), \ >+ EIGEN_STATIC_ASSERT(int(internal::is_same_or_void<LHS, RHS>::value) || int(ScalarBinaryOpTraits<LHS, RHS,BINOP>::Defined), \ > YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) > > } // end namespace Eigen > > #endif // EIGEN_XPRHELPER_H >diff --git a/Eigen/src/Geometry/AlignedBox.h b/Eigen/src/Geometry/AlignedBox.h >--- a/Eigen/src/Geometry/AlignedBox.h >+++ b/Eigen/src/Geometry/AlignedBox.h >@@ -107,36 +107,36 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTO > inline VectorType& (min)() { return m_min; } > /** \returns the maximal corner */ > inline const VectorType& (max)() const { return m_max; } > /** \returns a non const reference to the maximal corner */ > inline VectorType& (max)() { return m_max; } > > /** \returns the center of the box */ > inline const CwiseUnaryOp<internal::scalar_quotient1_op<Scalar>, >- const CwiseBinaryOp<internal::scalar_sum_op<Scalar>, const VectorType, const VectorType> > >+ const CwiseBinaryOp<internal::scalar_sum_op<Scalar,Scalar>, const VectorType, const VectorType> > > center() const > { return (m_min+m_max)/2; } > > /** \returns the lengths of the sides of the bounding box. 
> * Note that this function does not get the same > * result for integral or floating scalar types: see > */ >- inline const CwiseBinaryOp< internal::scalar_difference_op<Scalar>, const VectorType, const VectorType> sizes() const >+ inline const CwiseBinaryOp< internal::scalar_difference_op<Scalar,Scalar>, const VectorType, const VectorType> sizes() const > { return m_max - m_min; } > > /** \returns the volume of the bounding box */ > inline Scalar volume() const > { return sizes().prod(); } > > /** \returns an expression for the bounding box diagonal vector > * if the length of the diagonal is needed: diagonal().norm() > * will provide it. > */ >- inline CwiseBinaryOp< internal::scalar_difference_op<Scalar>, const VectorType, const VectorType> diagonal() const >+ inline CwiseBinaryOp< internal::scalar_difference_op<Scalar,Scalar>, const VectorType, const VectorType> diagonal() const > { return sizes(); } > > /** \returns the vertex of the bounding box at the corner defined by > * the corner-id corner. It works only for a 1D, 2D or 3D bounding box. > * For 1D bounding boxes corners are named by 2 enum constants: > * BottomLeft and BottomRight. > * For 2D bounding boxes, corners are named by 4 enum constants: > * BottomLeft, BottomRight, TopLeft, TopRight. 
>diff --git a/Eigen/src/Geometry/Homogeneous.h b/Eigen/src/Geometry/Homogeneous.h >--- a/Eigen/src/Geometry/Homogeneous.h >+++ b/Eigen/src/Geometry/Homogeneous.h >@@ -324,32 +324,32 @@ struct unary_evaluator<Homogeneous<ArgTy > } > > protected: > PlainObject m_temp; > }; > > // dense = homogeneous > template< typename DstXprType, typename ArgType, typename Scalar> >-struct Assignment<DstXprType, Homogeneous<ArgType,Vertical>, internal::assign_op<Scalar>, Dense2Dense, Scalar> >+struct Assignment<DstXprType, Homogeneous<ArgType,Vertical>, internal::assign_op<Scalar,typename ArgType::Scalar>, Dense2Dense, Scalar> > { > typedef Homogeneous<ArgType,Vertical> SrcXprType; >- static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &) >+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename ArgType::Scalar> &) > { > dst.template topRows<ArgType::RowsAtCompileTime>(src.nestedExpression().rows()) = src.nestedExpression(); > dst.row(dst.rows()-1).setOnes(); > } > }; > > // dense = homogeneous > template< typename DstXprType, typename ArgType, typename Scalar> >-struct Assignment<DstXprType, Homogeneous<ArgType,Horizontal>, internal::assign_op<Scalar>, Dense2Dense, Scalar> >+struct Assignment<DstXprType, Homogeneous<ArgType,Horizontal>, internal::assign_op<Scalar,typename ArgType::Scalar>, Dense2Dense, Scalar> > { > typedef Homogeneous<ArgType,Horizontal> SrcXprType; >- static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &) >+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename ArgType::Scalar> &) > { > dst.template leftCols<ArgType::ColsAtCompileTime>(src.nestedExpression().cols()) = src.nestedExpression(); > dst.col(dst.cols()-1).setOnes(); > } > }; > > template<typename LhsArg, typename Rhs, int ProductTag> > struct generic_product_impl<Homogeneous<LhsArg,Horizontal>, Rhs, HomogeneousShape, DenseShape, ProductTag> 
>@@ -368,17 +368,17 @@ struct homogeneous_right_product_refacto > Dim = Lhs::ColsAtCompileTime, > Rows = Lhs::RowsAtCompileTime > }; > typedef typename Rhs::template ConstNRowsBlockXpr<Dim>::Type LinearBlockConst; > typedef typename remove_const<LinearBlockConst>::type LinearBlock; > typedef typename Rhs::ConstRowXpr ConstantColumn; > typedef Replicate<const ConstantColumn,Rows,1> ConstantBlock; > typedef Product<Lhs,LinearBlock,LazyProduct> LinearProduct; >- typedef CwiseBinaryOp<internal::scalar_sum_op<typename Lhs::Scalar>, const LinearProduct, const ConstantBlock> Xpr; >+ typedef CwiseBinaryOp<internal::scalar_sum_op<typename Lhs::Scalar,typename Rhs::Scalar>, const LinearProduct, const ConstantBlock> Xpr; > }; > > template<typename Lhs, typename Rhs, int ProductTag> > struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, HomogeneousShape, DenseShape> > : public evaluator<typename homogeneous_right_product_refactoring_helper<typename Lhs::NestedExpression,Rhs>::Xpr> > { > typedef Product<Lhs, Rhs, LazyProduct> XprType; > typedef homogeneous_right_product_refactoring_helper<typename Lhs::NestedExpression,Rhs> helper; >@@ -409,17 +409,17 @@ struct homogeneous_left_product_refactor > Dim = Rhs::RowsAtCompileTime, > Cols = Rhs::ColsAtCompileTime > }; > typedef typename Lhs::template ConstNColsBlockXpr<Dim>::Type LinearBlockConst; > typedef typename remove_const<LinearBlockConst>::type LinearBlock; > typedef typename Lhs::ConstColXpr ConstantColumn; > typedef Replicate<const ConstantColumn,1,Cols> ConstantBlock; > typedef Product<LinearBlock,Rhs,LazyProduct> LinearProduct; >- typedef CwiseBinaryOp<internal::scalar_sum_op<typename Lhs::Scalar>, const LinearProduct, const ConstantBlock> Xpr; >+ typedef CwiseBinaryOp<internal::scalar_sum_op<typename Lhs::Scalar,typename Rhs::Scalar>, const LinearProduct, const ConstantBlock> Xpr; > }; > > template<typename Lhs, typename Rhs, int ProductTag> > struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, 
ProductTag, DenseShape, HomogeneousShape> > : public evaluator<typename homogeneous_left_product_refactoring_helper<Lhs,typename Rhs::NestedExpression>::Xpr> > { > typedef Product<Lhs, Rhs, LazyProduct> XprType; > typedef homogeneous_left_product_refactoring_helper<Lhs,typename Rhs::NestedExpression> helper; >diff --git a/Eigen/src/Householder/HouseholderSequence.h b/Eigen/src/Householder/HouseholderSequence.h >--- a/Eigen/src/Householder/HouseholderSequence.h >+++ b/Eigen/src/Householder/HouseholderSequence.h >@@ -103,17 +103,17 @@ struct hseq_side_dependent_impl<VectorsT > { > Index start = k+1+h.m_shift; > return Block<const VectorsType,1,Dynamic>(h.m_vectors, k, start, 1, h.rows()-start).transpose(); > } > }; > > template<typename OtherScalarType, typename MatrixType> struct matrix_type_times_scalar_type > { >- typedef typename scalar_product_traits<OtherScalarType, typename MatrixType::Scalar>::ReturnType >+ typedef typename ScalarBinaryOpTraits<OtherScalarType, typename MatrixType::Scalar>::ReturnType > ResultScalar; > typedef Matrix<ResultScalar, MatrixType::RowsAtCompileTime, MatrixType::ColsAtCompileTime, > 0, MatrixType::MaxRowsAtCompileTime, MatrixType::MaxColsAtCompileTime> Type; > }; > > } // end namespace internal > > template<typename VectorsType, typename CoeffsType, int Side> class HouseholderSequence >diff --git a/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h b/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h >--- a/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h >+++ b/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h >@@ -86,20 +86,20 @@ struct evaluator<SolveWithGuess<Decompos > > protected: > PlainObject m_result; > }; > > // Specialization for "dst = dec.solveWithGuess(rhs)" > // NOTE we need to specialize it for Dense2Dense to avoid ambiguous specialization error and a Sparse2Sparse specialization must exist somewhere > template<typename DstXprType, typename DecType, typename RhsType, typename GuessType, typename Scalar> >-struct 
Assignment<DstXprType, SolveWithGuess<DecType,RhsType,GuessType>, internal::assign_op<Scalar>, Dense2Dense, Scalar> >+struct Assignment<DstXprType, SolveWithGuess<DecType,RhsType,GuessType>, internal::assign_op<Scalar,Scalar>, Dense2Dense, Scalar> > { > typedef SolveWithGuess<DecType,RhsType,GuessType> SrcXprType; >- static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &) >+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &) > { > // FIXME shall we resize dst here? > dst = src.guess(); > src.dec()._solve_with_guess_impl(src.rhs(), dst/*, src.guess()*/); > } > }; > > } // end namepsace internal >diff --git a/Eigen/src/LU/FullPivLU.h b/Eigen/src/LU/FullPivLU.h >--- a/Eigen/src/LU/FullPivLU.h >+++ b/Eigen/src/LU/FullPivLU.h >@@ -834,22 +834,22 @@ void FullPivLU<_MatrixType>::_solve_impl > } > > #endif > > namespace internal { > > > /***** Implementation of inverse() *****************************************************/ >-template<typename DstXprType, typename MatrixType, typename Scalar> >-struct Assignment<DstXprType, Inverse<FullPivLU<MatrixType> >, internal::assign_op<Scalar>, Dense2Dense, Scalar> >+template<typename DstXprType, typename MatrixType> >+struct Assignment<DstXprType, Inverse<FullPivLU<MatrixType> >, internal::assign_op<typename DstXprType::Scalar,typename FullPivLU<MatrixType>::Scalar>, Dense2Dense> > { > typedef FullPivLU<MatrixType> LuType; > typedef Inverse<LuType> SrcXprType; >- static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &) >+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename MatrixType::Scalar> &) > { > dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols())); > } > }; > } // end namespace internal > > /******* MatrixBase methods *****************************************************************/ > >diff --git 
a/Eigen/src/LU/InverseImpl.h b/Eigen/src/LU/InverseImpl.h >--- a/Eigen/src/LU/InverseImpl.h >+++ b/Eigen/src/LU/InverseImpl.h >@@ -281,21 +281,21 @@ struct compute_inverse_and_det_with_chec > *** MatrixBase methods *** > *************************/ > > } // end namespace internal > > namespace internal { > > // Specialization for "dense = dense_xpr.inverse()" >-template<typename DstXprType, typename XprType, typename Scalar> >-struct Assignment<DstXprType, Inverse<XprType>, internal::assign_op<Scalar>, Dense2Dense, Scalar> >+template<typename DstXprType, typename XprType> >+struct Assignment<DstXprType, Inverse<XprType>, internal::assign_op<typename DstXprType::Scalar,typename XprType::Scalar>, Dense2Dense> > { > typedef Inverse<XprType> SrcXprType; >- static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &) >+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename XprType::Scalar> &) > { > // FIXME shall we resize dst here? 
> const int Size = EIGEN_PLAIN_ENUM_MIN(XprType::ColsAtCompileTime,DstXprType::ColsAtCompileTime); > EIGEN_ONLY_USED_FOR_DEBUG(Size); > eigen_assert(( (Size<=1) || (Size>4) || (extract_data(src.nestedExpression())!=extract_data(dst))) > && "Aliasing problem detected in inverse(), you need to do inverse().eval() here."); > > typedef typename internal::nested_eval<XprType,XprType::ColsAtCompileTime>::type ActualXprType; >diff --git a/Eigen/src/LU/PartialPivLU.h b/Eigen/src/LU/PartialPivLU.h >--- a/Eigen/src/LU/PartialPivLU.h >+++ b/Eigen/src/LU/PartialPivLU.h >@@ -521,21 +521,21 @@ MatrixType PartialPivLU<MatrixType>::rec > } > > /***** Implementation details *****************************************************/ > > namespace internal { > > /***** Implementation of inverse() *****************************************************/ > template<typename DstXprType, typename MatrixType, typename Scalar> >-struct Assignment<DstXprType, Inverse<PartialPivLU<MatrixType> >, internal::assign_op<Scalar>, Dense2Dense, Scalar> >+struct Assignment<DstXprType, Inverse<PartialPivLU<MatrixType> >, internal::assign_op<Scalar,Scalar>, Dense2Dense, Scalar> > { > typedef PartialPivLU<MatrixType> LuType; > typedef Inverse<LuType> SrcXprType; >- static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &) >+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &) > { > dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols())); > } > }; > } // end namespace internal > > /******** MatrixBase methods *******/ > >diff --git a/Eigen/src/QR/ColPivHouseholderQR.h b/Eigen/src/QR/ColPivHouseholderQR.h >--- a/Eigen/src/QR/ColPivHouseholderQR.h >+++ b/Eigen/src/QR/ColPivHouseholderQR.h >@@ -593,21 +593,21 @@ void ColPivHouseholderQR<_MatrixType>::_ > for(Index i = 0; i < nonzero_pivots; ++i) dst.row(m_colsPermutation.indices().coeff(i)) = c.row(i); > for(Index i = nonzero_pivots; i < cols(); ++i) 
dst.row(m_colsPermutation.indices().coeff(i)).setZero(); > } > #endif > > namespace internal { > > template<typename DstXprType, typename MatrixType, typename Scalar> >-struct Assignment<DstXprType, Inverse<ColPivHouseholderQR<MatrixType> >, internal::assign_op<Scalar>, Dense2Dense, Scalar> >+struct Assignment<DstXprType, Inverse<ColPivHouseholderQR<MatrixType> >, internal::assign_op<Scalar,Scalar>, Dense2Dense, Scalar> > { > typedef ColPivHouseholderQR<MatrixType> QrType; > typedef Inverse<QrType> SrcXprType; >- static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &) >+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &) > { > dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols())); > } > }; > > } // end namespace internal > > /** \returns the matrix Q as a sequence of householder transformations. >diff --git a/Eigen/src/QR/CompleteOrthogonalDecomposition.h b/Eigen/src/QR/CompleteOrthogonalDecomposition.h >--- a/Eigen/src/QR/CompleteOrthogonalDecomposition.h >+++ b/Eigen/src/QR/CompleteOrthogonalDecomposition.h >@@ -505,21 +505,21 @@ void CompleteOrthogonalDecomposition<_Ma > // Undo permutation to get x = P^{-1} * y. 
> dst = colsPermutation() * dst; > } > #endif > > namespace internal { > > template<typename DstXprType, typename MatrixType, typename Scalar> >-struct Assignment<DstXprType, Inverse<CompleteOrthogonalDecomposition<MatrixType> >, internal::assign_op<Scalar>, Dense2Dense, Scalar> >+struct Assignment<DstXprType, Inverse<CompleteOrthogonalDecomposition<MatrixType> >, internal::assign_op<Scalar,Scalar>, Dense2Dense, Scalar> > { > typedef CompleteOrthogonalDecomposition<MatrixType> CodType; > typedef Inverse<CodType> SrcXprType; >- static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &) >+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &) > { > dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.rows())); > } > }; > > } // end namespace internal > > /** \returns the matrix Q as a sequence of householder transformations */ >diff --git a/Eigen/src/QR/FullPivHouseholderQR.h b/Eigen/src/QR/FullPivHouseholderQR.h >--- a/Eigen/src/QR/FullPivHouseholderQR.h >+++ b/Eigen/src/QR/FullPivHouseholderQR.h >@@ -555,21 +555,21 @@ void FullPivHouseholderQR<_MatrixType>:: > for(Index i = 0; i < l_rank; ++i) dst.row(m_cols_permutation.indices().coeff(i)) = c.row(i); > for(Index i = l_rank; i < cols(); ++i) dst.row(m_cols_permutation.indices().coeff(i)).setZero(); > } > #endif > > namespace internal { > > template<typename DstXprType, typename MatrixType, typename Scalar> >-struct Assignment<DstXprType, Inverse<FullPivHouseholderQR<MatrixType> >, internal::assign_op<Scalar>, Dense2Dense, Scalar> >+struct Assignment<DstXprType, Inverse<FullPivHouseholderQR<MatrixType> >, internal::assign_op<Scalar,Scalar>, Dense2Dense, Scalar> > { > typedef FullPivHouseholderQR<MatrixType> QrType; > typedef Inverse<QrType> SrcXprType; >- static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &) >+ static void run(DstXprType &dst, const SrcXprType &src, const 
internal::assign_op<Scalar,Scalar> &) > { > dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols())); > } > }; > > /** \ingroup QR_Module > * > * \brief Expression type for return value of FullPivHouseholderQR::matrixQ() >diff --git a/Eigen/src/SparseCore/SparseAssign.h b/Eigen/src/SparseCore/SparseAssign.h >--- a/Eigen/src/SparseCore/SparseAssign.h >+++ b/Eigen/src/SparseCore/SparseAssign.h >@@ -29,18 +29,18 @@ Derived& SparseMatrixBase<Derived>::oper > return derived(); > } > > template<typename Derived> > template<typename OtherDerived> > inline Derived& SparseMatrixBase<Derived>::operator=(const SparseMatrixBase<OtherDerived>& other) > { > // by default sparse evaluation do not alias, so we can safely bypass the generic call_assignment routine >- internal::Assignment<Derived,OtherDerived,internal::assign_op<Scalar> > >- ::run(derived(), other.derived(), internal::assign_op<Scalar>()); >+ internal::Assignment<Derived,OtherDerived,internal::assign_op<Scalar,typename OtherDerived::Scalar> > >+ ::run(derived(), other.derived(), internal::assign_op<Scalar,typename OtherDerived::Scalar>()); > return derived(); > } > > template<typename Derived> > inline Derived& SparseMatrixBase<Derived>::operator=(const Derived& other) > { > internal::call_assignment_no_alias(derived(), other.derived()); > return derived(); >@@ -122,84 +122,84 @@ void assign_sparse_to_sparse(DstXprType > dst = temp.markAsRValue(); > } > } > > // Generic Sparse to Sparse assignment > template< typename DstXprType, typename SrcXprType, typename Functor, typename Scalar> > struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Sparse, Scalar> > { >- static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar> &/*func*/) >+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/) > { > assign_sparse_to_sparse(dst.derived(), 
src.derived()); > } > }; > > // Generic Sparse to Dense assignment > template< typename DstXprType, typename SrcXprType, typename Functor, typename Scalar> > struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Dense, Scalar> > { > static void run(DstXprType &dst, const SrcXprType &src, const Functor &func) > { > eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); > >- if(internal::is_same<Functor,internal::assign_op<Scalar> >::value) >+ if(internal::is_same<Functor,internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> >::value) > dst.setZero(); > > internal::evaluator<SrcXprType> srcEval(src); > internal::evaluator<DstXprType> dstEval(dst); > const Index outerEvaluationSize = (internal::evaluator<SrcXprType>::Flags&RowMajorBit) ? src.rows() : src.cols(); > for (Index j=0; j<outerEvaluationSize; ++j) > for (typename internal::evaluator<SrcXprType>::InnerIterator i(srcEval,j); i; ++i) > func.assignCoeff(dstEval.coeffRef(i.row(),i.col()), i.value()); > } > }; > > // Specialization for "dst = dec.solve(rhs)" > // NOTE we need to specialize it for Sparse2Sparse to avoid ambiguous specialization error > template<typename DstXprType, typename DecType, typename RhsType, typename Scalar> >-struct Assignment<DstXprType, Solve<DecType,RhsType>, internal::assign_op<Scalar>, Sparse2Sparse, Scalar> >+struct Assignment<DstXprType, Solve<DecType,RhsType>, internal::assign_op<Scalar,Scalar>, Sparse2Sparse, Scalar> > { > typedef Solve<DecType,RhsType> SrcXprType; >- static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &) >+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &) > { > src.dec()._solve_impl(src.rhs(), dst); > } > }; > > struct Diagonal2Sparse {}; > > template<> struct AssignmentKind<SparseShape,DiagonalShape> { typedef Diagonal2Sparse Kind; }; > > template< typename DstXprType, typename SrcXprType, typename Functor, typename Scalar> > struct 
Assignment<DstXprType, SrcXprType, Functor, Diagonal2Sparse, Scalar> > { > typedef typename DstXprType::StorageIndex StorageIndex; > typedef Array<StorageIndex,Dynamic,1> ArrayXI; > typedef Array<Scalar,Dynamic,1> ArrayXS; > template<int Options> >- static void run(SparseMatrix<Scalar,Options,StorageIndex> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar> &/*func*/) >+ static void run(SparseMatrix<Scalar,Options,StorageIndex> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/) > { > Index size = src.diagonal().size(); > dst.makeCompressed(); > dst.resizeNonZeros(size); > Map<ArrayXI>(dst.innerIndexPtr(), size).setLinSpaced(0,StorageIndex(size)-1); > Map<ArrayXI>(dst.outerIndexPtr(), size+1).setLinSpaced(0,StorageIndex(size)); > Map<ArrayXS>(dst.valuePtr(), size) = src.diagonal(); > } > > template<typename DstDerived> >- static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar> &/*func*/) >+ static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/) > { > dst.diagonal() = src.diagonal(); > } > >- static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar> &/*func*/) >+ static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/) > { dst.diagonal() += src.diagonal(); } > >- static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar> &/*func*/) >+ static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/) > { dst.diagonal() -= src.diagonal(); } > }; > } // end namespace internal > > } // end 
namespace Eigen > > #endif // EIGEN_SPARSEASSIGN_H >diff --git a/Eigen/src/SparseCore/SparseCwiseBinaryOp.h b/Eigen/src/SparseCore/SparseCwiseBinaryOp.h >--- a/Eigen/src/SparseCore/SparseCwiseBinaryOp.h >+++ b/Eigen/src/SparseCore/SparseCwiseBinaryOp.h >@@ -574,59 +574,59 @@ SparseMatrixBase<Derived>::operator+=(co > { > return derived() = derived() + other.derived(); > } > > template<typename Derived> > template<typename OtherDerived> > Derived& SparseMatrixBase<Derived>::operator+=(const DiagonalBase<OtherDerived>& other) > { >- call_assignment_no_alias(derived(), other.derived(), internal::add_assign_op<Scalar>()); >+ call_assignment_no_alias(derived(), other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>()); > return derived(); > } > > template<typename Derived> > template<typename OtherDerived> > Derived& SparseMatrixBase<Derived>::operator-=(const DiagonalBase<OtherDerived>& other) > { >- call_assignment_no_alias(derived(), other.derived(), internal::sub_assign_op<Scalar>()); >+ call_assignment_no_alias(derived(), other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>()); > return derived(); > } > > template<typename Derived> > template<typename OtherDerived> > EIGEN_STRONG_INLINE const typename SparseMatrixBase<Derived>::template CwiseProductDenseReturnType<OtherDerived>::Type > SparseMatrixBase<Derived>::cwiseProduct(const MatrixBase<OtherDerived> &other) const > { > return typename CwiseProductDenseReturnType<OtherDerived>::Type(derived(), other.derived()); > } > > template<typename DenseDerived, typename SparseDerived> >-EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_sum_op<typename DenseDerived::Scalar>, const DenseDerived, const SparseDerived> >+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_sum_op<typename DenseDerived::Scalar,typename SparseDerived::Scalar>, const DenseDerived, const SparseDerived> > operator+(const MatrixBase<DenseDerived> &a, const SparseMatrixBase<SparseDerived> 
&b) > { >- return CwiseBinaryOp<internal::scalar_sum_op<typename DenseDerived::Scalar>, const DenseDerived, const SparseDerived>(a.derived(), b.derived()); >+ return CwiseBinaryOp<internal::scalar_sum_op<typename DenseDerived::Scalar,typename SparseDerived::Scalar>, const DenseDerived, const SparseDerived>(a.derived(), b.derived()); > } > > template<typename SparseDerived, typename DenseDerived> >-EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_sum_op<typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived> >+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_sum_op<typename SparseDerived::Scalar,typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived> > operator+(const SparseMatrixBase<SparseDerived> &a, const MatrixBase<DenseDerived> &b) > { >- return CwiseBinaryOp<internal::scalar_sum_op<typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>(a.derived(), b.derived()); >+ return CwiseBinaryOp<internal::scalar_sum_op<typename SparseDerived::Scalar,typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>(a.derived(), b.derived()); > } > > template<typename DenseDerived, typename SparseDerived> >-EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_difference_op<typename DenseDerived::Scalar>, const DenseDerived, const SparseDerived> >+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_difference_op<typename DenseDerived::Scalar,typename SparseDerived::Scalar>, const DenseDerived, const SparseDerived> > operator-(const MatrixBase<DenseDerived> &a, const SparseMatrixBase<SparseDerived> &b) > { >- return CwiseBinaryOp<internal::scalar_difference_op<typename DenseDerived::Scalar>, const DenseDerived, const SparseDerived>(a.derived(), b.derived()); >+ return CwiseBinaryOp<internal::scalar_difference_op<typename DenseDerived::Scalar,typename SparseDerived::Scalar>, const DenseDerived, const SparseDerived>(a.derived(), b.derived()); > } > > template<typename SparseDerived, typename 
DenseDerived> >-EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_difference_op<typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived> >+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_difference_op<typename SparseDerived::Scalar,typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived> > operator-(const SparseMatrixBase<SparseDerived> &a, const MatrixBase<DenseDerived> &b) > { >- return CwiseBinaryOp<internal::scalar_difference_op<typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>(a.derived(), b.derived()); >+ return CwiseBinaryOp<internal::scalar_difference_op<typename SparseDerived::Scalar,typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>(a.derived(), b.derived()); > } > > } // end namespace Eigen > > #endif // EIGEN_SPARSE_CWISE_BINARY_OP_H >diff --git a/Eigen/src/SparseCore/SparseDenseProduct.h b/Eigen/src/SparseCore/SparseDenseProduct.h >--- a/Eigen/src/SparseCore/SparseDenseProduct.h >+++ b/Eigen/src/SparseCore/SparseDenseProduct.h >@@ -69,17 +69,17 @@ struct sparse_time_dense_product_impl<Sp > res.coeffRef(i,col) += alpha * tmp; > } > > }; > > // FIXME: what is the purpose of the following specialization? Is it for the BlockedSparse format? 
> // -> let's disable it for now as it is conflicting with generic scalar*matrix and matrix*scalar operators > // template<typename T1, typename T2/*, int _Options, typename _StrideType*/> >-// struct scalar_product_traits<T1, Ref<T2/*, _Options, _StrideType*/> > >+// struct ScalarBinaryOpTraits<T1, Ref<T2/*, _Options, _StrideType*/> > > // { > // enum { > // Defined = 1 > // }; > // typedef typename CwiseUnaryOp<scalar_multiple2_op<T1, typename T2::Scalar>, T2>::PlainObject ReturnType; > // }; > > template<typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType> >@@ -92,17 +92,17 @@ struct sparse_time_dense_product_impl<Sp > static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha) > { > evaluator<Lhs> lhsEval(lhs); > for(Index c=0; c<rhs.cols(); ++c) > { > for(Index j=0; j<lhs.outerSize(); ++j) > { > // typename Res::Scalar rhs_j = alpha * rhs.coeff(j,c); >- typename internal::scalar_product_traits<AlphaType, typename Rhs::Scalar>::ReturnType rhs_j(alpha * rhs.coeff(j,c)); >+ typename ScalarBinaryOpTraits<AlphaType, typename Rhs::Scalar>::ReturnType rhs_j(alpha * rhs.coeff(j,c)); > for(LhsInnerIterator it(lhsEval,j); it ;++it) > res.coeffRef(it.index(),c) += it.value() * rhs_j; > } > } > } > }; > > template<typename SparseLhsType, typename DenseRhsType, typename DenseResType> >diff --git a/Eigen/src/SparseCore/SparseMatrix.h b/Eigen/src/SparseCore/SparseMatrix.h >--- a/Eigen/src/SparseCore/SparseMatrix.h >+++ b/Eigen/src/SparseCore/SparseMatrix.h >@@ -435,17 +435,17 @@ class SparseMatrix > //--- > > template<typename InputIterators> > void setFromTriplets(const InputIterators& begin, const InputIterators& end); > > template<typename InputIterators,typename DupFunctor> > void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func); > >- void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar>()); } >+ void sumupDuplicates() { 
collapseDuplicates(internal::scalar_sum_op<Scalar,Scalar>()); } > > template<typename DupFunctor> > void collapseDuplicates(DupFunctor dup_func = DupFunctor()); > > //--- > > /** \internal > * same as insert(Index,Index) except that the indices are given relative to the storage order */ >@@ -974,17 +974,17 @@ void set_from_triplets(const InputIterat > * \warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define > * an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather > * be explicitely stored into a std::vector for instance. > */ > template<typename Scalar, int _Options, typename _Index> > template<typename InputIterators> > void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end) > { >- internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_Index> >(begin, end, *this, internal::scalar_sum_op<Scalar>()); >+ internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_Index> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>()); > } > > /** The same as setFromTriplets but when duplicates are met the functor \a dup_func is applied: > * \code > * value = dup_func(OldValue, NewValue) > * \endcode > * Here is a C++11 example keeping the latest entry only: > * \code >diff --git a/Eigen/src/SparseCore/SparseMatrixBase.h b/Eigen/src/SparseCore/SparseMatrixBase.h >--- a/Eigen/src/SparseCore/SparseMatrixBase.h >+++ b/Eigen/src/SparseCore/SparseMatrixBase.h >@@ -251,17 +251,17 @@ template<typename Derived> class SparseM > Derived& operator+=(const DiagonalBase<OtherDerived>& other); > template<typename OtherDerived> > Derived& operator-=(const DiagonalBase<OtherDerived>& other); > > Derived& operator*=(const Scalar& other); > Derived& operator/=(const Scalar& other); > > template<typename OtherDerived> struct CwiseProductDenseReturnType { >- typedef 
CwiseBinaryOp<internal::scalar_product_op<typename internal::scalar_product_traits< >+ typedef CwiseBinaryOp<internal::scalar_product_op<typename ScalarBinaryOpTraits< > typename internal::traits<Derived>::Scalar, > typename internal::traits<OtherDerived>::Scalar > >::ReturnType>, > const Derived, > const OtherDerived > > Type; > }; > >diff --git a/Eigen/src/SparseCore/SparseProduct.h b/Eigen/src/SparseCore/SparseProduct.h >--- a/Eigen/src/SparseCore/SparseProduct.h >+++ b/Eigen/src/SparseCore/SparseProduct.h >@@ -94,42 +94,42 @@ struct generic_product_impl<Lhs, Rhs, Sp > // sparse-triangular * sparse > template<typename Lhs, typename Rhs, int ProductType> > struct generic_product_impl<Lhs, Rhs, SparseTriangularShape, SparseShape, ProductType> > : public generic_product_impl<Lhs, Rhs, SparseShape, SparseShape, ProductType> > {}; > > // dense = sparse-product (can be sparse*sparse, sparse*perm, etc.) > template< typename DstXprType, typename Lhs, typename Rhs> >-struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::assign_op<typename DstXprType::Scalar>, Sparse2Dense> >+struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::assign_op<typename DstXprType::Scalar,typename Product<Lhs,Rhs,AliasFreeProduct>::Scalar>, Sparse2Dense> > { > typedef Product<Lhs,Rhs,AliasFreeProduct> SrcXprType; >- static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar> &) >+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &) > { > generic_product_impl<Lhs, Rhs>::evalTo(dst,src.lhs(),src.rhs()); > } > }; > > // dense += sparse-product (can be sparse*sparse, sparse*perm, etc.) 
> template< typename DstXprType, typename Lhs, typename Rhs> >-struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::add_assign_op<typename DstXprType::Scalar>, Sparse2Dense> >+struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::add_assign_op<typename DstXprType::Scalar,typename Product<Lhs,Rhs,AliasFreeProduct>::Scalar>, Sparse2Dense> > { > typedef Product<Lhs,Rhs,AliasFreeProduct> SrcXprType; >- static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar> &) >+ static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &) > { > generic_product_impl<Lhs, Rhs>::addTo(dst,src.lhs(),src.rhs()); > } > }; > > // dense -= sparse-product (can be sparse*sparse, sparse*perm, etc.) > template< typename DstXprType, typename Lhs, typename Rhs> >-struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::sub_assign_op<typename DstXprType::Scalar>, Sparse2Dense> >+struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::sub_assign_op<typename DstXprType::Scalar,typename Product<Lhs,Rhs,AliasFreeProduct>::Scalar>, Sparse2Dense> > { > typedef Product<Lhs,Rhs,AliasFreeProduct> SrcXprType; >- static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar> &) >+ static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &) > { > generic_product_impl<Lhs, Rhs>::subTo(dst,src.lhs(),src.rhs()); > } > }; > > template<typename Lhs, typename Rhs, int Options> > struct unary_evaluator<SparseView<Product<Lhs, Rhs, Options> >, IteratorBased> > : public evaluator<typename Product<Lhs, Rhs, DefaultProduct>::PlainObject> >diff --git a/Eigen/src/SparseCore/SparseSelfAdjointView.h b/Eigen/src/SparseCore/SparseSelfAdjointView.h >--- 
a/Eigen/src/SparseCore/SparseSelfAdjointView.h >+++ b/Eigen/src/SparseCore/SparseSelfAdjointView.h >@@ -218,23 +218,23 @@ struct SparseSelfAdjoint2Sparse {}; > template<> struct AssignmentKind<SparseShape,SparseSelfAdjointShape> { typedef SparseSelfAdjoint2Sparse Kind; }; > template<> struct AssignmentKind<SparseSelfAdjointShape,SparseShape> { typedef Sparse2Sparse Kind; }; > > template< typename DstXprType, typename SrcXprType, typename Functor, typename Scalar> > struct Assignment<DstXprType, SrcXprType, Functor, SparseSelfAdjoint2Sparse, Scalar> > { > typedef typename DstXprType::StorageIndex StorageIndex; > template<typename DestScalar,int StorageOrder> >- static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar> &/*func*/) >+ static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/) > { > internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), dst); > } > > template<typename DestScalar> >- static void run(DynamicSparseMatrix<DestScalar,ColMajor,StorageIndex>& dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar> &/*func*/) >+ static void run(DynamicSparseMatrix<DestScalar,ColMajor,StorageIndex>& dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/) > { > // TODO directly evaluate into dst; > SparseMatrix<DestScalar,ColMajor,StorageIndex> tmp(dst.rows(),dst.cols()); > internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), tmp); > dst = tmp; > } > }; > >@@ -581,31 +581,31 @@ class SparseSymmetricPermutationProduct > MatrixTypeNested m_matrix; > const Perm& m_perm; > > }; > > namespace internal { > > template<typename DstXprType, typename MatrixType, int Mode, typename Scalar> >-struct Assignment<DstXprType, 
SparseSymmetricPermutationProduct<MatrixType,Mode>, internal::assign_op<Scalar>, Sparse2Sparse> >+struct Assignment<DstXprType, SparseSymmetricPermutationProduct<MatrixType,Mode>, internal::assign_op<Scalar,typename MatrixType::Scalar>, Sparse2Sparse> > { > typedef SparseSymmetricPermutationProduct<MatrixType,Mode> SrcXprType; > typedef typename DstXprType::StorageIndex DstIndex; > template<int Options> >- static void run(SparseMatrix<Scalar,Options,DstIndex> &dst, const SrcXprType &src, const internal::assign_op<Scalar> &) >+ static void run(SparseMatrix<Scalar,Options,DstIndex> &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &) > { > // internal::permute_symm_to_fullsymm<Mode>(m_matrix,_dest,m_perm.indices().data()); > SparseMatrix<Scalar,(Options&RowMajor)==RowMajor ? ColMajor : RowMajor, DstIndex> tmp; > internal::permute_symm_to_fullsymm<Mode>(src.matrix(),tmp,src.perm().indices().data()); > dst = tmp; > } > > template<typename DestType,unsigned int DestMode> >- static void run(SparseSelfAdjointView<DestType,DestMode>& dst, const SrcXprType &src, const internal::assign_op<Scalar> &) >+ static void run(SparseSelfAdjointView<DestType,DestMode>& dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &) > { > internal::permute_symm_to_symm<Mode,DestMode>(src.matrix(),dst.matrix(),src.perm().indices().data()); > } > }; > > } // end namespace internal > > } // end namespace Eigen >diff --git a/Eigen/src/SparseQR/SparseQR.h b/Eigen/src/SparseQR/SparseQR.h >--- a/Eigen/src/SparseQR/SparseQR.h >+++ b/Eigen/src/SparseQR/SparseQR.h >@@ -700,38 +700,38 @@ template<typename SparseQRType> > struct evaluator_traits<SparseQRMatrixQReturnType<SparseQRType> > > { > typedef typename SparseQRType::MatrixType MatrixType; > typedef typename storage_kind_to_evaluator_kind<typename MatrixType::StorageKind>::Kind Kind; > typedef SparseShape Shape; > }; > > template< typename DstXprType, typename 
SparseQRType> >-struct Assignment<DstXprType, SparseQRMatrixQReturnType<SparseQRType>, internal::assign_op<typename DstXprType::Scalar>, Sparse2Sparse> >+struct Assignment<DstXprType, SparseQRMatrixQReturnType<SparseQRType>, internal::assign_op<typename DstXprType::Scalar,typename DstXprType::Scalar>, Sparse2Sparse> > { > typedef SparseQRMatrixQReturnType<SparseQRType> SrcXprType; > typedef typename DstXprType::Scalar Scalar; > typedef typename DstXprType::StorageIndex StorageIndex; >- static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &/*func*/) >+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &/*func*/) > { > typename DstXprType::PlainObject idMat(src.m_qr.rows(), src.m_qr.rows()); > idMat.setIdentity(); > // Sort the sparse householder reflectors if needed > const_cast<SparseQRType *>(&src.m_qr)->_sort_matrix_Q(); > dst = SparseQR_QProduct<SparseQRType, DstXprType>(src.m_qr, idMat, false); > } > }; > > template< typename DstXprType, typename SparseQRType> >-struct Assignment<DstXprType, SparseQRMatrixQReturnType<SparseQRType>, internal::assign_op<typename DstXprType::Scalar>, Sparse2Dense> >+struct Assignment<DstXprType, SparseQRMatrixQReturnType<SparseQRType>, internal::assign_op<typename DstXprType::Scalar,typename DstXprType::Scalar>, Sparse2Dense> > { > typedef SparseQRMatrixQReturnType<SparseQRType> SrcXprType; > typedef typename DstXprType::Scalar Scalar; > typedef typename DstXprType::StorageIndex StorageIndex; >- static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &/*func*/) >+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &/*func*/) > { > dst = src.m_qr.matrixQ() * DstXprType::Identity(src.m_qr.rows(), src.m_qr.rows()); > } > }; > > } // end namespace internal > > } // end namespace Eigen >diff --git a/Eigen/src/plugins/ArrayCwiseBinaryOps.h 
b/Eigen/src/plugins/ArrayCwiseBinaryOps.h >--- a/Eigen/src/plugins/ArrayCwiseBinaryOps.h >+++ b/Eigen/src/plugins/ArrayCwiseBinaryOps.h >@@ -1,18 +1,18 @@ > /** \returns an expression of the coefficient wise product of \c *this and \a other > * > * \sa MatrixBase::cwiseProduct > */ > template<typename OtherDerived> > EIGEN_DEVICE_FUNC >-EIGEN_STRONG_INLINE const EIGEN_CWISE_PRODUCT_RETURN_TYPE(Derived,OtherDerived) >+EIGEN_STRONG_INLINE const EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,product) > operator*(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const > { >- return EIGEN_CWISE_PRODUCT_RETURN_TYPE(Derived,OtherDerived)(derived(), other.derived()); >+ return EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,product)(derived(), other.derived()); > } > > /** \returns an expression of the coefficient wise quotient of \c *this and \a other > * > * \sa MatrixBase::cwiseQuotient > */ > template<typename OtherDerived> > EIGEN_DEVICE_FUNC >diff --git a/Eigen/src/plugins/CommonCwiseBinaryOps.h b/Eigen/src/plugins/CommonCwiseBinaryOps.h >--- a/Eigen/src/plugins/CommonCwiseBinaryOps.h >+++ b/Eigen/src/plugins/CommonCwiseBinaryOps.h >@@ -1,35 +1,47 @@ > // This file is part of Eigen, a lightweight C++ template library > // for linear algebra. > // >-// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr> >+// Copyright (C) 2008-2016 Gael Guennebaud <gael.guennebaud@inria.fr> > // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com> > // > // This Source Code Form is subject to the terms of the Mozilla > // Public License v. 2.0. If a copy of the MPL was not distributed > // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. > > // This file is a base class plugin containing common coefficient wise functions. > > /** \returns an expression of the difference of \c *this and \a other > * > * \note If you want to substract a given scalar from all coefficients, see Cwise::operator-(). 
> * > * \sa class CwiseBinaryOp, operator-=() > */ >-EIGEN_MAKE_CWISE_BINARY_OP(operator-,internal::scalar_difference_op) >+template<typename OtherDerived> >+EIGEN_DEVICE_FUNC >+EIGEN_STRONG_INLINE const EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,difference) >+operator-(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const >+{ >+ return EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,difference)(derived(), other.derived()); >+} > > /** \returns an expression of the sum of \c *this and \a other > * > * \note If you want to add a given scalar to all coefficients, see Cwise::operator+(). > * > * \sa class CwiseBinaryOp, operator+=() > */ >-EIGEN_MAKE_CWISE_BINARY_OP(operator+,internal::scalar_sum_op) >+template<typename OtherDerived> >+EIGEN_DEVICE_FUNC >+EIGEN_STRONG_INLINE const EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,sum) >+operator+(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const >+{ >+ return EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,sum)(derived(), other.derived()); >+} > > /** \returns an expression of a custom coefficient-wise operator \a func of *this and \a other > * > * The template parameter \a CustomBinaryOp is the type of the functor > * of the custom operator (see class CwiseBinaryOp for an example) > * > * Here is an example illustrating the use of custom functors: > * \include class_CwiseBinaryOp.cpp >diff --git a/Eigen/src/plugins/CommonCwiseUnaryOps.h b/Eigen/src/plugins/CommonCwiseUnaryOps.h >--- a/Eigen/src/plugins/CommonCwiseUnaryOps.h >+++ b/Eigen/src/plugins/CommonCwiseUnaryOps.h >@@ -68,17 +68,17 @@ inline const ScalarQuotient1ReturnType > operator/(const Scalar& scalar) const > { > return ScalarQuotient1ReturnType(derived(), internal::scalar_quotient1_op<Scalar>(scalar)); > } > > /** Overloaded for efficiently multipling with compatible scalar types */ > template <typename T> > EIGEN_DEVICE_FUNC inline >-typename 
internal::enable_if<internal::scalar_product_traits<T,Scalar>::Defined, >+typename internal::enable_if<ScalarBinaryOpTraits<T,Scalar>::Defined, > const CwiseUnaryOp<internal::scalar_multiple2_op<Scalar,T>, const Derived> >::type > operator*(const T& scalar) const > { > #ifdef EIGEN_SPECIAL_SCALAR_MULTIPLE_PLUGIN > EIGEN_SPECIAL_SCALAR_MULTIPLE_PLUGIN > #endif > return CwiseUnaryOp<internal::scalar_multiple2_op<Scalar,T>, const Derived>( > derived(), internal::scalar_multiple2_op<Scalar,T>(scalar) ); >@@ -86,30 +86,30 @@ operator*(const T& scalar) const > > EIGEN_DEVICE_FUNC > inline friend const ScalarMultipleReturnType > operator*(const Scalar& scalar, const StorageBaseType& matrix) > { return matrix*scalar; } > > template <typename T> > EIGEN_DEVICE_FUNC inline friend >-typename internal::enable_if<internal::scalar_product_traits<Scalar,T>::Defined, >+typename internal::enable_if<ScalarBinaryOpTraits<Scalar,T>::Defined, > const CwiseUnaryOp<internal::scalar_multiple2_op<Scalar,T>, const Derived> >::type > operator*(const T& scalar, const StorageBaseType& matrix) > { > #ifdef EIGEN_SPECIAL_SCALAR_MULTIPLE_PLUGIN > EIGEN_SPECIAL_SCALAR_MULTIPLE_PLUGIN > #endif > return CwiseUnaryOp<internal::scalar_multiple2_op<Scalar,T>, const Derived>( > matrix.derived(), internal::scalar_multiple2_op<Scalar,T>(scalar) ); > } > > template <typename T> > EIGEN_DEVICE_FUNC inline >-typename internal::enable_if<internal::scalar_product_traits<Scalar,T>::Defined, >+typename internal::enable_if<ScalarBinaryOpTraits<Scalar,T>::Defined, > const CwiseUnaryOp<internal::scalar_quotient2_op<Scalar,T>, const Derived> >::type > operator/(const T& scalar) const > { > #ifdef EIGEN_SPECIAL_SCALAR_MULTIPLE_PLUGIN > EIGEN_SPECIAL_SCALAR_MULTIPLE_PLUGIN > #endif > return CwiseUnaryOp<internal::scalar_quotient2_op<Scalar,T>, const Derived>( > derived(), internal::scalar_quotient2_op<Scalar,T>(scalar) ); >diff --git a/Eigen/src/plugins/MatrixCwiseBinaryOps.h b/Eigen/src/plugins/MatrixCwiseBinaryOps.h 
>--- a/Eigen/src/plugins/MatrixCwiseBinaryOps.h >+++ b/Eigen/src/plugins/MatrixCwiseBinaryOps.h >@@ -14,20 +14,20 @@ > * > * Example: \include MatrixBase_cwiseProduct.cpp > * Output: \verbinclude MatrixBase_cwiseProduct.out > * > * \sa class CwiseBinaryOp, cwiseAbs2 > */ > template<typename OtherDerived> > EIGEN_DEVICE_FUNC >-EIGEN_STRONG_INLINE const EIGEN_CWISE_PRODUCT_RETURN_TYPE(Derived,OtherDerived) >+EIGEN_STRONG_INLINE const EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,product) > cwiseProduct(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const > { >- return EIGEN_CWISE_PRODUCT_RETURN_TYPE(Derived,OtherDerived)(derived(), other.derived()); >+ return EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,product)(derived(), other.derived()); > } > > /** \returns an expression of the coefficient-wise == operator of *this and \a other > * > * \warning this performs an exact comparison, which is generally a bad idea with floating-point types. > * In order to check for equality between two vectors or matrices with floating-point coefficients, it is > * generally a far better idea to use a fuzzy comparison as provided by isApprox() and > * isMuchSmallerThan(). 
>diff --git a/blas/PackedTriangularMatrixVector.h b/blas/PackedTriangularMatrixVector.h >--- a/blas/PackedTriangularMatrixVector.h >+++ b/blas/PackedTriangularMatrixVector.h >@@ -13,17 +13,17 @@ > namespace internal { > > template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs, int StorageOrder> > struct packed_triangular_matrix_vector_product; > > template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs> > struct packed_triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,ColMajor> > { >- typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar; >+ typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar; > enum { > IsLower = (Mode & Lower) ==Lower, > HasUnitDiag = (Mode & UnitDiag)==UnitDiag, > HasZeroDiag = (Mode & ZeroDiag)==ZeroDiag > }; > static void run(Index size, const LhsScalar* lhs, const RhsScalar* rhs, ResScalar* res, ResScalar alpha) > { > internal::conj_if<ConjRhs> cj; >@@ -42,17 +42,17 @@ struct packed_triangular_matrix_vector_p > lhs += IsLower ? 
size-i: i+1; > } > }; > }; > > template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs> > struct packed_triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,RowMajor> > { >- typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar; >+ typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar; > enum { > IsLower = (Mode & Lower) ==Lower, > HasUnitDiag = (Mode & UnitDiag)==UnitDiag, > HasZeroDiag = (Mode & ZeroDiag)==ZeroDiag > }; > static void run(Index size, const LhsScalar* lhs, const RhsScalar* rhs, ResScalar* res, ResScalar alpha) > { > internal::conj_if<ConjRhs> cj; >diff --git a/test/array.cpp b/test/array.cpp >--- a/test/array.cpp >+++ b/test/array.cpp >@@ -67,17 +67,17 @@ template<typename ArrayType> void array( > // reductions > VERIFY_IS_APPROX(m1.abs().colwise().sum().sum(), m1.abs().sum()); > VERIFY_IS_APPROX(m1.abs().rowwise().sum().sum(), m1.abs().sum()); > using std::abs; > VERIFY_IS_MUCH_SMALLER_THAN(abs(m1.colwise().sum().sum() - m1.sum()), m1.abs().sum()); > VERIFY_IS_MUCH_SMALLER_THAN(abs(m1.rowwise().sum().sum() - m1.sum()), m1.abs().sum()); > if (!internal::isMuchSmallerThan(abs(m1.sum() - (m1+m2).sum()), m1.abs().sum(), test_precision<Scalar>())) > VERIFY_IS_NOT_APPROX(((m1+m2).rowwise().sum()).sum(), m1.sum()); >- VERIFY_IS_APPROX(m1.colwise().sum(), m1.colwise().redux(internal::scalar_sum_op<Scalar>())); >+ VERIFY_IS_APPROX(m1.colwise().sum(), m1.colwise().redux(internal::scalar_sum_op<Scalar,Scalar>())); > > // vector-wise ops > m3 = m1; > VERIFY_IS_APPROX(m3.colwise() += cv1, m1.colwise() + cv1); > m3 = m1; > VERIFY_IS_APPROX(m3.colwise() -= cv1, m1.colwise() - cv1); > m3 = m1; > VERIFY_IS_APPROX(m3.rowwise() += rv1, m1.rowwise() + rv1); >diff --git a/test/mixingtypes.cpp b/test/mixingtypes.cpp >--- a/test/mixingtypes.cpp >+++ b/test/mixingtypes.cpp >@@ -37,40 +37,37 @@ template<int SizeAtCompileType> void mix > typedef 
Matrix<std::complex<double>, SizeAtCompileType, SizeAtCompileType> Mat_cd; > typedef Matrix<float, SizeAtCompileType, 1> Vec_f; > typedef Matrix<double, SizeAtCompileType, 1> Vec_d; > typedef Matrix<std::complex<float>, SizeAtCompileType, 1> Vec_cf; > typedef Matrix<std::complex<double>, SizeAtCompileType, 1> Vec_cd; > > Mat_f mf = Mat_f::Random(size,size); > Mat_d md = mf.template cast<double>(); >+ //Mat_d rd = md; > Mat_cf mcf = Mat_cf::Random(size,size); > Mat_cd mcd = mcf.template cast<complex<double> >(); > Mat_cd rcd = mcd; > Vec_f vf = Vec_f::Random(size,1); > Vec_d vd = vf.template cast<double>(); > Vec_cf vcf = Vec_cf::Random(size,1); > Vec_cd vcd = vcf.template cast<complex<double> >(); > float sf = internal::random<float>(); > double sd = internal::random<double>(); > complex<float> scf = internal::random<complex<float> >(); > complex<double> scd = internal::random<complex<double> >(); > > > mf+mf; >- VERIFY_RAISES_ASSERT(mf+md); >-#if !EIGEN_HAS_STD_RESULT_OF >- // this one does not even compile with C++11 >- VERIFY_RAISES_ASSERT(mf+mcf); >-#endif >+ >+// VERIFY_RAISES_ASSERT(mf+md); // does not even compile > > #ifdef EIGEN_DONT_VECTORIZE > VERIFY_RAISES_ASSERT(vf=vd); > VERIFY_RAISES_ASSERT(vf+=vd); >- VERIFY_RAISES_ASSERT(mcd=md); > #endif > > // check scalar products > VERIFY_IS_APPROX(vcf * sf , vcf * complex<float>(sf)); > VERIFY_IS_APPROX(sd * vcd, complex<double>(sd) * vcd); > VERIFY_IS_APPROX(vf * scf , vf.template cast<complex<float> >() * scf); > VERIFY_IS_APPROX(scd * vd, scd * vd.template cast<complex<double> >()); > >@@ -181,26 +178,45 @@ template<int SizeAtCompileType> void mix > VERIFY_IS_APPROX(Mat_cd(rcd.template triangularView<Upper>() = sd * md * mcd), > Mat_cd((sd * md.template cast<CD>().eval() * mcd).template triangularView<Upper>())); > VERIFY_IS_APPROX(Mat_cd(rcd.template triangularView<Upper>() = scd * mcd * md), > Mat_cd((scd * mcd * md.template cast<CD>().eval()).template triangularView<Upper>())); > 
VERIFY_IS_APPROX(Mat_cd(rcd.template triangularView<Upper>() = scd * md * mcd), > Mat_cd((scd * md.template cast<CD>().eval() * mcd).template triangularView<Upper>())); > > >- VERIFY_IS_APPROX( md.array() * mcd.array(), md.template cast<CD>().eval().array() * mcd.array() ); >- VERIFY_IS_APPROX( mcd.array() * md.array(), mcd.array() * md.template cast<CD>().eval().array() ); >+ >+ VERIFY_IS_APPROX( md.array() * mcd.array(), md.template cast<CD>().eval().array() * mcd.array() ); >+ VERIFY_IS_APPROX( mcd.array() * md.array(), mcd.array() * md.template cast<CD>().eval().array() ); >+ >+ VERIFY_IS_APPROX( md.array() + mcd.array(), md.template cast<CD>().eval().array() + mcd.array() ); >+ VERIFY_IS_APPROX( mcd.array() + md.array(), mcd.array() + md.template cast<CD>().eval().array() ); >+ >+ VERIFY_IS_APPROX( md.array() - mcd.array(), md.template cast<CD>().eval().array() - mcd.array() ); >+ VERIFY_IS_APPROX( mcd.array() - md.array(), mcd.array() - md.template cast<CD>().eval().array() ); > > // VERIFY_IS_APPROX( md.array() / mcd.array(), md.template cast<CD>().eval().array() / mcd.array() ); > VERIFY_IS_APPROX( mcd.array() / md.array(), mcd.array() / md.template cast<CD>().eval().array() ); > > rcd = mcd; >+ VERIFY_IS_APPROX( rcd = md, md.template cast<CD>().eval() ); >+ rcd = mcd; >+ VERIFY_IS_APPROX( rcd += md, mcd + md.template cast<CD>().eval() ); >+ rcd = mcd; >+ VERIFY_IS_APPROX( rcd -= md, mcd - md.template cast<CD>().eval() ); >+ rcd = mcd; > VERIFY_IS_APPROX( rcd.array() *= md.array(), mcd.array() * md.template cast<CD>().eval().array() ); > rcd = mcd; > VERIFY_IS_APPROX( rcd.array() /= md.array(), mcd.array() / md.template cast<CD>().eval().array() ); >+ >+ rcd = mcd; >+ VERIFY_IS_APPROX( rcd += md + mcd*md, mcd + (md.template cast<CD>().eval()) + mcd*(md.template cast<CD>().eval())); >+ >+ rcd = mcd; >+ VERIFY_IS_APPROX( rcd += mcd + md*md, mcd + mcd + ((md*md).template cast<CD>().eval()) ); > } > > void test_mixingtypes() > { > for(int i = 0; i < g_repeat; 
i++) { > CALL_SUBTEST_1(mixingtypes<3>()); > CALL_SUBTEST_2(mixingtypes<4>()); > CALL_SUBTEST_3(mixingtypes<Dynamic>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE))); >diff --git a/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h b/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h >--- a/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h >+++ b/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h >@@ -496,46 +496,46 @@ struct make_coherent_impl<Matrix<A_Scala > else if((B_Rows==Dynamic || B_Cols==Dynamic) && (b.size()==0)) > { > b.resize(a.size()); > b.setZero(); > } > } > }; > >+} // end namespace internal >+ > template<typename A_Scalar, int A_Rows, int A_Cols, int A_Options, int A_MaxRows, int A_MaxCols> >-struct scalar_product_traits<Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols>,A_Scalar> >+struct ScalarBinaryOpTraits<Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols>,A_Scalar> > { > enum { Defined = 1 }; > typedef Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols> ReturnType; > }; > > template<typename A_Scalar, int A_Rows, int A_Cols, int A_Options, int A_MaxRows, int A_MaxCols> >-struct scalar_product_traits<A_Scalar, Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols> > >+struct ScalarBinaryOpTraits<A_Scalar, Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols> > > { > enum { Defined = 1 }; > typedef Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols> ReturnType; > }; > > template<typename DerType> >-struct scalar_product_traits<AutoDiffScalar<DerType>,typename DerType::Scalar> >+struct ScalarBinaryOpTraits<AutoDiffScalar<DerType>,typename DerType::Scalar> > { > enum { Defined = 1 }; > typedef AutoDiffScalar<DerType> ReturnType; > }; > > template<typename DerType> >-struct scalar_product_traits<typename DerType::Scalar,AutoDiffScalar<DerType> > >+struct ScalarBinaryOpTraits<typename DerType::Scalar,AutoDiffScalar<DerType> > > { > enum { Defined = 1 }; > typedef 
AutoDiffScalar<DerType> ReturnType; > }; > >-} // end namespace internal >- > #define EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(FUNC,CODE) \ > template<typename DerType> \ > inline const Eigen::AutoDiffScalar<Eigen::CwiseUnaryOp<Eigen::internal::scalar_multiple_op<typename Eigen::internal::traits<typename Eigen::internal::remove_all<DerType>::type>::Scalar>, const typename Eigen::internal::remove_all<DerType>::type> > \ > FUNC(const Eigen::AutoDiffScalar<DerType>& x) { \ > using namespace Eigen; \ > typedef typename Eigen::internal::traits<typename Eigen::internal::remove_all<DerType>::type>::Scalar Scalar; \ > typedef AutoDiffScalar<CwiseUnaryOp<Eigen::internal::scalar_multiple_op<Scalar>, const typename Eigen::internal::remove_all<DerType>::type> > ReturnType; \ > CODE; \ >diff --git a/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h b/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h >--- a/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h >+++ b/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h >@@ -198,17 +198,17 @@ void KroneckerProductSparse<Lhs,Rhs>::ev > > namespace internal { > > template<typename _Lhs, typename _Rhs> > struct traits<KroneckerProduct<_Lhs,_Rhs> > > { > typedef typename remove_all<_Lhs>::type Lhs; > typedef typename remove_all<_Rhs>::type Rhs; >- typedef typename scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType Scalar; >+ typedef typename ScalarBinaryOpTraits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType Scalar; > typedef typename promote_index_type<typename Lhs::StorageIndex, typename Rhs::StorageIndex>::type StorageIndex; > > enum { > Rows = size_at_compile_time<traits<Lhs>::RowsAtCompileTime, traits<Rhs>::RowsAtCompileTime>::ret, > Cols = size_at_compile_time<traits<Lhs>::ColsAtCompileTime, traits<Rhs>::ColsAtCompileTime>::ret, > MaxRows = size_at_compile_time<traits<Lhs>::MaxRowsAtCompileTime, traits<Rhs>::MaxRowsAtCompileTime>::ret, > MaxCols = 
size_at_compile_time<traits<Lhs>::MaxColsAtCompileTime, traits<Rhs>::MaxColsAtCompileTime>::ret > }; >@@ -217,17 +217,17 @@ struct traits<KroneckerProduct<_Lhs,_Rhs > }; > > template<typename _Lhs, typename _Rhs> > struct traits<KroneckerProductSparse<_Lhs,_Rhs> > > { > typedef MatrixXpr XprKind; > typedef typename remove_all<_Lhs>::type Lhs; > typedef typename remove_all<_Rhs>::type Rhs; >- typedef typename scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType Scalar; >+ typedef typename ScalarBinaryOpTraits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType Scalar; > typedef typename cwise_promote_storage_type<typename traits<Lhs>::StorageKind, typename traits<Rhs>::StorageKind, scalar_product_op<typename Lhs::Scalar, typename Rhs::Scalar> >::ret StorageKind; > typedef typename promote_index_type<typename Lhs::StorageIndex, typename Rhs::StorageIndex>::type StorageIndex; > > enum { > LhsFlags = Lhs::Flags, > RhsFlags = Rhs::Flags, > > RowsAtCompileTime = size_at_compile_time<traits<Lhs>::RowsAtCompileTime, traits<Rhs>::RowsAtCompileTime>::ret,
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Diff
Attachments on
bug 279
: 713