Bugzilla – Attachment 301 Details for Bug 314 – Infinite recursion when doing exp(matrix)
This Bugzilla service is closed. All entries have been migrated to https://gitlab.com/libeigen/eigen
Description: proof of concept patch removing most of the internal::* standard math functions
Filename: rm_std_math_functions.diff
MIME Type: text/plain
Creator: Gael Guennebaud
Created: 2012-10-09 13:51:36 UTC
Size: 169.78 KB
Flags: patch, obsolete
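
For context, the pattern applied throughout the patch is to drop the internal::sqrt/abs/exp/... wrappers in favour of an unqualified call preceded by a local "using std::xxx;" declaration. The unqualified call lets argument-dependent lookup find overloads supplied for user-defined scalar types, while the using-declaration keeps built-in scalars working; this is presumably what avoids the recursion reported in bug 314. Below is a minimal sketch of that idiom, with a hypothetical custom namespace, MyScalar type and generic_sqrt helper that are not part of the patch:

#include <cmath>
#include <iostream>

namespace custom {
  // Hypothetical user-defined scalar type providing its own sqrt overload.
  struct MyScalar { double v; };
  inline MyScalar sqrt(MyScalar x) { return MyScalar{std::sqrt(x.v)}; }
}

// The idiom used in the patch: bring std::sqrt into scope, then call sqrt()
// unqualified so that ADL can also pick up custom::sqrt for custom::MyScalar.
template<typename Scalar>
Scalar generic_sqrt(const Scalar& x)
{
  using std::sqrt;  // fallback for built-in scalars (float, double, ...)
  return sqrt(x);   // ADL resolves to custom::sqrt when Scalar is custom::MyScalar
}

int main()
{
  std::cout << generic_sqrt(2.0) << "\n";                     // uses std::sqrt
  std::cout << generic_sqrt(custom::MyScalar{2.0}).v << "\n"; // uses custom::sqrt
  return 0;
}

Patch contents (rm_std_math_functions.diff):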
>diff --git a/Eigen/src/Cholesky/LDLT.h b/Eigen/src/Cholesky/LDLT.h >--- a/Eigen/src/Cholesky/LDLT.h >+++ b/Eigen/src/Cholesky/LDLT.h >@@ -243,16 +243,17 @@ namespace internal { > > template<int UpLo> struct ldlt_inplace; > > template<> struct ldlt_inplace<Lower> > { > template<typename MatrixType, typename TranspositionType, typename Workspace> > static bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, int* sign=0) > { >+ using std::abs; > typedef typename MatrixType::Scalar Scalar; > typedef typename MatrixType::RealScalar RealScalar; > typedef typename MatrixType::Index Index; > eigen_assert(mat.rows()==mat.cols()); > const Index size = mat.rows(); > > if (size <= 1) > { >diff --git a/Eigen/src/Cholesky/LLT.h b/Eigen/src/Cholesky/LLT.h >--- a/Eigen/src/Cholesky/LLT.h >+++ b/Eigen/src/Cholesky/LLT.h >@@ -185,16 +185,17 @@ template<typename _MatrixType, int _UpLo > > namespace internal { > > template<typename Scalar, int UpLo> struct llt_inplace; > > template<typename MatrixType, typename VectorType> > static typename MatrixType::Index llt_rank_update_lower(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) > { >+ using std::sqrt; > typedef typename MatrixType::Scalar Scalar; > typedef typename MatrixType::RealScalar RealScalar; > typedef typename MatrixType::Index Index; > typedef typename MatrixType::ColXpr ColXpr; > typedef typename internal::remove_all<ColXpr>::type ColXprCleaned; > typedef typename ColXprCleaned::SegmentReturnType ColXprSegment; > typedef Matrix<Scalar,Dynamic,1> TempVectorType; > typedef typename TempVectorType::SegmentReturnType TempVecSegment; >@@ -258,16 +259,17 @@ static typename MatrixType::Index llt_ra > } > > template<typename Scalar> struct llt_inplace<Scalar, Lower> > { > typedef typename NumTraits<Scalar>::Real RealScalar; > template<typename MatrixType> > static typename MatrixType::Index unblocked(MatrixType& mat) > { >+ using std::sqrt; > typedef typename MatrixType::Index Index; > > eigen_assert(mat.rows()==mat.cols()); > const Index size = mat.rows(); > for(Index k = 0; k < size; ++k) > { > Index rs = size-k-1; // remaining size > >diff --git a/Eigen/src/Core/DiagonalMatrix.h b/Eigen/src/Core/DiagonalMatrix.h >--- a/Eigen/src/Core/DiagonalMatrix.h >+++ b/Eigen/src/Core/DiagonalMatrix.h >@@ -281,21 +281,22 @@ MatrixBase<Derived>::asDiagonal() const > * Example: \include MatrixBase_isDiagonal.cpp > * Output: \verbinclude MatrixBase_isDiagonal.out > * > * \sa asDiagonal() > */ > template<typename Derived> > bool MatrixBase<Derived>::isDiagonal(const RealScalar& prec) const > { >+ using std::abs; > if(cols() != rows()) return false; > RealScalar maxAbsOnDiagonal = static_cast<RealScalar>(-1); > for(Index j = 0; j < cols(); ++j) > { >- RealScalar absOnDiagonal = internal::abs(coeff(j,j)); >+ RealScalar absOnDiagonal = abs(coeff(j,j)); > if(absOnDiagonal > maxAbsOnDiagonal) maxAbsOnDiagonal = absOnDiagonal; > } > for(Index j = 0; j < cols(); ++j) > for(Index i = 0; i < j; ++i) > { > if(!internal::isMuchSmallerThan(coeff(i, j), maxAbsOnDiagonal, prec)) return false; > if(!internal::isMuchSmallerThan(coeff(j, i), maxAbsOnDiagonal, prec)) return false; > } >diff --git a/Eigen/src/Core/Dot.h b/Eigen/src/Core/Dot.h >--- a/Eigen/src/Core/Dot.h >+++ b/Eigen/src/Core/Dot.h >@@ -119,17 +119,18 @@ EIGEN_STRONG_INLINE typename NumTraits<t > * In both cases, it consists in the square root of the sum of the square of all the matrix entries. 
> * For vectors, this is also equals to the square root of the dot product of \c *this with itself. > * > * \sa dot(), squaredNorm() > */ > template<typename Derived> > inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::norm() const > { >- return internal::sqrt(squaredNorm()); >+ using std::sqrt; >+ return sqrt(squaredNorm()); > } > > /** \returns an expression of the quotient of *this by its own norm. > * > * \only_for_vectors > * > * \sa norm(), normalize() > */ >diff --git a/Eigen/src/Core/Functors.h b/Eigen/src/Core/Functors.h >--- a/Eigen/src/Core/Functors.h >+++ b/Eigen/src/Core/Functors.h >@@ -282,17 +282,17 @@ struct functor_traits<scalar_opposite_op > /** \internal > * \brief Template functor to compute the absolute value of a scalar > * > * \sa class CwiseUnaryOp, Cwise::abs > */ > template<typename Scalar> struct scalar_abs_op { > EIGEN_EMPTY_STRUCT_CTOR(scalar_abs_op) > typedef typename NumTraits<Scalar>::Real result_type; >- EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return internal::abs(a); } >+ EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { using std::abs; return abs(a); } > template<typename Packet> > EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const > { return internal::pabs(a); } > }; > template<typename Scalar> > struct functor_traits<scalar_abs_op<Scalar> > > { > enum { >@@ -416,33 +416,33 @@ struct functor_traits<scalar_imag_ref_op > /** \internal > * > * \brief Template functor to compute the exponential of a scalar > * > * \sa class CwiseUnaryOp, Cwise::exp() > */ > template<typename Scalar> struct scalar_exp_op { > EIGEN_EMPTY_STRUCT_CTOR(scalar_exp_op) >- inline const Scalar operator() (const Scalar& a) const { return internal::exp(a); } >+ inline const Scalar operator() (const Scalar& a) const { using std::exp; return exp(a); } > typedef typename packet_traits<Scalar>::type Packet; > inline Packet packetOp(const Packet& a) const { return internal::pexp(a); } > }; > template<typename Scalar> > struct functor_traits<scalar_exp_op<Scalar> > > { enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasExp }; }; > > /** \internal > * > * \brief Template functor to compute the logarithm of a scalar > * > * \sa class CwiseUnaryOp, Cwise::log() > */ > template<typename Scalar> struct scalar_log_op { > EIGEN_EMPTY_STRUCT_CTOR(scalar_log_op) >- inline const Scalar operator() (const Scalar& a) const { return internal::log(a); } >+ inline const Scalar operator() (const Scalar& a) const { using std::log; return log(a); } > typedef typename packet_traits<Scalar>::type Packet; > inline Packet packetOp(const Packet& a) const { return internal::plog(a); } > }; > template<typename Scalar> > struct functor_traits<scalar_log_op<Scalar> > > { enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasLog }; }; > > /** \internal >@@ -669,17 +669,17 @@ struct functor_traits<scalar_add_op<Scal > { enum { Cost = NumTraits<Scalar>::AddCost, PacketAccess = packet_traits<Scalar>::HasAdd }; }; > > /** \internal > * \brief Template functor to compute the square root of a scalar > * \sa class CwiseUnaryOp, Cwise::sqrt() > */ > template<typename Scalar> struct scalar_sqrt_op { > EIGEN_EMPTY_STRUCT_CTOR(scalar_sqrt_op) >- inline const Scalar operator() (const Scalar& a) const { return internal::sqrt(a); } >+ inline const Scalar operator() (const Scalar& a) const { using std::sqrt; return sqrt(a); } > typedef typename 
packet_traits<Scalar>::type Packet; > inline Packet packetOp(const Packet& a) const { return internal::psqrt(a); } > }; > template<typename Scalar> > struct functor_traits<scalar_sqrt_op<Scalar> > > { enum { > Cost = 5 * NumTraits<Scalar>::MulCost, > PacketAccess = packet_traits<Scalar>::HasSqrt >@@ -687,17 +687,17 @@ struct functor_traits<scalar_sqrt_op<Sca > }; > > /** \internal > * \brief Template functor to compute the cosine of a scalar > * \sa class CwiseUnaryOp, ArrayBase::cos() > */ > template<typename Scalar> struct scalar_cos_op { > EIGEN_EMPTY_STRUCT_CTOR(scalar_cos_op) >- inline Scalar operator() (const Scalar& a) const { return internal::cos(a); } >+ inline Scalar operator() (const Scalar& a) const { using std::cos; return cos(a); } > typedef typename packet_traits<Scalar>::type Packet; > inline Packet packetOp(const Packet& a) const { return internal::pcos(a); } > }; > template<typename Scalar> > struct functor_traits<scalar_cos_op<Scalar> > > { > enum { > Cost = 5 * NumTraits<Scalar>::MulCost, >@@ -706,17 +706,17 @@ struct functor_traits<scalar_cos_op<Scal > }; > > /** \internal > * \brief Template functor to compute the sine of a scalar > * \sa class CwiseUnaryOp, ArrayBase::sin() > */ > template<typename Scalar> struct scalar_sin_op { > EIGEN_EMPTY_STRUCT_CTOR(scalar_sin_op) >- inline const Scalar operator() (const Scalar& a) const { return internal::sin(a); } >+ inline const Scalar operator() (const Scalar& a) const { using std::sin; return sin(a); } > typedef typename packet_traits<Scalar>::type Packet; > inline Packet packetOp(const Packet& a) const { return internal::psin(a); } > }; > template<typename Scalar> > struct functor_traits<scalar_sin_op<Scalar> > > { > enum { > Cost = 5 * NumTraits<Scalar>::MulCost, >@@ -726,17 +726,17 @@ struct functor_traits<scalar_sin_op<Scal > > > /** \internal > * \brief Template functor to compute the tan of a scalar > * \sa class CwiseUnaryOp, ArrayBase::tan() > */ > template<typename Scalar> struct scalar_tan_op { > EIGEN_EMPTY_STRUCT_CTOR(scalar_tan_op) >- inline const Scalar operator() (const Scalar& a) const { return internal::tan(a); } >+ inline const Scalar operator() (const Scalar& a) const { using std::tan; return tan(a); } > typedef typename packet_traits<Scalar>::type Packet; > inline Packet packetOp(const Packet& a) const { return internal::ptan(a); } > }; > template<typename Scalar> > struct functor_traits<scalar_tan_op<Scalar> > > { > enum { > Cost = 5 * NumTraits<Scalar>::MulCost, >@@ -745,17 +745,17 @@ struct functor_traits<scalar_tan_op<Scal > }; > > /** \internal > * \brief Template functor to compute the arc cosine of a scalar > * \sa class CwiseUnaryOp, ArrayBase::acos() > */ > template<typename Scalar> struct scalar_acos_op { > EIGEN_EMPTY_STRUCT_CTOR(scalar_acos_op) >- inline const Scalar operator() (const Scalar& a) const { return internal::acos(a); } >+ inline const Scalar operator() (const Scalar& a) const { using std::acos; return acos(a); } > typedef typename packet_traits<Scalar>::type Packet; > inline Packet packetOp(const Packet& a) const { return internal::pacos(a); } > }; > template<typename Scalar> > struct functor_traits<scalar_acos_op<Scalar> > > { > enum { > Cost = 5 * NumTraits<Scalar>::MulCost, >@@ -764,17 +764,17 @@ struct functor_traits<scalar_acos_op<Sca > }; > > /** \internal > * \brief Template functor to compute the arc sine of a scalar > * \sa class CwiseUnaryOp, ArrayBase::asin() > */ > template<typename Scalar> struct scalar_asin_op { > EIGEN_EMPTY_STRUCT_CTOR(scalar_asin_op) >- inline 
const Scalar operator() (const Scalar& a) const { return internal::asin(a); } >+ inline const Scalar operator() (const Scalar& a) const { using std::asin; return asin(a); } > typedef typename packet_traits<Scalar>::type Packet; > inline Packet packetOp(const Packet& a) const { return internal::pasin(a); } > }; > template<typename Scalar> > struct functor_traits<scalar_asin_op<Scalar> > > { > enum { > Cost = 5 * NumTraits<Scalar>::MulCost, >diff --git a/Eigen/src/Core/GlobalFunctions.h b/Eigen/src/Core/GlobalFunctions.h >--- a/Eigen/src/Core/GlobalFunctions.h >+++ b/Eigen/src/Core/GlobalFunctions.h >@@ -1,22 +1,22 @@ > // This file is part of Eigen, a lightweight C++ template library > // for linear algebra. > // >-// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr> >+// Copyright (C) 2010-2012 Gael Guennebaud <gael.guennebaud@inria.fr> > // Copyright (C) 2010 Benoit Jacob <jacob.benoit.1@gmail.com> > // > // This Source Code Form is subject to the terms of the Mozilla > // Public License v. 2.0. If a copy of the MPL was not distributed > // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. > > #ifndef EIGEN_GLOBAL_FUNCTIONS_H > #define EIGEN_GLOBAL_FUNCTIONS_H > >-#define EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(NAME,FUNCTOR) \ >+#define EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(NAME,FUNCTOR) \ > template<typename Derived> \ > inline const Eigen::CwiseUnaryOp<Eigen::internal::FUNCTOR<typename Derived::Scalar>, const Derived> \ > NAME(const Eigen::ArrayBase<Derived>& x) { \ > return x.derived(); \ > } > > #define EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(NAME,FUNCTOR) \ > \ >@@ -30,49 +30,46 @@ > { \ > static inline typename NAME##_retval<ArrayBase<Derived> >::type run(const Eigen::ArrayBase<Derived>& x) \ > { \ > return x.derived(); \ > } \ > }; > > >-namespace std >+namespace Eigen > { >- EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(real,scalar_real_op) >- EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(imag,scalar_imag_op) >- EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(sin,scalar_sin_op) >- EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(cos,scalar_cos_op) >- EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(asin,scalar_asin_op) >- EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(acos,scalar_acos_op) >- EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(tan,scalar_tan_op) >- EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(exp,scalar_exp_op) >- EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(log,scalar_log_op) >- EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(abs,scalar_abs_op) >- EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(sqrt,scalar_sqrt_op) >- >+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(real,scalar_real_op) >+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(imag,scalar_imag_op) >+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sin,scalar_sin_op) >+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(cos,scalar_cos_op) >+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(asin,scalar_asin_op) >+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(acos,scalar_acos_op) >+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(tan,scalar_tan_op) >+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(exp,scalar_exp_op) >+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log,scalar_log_op) >+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(abs,scalar_abs_op) >+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sqrt,scalar_sqrt_op) >+ > template<typename Derived> > inline const Eigen::CwiseUnaryOp<Eigen::internal::scalar_pow_op<typename Derived::Scalar>, const Derived> > pow(const Eigen::ArrayBase<Derived>& x, const typename Derived::Scalar& exponent) { > return x.derived().pow(exponent); > } > > template<typename Derived> > inline const Eigen::CwiseBinaryOp<Eigen::internal::scalar_binary_pow_op<typename Derived::Scalar, typename Derived::Scalar>, const Derived, const 
Derived> > pow(const Eigen::ArrayBase<Derived>& x, const Eigen::ArrayBase<Derived>& exponents) > { > return Eigen::CwiseBinaryOp<Eigen::internal::scalar_binary_pow_op<typename Derived::Scalar, typename Derived::Scalar>, const Derived, const Derived>( > x.derived(), > exponents.derived() > ); > } >-} >- >-namespace Eigen >-{ >+ > /** > * \brief Component-wise division of a scalar by array elements. > **/ > template <typename Derived> > inline const Eigen::CwiseUnaryOp<Eigen::internal::scalar_inverse_mult_op<typename Derived::Scalar>, const Derived> > operator/(typename Derived::Scalar s, const Eigen::ArrayBase<Derived>& a) > { > return Eigen::CwiseUnaryOp<Eigen::internal::scalar_inverse_mult_op<typename Derived::Scalar>, const Derived>( >@@ -80,24 +77,15 @@ namespace Eigen > Eigen::internal::scalar_inverse_mult_op<typename Derived::Scalar>(s) > ); > } > > namespace internal > { > EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(real,scalar_real_op) > EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(imag,scalar_imag_op) >- EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(sin,scalar_sin_op) >- EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(cos,scalar_cos_op) >- EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(asin,scalar_asin_op) >- EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(acos,scalar_acos_op) >- EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(tan,scalar_tan_op) >- EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(exp,scalar_exp_op) >- EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(log,scalar_log_op) >- EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(abs,scalar_abs_op) > EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(abs2,scalar_abs2_op) >- EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(sqrt,scalar_sqrt_op) > } > } > > // TODO: cleanly disable those functions that are not supported on Array (internal::real_ref, internal::random, internal::isApprox...) > > #endif // EIGEN_GLOBAL_FUNCTIONS_H >diff --git a/Eigen/src/Core/MathFunctions.h b/Eigen/src/Core/MathFunctions.h >--- a/Eigen/src/Core/MathFunctions.h >+++ b/Eigen/src/Core/MathFunctions.h >@@ -246,43 +246,16 @@ struct conj_retval > > template<typename Scalar> > inline EIGEN_MATHFUNC_RETVAL(conj, Scalar) conj(const Scalar& x) > { > return EIGEN_MATHFUNC_IMPL(conj, Scalar)::run(x); > } > > /**************************************************************************** >-* Implementation of abs * >-****************************************************************************/ >- >-template<typename Scalar> >-struct abs_impl >-{ >- typedef typename NumTraits<Scalar>::Real RealScalar; >- static inline RealScalar run(const Scalar& x) >- { >- using std::abs; >- return abs(x); >- } >-}; >- >-template<typename Scalar> >-struct abs_retval >-{ >- typedef typename NumTraits<Scalar>::Real type; >-}; >- >-template<typename Scalar> >-inline EIGEN_MATHFUNC_RETVAL(abs, Scalar) abs(const Scalar& x) >-{ >- return EIGEN_MATHFUNC_IMPL(abs, Scalar)::run(x); >-} >- >-/**************************************************************************** > * Implementation of abs2 * > ****************************************************************************/ > > template<typename Scalar> > struct abs2_impl > { > typedef typename NumTraits<Scalar>::Real RealScalar; > static inline RealScalar run(const Scalar& x) >@@ -400,131 +373,16 @@ struct cast_impl > > template<typename OldType, typename NewType> > inline NewType cast(const OldType& x) > { > return cast_impl<OldType, NewType>::run(x); > } > > /**************************************************************************** >-* Implementation of sqrt * 
>-****************************************************************************/ >- >-template<typename Scalar, bool IsInteger> >-struct sqrt_default_impl >-{ >- static inline Scalar run(const Scalar& x) >- { >- using std::sqrt; >- return sqrt(x); >- } >-}; >- >-template<typename Scalar> >-struct sqrt_default_impl<Scalar, true> >-{ >- static inline Scalar run(const Scalar&) >- { >-#ifdef EIGEN2_SUPPORT >- eigen_assert(!NumTraits<Scalar>::IsInteger); >-#else >- EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) >-#endif >- return Scalar(0); >- } >-}; >- >-template<typename Scalar> >-struct sqrt_impl : sqrt_default_impl<Scalar, NumTraits<Scalar>::IsInteger> {}; >- >-template<typename Scalar> >-struct sqrt_retval >-{ >- typedef Scalar type; >-}; >- >-template<typename Scalar> >-inline EIGEN_MATHFUNC_RETVAL(sqrt, Scalar) sqrt(const Scalar& x) >-{ >- return EIGEN_MATHFUNC_IMPL(sqrt, Scalar)::run(x); >-} >- >-/**************************************************************************** >-* Implementation of standard unary real functions (exp, log, sin, cos, ... * >-****************************************************************************/ >- >-// This macro instanciate all the necessary template mechanism which is common to all unary real functions. >-#define EIGEN_MATHFUNC_STANDARD_REAL_UNARY(NAME) \ >- template<typename Scalar, bool IsInteger> struct NAME##_default_impl { \ >- static inline Scalar run(const Scalar& x) { using std::NAME; return NAME(x); } \ >- }; \ >- template<typename Scalar> struct NAME##_default_impl<Scalar, true> { \ >- static inline Scalar run(const Scalar&) { \ >- EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) \ >- return Scalar(0); \ >- } \ >- }; \ >- template<typename Scalar> struct NAME##_impl \ >- : NAME##_default_impl<Scalar, NumTraits<Scalar>::IsInteger> \ >- {}; \ >- template<typename Scalar> struct NAME##_retval { typedef Scalar type; }; \ >- template<typename Scalar> \ >- inline EIGEN_MATHFUNC_RETVAL(NAME, Scalar) NAME(const Scalar& x) { \ >- return EIGEN_MATHFUNC_IMPL(NAME, Scalar)::run(x); \ >- } >- >-EIGEN_MATHFUNC_STANDARD_REAL_UNARY(exp) >-EIGEN_MATHFUNC_STANDARD_REAL_UNARY(log) >-EIGEN_MATHFUNC_STANDARD_REAL_UNARY(sin) >-EIGEN_MATHFUNC_STANDARD_REAL_UNARY(cos) >-EIGEN_MATHFUNC_STANDARD_REAL_UNARY(tan) >-EIGEN_MATHFUNC_STANDARD_REAL_UNARY(asin) >-EIGEN_MATHFUNC_STANDARD_REAL_UNARY(acos) >- >-/**************************************************************************** >-* Implementation of atan2 * >-****************************************************************************/ >- >-template<typename Scalar, bool IsInteger> >-struct atan2_default_impl >-{ >- typedef Scalar retval; >- static inline Scalar run(const Scalar& x, const Scalar& y) >- { >- using std::atan2; >- return atan2(x, y); >- } >-}; >- >-template<typename Scalar> >-struct atan2_default_impl<Scalar, true> >-{ >- static inline Scalar run(const Scalar&, const Scalar&) >- { >- EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) >- return Scalar(0); >- } >-}; >- >-template<typename Scalar> >-struct atan2_impl : atan2_default_impl<Scalar, NumTraits<Scalar>::IsInteger> {}; >- >-template<typename Scalar> >-struct atan2_retval >-{ >- typedef Scalar type; >-}; >- >-template<typename Scalar> >-inline EIGEN_MATHFUNC_RETVAL(atan2, Scalar) atan2(const Scalar& x, const Scalar& y) >-{ >- return EIGEN_MATHFUNC_IMPL(atan2, Scalar)::run(x, y); >-} >- >-/**************************************************************************** > * Implementation of atanh2 * > ****************************************************************************/ > 
> template<typename Scalar, bool IsInteger> > struct atanh2_default_impl > { > typedef Scalar retval; > typedef typename NumTraits<Scalar>::Real RealScalar; >diff --git a/Eigen/src/Core/StableNorm.h b/Eigen/src/Core/StableNorm.h >--- a/Eigen/src/Core/StableNorm.h >+++ b/Eigen/src/Core/StableNorm.h >@@ -39,30 +39,31 @@ inline void stable_norm_kernel(const Exp > * > * \sa norm(), blueNorm(), hypotNorm() > */ > template<typename Derived> > inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real > MatrixBase<Derived>::stableNorm() const > { > using std::min; >+ using std::sqrt; > const Index blockSize = 4096; > RealScalar scale(0); > RealScalar invScale(1); > RealScalar ssq(0); // sum of square > enum { > Alignment = (int(Flags)&DirectAccessBit) || (int(Flags)&AlignedBit) ? 1 : 0 > }; > Index n = size(); > Index bi = internal::first_aligned(derived()); > if (bi>0) > internal::stable_norm_kernel(this->head(bi), ssq, scale, invScale); > for (; bi<n; bi+=blockSize) > internal::stable_norm_kernel(this->segment(bi,(min)(blockSize, n - bi)).template forceAlignedAccessIf<Alignment>(), ssq, scale, invScale); >- return scale * internal::sqrt(ssq); >+ return scale * sqrt(ssq); > } > > /** \returns the \em l2 norm of \c *this using the Blue's algorithm. > * A Portable Fortran Program to Find the Euclidean Norm of a Vector, > * ACM TOMS, Vol 4, Issue 1, 1978. > * > * For architecture/scalar types without vectorization, this version > * is much faster than stableNorm(). Otherwise the stableNorm() is faster. >@@ -71,16 +72,18 @@ MatrixBase<Derived>::stableNorm() const > */ > template<typename Derived> > inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real > MatrixBase<Derived>::blueNorm() const > { > using std::pow; > using std::min; > using std::max; >+ using std::sqrt; >+ using std::abs; > static Index nmax = -1; > static RealScalar b1, b2, s1m, s2m, overfl, rbig, relerr; > if(nmax <= 0) > { > int nbig, ibeta, it, iemin, iemax, iexp; > RealScalar abig, eps; > // This program calculates the machine-dependent constants > // bl, b2, slm, s2m, relerr overfl, nmax >@@ -104,66 +107,66 @@ MatrixBase<Derived>::blueNorm() const > > iexp = (2-iemin)/2; > s1m = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // scaling factor for lower range > iexp = - ((iemax+it)/2); > s2m = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // scaling factor for upper range > > overfl = rbig*s2m; // overflow boundary for abig > eps = RealScalar(pow(double(ibeta), 1-it)); >- relerr = internal::sqrt(eps); // tolerance for neglecting asml >+ relerr = sqrt(eps); // tolerance for neglecting asml > abig = RealScalar(1.0/eps - 1.0); > if (RealScalar(nbig)>abig) nmax = int(abig); // largest safe n > else nmax = nbig; > } > Index n = size(); > RealScalar ab2 = b2 / RealScalar(n); > RealScalar asml = RealScalar(0); > RealScalar amed = RealScalar(0); > RealScalar abig = RealScalar(0); > for(Index j=0; j<n; ++j) > { >- RealScalar ax = internal::abs(coeff(j)); >+ RealScalar ax = abs(coeff(j)); > if(ax > ab2) abig += internal::abs2(ax*s2m); > else if(ax < b1) asml += internal::abs2(ax*s1m); > else amed += internal::abs2(ax); > } > if(abig > RealScalar(0)) > { >- abig = internal::sqrt(abig); >+ abig = sqrt(abig); > if(abig > overfl) > { > return rbig; > } > if(amed > RealScalar(0)) > { > abig = abig/s2m; >- amed = internal::sqrt(amed); >+ amed = sqrt(amed); > } > else > return abig/s2m; > } > else if(asml > RealScalar(0)) > { > if (amed > RealScalar(0)) > { >- abig = internal::sqrt(amed); >- amed 
= internal::sqrt(asml) / s1m; >+ abig = sqrt(amed); >+ amed = sqrt(asml) / s1m; > } > else >- return internal::sqrt(asml)/s1m; >+ return sqrt(asml)/s1m; > } > else >- return internal::sqrt(amed); >+ return sqrt(amed); > asml = (min)(abig, amed); > abig = (max)(abig, amed); > if(asml <= abig*relerr) > return abig; > else >- return abig * internal::sqrt(RealScalar(1) + internal::abs2(asml/abig)); >+ return abig * sqrt(RealScalar(1) + internal::abs2(asml/abig)); > } > > /** \returns the \em l2 norm of \c *this avoiding undeflow and overflow. > * This version use a concatenation of hypot() calls, and it is very slow. > * > * \sa norm(), stableNorm() > */ > template<typename Derived> >diff --git a/Eigen/src/Core/TriangularMatrix.h b/Eigen/src/Core/TriangularMatrix.h >--- a/Eigen/src/Core/TriangularMatrix.h >+++ b/Eigen/src/Core/TriangularMatrix.h >@@ -776,53 +776,55 @@ MatrixBase<Derived>::triangularView() co > /** \returns true if *this is approximately equal to an upper triangular matrix, > * within the precision given by \a prec. > * > * \sa isLowerTriangular() > */ > template<typename Derived> > bool MatrixBase<Derived>::isUpperTriangular(const RealScalar& prec) const > { >+ using std::abs; > RealScalar maxAbsOnUpperPart = static_cast<RealScalar>(-1); > for(Index j = 0; j < cols(); ++j) > { > Index maxi = (std::min)(j, rows()-1); > for(Index i = 0; i <= maxi; ++i) > { >- RealScalar absValue = internal::abs(coeff(i,j)); >+ RealScalar absValue = abs(coeff(i,j)); > if(absValue > maxAbsOnUpperPart) maxAbsOnUpperPart = absValue; > } > } > RealScalar threshold = maxAbsOnUpperPart * prec; > for(Index j = 0; j < cols(); ++j) > for(Index i = j+1; i < rows(); ++i) >- if(internal::abs(coeff(i, j)) > threshold) return false; >+ if(abs(coeff(i, j)) > threshold) return false; > return true; > } > > /** \returns true if *this is approximately equal to a lower triangular matrix, > * within the precision given by \a prec. 
> * > * \sa isUpperTriangular() > */ > template<typename Derived> > bool MatrixBase<Derived>::isLowerTriangular(const RealScalar& prec) const > { >+ using std::abs; > RealScalar maxAbsOnLowerPart = static_cast<RealScalar>(-1); > for(Index j = 0; j < cols(); ++j) > for(Index i = j; i < rows(); ++i) > { >- RealScalar absValue = internal::abs(coeff(i,j)); >+ RealScalar absValue = abs(coeff(i,j)); > if(absValue > maxAbsOnLowerPart) maxAbsOnLowerPart = absValue; > } > RealScalar threshold = maxAbsOnLowerPart * prec; > for(Index j = 1; j < cols(); ++j) > { > Index maxi = (std::min)(j, rows()-1); > for(Index i = 0; i < maxi; ++i) >- if(internal::abs(coeff(i, j)) > threshold) return false; >+ if(abs(coeff(i, j)) > threshold) return false; > } > return true; > } > > } // end namespace Eigen > > #endif // EIGEN_TRIANGULARMATRIX_H >diff --git a/Eigen/src/Eigen2Support/MathFunctions.h b/Eigen/src/Eigen2Support/MathFunctions.h >--- a/Eigen/src/Eigen2Support/MathFunctions.h >+++ b/Eigen/src/Eigen2Support/MathFunctions.h >@@ -10,24 +10,24 @@ > #ifndef EIGEN2_MATH_FUNCTIONS_H > #define EIGEN2_MATH_FUNCTIONS_H > > namespace Eigen { > > template<typename T> inline typename NumTraits<T>::Real ei_real(const T& x) { return internal::real(x); } > template<typename T> inline typename NumTraits<T>::Real ei_imag(const T& x) { return internal::imag(x); } > template<typename T> inline T ei_conj(const T& x) { return internal::conj(x); } >-template<typename T> inline typename NumTraits<T>::Real ei_abs (const T& x) { return internal::abs(x); } >+template<typename T> inline typename NumTraits<T>::Real ei_abs (const T& x) { using std::abs; return abs(x); } > template<typename T> inline typename NumTraits<T>::Real ei_abs2(const T& x) { return internal::abs2(x); } >-template<typename T> inline T ei_sqrt(const T& x) { return internal::sqrt(x); } >-template<typename T> inline T ei_exp (const T& x) { return internal::exp(x); } >-template<typename T> inline T ei_log (const T& x) { return internal::log(x); } >-template<typename T> inline T ei_sin (const T& x) { return internal::sin(x); } >-template<typename T> inline T ei_cos (const T& x) { return internal::cos(x); } >-template<typename T> inline T ei_atan2(const T& x,const T& y) { return internal::atan2(x,y); } >+template<typename T> inline T ei_sqrt(const T& x) { using std::sqrt; return sqrt(x); } >+template<typename T> inline T ei_exp (const T& x) { using std::exp; return exp(x); } >+template<typename T> inline T ei_log (const T& x) { using std::log; return log(x); } >+template<typename T> inline T ei_sin (const T& x) { using std::sin; return sin(x); } >+template<typename T> inline T ei_cos (const T& x) { using std::cos; return cos(x); } >+template<typename T> inline T ei_atan2(const T& x,const T& y) { using std::atan2; return atan2(x,y); } > template<typename T> inline T ei_pow (const T& x,const T& y) { return internal::pow(x,y); } > template<typename T> inline T ei_random () { return internal::random<T>(); } > template<typename T> inline T ei_random (const T& x, const T& y) { return internal::random(x, y); } > > template<typename T> inline T precision () { return NumTraits<T>::dummy_precision(); } > template<typename T> inline T machine_epsilon () { return NumTraits<T>::epsilon(); } > > >diff --git a/Eigen/src/Eigenvalues/ComplexSchur.h b/Eigen/src/Eigenvalues/ComplexSchur.h >--- a/Eigen/src/Eigenvalues/ComplexSchur.h >+++ b/Eigen/src/Eigenvalues/ComplexSchur.h >@@ -253,20 +253,21 @@ inline bool ComplexSchur<MatrixType>::su > return false; > } > > > /** Compute the shift 
in the current QR iteration. */ > template<typename MatrixType> > typename ComplexSchur<MatrixType>::ComplexScalar ComplexSchur<MatrixType>::computeShift(Index iu, Index iter) > { >+ using std::abs; > if (iter == 10 || iter == 20) > { > // exceptional shift, taken from http://www.netlib.org/eispack/comqr.f >- return internal::abs(internal::real(m_matT.coeff(iu,iu-1))) + internal::abs(internal::real(m_matT.coeff(iu-1,iu-2))); >+ return abs(internal::real(m_matT.coeff(iu,iu-1))) + abs(internal::real(m_matT.coeff(iu-1,iu-2))); > } > > // compute the shift as one of the eigenvalues of t, the 2x2 > // diagonal block on the bottom of the active submatrix > Matrix<ComplexScalar,2,2> t = m_matT.template block<2,2>(iu-1,iu-1); > RealScalar normt = t.cwiseAbs().sum(); > t /= normt; // the normalization by sf is to avoid under/overflow > >diff --git a/Eigen/src/Eigenvalues/EigenSolver.h b/Eigen/src/Eigenvalues/EigenSolver.h >--- a/Eigen/src/Eigenvalues/EigenSolver.h >+++ b/Eigen/src/Eigenvalues/EigenSolver.h >@@ -359,16 +359,18 @@ typename EigenSolver<MatrixType>::Eigenv > } > return matV; > } > > template<typename MatrixType> > EigenSolver<MatrixType>& > EigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors) > { >+ using std::sqrt; >+ using std::abs; > assert(matrix.cols() == matrix.rows()); > > // Reduce to real Schur form. > m_realSchur.compute(matrix, computeEigenvectors); > > if (m_realSchur.info() == Success) > { > m_matT = m_realSchur.matrixT(); >@@ -383,17 +385,17 @@ EigenSolver<MatrixType>::compute(const M > if (i == matrix.cols() - 1 || m_matT.coeff(i+1, i) == Scalar(0)) > { > m_eivalues.coeffRef(i) = m_matT.coeff(i, i); > ++i; > } > else > { > Scalar p = Scalar(0.5) * (m_matT.coeff(i, i) - m_matT.coeff(i+1, i+1)); >- Scalar z = internal::sqrt(internal::abs(p * p + m_matT.coeff(i+1, i) * m_matT.coeff(i, i+1))); >+ Scalar z = sqrt(abs(p * p + m_matT.coeff(i+1, i) * m_matT.coeff(i, i+1))); > m_eivalues.coeffRef(i) = ComplexScalar(m_matT.coeff(i+1, i+1) + p, z); > m_eivalues.coeffRef(i+1) = ComplexScalar(m_matT.coeff(i+1, i+1) + p, -z); > i += 2; > } > } > > // Compute eigenvectors. > if (computeEigenvectors) >@@ -405,18 +407,19 @@ EigenSolver<MatrixType>::compute(const M > > return *this; > } > > // Complex scalar division. > template<typename Scalar> > std::complex<Scalar> cdiv(Scalar xr, Scalar xi, Scalar yr, Scalar yi) > { >+ using std::abs; > Scalar r,d; >- if (internal::abs(yr) > internal::abs(yi)) >+ if (abs(yr) > abs(yi)) > { > r = yi/yr; > d = yr + r*yi; > return std::complex<Scalar>((xr + r*xi)/d, (xi - r*xr)/d); > } > else > { > r = yr/yi; >@@ -424,16 +427,17 @@ std::complex<Scalar> cdiv(Scalar xr, Sca > return std::complex<Scalar>((r*xr + xi)/d, (r*xi - xr)/d); > } > } > > > template<typename MatrixType> > void EigenSolver<MatrixType>::doComputeEigenvectors() > { >+ using std::abs; > const Index size = m_eivec.cols(); > const Scalar eps = NumTraits<Scalar>::epsilon(); > > // inefficient! 
this is already computed in RealSchur > Scalar norm(0); > for (Index j = 0; j < size; ++j) > { > norm += m_matT.row(j).segment((std::max)(j-1,Index(0)), size-(std::max)(j-1,Index(0))).cwiseAbs().sum(); >@@ -479,36 +483,36 @@ void EigenSolver<MatrixType>::doComputeE > } > else // Solve real equations > { > Scalar x = m_matT.coeff(i,i+1); > Scalar y = m_matT.coeff(i+1,i); > Scalar denom = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag(); > Scalar t = (x * lastr - lastw * r) / denom; > m_matT.coeffRef(i,n) = t; >- if (internal::abs(x) > internal::abs(lastw)) >+ if (abs(x) > abs(lastw)) > m_matT.coeffRef(i+1,n) = (-r - w * t) / x; > else > m_matT.coeffRef(i+1,n) = (-lastr - y * t) / lastw; > } > > // Overflow control >- Scalar t = internal::abs(m_matT.coeff(i,n)); >+ Scalar t = abs(m_matT.coeff(i,n)); > if ((eps * t) * t > Scalar(1)) > m_matT.col(n).tail(size-i) /= t; > } > } > } > else if (q < Scalar(0) && n > 0) // Complex vector > { > Scalar lastra(0), lastsa(0), lastw(0); > Index l = n-1; > > // Last vector component imaginary so matrix is triangular >- if (internal::abs(m_matT.coeff(n,n-1)) > internal::abs(m_matT.coeff(n-1,n))) >+ if (abs(m_matT.coeff(n,n-1)) > abs(m_matT.coeff(n-1,n))) > { > m_matT.coeffRef(n-1,n-1) = q / m_matT.coeff(n,n-1); > m_matT.coeffRef(n-1,n) = -(m_matT.coeff(n,n) - p) / m_matT.coeff(n,n-1); > } > else > { > std::complex<Scalar> cc = cdiv<Scalar>(0.0,-m_matT.coeff(n-1,n),m_matT.coeff(n-1,n-1)-p,q); > m_matT.coeffRef(n-1,n-1) = internal::real(cc); >@@ -540,37 +544,37 @@ void EigenSolver<MatrixType>::doComputeE > else > { > // Solve complex equations > Scalar x = m_matT.coeff(i,i+1); > Scalar y = m_matT.coeff(i+1,i); > Scalar vr = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag() - q * q; > Scalar vi = (m_eivalues.coeff(i).real() - p) * Scalar(2) * q; > if ((vr == 0.0) && (vi == 0.0)) >- vr = eps * norm * (internal::abs(w) + internal::abs(q) + internal::abs(x) + internal::abs(y) + internal::abs(lastw)); >+ vr = eps * norm * (abs(w) + abs(q) + abs(x) + abs(y) + abs(lastw)); > > std::complex<Scalar> cc = cdiv(x*lastra-lastw*ra+q*sa,x*lastsa-lastw*sa-q*ra,vr,vi); > m_matT.coeffRef(i,n-1) = internal::real(cc); > m_matT.coeffRef(i,n) = internal::imag(cc); >- if (internal::abs(x) > (internal::abs(lastw) + internal::abs(q))) >+ if (abs(x) > (abs(lastw) + abs(q))) > { > m_matT.coeffRef(i+1,n-1) = (-ra - w * m_matT.coeff(i,n-1) + q * m_matT.coeff(i,n)) / x; > m_matT.coeffRef(i+1,n) = (-sa - w * m_matT.coeff(i,n) - q * m_matT.coeff(i,n-1)) / x; > } > else > { > cc = cdiv(-lastra-y*m_matT.coeff(i,n-1),-lastsa-y*m_matT.coeff(i,n),lastw,q); > m_matT.coeffRef(i+1,n-1) = internal::real(cc); > m_matT.coeffRef(i+1,n) = internal::imag(cc); > } > } > > // Overflow control > using std::max; >- Scalar t = (max)(internal::abs(m_matT.coeff(i,n-1)),internal::abs(m_matT.coeff(i,n))); >+ Scalar t = (max)(abs(m_matT.coeff(i,n-1)),abs(m_matT.coeff(i,n))); > if ((eps * t) * t > Scalar(1)) > m_matT.block(i, n-1, size-i, 2) /= t; > > } > } > > // We handled a pair of complex conjugate eigenvalues, so need to skip them both > n--; >diff --git a/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h b/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h >--- a/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h >+++ b/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h >@@ -285,16 +285,18 @@ template<typename _MatrixType> class Gen > // // TODO > // return matV; > //} > 
> template<typename MatrixType> > GeneralizedEigenSolver<MatrixType>& > GeneralizedEigenSolver<MatrixType>::compute(const MatrixType& A, const MatrixType& B, bool computeEigenvectors) > { >+ using std::sqrt; >+ using std::abs; > eigen_assert(A.cols() == A.rows() && B.cols() == A.rows() && B.cols() == B.rows()); > > // Reduce to generalized real Schur form: > // A = Q S Z and B = Q T Z > m_realQZ.compute(A, B, computeEigenvectors); > > if (m_realQZ.info() == Success) > { >@@ -312,17 +314,17 @@ GeneralizedEigenSolver<MatrixType>::comp > { > m_alphas.coeffRef(i) = m_matS.coeff(i, i); > m_betas.coeffRef(i) = m_realQZ.matrixT().coeff(i,i); > ++i; > } > else > { > Scalar p = Scalar(0.5) * (m_matS.coeff(i, i) - m_matS.coeff(i+1, i+1)); >- Scalar z = internal::sqrt(internal::abs(p * p + m_matS.coeff(i+1, i) * m_matS.coeff(i, i+1))); >+ Scalar z = sqrt(abs(p * p + m_matS.coeff(i+1, i) * m_matS.coeff(i, i+1))); > m_alphas.coeffRef(i) = ComplexScalar(m_matS.coeff(i+1, i+1) + p, z); > m_alphas.coeffRef(i+1) = ComplexScalar(m_matS.coeff(i+1, i+1) + p, -z); > > m_betas.coeffRef(i) = m_realQZ.matrixT().coeff(i,i); > m_betas.coeffRef(i+1) = m_realQZ.matrixT().coeff(i,i); > i += 2; > } > } >diff --git a/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h b/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h >--- a/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h >+++ b/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h >@@ -116,20 +116,21 @@ SelfAdjointView<MatrixType, UpLo>::eigen > * Output: \verbinclude MatrixBase_operatorNorm.out > * > * \sa SelfAdjointView::eigenvalues(), SelfAdjointView::operatorNorm() > */ > template<typename Derived> > inline typename MatrixBase<Derived>::RealScalar > MatrixBase<Derived>::operatorNorm() const > { >+ using std::sqrt; > typename Derived::PlainObject m_eval(derived()); > // FIXME if it is really guaranteed that the eigenvalues are already sorted, > // then we don't need to compute a maxCoeff() here, comparing the 1st and last ones is enough. 
>- return internal::sqrt((m_eval*m_eval.adjoint()) >+ return sqrt((m_eval*m_eval.adjoint()) > .eval() > .template selfadjointView<Lower>() > .eigenvalues() > .maxCoeff() > ); > } > > /** \brief Computes the L2 operator norm >diff --git a/Eigen/src/Eigenvalues/RealQZ.h b/Eigen/src/Eigenvalues/RealQZ.h >--- a/Eigen/src/Eigenvalues/RealQZ.h >+++ b/Eigen/src/Eigenvalues/RealQZ.h >@@ -273,59 +273,63 @@ namespace Eigen { > } > } > > > /** \internal Look for single small sub-diagonal element S(res, res-1) and return res (or 0) */ > template<typename MatrixType> > inline typename MatrixType::Index RealQZ<MatrixType>::findSmallSubdiagEntry(Index iu) > { >+ using std::abs; > Index res = iu; > while (res > 0) > { >- Scalar s = internal::abs(m_S.coeff(res-1,res-1)) + internal::abs(m_S.coeff(res,res)); >+ Scalar s = abs(m_S.coeff(res-1,res-1)) + abs(m_S.coeff(res,res)); > if (s == Scalar(0.0)) > s = m_normOfS; >- if (internal::abs(m_S.coeff(res,res-1)) < NumTraits<Scalar>::epsilon() * s) >+ if (abs(m_S.coeff(res,res-1)) < NumTraits<Scalar>::epsilon() * s) > break; > res--; > } > return res; > } > > /** \internal Look for single small diagonal element T(res, res) for res between f and l, and return res (or f-1) */ > template<typename MatrixType> > inline typename MatrixType::Index RealQZ<MatrixType>::findSmallDiagEntry(Index f, Index l) > { >+ using std::abs; > Index res = l; > while (res >= f) { >- if (internal::abs(m_T.coeff(res,res)) <= NumTraits<Scalar>::epsilon() * m_normOfT) >+ if (abs(m_T.coeff(res,res)) <= NumTraits<Scalar>::epsilon() * m_normOfT) > break; > res--; > } > return res; > } > > /** \internal decouple 2x2 diagonal block in rows i, i+1 if eigenvalues are real */ > template<typename MatrixType> > inline void RealQZ<MatrixType>::splitOffTwoRows(Index i) > { >+ using std::abs; >+ using std::sqrt; > const Index dim=m_S.cols(); >- if (internal::abs(m_S.coeff(i+1,i)==Scalar(0))) >+ if (abs(m_S.coeff(i+1,i)==Scalar(0))) > return; > Index z = findSmallDiagEntry(i,i+1); > if (z==i-1) > { > // block of (S T^{-1}) > Matrix2s STi = m_T.template block<2,2>(i,i).template triangularView<Upper>(). 
> template solve<OnTheRight>(m_S.template block<2,2>(i,i)); > Scalar p = Scalar(0.5)*(STi(0,0)-STi(1,1)); > Scalar q = p*p + STi(1,0)*STi(0,1); > if (q>=0) { >- Scalar z = internal::sqrt(q); >+ Scalar z = sqrt(q); > // one QR-like iteration for ABi - lambda I > // is enough - when we know exact eigenvalue in advance, > // convergence is immediate > JRs G; > if (p>=0) > G.makeGivens(p + z, STi(1,0)); > else > G.makeGivens(p - z, STi(1,0)); >@@ -388,17 +392,19 @@ namespace Eigen { > m_S.coeffRef(l,l-1)=Scalar(0.0); > // update Z > if (m_computeQZ) > m_Z.applyOnTheLeft(l,l-1,G.adjoint()); > } > > /** \internal QR-like iterative step for block f..l */ > template<typename MatrixType> >- inline void RealQZ<MatrixType>::step(Index f, Index l, Index iter) { >+ inline void RealQZ<MatrixType>::step(Index f, Index l, Index iter) >+ { >+ using std::abs; > const Index dim = m_S.cols(); > > // x, y, z > Scalar x, y, z; > if (iter==10) > { > // Wilkinson ad hoc shift > const Scalar >@@ -406,17 +412,17 @@ namespace Eigen { > a21=m_S.coeff(f+1,f+0), a22=m_S.coeff(f+1,f+1), a32=m_S.coeff(f+2,f+1), > b12=m_T.coeff(f+0,f+1), > b11i=Scalar(1.0)/m_T.coeff(f+0,f+0), > b22i=Scalar(1.0)/m_T.coeff(f+1,f+1), > a87=m_S.coeff(l-1,l-2), > a98=m_S.coeff(l-0,l-1), > b77i=Scalar(1.0)/m_T.coeff(l-2,l-2), > b88i=Scalar(1.0)/m_T.coeff(l-1,l-1); >- Scalar ss = internal::abs(a87*b77i) + internal::abs(a98*b88i), >+ Scalar ss = abs(a87*b77i) + abs(a98*b88i), > lpl = Scalar(1.5)*ss, > ll = ss*ss; > x = ll + a11*a11*b11i*b11i - lpl*a11*b11i + a12*a21*b11i*b22i > - a11*a21*b12*b11i*b11i*b22i; > y = a11*a21*b11i*b11i - lpl*a21*b11i + a21*a22*b11i*b22i > - a21*a21*b12*b11i*b11i*b22i; > z = a21*a32*b11i*b22i; > } >diff --git a/Eigen/src/Eigenvalues/RealSchur.h b/Eigen/src/Eigenvalues/RealSchur.h >--- a/Eigen/src/Eigenvalues/RealSchur.h >+++ b/Eigen/src/Eigenvalues/RealSchur.h >@@ -309,45 +309,48 @@ inline typename MatrixType::Scalar RealS > norm += m_matT.col(j).segment(0, (std::min)(size,j+2)).cwiseAbs().sum(); > return norm; > } > > /** \internal Look for single small sub-diagonal element and returns its index */ > template<typename MatrixType> > inline typename MatrixType::Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu, Scalar norm) > { >+ using std::abs; > Index res = iu; > while (res > 0) > { >- Scalar s = internal::abs(m_matT.coeff(res-1,res-1)) + internal::abs(m_matT.coeff(res,res)); >+ Scalar s = abs(m_matT.coeff(res-1,res-1)) + abs(m_matT.coeff(res,res)); > if (s == 0.0) > s = norm; >- if (internal::abs(m_matT.coeff(res,res-1)) < NumTraits<Scalar>::epsilon() * s) >+ if (abs(m_matT.coeff(res,res-1)) < NumTraits<Scalar>::epsilon() * s) > break; > res--; > } > return res; > } > > /** \internal Update T given that rows iu-1 and iu decouple from the rest. 
*/ > template<typename MatrixType> > inline void RealSchur<MatrixType>::splitOffTwoRows(Index iu, bool computeU, Scalar exshift) > { >+ using std::sqrt; >+ using std::abs; > const Index size = m_matT.cols(); > > // The eigenvalues of the 2x2 matrix [a b; c d] are > // trace +/- sqrt(discr/4) where discr = tr^2 - 4*det, tr = a + d, det = ad - bc > Scalar p = Scalar(0.5) * (m_matT.coeff(iu-1,iu-1) - m_matT.coeff(iu,iu)); > Scalar q = p * p + m_matT.coeff(iu,iu-1) * m_matT.coeff(iu-1,iu); // q = tr^2 / 4 - det = discr/4 > m_matT.coeffRef(iu,iu) += exshift; > m_matT.coeffRef(iu-1,iu-1) += exshift; > > if (q >= Scalar(0)) // Two real eigenvalues > { >- Scalar z = internal::sqrt(internal::abs(q)); >+ Scalar z = sqrt(abs(q)); > JacobiRotation<Scalar> rot; > if (p >= Scalar(0)) > rot.makeGivens(p + z, m_matT.coeff(iu, iu-1)); > else > rot.makeGivens(p - z, m_matT.coeff(iu, iu-1)); > > m_matT.rightCols(size-iu+1).applyOnTheLeft(iu-1, iu, rot.adjoint()); > m_matT.topRows(iu+1).applyOnTheRight(iu-1, iu, rot); >@@ -359,72 +362,75 @@ inline void RealSchur<MatrixType>::split > if (iu > 1) > m_matT.coeffRef(iu-1, iu-2) = Scalar(0); > } > > /** \internal Form shift in shiftInfo, and update exshift if an exceptional shift is performed. */ > template<typename MatrixType> > inline void RealSchur<MatrixType>::computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo) > { >+ using std::sqrt; >+ using std::abs; > shiftInfo.coeffRef(0) = m_matT.coeff(iu,iu); > shiftInfo.coeffRef(1) = m_matT.coeff(iu-1,iu-1); > shiftInfo.coeffRef(2) = m_matT.coeff(iu,iu-1) * m_matT.coeff(iu-1,iu); > > // Wilkinson's original ad hoc shift > if (iter == 10) > { > exshift += shiftInfo.coeff(0); > for (Index i = 0; i <= iu; ++i) > m_matT.coeffRef(i,i) -= shiftInfo.coeff(0); >- Scalar s = internal::abs(m_matT.coeff(iu,iu-1)) + internal::abs(m_matT.coeff(iu-1,iu-2)); >+ Scalar s = abs(m_matT.coeff(iu,iu-1)) + abs(m_matT.coeff(iu-1,iu-2)); > shiftInfo.coeffRef(0) = Scalar(0.75) * s; > shiftInfo.coeffRef(1) = Scalar(0.75) * s; > shiftInfo.coeffRef(2) = Scalar(-0.4375) * s * s; > } > > // MATLAB's new ad hoc shift > if (iter == 30) > { > Scalar s = (shiftInfo.coeff(1) - shiftInfo.coeff(0)) / Scalar(2.0); > s = s * s + shiftInfo.coeff(2); > if (s > Scalar(0)) > { >- s = internal::sqrt(s); >+ s = sqrt(s); > if (shiftInfo.coeff(1) < shiftInfo.coeff(0)) > s = -s; > s = s + (shiftInfo.coeff(1) - shiftInfo.coeff(0)) / Scalar(2.0); > s = shiftInfo.coeff(0) - shiftInfo.coeff(2) / s; > exshift += s; > for (Index i = 0; i <= iu; ++i) > m_matT.coeffRef(i,i) -= s; > shiftInfo.setConstant(Scalar(0.964)); > } > } > } > > /** \internal Compute index im at which Francis QR step starts and the first Householder vector. 
*/ > template<typename MatrixType> > inline void RealSchur<MatrixType>::initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector) > { >+ using std::abs; > Vector3s& v = firstHouseholderVector; // alias to save typing > > for (im = iu-2; im >= il; --im) > { > const Scalar Tmm = m_matT.coeff(im,im); > const Scalar r = shiftInfo.coeff(0) - Tmm; > const Scalar s = shiftInfo.coeff(1) - Tmm; > v.coeffRef(0) = (r * s - shiftInfo.coeff(2)) / m_matT.coeff(im+1,im) + m_matT.coeff(im,im+1); > v.coeffRef(1) = m_matT.coeff(im+1,im+1) - Tmm - r - s; > v.coeffRef(2) = m_matT.coeff(im+2,im+1); > if (im == il) { > break; > } >- const Scalar lhs = m_matT.coeff(im,im-1) * (internal::abs(v.coeff(1)) + internal::abs(v.coeff(2))); >- const Scalar rhs = v.coeff(0) * (internal::abs(m_matT.coeff(im-1,im-1)) + internal::abs(Tmm) + internal::abs(m_matT.coeff(im+1,im+1))); >- if (internal::abs(lhs) < NumTraits<Scalar>::epsilon() * rhs) >+ const Scalar lhs = m_matT.coeff(im,im-1) * (abs(v.coeff(1)) + abs(v.coeff(2))); >+ const Scalar rhs = v.coeff(0) * (abs(m_matT.coeff(im-1,im-1)) + abs(Tmm) + abs(m_matT.coeff(im+1,im+1))); >+ if (abs(lhs) < NumTraits<Scalar>::epsilon() * rhs) > { > break; > } > } > } > > /** \internal Perform a Francis QR step involving rows il:iu and columns im:iu. */ > template<typename MatrixType> >diff --git a/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h b/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h >--- a/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h >+++ b/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h >@@ -379,16 +379,17 @@ namespace internal { > template<int StorageOrder,typename RealScalar, typename Scalar, typename Index> > static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n); > } > > template<typename MatrixType> > SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType> > ::compute(const MatrixType& matrix, int options) > { >+ using std::abs; > eigen_assert(matrix.cols() == matrix.rows()); > eigen_assert((options&~(EigVecMask|GenEigMask))==0 > && (options&EigVecMask)!=EigVecMask > && "invalid option parameter"); > bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors; > Index n = matrix.cols(); > m_eivalues.resize(n,1); > >@@ -416,17 +417,17 @@ SelfAdjointEigenSolver<MatrixType>& Self > > Index end = n-1; > Index start = 0; > Index iter = 0; // total number of iterations > > while (end>0) > { > for (Index i = start; i<end; ++i) >- if (internal::isMuchSmallerThan(internal::abs(m_subdiag[i]),(internal::abs(diag[i])+internal::abs(diag[i+1])))) >+ if (internal::isMuchSmallerThan(abs(m_subdiag[i]),(abs(diag[i])+abs(diag[i+1])))) > m_subdiag[i] = 0; > > // find the largest unreduced block > while (end>0 && m_subdiag[end-1]==0) > { > end--; > } > if (end<=0) >@@ -670,16 +671,17 @@ template<typename SolverType> struct dir > const Scalar t0 = Scalar(0.5) * sqrt( abs2(m(0,0)-m(1,1)) + Scalar(4)*m(1,0)*m(1,0)); > const Scalar t1 = Scalar(0.5) * (m(0,0) + m(1,1)); > roots(0) = t1 - t0; > roots(1) = t1 + t0; > } > > static inline void run(SolverType& solver, const MatrixType& mat, int options) > { >+ using std::sqrt; > eigen_assert(mat.cols() == 2 && mat.cols() == mat.rows()); > eigen_assert((options&~(EigVecMask|GenEigMask))==0 > && (options&EigVecMask)!=EigVecMask > && "invalid option parameter"); > bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors; > > MatrixType& eivecs = solver.m_eivec; > VectorType& eivals 
= solver.m_eivalues; >@@ -731,16 +733,17 @@ SelfAdjointEigenSolver<MatrixType>& Self > internal::direct_selfadjoint_eigenvalues<SelfAdjointEigenSolver,Size,NumTraits<Scalar>::IsComplex>::run(*this,matrix,options); > return *this; > } > > namespace internal { > template<int StorageOrder,typename RealScalar, typename Scalar, typename Index> > static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n) > { >+ using std::abs; > RealScalar td = (diag[end-1] - diag[end])*RealScalar(0.5); > RealScalar e = subdiag[end-1]; > // Note that thanks to scaling, e^2 or td^2 cannot overflow, however they can still > // underflow thus leading to inf/NaN values when using the following commented code: > // RealScalar e2 = abs2(subdiag[end-1]); > // RealScalar mu = diag[end] - e2 / (td + (td>0 ? 1 : -1) * sqrt(td*td + e2)); > // This explain the following, somewhat more complicated, version: > RealScalar mu = diag[end]; >diff --git a/Eigen/src/Eigenvalues/Tridiagonalization.h b/Eigen/src/Eigenvalues/Tridiagonalization.h >--- a/Eigen/src/Eigenvalues/Tridiagonalization.h >+++ b/Eigen/src/Eigenvalues/Tridiagonalization.h >@@ -462,16 +462,17 @@ template<typename MatrixType> > struct tridiagonalization_inplace_selector<MatrixType,3,false> > { > typedef typename MatrixType::Scalar Scalar; > typedef typename MatrixType::RealScalar RealScalar; > > template<typename DiagonalType, typename SubDiagonalType> > static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ) > { >+ using std::sqrt; > diag[0] = mat(0,0); > RealScalar v1norm2 = abs2(mat(2,0)); > if(v1norm2 == RealScalar(0)) > { > diag[1] = mat(1,1); > diag[2] = mat(2,2); > subdiag[0] = mat(1,0); > subdiag[1] = mat(2,1); >diff --git a/Eigen/src/Geometry/AlignedBox.h b/Eigen/src/Geometry/AlignedBox.h >--- a/Eigen/src/Geometry/AlignedBox.h >+++ b/Eigen/src/Geometry/AlignedBox.h >@@ -243,24 +243,24 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTO > inline Scalar squaredExteriorDistance(const AlignedBox& b) const; > > /** \returns the distance between the point \a p and the box \c *this, > * and zero if \a p is inside the box. > * \sa squaredExteriorDistance() > */ > template<typename Derived> > inline NonInteger exteriorDistance(const MatrixBase<Derived>& p) const >- { return internal::sqrt(NonInteger(squaredExteriorDistance(p))); } >+ { using std::sqrt; return sqrt(NonInteger(squaredExteriorDistance(p))); } > > /** \returns the distance between the boxes \a b and \c *this, > * and zero if the boxes intersect. > * \sa squaredExteriorDistance() > */ > inline NonInteger exteriorDistance(const AlignedBox& b) const >- { return internal::sqrt(NonInteger(squaredExteriorDistance(b))); } >+ { using std::sqrt; return sqrt(NonInteger(squaredExteriorDistance(b))); } > > /** \returns \c *this with scalar type casted to \a NewScalarType > * > * Note that if \a NewScalarType is equal to the current scalar type of \c *this > * then this function smartly returns a const reference to \c *this. 
> */ > template<typename NewScalarType> > inline typename internal::cast_return_type<AlignedBox, >diff --git a/Eigen/src/Geometry/AngleAxis.h b/Eigen/src/Geometry/AngleAxis.h >--- a/Eigen/src/Geometry/AngleAxis.h >+++ b/Eigen/src/Geometry/AngleAxis.h >@@ -156,26 +156,27 @@ typedef AngleAxis<double> AngleAxisd; > */ > template<typename Scalar> > template<typename QuatDerived> > AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const QuaternionBase<QuatDerived>& q) > { > using std::acos; > using std::min; > using std::max; >+ using std::sqrt; > Scalar n2 = q.vec().squaredNorm(); > if (n2 < NumTraits<Scalar>::dummy_precision()*NumTraits<Scalar>::dummy_precision()) > { > m_angle = 0; > m_axis << 1, 0, 0; > } > else > { > m_angle = Scalar(2)*acos((min)((max)(Scalar(-1),q.w()),Scalar(1))); >- m_axis = q.vec() / internal::sqrt(n2); >+ m_axis = q.vec() / sqrt(n2); > } > return *this; > } > > /** Set \c *this from a 3x3 rotation matrix \a mat. > */ > template<typename Scalar> > template<typename Derived> >@@ -197,19 +198,21 @@ AngleAxis<Scalar>& AngleAxis<Scalar>::fr > } > > /** Constructs and \returns an equivalent 3x3 rotation matrix. > */ > template<typename Scalar> > typename AngleAxis<Scalar>::Matrix3 > AngleAxis<Scalar>::toRotationMatrix(void) const > { >+ using std::sin; >+ using std::cos; > Matrix3 res; >- Vector3 sin_axis = internal::sin(m_angle) * m_axis; >- Scalar c = internal::cos(m_angle); >+ Vector3 sin_axis = sin(m_angle) * m_axis; >+ Scalar c = cos(m_angle); > Vector3 cos1_axis = (Scalar(1)-c) * m_axis; > > Scalar tmp; > tmp = cos1_axis.x() * m_axis.y(); > res.coeffRef(0,1) = tmp - sin_axis.z(); > res.coeffRef(1,0) = tmp + sin_axis.z(); > > tmp = cos1_axis.x() * m_axis.z(); >diff --git a/Eigen/src/Geometry/EulerAngles.h b/Eigen/src/Geometry/EulerAngles.h >--- a/Eigen/src/Geometry/EulerAngles.h >+++ b/Eigen/src/Geometry/EulerAngles.h >@@ -27,56 +27,57 @@ namespace Eigen { > * * AngleAxisf(ea[1], Vector3f::UnitX()) > * * AngleAxisf(ea[2], Vector3f::UnitZ()); \endcode > * This corresponds to the right-multiply conventions (with right hand side frames). > */ > template<typename Derived> > inline Matrix<typename MatrixBase<Derived>::Scalar,3,1> > MatrixBase<Derived>::eulerAngles(Index a0, Index a1, Index a2) const > { >+ using std::atan2; > /* Implemented from Graphics Gems IV */ > EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Derived,3,3) > > Matrix<Scalar,3,1> res; > typedef Matrix<typename Derived::Scalar,2,1> Vector2; > const Scalar epsilon = NumTraits<Scalar>::dummy_precision(); > > const Index odd = ((a0+1)%3 == a1) ? 
0 : 1; > const Index i = a0; > const Index j = (a0 + 1 + odd)%3; > const Index k = (a0 + 2 - odd)%3; > > if (a0==a2) > { > Scalar s = Vector2(coeff(j,i) , coeff(k,i)).norm(); >- res[1] = internal::atan2(s, coeff(i,i)); >+ res[1] = atan2(s, coeff(i,i)); > if (s > epsilon) > { >- res[0] = internal::atan2(coeff(j,i), coeff(k,i)); >- res[2] = internal::atan2(coeff(i,j),-coeff(i,k)); >+ res[0] = atan2(coeff(j,i), coeff(k,i)); >+ res[2] = atan2(coeff(i,j),-coeff(i,k)); > } > else > { > res[0] = Scalar(0); >- res[2] = (coeff(i,i)>0?1:-1)*internal::atan2(-coeff(k,j), coeff(j,j)); >+ res[2] = (coeff(i,i)>0?1:-1)*atan2(-coeff(k,j), coeff(j,j)); > } > } > else > { > Scalar c = Vector2(coeff(i,i) , coeff(i,j)).norm(); >- res[1] = internal::atan2(-coeff(i,k), c); >+ res[1] = atan2(-coeff(i,k), c); > if (c > epsilon) > { >- res[0] = internal::atan2(coeff(j,k), coeff(k,k)); >- res[2] = internal::atan2(coeff(i,j), coeff(i,i)); >+ res[0] = atan2(coeff(j,k), coeff(k,k)); >+ res[2] = atan2(coeff(i,j), coeff(i,i)); > } > else > { > res[0] = Scalar(0); >- res[2] = (coeff(i,k)>0?1:-1)*internal::atan2(-coeff(k,j), coeff(j,j)); >+ res[2] = (coeff(i,k)>0?1:-1)*atan2(-coeff(k,j), coeff(j,j)); > } > } > if (!odd) > res = -res; > return res; > } > > } // end namespace Eigen >diff --git a/Eigen/src/Geometry/Hyperplane.h b/Eigen/src/Geometry/Hyperplane.h >--- a/Eigen/src/Geometry/Hyperplane.h >+++ b/Eigen/src/Geometry/Hyperplane.h >@@ -130,17 +130,17 @@ public: > /** \returns the signed distance between the plane \c *this and a point \a p. > * \sa absDistance() > */ > inline Scalar signedDistance(const VectorType& p) const { return normal().dot(p) + offset(); } > > /** \returns the absolute distance between the plane \c *this and a point \a p. > * \sa signedDistance() > */ >- inline Scalar absDistance(const VectorType& p) const { return internal::abs(signedDistance(p)); } >+ inline Scalar absDistance(const VectorType& p) const { using std::abs; return abs(signedDistance(p)); } > > /** \returns the projection of a point \a p onto the plane \c *this. > */ > inline VectorType projection(const VectorType& p) const { return p - signedDistance(p) * normal(); } > > /** \returns a constant reference to the unit normal vector of the plane, which corresponds > * to the linear part of the implicit equation. > */ >@@ -173,23 +173,24 @@ public: > /** \returns the intersection of *this with \a other. > * > * \warning The ambient space must be a plane, i.e. have dimension 2, so that \c *this and \a other are lines. > * > * \note If \a other is approximately parallel to *this, this method will return any point on *this. > */ > VectorType intersection(const Hyperplane& other) const > { >+ using std::abs; > EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2) > Scalar det = coeffs().coeff(0) * other.coeffs().coeff(1) - coeffs().coeff(1) * other.coeffs().coeff(0); > // since the line equations ax+by=c are normalized with a^2+b^2=1, the following tests > // whether the two lines are approximately parallel. > if(internal::isMuchSmallerThan(det, Scalar(1))) > { // special case where the two lines are approximately parallel. Pick any point on the first line. 
>- if(internal::abs(coeffs().coeff(1))>internal::abs(coeffs().coeff(0))) >+ if(abs(coeffs().coeff(1))>abs(coeffs().coeff(0))) > return VectorType(coeffs().coeff(1), -coeffs().coeff(2)/coeffs().coeff(1)-coeffs().coeff(0)); > else > return VectorType(-coeffs().coeff(2)/coeffs().coeff(0)-coeffs().coeff(1), coeffs().coeff(0)); > } > else > { // general case > Scalar invdet = Scalar(1) / det; > return VectorType(invdet*(coeffs().coeff(1)*other.coeffs().coeff(2)-other.coeffs().coeff(1)*coeffs().coeff(2)), >diff --git a/Eigen/src/Geometry/ParametrizedLine.h b/Eigen/src/Geometry/ParametrizedLine.h >--- a/Eigen/src/Geometry/ParametrizedLine.h >+++ b/Eigen/src/Geometry/ParametrizedLine.h >@@ -82,17 +82,17 @@ public: > RealScalar squaredDistance(const VectorType& p) const > { > VectorType diff = p - origin(); > return (diff - direction().dot(diff) * direction()).squaredNorm(); > } > /** \returns the distance of a point \a p to its projection onto the line \c *this. > * \sa squaredDistance() > */ >- RealScalar distance(const VectorType& p) const { return internal::sqrt(squaredDistance(p)); } >+ RealScalar distance(const VectorType& p) const { using std::sqrt; return sqrt(squaredDistance(p)); } > > /** \returns the projection of a point \a p onto the line \c *this. */ > VectorType projection(const VectorType& p) const > { return origin() + direction().dot(p-origin()) * direction(); } > > VectorType pointAt(const Scalar& t) const; > > template <int OtherOptions> >diff --git a/Eigen/src/Geometry/Quaternion.h b/Eigen/src/Geometry/Quaternion.h >--- a/Eigen/src/Geometry/Quaternion.h >+++ b/Eigen/src/Geometry/Quaternion.h >@@ -500,19 +500,21 @@ EIGEN_STRONG_INLINE Derived& QuaternionB > return derived(); > } > > /** Set \c *this from an angle-axis \a aa and returns a reference to \c *this > */ > template<class Derived> > EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator=(const AngleAxisType& aa) > { >+ using std::cos; >+ using std::sin; > Scalar ha = Scalar(0.5)*aa.angle(); // Scalar(0.5) to suppress precision loss warnings >- this->w() = internal::cos(ha); >- this->vec() = internal::sin(ha) * aa.axis(); >+ this->w() = cos(ha); >+ this->vec() = sin(ha) * aa.axis(); > return derived(); > } > > /** Set \c *this from the expression \a xpr: > * - if \a xpr is a 4x1 vector, then \a xpr is assumed to be a quaternion > * - if \a xpr is a 3x3 matrix, then \a xpr is assumed to be rotation matrix > * and \a xpr is converted to a quaternion > */ >@@ -576,16 +578,17 @@ QuaternionBase<Derived>::toRotationMatri > * Note that the two input vectors do \b not have to be normalized, and > * do not need to have the same norm. > */ > template<class Derived> > template<typename Derived1, typename Derived2> > inline Derived& QuaternionBase<Derived>::setFromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b) > { > using std::max; >+ using std::sqrt; > Vector3 v0 = a.normalized(); > Vector3 v1 = b.normalized(); > Scalar c = v1.dot(v0); > > // if dot == -1, vectors are nearly opposites > // => accuraletly compute the rotation axis by computing the > // intersection of the two planes. 
This is done by solving: > // x^T v0 = 0 >@@ -596,22 +599,22 @@ inline Derived& QuaternionBase<Derived>: > if (c < Scalar(-1)+NumTraits<Scalar>::dummy_precision()) > { > c = max<Scalar>(c,-1); > Matrix<Scalar,2,3> m; m << v0.transpose(), v1.transpose(); > JacobiSVD<Matrix<Scalar,2,3> > svd(m, ComputeFullV); > Vector3 axis = svd.matrixV().col(2); > > Scalar w2 = (Scalar(1)+c)*Scalar(0.5); >- this->w() = internal::sqrt(w2); >- this->vec() = axis * internal::sqrt(Scalar(1) - w2); >+ this->w() = sqrt(w2); >+ this->vec() = axis * sqrt(Scalar(1) - w2); > return derived(); > } > Vector3 axis = v0.cross(v1); >- Scalar s = internal::sqrt((Scalar(1)+c)*Scalar(2)); >+ Scalar s = sqrt((Scalar(1)+c)*Scalar(2)); > Scalar invs = Scalar(1)/s; > this->vec() = axis * invs; > this->w() = s * Scalar(0.5); > > return derived(); > } > > >@@ -672,67 +675,71 @@ QuaternionBase<Derived>::conjugate() con > * \sa dot() > */ > template <class Derived> > template <class OtherDerived> > inline typename internal::traits<Derived>::Scalar > QuaternionBase<Derived>::angularDistance(const QuaternionBase<OtherDerived>& other) const > { > using std::acos; >- double d = internal::abs(this->dot(other)); >+ using std::abs; >+ double d = abs(this->dot(other)); > if (d>=1.0) > return Scalar(0); > return static_cast<Scalar>(2 * acos(d)); > } > > /** \returns the spherical linear interpolation between the two quaternions > * \c *this and \a other at the parameter \a t > */ > template <class Derived> > template <class OtherDerived> > Quaternion<typename internal::traits<Derived>::Scalar> > QuaternionBase<Derived>::slerp(Scalar t, const QuaternionBase<OtherDerived>& other) const > { > using std::acos; >+ using std::sin; >+ using std::abs; > static const Scalar one = Scalar(1) - NumTraits<Scalar>::epsilon(); > Scalar d = this->dot(other); >- Scalar absD = internal::abs(d); >+ Scalar absD = abs(d); > > Scalar scale0; > Scalar scale1; > > if(absD>=one) > { > scale0 = Scalar(1) - t; > scale1 = t; > } > else > { > // theta is the angle between the 2 quaternions > Scalar theta = acos(absD); >- Scalar sinTheta = internal::sin(theta); >+ Scalar sinTheta = sin(theta); > >- scale0 = internal::sin( ( Scalar(1) - t ) * theta) / sinTheta; >- scale1 = internal::sin( ( t * theta) ) / sinTheta; >+ scale0 = sin( ( Scalar(1) - t ) * theta) / sinTheta; >+ scale1 = sin( ( t * theta) ) / sinTheta; > } > if(d<0) scale1 = -scale1; > > return Quaternion<Scalar>(scale0 * coeffs() + scale1 * other.coeffs()); > } > > namespace internal { > > // set from a rotation matrix > template<typename Other> > struct quaternionbase_assign_impl<Other,3,3> > { > typedef typename Other::Scalar Scalar; > typedef DenseIndex Index; > template<class Derived> static inline void run(QuaternionBase<Derived>& q, const Other& mat) > { >+ using std::sqrt; > // This algorithm comes from "Quaternion Calculus and Fast Animation", > // Ken Shoemake, 1987 SIGGRAPH course notes > Scalar t = mat.trace(); > if (t > Scalar(0)) > { > t = sqrt(t + Scalar(1.0)); > q.w() = Scalar(0.5)*t; > t = Scalar(0.5)/t; >diff --git a/Eigen/src/Geometry/Rotation2D.h b/Eigen/src/Geometry/Rotation2D.h >--- a/Eigen/src/Geometry/Rotation2D.h >+++ b/Eigen/src/Geometry/Rotation2D.h >@@ -128,27 +128,30 @@ typedef Rotation2D<double> Rotation2Dd; > /** Set \c *this from a 2x2 rotation matrix \a mat. > * In other words, this function extract the rotation angle > * from the rotation matrix. 
> */ > template<typename Scalar> > template<typename Derived> > Rotation2D<Scalar>& Rotation2D<Scalar>::fromRotationMatrix(const MatrixBase<Derived>& mat) > { >+ using std::atan2; > EIGEN_STATIC_ASSERT(Derived::RowsAtCompileTime==2 && Derived::ColsAtCompileTime==2,YOU_MADE_A_PROGRAMMING_MISTAKE) >- m_angle = internal::atan2(mat.coeff(1,0), mat.coeff(0,0)); >+ m_angle = atan2(mat.coeff(1,0), mat.coeff(0,0)); > return *this; > } > > /** Constructs and \returns an equivalent 2x2 rotation matrix. > */ > template<typename Scalar> > typename Rotation2D<Scalar>::Matrix2 > Rotation2D<Scalar>::toRotationMatrix(void) const > { >- Scalar sinA = internal::sin(m_angle); >- Scalar cosA = internal::cos(m_angle); >+ using std::sin; >+ using std::cos; >+ Scalar sinA = sin(m_angle); >+ Scalar cosA = cos(m_angle); > return (Matrix2() << cosA, -sinA, sinA, cosA).finished(); > } > > } // end namespace Eigen > > #endif // EIGEN_ROTATION2D_H >diff --git a/Eigen/src/Householder/Householder.h b/Eigen/src/Householder/Householder.h >--- a/Eigen/src/Householder/Householder.h >+++ b/Eigen/src/Householder/Householder.h >@@ -62,31 +62,32 @@ void MatrixBase<Derived>::makeHouseholde > */ > template<typename Derived> > template<typename EssentialPart> > void MatrixBase<Derived>::makeHouseholder( > EssentialPart& essential, > Scalar& tau, > RealScalar& beta) const > { >+ using std::sqrt; > EIGEN_STATIC_ASSERT_VECTOR_ONLY(EssentialPart) > VectorBlock<const Derived, EssentialPart::SizeAtCompileTime> tail(derived(), 1, size()-1); > > RealScalar tailSqNorm = size()==1 ? RealScalar(0) : tail.squaredNorm(); > Scalar c0 = coeff(0); > > if(tailSqNorm == RealScalar(0) && internal::imag(c0)==RealScalar(0)) > { > tau = RealScalar(0); > beta = internal::real(c0); > essential.setZero(); > } > else > { >- beta = internal::sqrt(internal::abs2(c0) + tailSqNorm); >+ beta = sqrt(internal::abs2(c0) + tailSqNorm); > if (internal::real(c0)>=RealScalar(0)) > beta = -beta; > essential = tail / (c0 - beta); > tau = internal::conj((beta - c0) / beta); > } > } > > /** Apply the elementary reflector H given by >diff --git a/Eigen/src/Jacobi/Jacobi.h b/Eigen/src/Jacobi/Jacobi.h >--- a/Eigen/src/Jacobi/Jacobi.h >+++ b/Eigen/src/Jacobi/Jacobi.h >@@ -76,39 +76,41 @@ template<typename Scalar> class JacobiRo > /** Makes \c *this as a Jacobi rotation \a J such that applying \a J on both the right and left sides of the selfadjoint 2x2 matrix > * \f$ B = \left ( \begin{array}{cc} x & y \\ \overline y & z \end{array} \right )\f$ yields a diagonal matrix \f$ A = J^* B J \f$ > * > * \sa MatrixBase::makeJacobi(const MatrixBase<Derived>&, Index, Index), MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight() > */ > template<typename Scalar> > bool JacobiRotation<Scalar>::makeJacobi(RealScalar x, Scalar y, RealScalar z) > { >+ using std::sqrt; >+ using std::abs; > typedef typename NumTraits<Scalar>::Real RealScalar; > if(y == Scalar(0)) > { > m_c = Scalar(1); > m_s = Scalar(0); > return false; > } > else > { >- RealScalar tau = (x-z)/(RealScalar(2)*internal::abs(y)); >- RealScalar w = internal::sqrt(internal::abs2(tau) + RealScalar(1)); >+ RealScalar tau = (x-z)/(RealScalar(2)*abs(y)); >+ RealScalar w = sqrt(internal::abs2(tau) + RealScalar(1)); > RealScalar t; > if(tau>RealScalar(0)) > { > t = RealScalar(1) / (tau + w); > } > else > { > t = RealScalar(1) / (tau - w); > } > RealScalar sign_t = t > RealScalar(0) ? 
RealScalar(1) : RealScalar(-1); >- RealScalar n = RealScalar(1) / internal::sqrt(internal::abs2(t)+RealScalar(1)); >- m_s = - sign_t * (internal::conj(y) / internal::abs(y)) * internal::abs(t) * n; >+ RealScalar n = RealScalar(1) / sqrt(internal::abs2(t)+RealScalar(1)); >+ m_s = - sign_t * (internal::conj(y) / abs(y)) * abs(t) * n; > m_c = n; > return true; > } > } > > /** Makes \c *this as a Jacobi rotation \c J such that applying \a J on both the right and left sides of the 2x2 selfadjoint matrix > * \f$ B = \left ( \begin{array}{cc} \text{this}_{pp} & \text{this}_{pq} \\ (\text{this}_{pq})^* & \text{this}_{qq} \end{array} \right )\f$ yields > * a diagonal matrix \f$ A = J^* B J \f$ >@@ -147,97 +149,101 @@ void JacobiRotation<Scalar>::makeGivens( > makeGivens(p, q, z, typename internal::conditional<NumTraits<Scalar>::IsComplex, internal::true_type, internal::false_type>::type()); > } > > > // specialization for complexes > template<typename Scalar> > void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::true_type) > { >+ using std::sqrt; >+ using std::abs; > if(q==Scalar(0)) > { > m_c = internal::real(p)<0 ? Scalar(-1) : Scalar(1); > m_s = 0; > if(r) *r = m_c * p; > } > else if(p==Scalar(0)) > { > m_c = 0; >- m_s = -q/internal::abs(q); >- if(r) *r = internal::abs(q); >+ m_s = -q/abs(q); >+ if(r) *r = abs(q); > } > else > { > RealScalar p1 = internal::norm1(p); > RealScalar q1 = internal::norm1(q); > if(p1>=q1) > { > Scalar ps = p / p1; > RealScalar p2 = internal::abs2(ps); > Scalar qs = q / p1; > RealScalar q2 = internal::abs2(qs); > >- RealScalar u = internal::sqrt(RealScalar(1) + q2/p2); >+ RealScalar u = sqrt(RealScalar(1) + q2/p2); > if(internal::real(p)<RealScalar(0)) > u = -u; > > m_c = Scalar(1)/u; > m_s = -qs*internal::conj(ps)*(m_c/p2); > if(r) *r = p * u; > } > else > { > Scalar ps = p / q1; > RealScalar p2 = internal::abs2(ps); > Scalar qs = q / q1; > RealScalar q2 = internal::abs2(qs); > >- RealScalar u = q1 * internal::sqrt(p2 + q2); >+ RealScalar u = q1 * sqrt(p2 + q2); > if(internal::real(p)<RealScalar(0)) > u = -u; > >- p1 = internal::abs(p); >+ p1 = abs(p); > ps = p/p1; > m_c = p1/u; > m_s = -internal::conj(ps) * (q/u); > if(r) *r = ps * u; > } > } > } > > // specialization for reals > template<typename Scalar> > void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::false_type) > { >+ using std::sqrt; >+ using std::abs; > if(q==Scalar(0)) > { > m_c = p<Scalar(0) ? Scalar(-1) : Scalar(1); > m_s = Scalar(0); >- if(r) *r = internal::abs(p); >+ if(r) *r = abs(p); > } > else if(p==Scalar(0)) > { > m_c = Scalar(0); > m_s = q<Scalar(0) ? 
Scalar(1) : Scalar(-1); >- if(r) *r = internal::abs(q); >+ if(r) *r = abs(q); > } >- else if(internal::abs(p) > internal::abs(q)) >+ else if(abs(p) > abs(q)) > { > Scalar t = q/p; >- Scalar u = internal::sqrt(Scalar(1) + internal::abs2(t)); >+ Scalar u = sqrt(Scalar(1) + internal::abs2(t)); > if(p<Scalar(0)) > u = -u; > m_c = Scalar(1)/u; > m_s = -t * m_c; > if(r) *r = p * u; > } > else > { > Scalar t = p/q; >- Scalar u = internal::sqrt(Scalar(1) + internal::abs2(t)); >+ Scalar u = sqrt(Scalar(1) + internal::abs2(t)); > if(q<Scalar(0)) > u = -u; > m_s = -Scalar(1)/u; > m_c = -t * m_s; > if(r) *r = q * u; > } > > } >diff --git a/Eigen/src/LU/FullPivLU.h b/Eigen/src/LU/FullPivLU.h >--- a/Eigen/src/LU/FullPivLU.h >+++ b/Eigen/src/LU/FullPivLU.h >@@ -288,21 +288,22 @@ template<typename _MatrixType> class Ful > /** \returns the rank of the matrix of which *this is the LU decomposition. > * > * \note This method has to determine which pivots should be considered nonzero. > * For that, it uses the threshold value that you can control by calling > * setThreshold(const RealScalar&). > */ > inline Index rank() const > { >+ using std::abs; > eigen_assert(m_isInitialized && "LU is not initialized."); >- RealScalar premultiplied_threshold = internal::abs(m_maxpivot) * threshold(); >+ RealScalar premultiplied_threshold = abs(m_maxpivot) * threshold(); > Index result = 0; > for(Index i = 0; i < m_nonzero_pivots; ++i) >- result += (internal::abs(m_lu.coeff(i,i)) > premultiplied_threshold); >+ result += (abs(m_lu.coeff(i,i)) > premultiplied_threshold); > return result; > } > > /** \returns the dimension of the kernel of the matrix of which *this is the LU decomposition. > * > * \note This method has to determine which pivots should be considered nonzero. > * For that, it uses the threshold value that you can control by calling > * setThreshold(const RealScalar&). >@@ -542,16 +543,17 @@ struct kernel_retval<FullPivLU<_MatrixTy > > enum { MaxSmallDimAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED( > MatrixType::MaxColsAtCompileTime, > MatrixType::MaxRowsAtCompileTime) > }; > > template<typename Dest> void evalTo(Dest& dst) const > { >+ using std::abs; > const Index cols = dec().matrixLU().cols(), dimker = cols - rank(); > if(dimker == 0) > { > // The Kernel is just {0}, so it doesn't have a basis properly speaking, but let's > // avoid crashing/asserting as that depends on floating point calculations. Let's > // just return a single column vector filled with zeros. > dst.setZero(); > return; >@@ -627,16 +629,17 @@ struct image_retval<FullPivLU<_MatrixTyp > > enum { MaxSmallDimAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED( > MatrixType::MaxColsAtCompileTime, > MatrixType::MaxRowsAtCompileTime) > }; > > template<typename Dest> void evalTo(Dest& dst) const > { >+ using std::abs; > if(rank() == 0) > { > // The Image is just {0}, so it doesn't have a basis properly speaking, but let's > // avoid crashing/asserting as that depends on floating point calculations. Let's > // just return a single column vector filled with zeros. 
> dst.setZero(); > return; > } >diff --git a/Eigen/src/LU/Inverse.h b/Eigen/src/LU/Inverse.h >--- a/Eigen/src/LU/Inverse.h >+++ b/Eigen/src/LU/Inverse.h >@@ -50,16 +50,17 @@ struct compute_inverse_and_det_with_chec > static inline void run( > const MatrixType& matrix, > const typename MatrixType::RealScalar& absDeterminantThreshold, > ResultType& result, > typename ResultType::Scalar& determinant, > bool& invertible > ) > { >+ using std::abs; > determinant = matrix.coeff(0,0); > invertible = abs(determinant) > absDeterminantThreshold; > if(invertible) result.coeffRef(0,0) = typename ResultType::Scalar(1) / determinant; > } > }; > > /**************************** > *** Size 2 implementation *** >@@ -93,16 +94,17 @@ struct compute_inverse_and_det_with_chec > static inline void run( > const MatrixType& matrix, > const typename MatrixType::RealScalar& absDeterminantThreshold, > ResultType& inverse, > typename ResultType::Scalar& determinant, > bool& invertible > ) > { >+ using std::abs; > typedef typename ResultType::Scalar Scalar; > determinant = matrix.determinant(); > invertible = abs(determinant) > absDeterminantThreshold; > if(!invertible) return; > const Scalar invdet = Scalar(1) / determinant; > compute_inverse_size2_helper(matrix, invdet, inverse); > } > }; >@@ -162,16 +164,17 @@ struct compute_inverse_and_det_with_chec > static inline void run( > const MatrixType& matrix, > const typename MatrixType::RealScalar& absDeterminantThreshold, > ResultType& inverse, > typename ResultType::Scalar& determinant, > bool& invertible > ) > { >+ using std::abs; > typedef typename ResultType::Scalar Scalar; > Matrix<Scalar,3,1> cofactors_col0; > cofactors_col0.coeffRef(0) = cofactor_3x3<MatrixType,0,0>(matrix); > cofactors_col0.coeffRef(1) = cofactor_3x3<MatrixType,1,0>(matrix); > cofactors_col0.coeffRef(2) = cofactor_3x3<MatrixType,2,0>(matrix); > determinant = (cofactors_col0.cwiseProduct(matrix.col(0))).sum(); > invertible = abs(determinant) > absDeterminantThreshold; > if(!invertible) return; >@@ -246,16 +249,17 @@ struct compute_inverse_and_det_with_chec > static inline void run( > const MatrixType& matrix, > const typename MatrixType::RealScalar& absDeterminantThreshold, > ResultType& inverse, > typename ResultType::Scalar& determinant, > bool& invertible > ) > { >+ using std::abs; > determinant = matrix.determinant(); > invertible = abs(determinant) > absDeterminantThreshold; > if(invertible) compute_inverse<MatrixType, ResultType>::run(matrix, inverse); > } > }; > > /************************* > *** MatrixBase methods *** >diff --git a/Eigen/src/QR/ColPivHouseholderQR.h b/Eigen/src/QR/ColPivHouseholderQR.h >--- a/Eigen/src/QR/ColPivHouseholderQR.h >+++ b/Eigen/src/QR/ColPivHouseholderQR.h >@@ -176,21 +176,22 @@ template<typename _MatrixType> class Col > /** \returns the rank of the matrix of which *this is the QR decomposition. > * > * \note This method has to determine which pivots should be considered nonzero. > * For that, it uses the threshold value that you can control by calling > * setThreshold(const RealScalar&). 
> */ > inline Index rank() const > { >+ using std::abs; > eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); >- RealScalar premultiplied_threshold = internal::abs(m_maxpivot) * threshold(); >+ RealScalar premultiplied_threshold = abs(m_maxpivot) * threshold(); > Index result = 0; > for(Index i = 0; i < m_nonzero_pivots; ++i) >- result += (internal::abs(m_qr.coeff(i,i)) > premultiplied_threshold); >+ result += (abs(m_qr.coeff(i,i)) > premultiplied_threshold); > return result; > } > > /** \returns the dimension of the kernel of the matrix of which *this is the QR decomposition. > * > * \note This method has to determine which pivots should be considered nonzero. > * For that, it uses the threshold value that you can control by calling > * setThreshold(const RealScalar&). >@@ -337,32 +338,34 @@ template<typename _MatrixType> class Col > RealScalar m_prescribedThreshold, m_maxpivot; > Index m_nonzero_pivots; > Index m_det_pq; > }; > > template<typename MatrixType> > typename MatrixType::RealScalar ColPivHouseholderQR<MatrixType>::absDeterminant() const > { >+ using std::abs; > eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); > eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!"); >- return internal::abs(m_qr.diagonal().prod()); >+ return abs(m_qr.diagonal().prod()); > } > > template<typename MatrixType> > typename MatrixType::RealScalar ColPivHouseholderQR<MatrixType>::logAbsDeterminant() const > { > eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); > eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!"); > return m_qr.diagonal().cwiseAbs().array().log().sum(); > } > > template<typename MatrixType> > ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const MatrixType& matrix) > { >+ using std::abs; > Index rows = matrix.rows(); > Index cols = matrix.cols(); > Index size = matrix.diagonalSize(); > > m_qr = matrix; > m_hCoeffs.resize(size); > > m_temp.resize(cols); >@@ -421,17 +424,17 @@ ColPivHouseholderQR<MatrixType>& ColPivH > // generate the householder vector, store it below the diagonal > RealScalar beta; > m_qr.col(k).tail(rows-k).makeHouseholderInPlace(m_hCoeffs.coeffRef(k), beta); > > // apply the householder transformation to the diagonal coefficient > m_qr.coeffRef(k,k) = beta; > > // remember the maximum absolute value of diagonal coefficients >- if(internal::abs(beta) > m_maxpivot) m_maxpivot = internal::abs(beta); >+ if(abs(beta) > m_maxpivot) m_maxpivot = abs(beta); > > // apply the householder transformation > m_qr.bottomRightCorner(rows-k, cols-k-1) > .applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), m_hCoeffs.coeffRef(k), &m_temp.coeffRef(k+1)); > > // update our table of squared norms of the columns > m_colSqNorms.tail(cols-k-1) -= m_qr.row(k).tail(cols-k-1).cwiseAbs2(); > } >diff --git a/Eigen/src/QR/ColPivHouseholderQR_MKL.h b/Eigen/src/QR/ColPivHouseholderQR_MKL.h >--- a/Eigen/src/QR/ColPivHouseholderQR_MKL.h >+++ b/Eigen/src/QR/ColPivHouseholderQR_MKL.h >@@ -42,16 +42,17 @@ namespace Eigen { > > #define EIGEN_MKL_QR_COLPIV(EIGTYPE, MKLTYPE, MKLPREFIX, EIGCOLROW, MKLCOLROW) \ > template<> inline \ > ColPivHouseholderQR<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic> >& \ > ColPivHouseholderQR<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic> >::compute( \ > const Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>& matrix) \ > \ > { \ >+ 
using std::abs; \ > typedef Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic> MatrixType; \ > typedef MatrixType::Scalar Scalar; \ > typedef MatrixType::RealScalar RealScalar; \ > Index rows = matrix.rows();\ > Index cols = matrix.cols();\ > Index size = matrix.diagonalSize();\ > \ > m_qr = matrix;\ >@@ -66,20 +67,20 @@ ColPivHouseholderQR<Matrix<EIGTYPE, Dyna > m_colsPermutation.indices().setZero(); \ > \ > lapack_int lda = m_qr.outerStride(), i; \ > lapack_int matrix_order = MKLCOLROW; \ > LAPACKE_##MKLPREFIX##geqp3( matrix_order, rows, cols, (MKLTYPE*)m_qr.data(), lda, (lapack_int*)m_colsPermutation.indices().data(), (MKLTYPE*)m_hCoeffs.data()); \ > m_isInitialized = true; \ > m_maxpivot=m_qr.diagonal().cwiseAbs().maxCoeff(); \ > m_hCoeffs.adjointInPlace(); \ >- RealScalar premultiplied_threshold = internal::abs(m_maxpivot) * threshold(); \ >+ RealScalar premultiplied_threshold = abs(m_maxpivot) * threshold(); \ > lapack_int *perm = m_colsPermutation.indices().data(); \ > for(i=0;i<size;i++) { \ >- m_nonzero_pivots += (internal::abs(m_qr.coeff(i,i)) > premultiplied_threshold);\ >+ m_nonzero_pivots += (abs(m_qr.coeff(i,i)) > premultiplied_threshold);\ > } \ > for(i=0;i<cols;i++) perm[i]--;\ > \ > /*m_det_pq = (number_of_transpositions%2) ? -1 : 1; // TODO: It's not needed now; fix upon availability in Eigen */ \ > \ > return *this; \ > } > >diff --git a/Eigen/src/QR/FullPivHouseholderQR.h b/Eigen/src/QR/FullPivHouseholderQR.h >--- a/Eigen/src/QR/FullPivHouseholderQR.h >+++ b/Eigen/src/QR/FullPivHouseholderQR.h >@@ -196,21 +196,22 @@ template<typename _MatrixType> class Ful > /** \returns the rank of the matrix of which *this is the QR decomposition. > * > * \note This method has to determine which pivots should be considered nonzero. > * For that, it uses the threshold value that you can control by calling > * setThreshold(const RealScalar&). > */ > inline Index rank() const > { >+ using std::abs; > eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); >- RealScalar premultiplied_threshold = internal::abs(m_maxpivot) * threshold(); >+ RealScalar premultiplied_threshold = abs(m_maxpivot) * threshold(); > Index result = 0; > for(Index i = 0; i < m_nonzero_pivots; ++i) >- result += (internal::abs(m_qr.coeff(i,i)) > premultiplied_threshold); >+ result += (abs(m_qr.coeff(i,i)) > premultiplied_threshold); > return result; > } > > /** \returns the dimension of the kernel of the matrix of which *this is the QR decomposition. > * > * \note This method has to determine which pivots should be considered nonzero. > * For that, it uses the threshold value that you can control by calling > * setThreshold(const RealScalar&). 
>@@ -357,32 +358,34 @@ template<typename _MatrixType> class Ful > Index m_nonzero_pivots; > RealScalar m_precision; > Index m_det_pq; > }; > > template<typename MatrixType> > typename MatrixType::RealScalar FullPivHouseholderQR<MatrixType>::absDeterminant() const > { >+ using std::abs; > eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); > eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!"); >- return internal::abs(m_qr.diagonal().prod()); >+ return abs(m_qr.diagonal().prod()); > } > > template<typename MatrixType> > typename MatrixType::RealScalar FullPivHouseholderQR<MatrixType>::logAbsDeterminant() const > { > eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); > eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!"); > return m_qr.diagonal().cwiseAbs().array().log().sum(); > } > > template<typename MatrixType> > FullPivHouseholderQR<MatrixType>& FullPivHouseholderQR<MatrixType>::compute(const MatrixType& matrix) > { >+ using std::abs; > Index rows = matrix.rows(); > Index cols = matrix.cols(); > Index size = (std::min)(rows,cols); > > m_qr = matrix; > m_hCoeffs.resize(size); > > m_temp.resize(cols); >@@ -434,17 +437,17 @@ FullPivHouseholderQR<MatrixType>& FullPi > ++number_of_transpositions; > } > > RealScalar beta; > m_qr.col(k).tail(rows-k).makeHouseholderInPlace(m_hCoeffs.coeffRef(k), beta); > m_qr.coeffRef(k,k) = beta; > > // remember the maximum absolute value of diagonal coefficients >- if(internal::abs(beta) > m_maxpivot) m_maxpivot = internal::abs(beta); >+ if(abs(beta) > m_maxpivot) m_maxpivot = abs(beta); > > m_qr.bottomRightCorner(rows-k, cols-k-1) > .applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), m_hCoeffs.coeffRef(k), &m_temp.coeffRef(k+1)); > } > > m_cols_permutation.setIdentity(cols); > for(Index k = 0; k < size; ++k) > m_cols_permutation.applyTranspositionOnTheRight(k, m_cols_transpositions.coeff(k)); >diff --git a/Eigen/src/QR/HouseholderQR.h b/Eigen/src/QR/HouseholderQR.h >--- a/Eigen/src/QR/HouseholderQR.h >+++ b/Eigen/src/QR/HouseholderQR.h >@@ -176,19 +176,20 @@ template<typename _MatrixType> class Hou > HCoeffsType m_hCoeffs; > RowVectorType m_temp; > bool m_isInitialized; > }; > > template<typename MatrixType> > typename MatrixType::RealScalar HouseholderQR<MatrixType>::absDeterminant() const > { >+ using std::abs; > eigen_assert(m_isInitialized && "HouseholderQR is not initialized."); > eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!"); >- return internal::abs(m_qr.diagonal().prod()); >+ return abs(m_qr.diagonal().prod()); > } > > template<typename MatrixType> > typename MatrixType::RealScalar HouseholderQR<MatrixType>::logAbsDeterminant() const > { > eigen_assert(m_isInitialized && "HouseholderQR is not initialized."); > eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!"); > return m_qr.diagonal().cwiseAbs().array().log().sum(); >diff --git a/Eigen/src/SVD/JacobiSVD.h b/Eigen/src/SVD/JacobiSVD.h >--- a/Eigen/src/SVD/JacobiSVD.h >+++ b/Eigen/src/SVD/JacobiSVD.h >@@ -354,16 +354,17 @@ template<typename MatrixType, int QRPrec > struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, true> > { > typedef JacobiSVD<MatrixType, QRPreconditioner> SVD; > typedef typename MatrixType::Scalar Scalar; > typedef typename MatrixType::RealScalar RealScalar; > typedef typename SVD::Index Index; > static void 
run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q) > { >+ using std::sqrt; > Scalar z; > JacobiRotation<Scalar> rot; > RealScalar n = sqrt(abs2(work_matrix.coeff(p,p)) + abs2(work_matrix.coeff(q,p))); > if(n==0) > { > z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q); > work_matrix.row(p) *= z; > if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z); >@@ -393,16 +394,17 @@ struct svd_precondition_2x2_block_to_be_ > } > }; > > template<typename MatrixType, typename RealScalar, typename Index> > void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q, > JacobiRotation<RealScalar> *j_left, > JacobiRotation<RealScalar> *j_right) > { >+ using std::sqrt; > Matrix<RealScalar,2,2> m; > m << real(matrix.coeff(p,p)), real(matrix.coeff(p,q)), > real(matrix.coeff(q,p)), real(matrix.coeff(q,q)); > JacobiRotation<RealScalar> rot1; > RealScalar t = m.coeff(0,0) + m.coeff(1,1); > RealScalar d = m.coeff(1,0) - m.coeff(0,1); > if(t == RealScalar(0)) > { >@@ -722,16 +724,17 @@ void JacobiSVD<MatrixType, QRPreconditio > if(m_cols>m_rows) m_qr_precond_morecols.allocate(*this); > if(m_rows>m_cols) m_qr_precond_morerows.allocate(*this); > } > > template<typename MatrixType, int QRPreconditioner> > JacobiSVD<MatrixType, QRPreconditioner>& > JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsigned int computationOptions) > { >+ using std::abs; > allocate(matrix.rows(), matrix.cols(), computationOptions); > > // currently we stop when we reach precision 2*epsilon as the last bit of precision can require an unreasonable number of iterations, > // only worsening the precision of U and V as we accumulate more rotations > const RealScalar precision = RealScalar(2) * NumTraits<Scalar>::epsilon(); > > // limit for very small denormal numbers to be considered zero in order to avoid infinite loops (see bug 286) > const RealScalar considerAsZero = RealScalar(2) * std::numeric_limits<RealScalar>::denorm_min(); >@@ -759,19 +762,19 @@ JacobiSVD<MatrixType, QRPreconditioner>: > for(Index p = 1; p < m_diagSize; ++p) > { > for(Index q = 0; q < p; ++q) > { > // if this 2x2 sub-matrix is not diagonal already... > // notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaN's don't > // keep us iterating forever. Similarly, small denormal numbers are considered zero. > using std::max; >- RealScalar threshold = (max)(considerAsZero, precision * (max)(internal::abs(m_workMatrix.coeff(p,p)), >- internal::abs(m_workMatrix.coeff(q,q)))); >- if((max)(internal::abs(m_workMatrix.coeff(p,q)),internal::abs(m_workMatrix.coeff(q,p))) > threshold) >+ RealScalar threshold = (max)(considerAsZero, precision * (max)(abs(m_workMatrix.coeff(p,p)), >+ abs(m_workMatrix.coeff(q,q)))); >+ if((max)(abs(m_workMatrix.coeff(p,q)),abs(m_workMatrix.coeff(q,p))) > threshold) > { > finished = false; > > // perform SVD decomposition of 2x2 sub-matrix corresponding to indices p,q to make it diagonal > internal::svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner>::run(m_workMatrix, *this, p, q); > JacobiRotation<RealScalar> j_left, j_right; > internal::real_2x2_jacobi_svd(m_workMatrix, p, q, &j_left, &j_right); > >@@ -785,17 +788,17 @@ JacobiSVD<MatrixType, QRPreconditioner>: > } > } > } > > /*** step 3. 
The work matrix is now diagonal, so ensure it's positive so its diagonal entries are the singular values ***/ > > for(Index i = 0; i < m_diagSize; ++i) > { >- RealScalar a = internal::abs(m_workMatrix.coeff(i,i)); >+ RealScalar a = abs(m_workMatrix.coeff(i,i)); > m_singularValues.coeffRef(i) = a; > if(computeU() && (a!=RealScalar(0))) m_matrixU.col(i) *= m_workMatrix.coeff(i,i)/a; > } > > /*** step 4. Sort singular values in descending order and compute the number of nonzero singular values ***/ > > m_nonzeroSingularValues = m_diagSize; > for(Index i = 0; i < m_diagSize; i++) >diff --git a/Eigen/src/SparseCholesky/SimplicialCholesky.h b/Eigen/src/SparseCholesky/SimplicialCholesky.h >--- a/Eigen/src/SparseCholesky/SimplicialCholesky.h >+++ b/Eigen/src/SparseCholesky/SimplicialCholesky.h >@@ -741,16 +741,18 @@ void SimplicialCholeskyBase<Derived>::an > m_factorizationIsOk = false; > } > > > template<typename Derived> > template<bool DoLDLT> > void SimplicialCholeskyBase<Derived>::factorize_preordered(const CholMatrixType& ap) > { >+ using std::sqrt; >+ > eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); > eigen_assert(ap.rows()==ap.cols()); > const Index size = ap.rows(); > eigen_assert(m_parent.size()==size); > eigen_assert(m_nonZerosPerCol.size()==size); > > const Index* Lp = m_matrix.outerIndexPtr(); > Index* Li = m_matrix.innerIndexPtr(); >@@ -825,17 +827,17 @@ void SimplicialCholeskyBase<Derived>::fa > else > { > Index p = Lp[k] + m_nonZerosPerCol[k]++; > Li[p] = k ; /* store L(k,k) = sqrt (d) in column k */ > if(d <= RealScalar(0)) { > ok = false; /* failure, matrix is not positive definite */ > break; > } >- Lx[p] = internal::sqrt(d) ; >+ Lx[p] = sqrt(d) ; > } > } > > m_info = ok ? Success : NumericalIssue; > m_factorizationIsOk = true; > } > > namespace internal { >diff --git a/Eigen/src/SparseCore/AmbiVector.h b/Eigen/src/SparseCore/AmbiVector.h >--- a/Eigen/src/SparseCore/AmbiVector.h >+++ b/Eigen/src/SparseCore/AmbiVector.h >@@ -286,30 +286,31 @@ class AmbiVector<_Scalar,_Index>::Iterat > * \param vec the vector on which we iterate > * \param epsilon the minimal value used to prune zero coefficients. > * In practice, all coefficients having a magnitude smaller than \a epsilon > * are skipped. 
> */ > Iterator(const AmbiVector& vec, RealScalar epsilon = 0) > : m_vector(vec) > { >+ using std::abs; > m_epsilon = epsilon; > m_isDense = m_vector.m_mode==IsDense; > if (m_isDense) > { > m_currentEl = 0; // this is to avoid a compilation warning > m_cachedValue = 0; // this is to avoid a compilation warning > m_cachedIndex = m_vector.m_start-1; > ++(*this); > } > else > { > ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer); > m_currentEl = m_vector.m_llStart; >- while (m_currentEl>=0 && internal::abs(llElements[m_currentEl].value)<=m_epsilon) >+ while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<=m_epsilon) > m_currentEl = llElements[m_currentEl].next; > if (m_currentEl<0) > { > m_cachedValue = 0; // this is to avoid a compilation warning > m_cachedIndex = -1; > } > else > { >@@ -321,32 +322,33 @@ class AmbiVector<_Scalar,_Index>::Iterat > > Index index() const { return m_cachedIndex; } > Scalar value() const { return m_cachedValue; } > > operator bool() const { return m_cachedIndex>=0; } > > Iterator& operator++() > { >+ using std::abs; > if (m_isDense) > { > do { > ++m_cachedIndex; >- } while (m_cachedIndex<m_vector.m_end && internal::abs(m_vector.m_buffer[m_cachedIndex])<m_epsilon); >+ } while (m_cachedIndex<m_vector.m_end && abs(m_vector.m_buffer[m_cachedIndex])<m_epsilon); > if (m_cachedIndex<m_vector.m_end) > m_cachedValue = m_vector.m_buffer[m_cachedIndex]; > else > m_cachedIndex=-1; > } > else > { > ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer); > do { > m_currentEl = llElements[m_currentEl].next; >- } while (m_currentEl>=0 && internal::abs(llElements[m_currentEl].value)<m_epsilon); >+ } while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<m_epsilon); > if (m_currentEl<0) > { > m_cachedIndex = -1; > } > else > { > m_cachedIndex = llElements[m_currentEl].index; > m_cachedValue = llElements[m_currentEl].value; >diff --git a/Eigen/src/SparseCore/SparseDot.h b/Eigen/src/SparseCore/SparseDot.h >--- a/Eigen/src/SparseCore/SparseDot.h >+++ b/Eigen/src/SparseCore/SparseDot.h >@@ -81,14 +81,15 @@ SparseMatrixBase<Derived>::squaredNorm() > { > return internal::real((*this).cwiseAbs2().sum()); > } > > template<typename Derived> > inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real > SparseMatrixBase<Derived>::norm() const > { >- return internal::sqrt(squaredNorm()); >+ using std::sqrt; >+ return sqrt(squaredNorm()); > } > > } // end namespace Eigen > > #endif // EIGEN_SPARSE_DOT_H >diff --git a/Eigen/src/SparseCore/SparseProduct.h b/Eigen/src/SparseCore/SparseProduct.h >--- a/Eigen/src/SparseCore/SparseProduct.h >+++ b/Eigen/src/SparseCore/SparseProduct.h >@@ -102,17 +102,18 @@ class SparseSparseProduct : internal::no > EIGEN_STRONG_INLINE SparseSparseProduct(const Lhs& lhs, const Rhs& rhs, const RealScalar& tolerance) > : m_lhs(lhs), m_rhs(rhs), m_tolerance(tolerance), m_conservative(false) > { > init(); > } > > SparseSparseProduct pruned(const Scalar& reference = 0, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision()) const > { >- return SparseSparseProduct(m_lhs,m_rhs,internal::abs(reference)*epsilon); >+ using std::abs; >+ return SparseSparseProduct(m_lhs,m_rhs,abs(reference)*epsilon); > } > > template<typename Dest> > void evalTo(Dest& result) const > { > if(m_conservative) > internal::conservative_sparse_sparse_product_selector<_LhsNested, _RhsNested, Dest>::run(lhs(),rhs(),result); > else >diff --git a/blas/level1_impl.h b/blas/level1_impl.h >--- 
a/blas/level1_impl.h >+++ b/blas/level1_impl.h >@@ -70,61 +70,64 @@ int EIGEN_CAT(EIGEN_CAT(i,SCALAR_SUFFIX) > DenseIndex ret; > if(*incx==1) vector(x,*n).cwiseAbs().minCoeff(&ret); > else vector(x,*n,std::abs(*incx)).cwiseAbs().minCoeff(&ret); > return ret+1; > } > > int EIGEN_BLAS_FUNC(rotg)(RealScalar *pa, RealScalar *pb, RealScalar *pc, RealScalar *ps) > { >+ using std::sqrt; >+ using std::abs; >+ > Scalar& a = *reinterpret_cast<Scalar*>(pa); > Scalar& b = *reinterpret_cast<Scalar*>(pb); > RealScalar* c = pc; > Scalar* s = reinterpret_cast<Scalar*>(ps); > > #if !ISCOMPLEX > Scalar r,z; >- Scalar aa = internal::abs(a); >- Scalar ab = internal::abs(b); >+ Scalar aa = abs(a); >+ Scalar ab = abs(b); > if((aa+ab)==Scalar(0)) > { > *c = 1; > *s = 0; > r = 0; > z = 0; > } > else > { >- r = internal::sqrt(a*a + b*b); >+ r = sqrt(a*a + b*b); > Scalar amax = aa>ab ? a : b; > r = amax>0 ? r : -r; > *c = a/r; > *s = b/r; > z = 1; > if (aa > ab) z = *s; > if (ab > aa && *c!=RealScalar(0)) > z = Scalar(1)/ *c; > } > *pa = r; > *pb = z; > #else > Scalar alpha; > RealScalar norm,scale; >- if(internal::abs(a)==RealScalar(0)) >+ if(abs(a)==RealScalar(0)) > { > *c = RealScalar(0); > *s = Scalar(1); > a = b; > } > else > { >- scale = internal::abs(a) + internal::abs(b); >- norm = scale*internal::sqrt((internal::abs2(a/scale))+ (internal::abs2(b/scale))); >- alpha = a/internal::abs(a); >- *c = internal::abs(a)/norm; >+ scale = abs(a) + abs(b); >+ norm = scale*sqrt((internal::abs2(a/scale))+ (internal::abs2(b/scale))); >+ alpha = a/abs(a); >+ *c = abs(a)/norm; > *s = alpha*internal::conj(b)/norm; > a = alpha*norm; > } > #endif > > // JacobiRotation<Scalar> r; > // r.makeGivens(a,b); > // *c = r.c(); >diff --git a/cmake/FindUmfpack.cmake b/cmake/FindUmfpack.cmake >--- a/cmake/FindUmfpack.cmake >+++ b/cmake/FindUmfpack.cmake >@@ -11,16 +11,18 @@ find_path(UMFPACK_INCLUDES > PATHS > $ENV{UMFPACKDIR} > ${INCLUDE_INSTALL_DIR} > PATH_SUFFIXES > suitesparse > ufsparse > ) > >+if(NOT UMFPACK_LIBRARIES) >+ > find_library(UMFPACK_LIBRARIES umfpack PATHS $ENV{UMFPACKDIR} ${LIB_INSTALL_DIR}) > > if(UMFPACK_LIBRARIES) > if (NOT UMFPACK_LIBDIR) > get_filename_component(UMFPACK_LIBDIR ${UMFPACK_LIBRARIES} PATH) > endif(NOT UMFPACK_LIBDIR) > > find_library(AMD_LIBRARY amd PATHS ${UMFPACK_LIBDIR} $ENV{UMFPACKDIR} ${LIB_INSTALL_DIR}) >@@ -38,14 +40,15 @@ if(UMFPACK_LIBRARIES) > if (COLAMD_LIBRARY) > set(UMFPACK_LIBRARIES ${UMFPACK_LIBRARIES} ${COLAMD_LIBRARY}) > #else (COLAMD_LIBRARY) > # set(UMFPACK_LIBRARIES FALSE) > endif (COLAMD_LIBRARY) > > endif(UMFPACK_LIBRARIES) > >+endif() > > include(FindPackageHandleStandardArgs) > find_package_handle_standard_args(UMFPACK DEFAULT_MSG > UMFPACK_INCLUDES UMFPACK_LIBRARIES) > > mark_as_advanced(UMFPACK_INCLUDES UMFPACK_LIBRARIES AMD_LIBRARY COLAMD_LIBRARY) >diff --git a/test/adjoint.cpp b/test/adjoint.cpp >--- a/test/adjoint.cpp >+++ b/test/adjoint.cpp >@@ -58,17 +58,17 @@ template<typename MatrixType> void adjoi > // check normalized() and normalize() > VERIFY_IS_APPROX(v1, v1.norm() * v1.normalized()); > v3 = v1; > v3.normalize(); > VERIFY_IS_APPROX(v1, v1.norm() * v3); > VERIFY_IS_APPROX(v3, v1.normalized()); > VERIFY_IS_APPROX(v3.norm(), RealScalar(1)); > } >- VERIFY_IS_MUCH_SMALLER_THAN(internal::abs(vzero.dot(v1)), static_cast<RealScalar>(1)); >+ VERIFY_IS_MUCH_SMALLER_THAN(abs(vzero.dot(v1)), static_cast<RealScalar>(1)); > > // check compatibility of dot and adjoint > > ref = NumTraits<Scalar>::IsInteger ? 
0 : (std::max)((std::max)(v1.norm(),v2.norm()),(std::max)((square * v2).norm(),(square.adjoint() * v1).norm())); > VERIFY(test_isApproxWithRef(v1.dot(square * v2), (square.adjoint() * v1).dot(v2), ref)); > > // like in testBasicStuff, test operator() to check const-qualification > Index r = internal::random<Index>(0, rows-1), >diff --git a/test/array.cpp b/test/array.cpp >--- a/test/array.cpp >+++ b/test/array.cpp >@@ -115,17 +115,17 @@ template<typename ArrayType> void compar > VERIFY( (m1 == m1(r,c) ).any() ); > > // test Select > VERIFY_IS_APPROX( (m1<m2).select(m1,m2), m1.cwiseMin(m2) ); > VERIFY_IS_APPROX( (m1>m2).select(m1,m2), m1.cwiseMax(m2) ); > Scalar mid = (m1.cwiseAbs().minCoeff() + m1.cwiseAbs().maxCoeff())/Scalar(2); > for (int j=0; j<cols; ++j) > for (int i=0; i<rows; ++i) >- m3(i,j) = internal::abs(m1(i,j))<mid ? 0 : m1(i,j); >+ m3(i,j) = abs(m1(i,j))<mid ? 0 : m1(i,j); > VERIFY_IS_APPROX( (m1.abs()<ArrayType::Constant(rows,cols,mid)) > .select(ArrayType::Zero(rows,cols),m1), m3); > // shorter versions: > VERIFY_IS_APPROX( (m1.abs()<ArrayType::Constant(rows,cols,mid)) > .select(0,m1), m3); > VERIFY_IS_APPROX( (m1.abs()>=ArrayType::Constant(rows,cols,mid)) > .select(m1,0), m3); > // even shorter version: >@@ -158,59 +158,59 @@ template<typename ArrayType> void array_ > > ArrayType m1 = ArrayType::Random(rows, cols), > m2 = ArrayType::Random(rows, cols), > m3(rows, cols); > > Scalar s1 = internal::random<Scalar>(); > > // these tests are mostly to check possible compilation issues. >- VERIFY_IS_APPROX(m1.sin(), std::sin(m1)); >- VERIFY_IS_APPROX(m1.sin(), internal::sin(m1)); >- VERIFY_IS_APPROX(m1.cos(), std::cos(m1)); >- VERIFY_IS_APPROX(m1.cos(), internal::cos(m1)); >- VERIFY_IS_APPROX(m1.asin(), std::asin(m1)); >- VERIFY_IS_APPROX(m1.asin(), internal::asin(m1)); >- VERIFY_IS_APPROX(m1.acos(), std::acos(m1)); >- VERIFY_IS_APPROX(m1.acos(), internal::acos(m1)); >- VERIFY_IS_APPROX(m1.tan(), std::tan(m1)); >- VERIFY_IS_APPROX(m1.tan(), internal::tan(m1)); >+// VERIFY_IS_APPROX(m1.sin(), std::sin(m1)); >+ VERIFY_IS_APPROX(m1.sin(), sin(m1)); >+// VERIFY_IS_APPROX(m1.cos(), std::cos(m1)); >+ VERIFY_IS_APPROX(m1.cos(), cos(m1)); >+// VERIFY_IS_APPROX(m1.asin(), std::asin(m1)); >+ VERIFY_IS_APPROX(m1.asin(), asin(m1)); >+// VERIFY_IS_APPROX(m1.acos(), std::acos(m1)); >+ VERIFY_IS_APPROX(m1.acos(), acos(m1)); >+// VERIFY_IS_APPROX(m1.tan(), std::tan(m1)); >+ VERIFY_IS_APPROX(m1.tan(), tan(m1)); > >- VERIFY_IS_APPROX(internal::cos(m1+RealScalar(3)*m2), internal::cos((m1+RealScalar(3)*m2).eval())); >- VERIFY_IS_APPROX(std::cos(m1+RealScalar(3)*m2), std::cos((m1+RealScalar(3)*m2).eval())); >+ VERIFY_IS_APPROX(cos(m1+RealScalar(3)*m2), cos((m1+RealScalar(3)*m2).eval())); >+// VERIFY_IS_APPROX(std::cos(m1+RealScalar(3)*m2), std::cos((m1+RealScalar(3)*m2).eval())); > >- VERIFY_IS_APPROX(m1.abs().sqrt(), std::sqrt(std::abs(m1))); >- VERIFY_IS_APPROX(m1.abs().sqrt(), internal::sqrt(internal::abs(m1))); >- VERIFY_IS_APPROX(m1.abs(), internal::sqrt(internal::abs2(m1))); >+// VERIFY_IS_APPROX(m1.abs().sqrt(), std::sqrt(std::abs(m1))); >+ VERIFY_IS_APPROX(m1.abs().sqrt(), sqrt(abs(m1))); >+ VERIFY_IS_APPROX(m1.abs(), sqrt(internal::abs2(m1))); > > VERIFY_IS_APPROX(internal::abs2(internal::real(m1)) + internal::abs2(internal::imag(m1)), internal::abs2(m1)); >- VERIFY_IS_APPROX(internal::abs2(std::real(m1)) + internal::abs2(std::imag(m1)), internal::abs2(m1)); >+ VERIFY_IS_APPROX(internal::abs2(real(m1)) + internal::abs2(imag(m1)), internal::abs2(m1)); > if(!NumTraits<Scalar>::IsComplex) > 
VERIFY_IS_APPROX(internal::real(m1), m1); > >- VERIFY_IS_APPROX(m1.abs().log(), std::log(std::abs(m1))); >- VERIFY_IS_APPROX(m1.abs().log(), internal::log(internal::abs(m1))); >+ //VERIFY_IS_APPROX(m1.abs().log(), std::log(std::abs(m1))); >+ VERIFY_IS_APPROX(m1.abs().log(), log(abs(m1))); > >- VERIFY_IS_APPROX(m1.exp(), std::exp(m1)); >- VERIFY_IS_APPROX(m1.exp() * m2.exp(), std::exp(m1+m2)); >- VERIFY_IS_APPROX(m1.exp(), internal::exp(m1)); >- VERIFY_IS_APPROX(m1.exp() / m2.exp(), std::exp(m1-m2)); >+// VERIFY_IS_APPROX(m1.exp(), std::exp(m1)); >+ VERIFY_IS_APPROX(m1.exp() * m2.exp(), exp(m1+m2)); >+ VERIFY_IS_APPROX(m1.exp(), exp(m1)); >+ VERIFY_IS_APPROX(m1.exp() / m2.exp(),(m1-m2).exp()); > > VERIFY_IS_APPROX(m1.pow(2), m1.square()); >- VERIFY_IS_APPROX(std::pow(m1,2), m1.square()); >+ VERIFY_IS_APPROX(pow(m1,2), m1.square()); > > ArrayType exponents = ArrayType::Constant(rows, cols, RealScalar(2)); >- VERIFY_IS_APPROX(std::pow(m1,exponents), m1.square()); >+ VERIFY_IS_APPROX(Eigen::pow(m1,exponents), m1.square()); > > m3 = m1.abs(); > VERIFY_IS_APPROX(m3.pow(RealScalar(0.5)), m3.sqrt()); >- VERIFY_IS_APPROX(std::pow(m3,RealScalar(0.5)), m3.sqrt()); >+ VERIFY_IS_APPROX(pow(m3,RealScalar(0.5)), m3.sqrt()); > > // scalar by array division >- const RealScalar tiny = std::sqrt(std::numeric_limits<RealScalar>::epsilon()); >+ const RealScalar tiny = sqrt(std::numeric_limits<RealScalar>::epsilon()); > s1 += Scalar(tiny); > m1 += ArrayType::Constant(rows,cols,Scalar(tiny)); > VERIFY_IS_APPROX(s1/m1, s1 * m1.inverse()); > } > > template<typename ArrayType> void array_complex(const ArrayType& m) > { > typedef typename ArrayType::Index Index; >@@ -218,21 +218,21 @@ template<typename ArrayType> void array_ > Index rows = m.rows(); > Index cols = m.cols(); > > ArrayType m1 = ArrayType::Random(rows, cols), > m2(rows, cols); > > for (Index i = 0; i < m.rows(); ++i) > for (Index j = 0; j < m.cols(); ++j) >- m2(i,j) = std::sqrt(m1(i,j)); >+ m2(i,j) = sqrt(m1(i,j)); > > VERIFY_IS_APPROX(m1.sqrt(), m2); >- VERIFY_IS_APPROX(m1.sqrt(), std::sqrt(m1)); >- VERIFY_IS_APPROX(m1.sqrt(), internal::sqrt(m1)); >+// VERIFY_IS_APPROX(m1.sqrt(), std::sqrt(m1)); >+ VERIFY_IS_APPROX(m1.sqrt(), Eigen::sqrt(m1)); > } > > template<typename ArrayType> void min_max(const ArrayType& m) > { > typedef typename ArrayType::Index Index; > typedef typename ArrayType::Scalar Scalar; > > Index rows = m.rows(); >diff --git a/test/array_for_matrix.cpp b/test/array_for_matrix.cpp >--- a/test/array_for_matrix.cpp >+++ b/test/array_for_matrix.cpp >@@ -105,17 +105,17 @@ template<typename MatrixType> void compa > VERIFY( (m1.array() == m1(r,c) ).any() ); > > // test Select > VERIFY_IS_APPROX( (m1.array()<m2.array()).select(m1,m2), m1.cwiseMin(m2) ); > VERIFY_IS_APPROX( (m1.array()>m2.array()).select(m1,m2), m1.cwiseMax(m2) ); > Scalar mid = (m1.cwiseAbs().minCoeff() + m1.cwiseAbs().maxCoeff())/Scalar(2); > for (int j=0; j<cols; ++j) > for (int i=0; i<rows; ++i) >- m3(i,j) = internal::abs(m1(i,j))<mid ? 0 : m1(i,j); >+ m3(i,j) = abs(m1(i,j))<mid ? 
0 : m1(i,j); > VERIFY_IS_APPROX( (m1.array().abs()<MatrixType::Constant(rows,cols,mid).array()) > .select(MatrixType::Zero(rows,cols),m1), m3); > // shorter versions: > VERIFY_IS_APPROX( (m1.array().abs()<MatrixType::Constant(rows,cols,mid).array()) > .select(0,m1), m3); > VERIFY_IS_APPROX( (m1.array().abs()>=MatrixType::Constant(rows,cols,mid).array()) > .select(m1,0), m3); > // even shorter version: >@@ -128,21 +128,22 @@ template<typename MatrixType> void compa > > // TODO allows colwise/rowwise for array > VERIFY_IS_APPROX(((m1.array().abs()+1)>RealScalar(0.1)).matrix().colwise().count(), VectorOfIndices::Constant(cols,rows).transpose()); > VERIFY_IS_APPROX(((m1.array().abs()+1)>RealScalar(0.1)).matrix().rowwise().count(), VectorOfIndices::Constant(rows, cols)); > } > > template<typename VectorType> void lpNorm(const VectorType& v) > { >+ using std::sqrt; > VectorType u = VectorType::Random(v.size()); > > VERIFY_IS_APPROX(u.template lpNorm<Infinity>(), u.cwiseAbs().maxCoeff()); > VERIFY_IS_APPROX(u.template lpNorm<1>(), u.cwiseAbs().sum()); >- VERIFY_IS_APPROX(u.template lpNorm<2>(), internal::sqrt(u.array().abs().square().sum())); >+ VERIFY_IS_APPROX(u.template lpNorm<2>(), sqrt(u.array().abs().square().sum())); > VERIFY_IS_APPROX(internal::pow(u.template lpNorm<5>(), typename VectorType::RealScalar(5)), u.array().abs().pow(5).sum()); > } > > template<typename MatrixType> void cwise_min_max(const MatrixType& m) > { > typedef typename MatrixType::Index Index; > typedef typename MatrixType::Scalar Scalar; > >diff --git a/test/geo_alignedbox.cpp b/test/geo_alignedbox.cpp >--- a/test/geo_alignedbox.cpp >+++ b/test/geo_alignedbox.cpp >@@ -104,17 +104,17 @@ void specificTest1() > Vector2f sides = M-m; > VERIFY_IS_APPROX(sides, box.sizes() ); > VERIFY_IS_APPROX(sides[1], box.sizes()[1] ); > VERIFY_IS_APPROX(sides[1], box.sizes().maxCoeff() ); > VERIFY_IS_APPROX(sides[0], box.sizes().minCoeff() ); > > VERIFY_IS_APPROX( 14.0f, box.volume() ); > VERIFY_IS_APPROX( 53.0f, box.diagonal().squaredNorm() ); >- VERIFY_IS_APPROX( internal::sqrt( 53.0f ), box.diagonal().norm() ); >+ VERIFY_IS_APPROX( std::sqrt( 53.0f ), box.diagonal().norm() ); > > VERIFY_IS_APPROX( m, box.corner( BoxType::BottomLeft ) ); > VERIFY_IS_APPROX( M, box.corner( BoxType::TopRight ) ); > Vector2f bottomRight; bottomRight << M[0], m[1]; > Vector2f topLeft; topLeft << m[0], M[1]; > VERIFY_IS_APPROX( bottomRight, box.corner( BoxType::BottomRight ) ); > VERIFY_IS_APPROX( topLeft, box.corner( BoxType::TopLeft ) ); > } >diff --git a/test/geo_hyperplane.cpp b/test/geo_hyperplane.cpp >--- a/test/geo_hyperplane.cpp >+++ b/test/geo_hyperplane.cpp >@@ -85,17 +85,17 @@ template<typename Scalar> void lines() > typedef Matrix<Scalar,3,1> CoeffsType; > > for(int i = 0; i < 10; i++) > { > Vector center = Vector::Random(); > Vector u = Vector::Random(); > Vector v = Vector::Random(); > Scalar a = internal::random<Scalar>(); >- while (internal::abs(a-1) < 1e-4) a = internal::random<Scalar>(); >+ while (abs(a-1) < 1e-4) a = internal::random<Scalar>(); > while (u.norm() < 1e-4) u = Vector::Random(); > while (v.norm() < 1e-4) v = Vector::Random(); > > HLine line_u = HLine::Through(center + u, center + a*u); > HLine line_v = HLine::Through(center + v, center + a*v); > > // the line equations should be normalized so that a^2+b^2=1 > VERIFY_IS_APPROX(line_u.normal().norm(), Scalar(1)); >diff --git a/test/geo_parametrizedline.cpp b/test/geo_parametrizedline.cpp >--- a/test/geo_parametrizedline.cpp >+++ b/test/geo_parametrizedline.cpp >@@ -30,17 +30,17 
@@ template<typename LineType> void paramet > VectorType p0 = VectorType::Random(dim); > VectorType p1 = VectorType::Random(dim); > > VectorType d0 = VectorType::Random(dim).normalized(); > > LineType l0(p0, d0); > > Scalar s0 = internal::random<Scalar>(); >- Scalar s1 = internal::abs(internal::random<Scalar>()); >+ Scalar s1 = abs(internal::random<Scalar>()); > > VERIFY_IS_MUCH_SMALLER_THAN( l0.distance(p0), RealScalar(1) ); > VERIFY_IS_MUCH_SMALLER_THAN( l0.distance(p0+s0*d0), RealScalar(1) ); > VERIFY_IS_APPROX( (l0.projection(p1)-p1).norm(), l0.distance(p1) ); > VERIFY_IS_MUCH_SMALLER_THAN( l0.distance(l0.projection(p1)), RealScalar(1) ); > VERIFY_IS_APPROX( Scalar(l0.distance((p0+s0*d0) + d0.unitOrthogonal() * s1)), s1 ); > > // casting >diff --git a/test/geo_quaternion.cpp b/test/geo_quaternion.cpp >--- a/test/geo_quaternion.cpp >+++ b/test/geo_quaternion.cpp >@@ -31,19 +31,19 @@ template<typename QuatType> void check_s > > Scalar theta_tot = AA(q1*q0.inverse()).angle(); > if(theta_tot>M_PI) > theta_tot = 2.*M_PI-theta_tot; > for(Scalar t=0; t<=1.001; t+=0.1) > { > QuatType q = q0.slerp(t,q1); > Scalar theta = AA(q*q0.inverse()).angle(); >- VERIFY(internal::abs(q.norm() - 1) < largeEps); >+ VERIFY(abs(q.norm() - 1) < largeEps); > if(theta_tot==0) VERIFY(theta_tot==0); >- else VERIFY(internal::abs(theta/theta_tot - t) < largeEps); >+ else VERIFY(abs(theta/theta_tot - t) < largeEps); > } > } > > template<typename Scalar, int Options> void quaternion(void) > { > /* this test covers the following files: > Quaternion.h > */ >@@ -77,23 +77,23 @@ template<typename Scalar, int Options> v > > // concatenation > q1 *= q2; > > q1 = AngleAxisx(a, v0.normalized()); > q2 = AngleAxisx(a, v1.normalized()); > > // angular distance >- Scalar refangle = internal::abs(AngleAxisx(q1.inverse()*q2).angle()); >+ Scalar refangle = abs(AngleAxisx(q1.inverse()*q2).angle()); > if (refangle>Scalar(M_PI)) > refangle = Scalar(2)*Scalar(M_PI) - refangle; > > if((q1.coeffs()-q2.coeffs()).norm() > 10*largeEps) > { >- VERIFY_IS_MUCH_SMALLER_THAN(internal::abs(q1.angularDistance(q2) - refangle), Scalar(1)); >+ VERIFY_IS_MUCH_SMALLER_THAN(abs(q1.angularDistance(q2) - refangle), Scalar(1)); > } > > // rotation matrix conversion > VERIFY_IS_APPROX(q1 * v2, q1.toRotationMatrix() * v2); > VERIFY_IS_APPROX(q1 * q2 * v2, > q1.toRotationMatrix() * q2.toRotationMatrix() * v2); > > VERIFY( (q2*q1).isApprox(q1*q2, largeEps) >@@ -104,17 +104,17 @@ template<typename Scalar, int Options> v > > > // angle-axis conversion > AngleAxisx aa = AngleAxisx(q1); > VERIFY_IS_APPROX(q1 * v1, Quaternionx(aa) * v1); > > // Do not execute the test if the rotation angle is almost zero, or > // the rotation axis and v1 are almost parallel. 
>- if (internal::abs(aa.angle()) > 5*test_precision<Scalar>() >+ if (abs(aa.angle()) > 5*test_precision<Scalar>() > && (aa.axis() - v1.normalized()).norm() < 1.99 > && (aa.axis() + v1.normalized()).norm() < 1.99) > { > VERIFY_IS_NOT_APPROX(q1 * v1, Quaternionx(AngleAxisx(aa.angle()*2,aa.axis())) * v1); > } > > // from two vector creation > VERIFY_IS_APPROX( v2.normalized(),(q2.setFromTwoVectors(v1, v2)*v1).normalized()); >diff --git a/test/geo_transformations.cpp b/test/geo_transformations.cpp >--- a/test/geo_transformations.cpp >+++ b/test/geo_transformations.cpp >@@ -83,16 +83,17 @@ template<typename Scalar, int Mode, int > VERIFY_IS_APPROX((t0 * v1).template head<3>(), AlignedScaling3(v0) * v1); > } > > template<typename Scalar, int Mode, int Options> void transformations() > { > /* this test covers the following files: > Cross.h Quaternion.h, Transform.cpp > */ >+ using std::cos; > typedef Matrix<Scalar,2,2> Matrix2; > typedef Matrix<Scalar,3,3> Matrix3; > typedef Matrix<Scalar,4,4> Matrix4; > typedef Matrix<Scalar,2,1> Vector2; > typedef Matrix<Scalar,3,1> Vector3; > typedef Matrix<Scalar,4,1> Vector4; > typedef Quaternion<Scalar> Quaternionx; > typedef AngleAxis<Scalar> AngleAxisx; >@@ -110,17 +111,17 @@ template<typename Scalar, int Mode, int > v1 = Vector3::Random(); > Matrix3 matrot1, m; > > Scalar a = internal::random<Scalar>(-Scalar(M_PI), Scalar(M_PI)); > Scalar s0 = internal::random<Scalar>(); > > VERIFY_IS_APPROX(v0, AngleAxisx(a, v0.normalized()) * v0); > VERIFY_IS_APPROX(-v0, AngleAxisx(Scalar(M_PI), v0.unitOrthogonal()) * v0); >- VERIFY_IS_APPROX(internal::cos(a)*v0.squaredNorm(), v0.dot(AngleAxisx(a, v0.unitOrthogonal()) * v0)); >+ VERIFY_IS_APPROX(cos(a)*v0.squaredNorm(), v0.dot(AngleAxisx(a, v0.unitOrthogonal()) * v0)); > m = AngleAxisx(a, v0.normalized()).toRotationMatrix().adjoint(); > VERIFY_IS_APPROX(Matrix3::Identity(), m * AngleAxisx(a, v0.normalized())); > VERIFY_IS_APPROX(Matrix3::Identity(), AngleAxisx(a, v0.normalized()) * m); > > Quaternionx q1, q2; > q1 = AngleAxisx(a, v0.normalized()); > q2 = AngleAxisx(a, v1.normalized()); > >@@ -150,17 +151,17 @@ template<typename Scalar, int Mode, int > m = q1.toRotationMatrix(); > aa1 = m; > VERIFY_IS_APPROX(AngleAxisx(m).toRotationMatrix(), > Quaternionx(m).toRotationMatrix()); > > // Transform > // TODO complete the tests ! 
> a = 0; >- while (internal::abs(a)<Scalar(0.1)) >+ while (abs(a)<Scalar(0.1)) > a = internal::random<Scalar>(-Scalar(0.4)*Scalar(M_PI), Scalar(0.4)*Scalar(M_PI)); > q1 = AngleAxisx(a, v0.normalized()); > Transform3 t0, t1, t2; > > // first test setIdentity() and Identity() > t0.setIdentity(); > VERIFY_IS_APPROX(t0.matrix(), Transform3::MatrixType::Identity()); > t0.matrix().setZero(); >@@ -244,17 +245,17 @@ template<typename Scalar, int Mode, int > t5 = t5*t5; > VERIFY_IS_APPROX(t5, t4*t4); > > // 2D transformation > Transform2 t20, t21; > Vector2 v20 = Vector2::Random(); > Vector2 v21 = Vector2::Random(); > for (int k=0; k<2; ++k) >- if (internal::abs(v21[k])<Scalar(1e-3)) v21[k] = Scalar(1e-3); >+ if (abs(v21[k])<Scalar(1e-3)) v21[k] = Scalar(1e-3); > t21.setIdentity(); > t21.linear() = Rotation2D<Scalar>(a).toRotationMatrix(); > VERIFY_IS_APPROX(t20.fromPositionOrientationScale(v20,a,v21).matrix(), > t21.pretranslate(v20).scale(v21).matrix()); > > t21.setIdentity(); > t21.linear() = Rotation2D<Scalar>(-a).toRotationMatrix(); > VERIFY( (t20.fromPositionOrientationScale(v20,a,v21) >diff --git a/test/inverse.cpp b/test/inverse.cpp >--- a/test/inverse.cpp >+++ b/test/inverse.cpp >@@ -58,17 +58,17 @@ template<typename MatrixType> void inver > VERIFY(invertible); > VERIFY_IS_APPROX(identity, m1*m2); > > //Second: a rank one matrix (not invertible, except for 1x1 matrices) > VectorType v3 = VectorType::Random(rows); > MatrixType m3 = v3*v3.transpose(), m4(rows,cols); > m3.computeInverseAndDetWithCheck(m4, det, invertible); > VERIFY( rows==1 ? invertible : !invertible ); >- VERIFY_IS_MUCH_SMALLER_THAN(internal::abs(det-m3.determinant()), RealScalar(1)); >+ VERIFY_IS_MUCH_SMALLER_THAN(abs(det-m3.determinant()), RealScalar(1)); > m3.computeInverseWithCheck(m4, invertible); > VERIFY( rows==1 ? 
invertible : !invertible ); > #endif > > // check in-place inversion > if(MatrixType::RowsAtCompileTime>=2 && MatrixType::RowsAtCompileTime<=4) > { > // in-place is forbidden >diff --git a/test/linearstructure.cpp b/test/linearstructure.cpp >--- a/test/linearstructure.cpp >+++ b/test/linearstructure.cpp >@@ -22,17 +22,17 @@ template<typename MatrixType> void linea > > // this test relies a lot on Random.h, and there's not much more that we can do > // to test it, hence I consider that we will have tested Random.h > MatrixType m1 = MatrixType::Random(rows, cols), > m2 = MatrixType::Random(rows, cols), > m3(rows, cols); > > Scalar s1 = internal::random<Scalar>(); >- while (internal::abs(s1)<1e-3) s1 = internal::random<Scalar>(); >+ while (abs(s1)<1e-3) s1 = internal::random<Scalar>(); > > Index r = internal::random<Index>(0, rows-1), > c = internal::random<Index>(0, cols-1); > > VERIFY_IS_APPROX(-(-m1), m1); > VERIFY_IS_APPROX(m1+m1, 2*m1); > VERIFY_IS_APPROX(m1+m2-m1, m2); > VERIFY_IS_APPROX(-m2+m1+m2, m1); >diff --git a/test/main.h b/test/main.h >--- a/test/main.h >+++ b/test/main.h >@@ -378,16 +378,18 @@ template<> std::string type_name<int>() > template<> std::string type_name<std::complex<float> >() { return "complex<float>"; } > template<> std::string type_name<std::complex<double> >() { return "complex<double>"; } > template<> std::string type_name<std::complex<int> >() { return "complex<int>"; } > > // forward declaration of the main test function > void EIGEN_CAT(test_,EIGEN_TEST_FUNC)(); > > using namespace Eigen; >+using std::abs; >+using std::sqrt; > > void set_repeat_from_string(const char *str) > { > errno = 0; > g_repeat = int(strtoul(str, 0, 10)); > if(errno || g_repeat <= 0) > { > std::cout << "Invalid repeat value " << str << std::endl; >diff --git a/test/meta.cpp b/test/meta.cpp >--- a/test/meta.cpp >+++ b/test/meta.cpp >@@ -51,17 +51,17 @@ void test_meta() > > VERIFY(( internal::is_same<float,internal::remove_reference<float&>::type >::value)); > VERIFY(( internal::is_same<const float,internal::remove_reference<const float&>::type >::value)); > VERIFY(( internal::is_same<float,internal::remove_pointer<float*>::type >::value)); > VERIFY(( internal::is_same<const float,internal::remove_pointer<const float*>::type >::value)); > VERIFY(( internal::is_same<float,internal::remove_pointer<float* const >::type >::value)); > > VERIFY(internal::meta_sqrt<1>::ret == 1); >- #define VERIFY_META_SQRT(X) VERIFY(internal::meta_sqrt<X>::ret == int(internal::sqrt(double(X)))) >+ #define VERIFY_META_SQRT(X) VERIFY(internal::meta_sqrt<X>::ret == int(std::sqrt(double(X)))) > VERIFY_META_SQRT(2); > VERIFY_META_SQRT(3); > VERIFY_META_SQRT(4); > VERIFY_META_SQRT(5); > VERIFY_META_SQRT(6); > VERIFY_META_SQRT(8); > VERIFY_META_SQRT(9); > VERIFY_META_SQRT(15); >diff --git a/test/packetmath.cpp b/test/packetmath.cpp >--- a/test/packetmath.cpp >+++ b/test/packetmath.cpp >@@ -108,17 +108,17 @@ template<typename Scalar> void packetmat > EIGEN_ALIGN16 Scalar data2[internal::packet_traits<Scalar>::size*4]; > EIGEN_ALIGN16 Packet packets[PacketSize*2]; > EIGEN_ALIGN16 Scalar ref[internal::packet_traits<Scalar>::size*4]; > RealScalar refvalue = 0; > for (int i=0; i<size; ++i) > { > data1[i] = internal::random<Scalar>()/RealScalar(PacketSize); > data2[i] = internal::random<Scalar>()/RealScalar(PacketSize); >- refvalue = (std::max)(refvalue,internal::abs(data1[i])); >+ refvalue = (std::max)(refvalue,abs(data1[i])); > } > > internal::pstore(data2, internal::pload<Packet>(data1)); > VERIFY(areApprox(data1, 
data2, PacketSize) && "aligned load/store"); > > for (int offset=0; offset<PacketSize; ++offset) > { > internal::pstore(data2, internal::ploadu<Packet>(data1+offset)); >@@ -215,51 +215,51 @@ template<typename Scalar> void packetmat > EIGEN_ALIGN16 Scalar data2[internal::packet_traits<Scalar>::size*4]; > EIGEN_ALIGN16 Scalar ref[internal::packet_traits<Scalar>::size*4]; > > for (int i=0; i<size; ++i) > { > data1[i] = internal::random<Scalar>(-1e3,1e3); > data2[i] = internal::random<Scalar>(-1e3,1e3); > } >- CHECK_CWISE1_IF(internal::packet_traits<Scalar>::HasSin, internal::sin, internal::psin); >- CHECK_CWISE1_IF(internal::packet_traits<Scalar>::HasCos, internal::cos, internal::pcos); >- CHECK_CWISE1_IF(internal::packet_traits<Scalar>::HasTan, internal::tan, internal::ptan); >+ CHECK_CWISE1_IF(internal::packet_traits<Scalar>::HasSin, std::sin, internal::psin); >+ CHECK_CWISE1_IF(internal::packet_traits<Scalar>::HasCos, std::cos, internal::pcos); >+ CHECK_CWISE1_IF(internal::packet_traits<Scalar>::HasTan, std::tan, internal::ptan); > > for (int i=0; i<size; ++i) > { > data1[i] = internal::random<Scalar>(-1,1); > data2[i] = internal::random<Scalar>(-1,1); > } >- CHECK_CWISE1_IF(internal::packet_traits<Scalar>::HasASin, internal::asin, internal::pasin); >- CHECK_CWISE1_IF(internal::packet_traits<Scalar>::HasACos, internal::acos, internal::pacos); >+ CHECK_CWISE1_IF(internal::packet_traits<Scalar>::HasASin, std::asin, internal::pasin); >+ CHECK_CWISE1_IF(internal::packet_traits<Scalar>::HasACos, std::acos, internal::pacos); > > for (int i=0; i<size; ++i) > { > data1[i] = internal::random<Scalar>(-87,88); > data2[i] = internal::random<Scalar>(-87,88); > } >- CHECK_CWISE1_IF(internal::packet_traits<Scalar>::HasExp, internal::exp, internal::pexp); >+ CHECK_CWISE1_IF(internal::packet_traits<Scalar>::HasExp, std::exp, internal::pexp); > > for (int i=0; i<size; ++i) > { > data1[i] = internal::random<Scalar>(0,1e6); > data2[i] = internal::random<Scalar>(0,1e6); > } >- CHECK_CWISE1_IF(internal::packet_traits<Scalar>::HasLog, internal::log, internal::plog); >- CHECK_CWISE1_IF(internal::packet_traits<Scalar>::HasSqrt, internal::sqrt, internal::psqrt); >+ CHECK_CWISE1_IF(internal::packet_traits<Scalar>::HasLog, std::log, internal::plog); >+ CHECK_CWISE1_IF(internal::packet_traits<Scalar>::HasSqrt, std::sqrt, internal::psqrt); > > ref[0] = data1[0]; > for (int i=0; i<PacketSize; ++i) > ref[0] = (std::min)(ref[0],data1[i]); > VERIFY(internal::isApprox(ref[0], internal::predux_min(internal::pload<Packet>(data1))) && "internal::predux_min"); > > CHECK_CWISE2((std::min), internal::pmin); > CHECK_CWISE2((std::max), internal::pmax); >- CHECK_CWISE1(internal::abs, internal::pabs); >+ CHECK_CWISE1(abs, internal::pabs); > > ref[0] = data1[0]; > for (int i=0; i<PacketSize; ++i) > ref[0] = (std::max)(ref[0],data1[i]); > VERIFY(internal::isApprox(ref[0], internal::predux_max(internal::pload<Packet>(data1))) && "internal::predux_max"); > > for (int i=0; i<PacketSize; ++i) > ref[i] = data1[0]+Scalar(i); >diff --git a/test/pastix_support.cpp b/test/pastix_support.cpp >--- a/test/pastix_support.cpp >+++ b/test/pastix_support.cpp >@@ -36,9 +36,9 @@ template<typename T> void test_pastix_T_ > } > > void test_pastix_support() > { > CALL_SUBTEST_1(test_pastix_T<float>()); > CALL_SUBTEST_2(test_pastix_T<double>()); > CALL_SUBTEST_3( (test_pastix_T_LU<std::complex<float> >()) ); > CALL_SUBTEST_4(test_pastix_T_LU<std::complex<double> >()); >-} >\ No newline at end of file >+} >diff --git a/test/prec_inverse_4x4.cpp 
b/test/prec_inverse_4x4.cpp >--- a/test/prec_inverse_4x4.cpp >+++ b/test/prec_inverse_4x4.cpp >@@ -33,17 +33,17 @@ template<typename MatrixType> void inver > typedef typename MatrixType::RealScalar RealScalar; > double error_sum = 0., error_max = 0.; > for(int i = 0; i < repeat; ++i) > { > MatrixType m; > RealScalar absdet; > do { > m = MatrixType::Random(); >- absdet = internal::abs(m.determinant()); >+ absdet = abs(m.determinant()); > } while(absdet < NumTraits<Scalar>::epsilon()); > MatrixType inv = m.inverse(); > double error = double( (m*inv-MatrixType::Identity()).norm() * absdet / NumTraits<Scalar>::epsilon() ); > error_sum += error; > error_max = (std::max)(error_max, error); > } > std::cerr << "inverse_general_4x4, Scalar = " << type_name<Scalar>() << std::endl; > double error_avg = error_sum / repeat; >diff --git a/test/qr.cpp b/test/qr.cpp >--- a/test/qr.cpp >+++ b/test/qr.cpp >@@ -48,16 +48,17 @@ template<typename MatrixType, int Cols2> > Matrix<Scalar,Rows,Cols2> m3 = m1*m2; > m2 = Matrix<Scalar,Cols,Cols2>::Random(Cols,Cols2); > m2 = qr.solve(m3); > VERIFY_IS_APPROX(m3, m1*m2); > } > > template<typename MatrixType> void qr_invertible() > { >+ using std::log; > typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar; > typedef typename MatrixType::Scalar Scalar; > > int size = internal::random<int>(10,50); > > MatrixType m1(size, size), m2(size, size), m3(size, size); > m1 = MatrixType::Random(size,size); > >@@ -71,22 +72,22 @@ template<typename MatrixType> void qr_in > HouseholderQR<MatrixType> qr(m1); > m3 = MatrixType::Random(size,size); > m2 = qr.solve(m3); > VERIFY_IS_APPROX(m3, m1*m2); > > // now construct a matrix with prescribed determinant > m1.setZero(); > for(int i = 0; i < size; i++) m1(i,i) = internal::random<Scalar>(); >- RealScalar absdet = internal::abs(m1.diagonal().prod()); >+ RealScalar absdet = abs(m1.diagonal().prod()); > m3 = qr.householderQ(); // get a unitary > m1 = m3 * m1 * m3; > qr.compute(m1); > VERIFY_IS_APPROX(absdet, qr.absDeterminant()); >- VERIFY_IS_APPROX(internal::log(absdet), qr.logAbsDeterminant()); >+ VERIFY_IS_APPROX(log(absdet), qr.logAbsDeterminant()); > } > > template<typename MatrixType> void qr_verify_assert() > { > MatrixType tmp; > > HouseholderQR<MatrixType> qr; > VERIFY_RAISES_ASSERT(qr.matrixQR()) >diff --git a/test/qr_colpivoting.cpp b/test/qr_colpivoting.cpp >--- a/test/qr_colpivoting.cpp >+++ b/test/qr_colpivoting.cpp >@@ -67,16 +67,17 @@ template<typename MatrixType, int Cols2> > Matrix<Scalar,Rows,Cols2> m3 = m1*m2; > m2 = Matrix<Scalar,Cols,Cols2>::Random(Cols,Cols2); > m2 = qr.solve(m3); > VERIFY_IS_APPROX(m3, m1*m2); > } > > template<typename MatrixType> void qr_invertible() > { >+ using std::log; > typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar; > typedef typename MatrixType::Scalar Scalar; > > int size = internal::random<int>(10,50); > > MatrixType m1(size, size), m2(size, size), m3(size, size); > m1 = MatrixType::Random(size,size); > >@@ -90,22 +91,22 @@ template<typename MatrixType> void qr_in > ColPivHouseholderQR<MatrixType> qr(m1); > m3 = MatrixType::Random(size,size); > m2 = qr.solve(m3); > //VERIFY_IS_APPROX(m3, m1*m2); > > // now construct a matrix with prescribed determinant > m1.setZero(); > for(int i = 0; i < size; i++) m1(i,i) = internal::random<Scalar>(); >- RealScalar absdet = internal::abs(m1.diagonal().prod()); >+ RealScalar absdet = abs(m1.diagonal().prod()); > m3 = qr.householderQ(); // get a unitary > m1 = m3 * m1 * m3; > qr.compute(m1); > 
VERIFY_IS_APPROX(absdet, qr.absDeterminant()); >- VERIFY_IS_APPROX(internal::log(absdet), qr.logAbsDeterminant()); >+ VERIFY_IS_APPROX(log(absdet), qr.logAbsDeterminant()); > } > > template<typename MatrixType> void qr_verify_assert() > { > MatrixType tmp; > > ColPivHouseholderQR<MatrixType> qr; > VERIFY_RAISES_ASSERT(qr.matrixQR()) >diff --git a/test/qr_fullpivoting.cpp b/test/qr_fullpivoting.cpp >--- a/test/qr_fullpivoting.cpp >+++ b/test/qr_fullpivoting.cpp >@@ -46,16 +46,17 @@ template<typename MatrixType> void qr() > MatrixType m3 = m1*m2; > m2 = MatrixType::Random(cols,cols2); > m2 = qr.solve(m3); > VERIFY_IS_APPROX(m3, m1*m2); > } > > template<typename MatrixType> void qr_invertible() > { >+ using std::log; > typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar; > typedef typename MatrixType::Scalar Scalar; > > int size = internal::random<int>(10,50); > > MatrixType m1(size, size), m2(size, size), m3(size, size); > m1 = MatrixType::Random(size,size); > >@@ -73,22 +74,22 @@ template<typename MatrixType> void qr_in > > m3 = MatrixType::Random(size,size); > m2 = qr.solve(m3); > VERIFY_IS_APPROX(m3, m1*m2); > > // now construct a matrix with prescribed determinant > m1.setZero(); > for(int i = 0; i < size; i++) m1(i,i) = internal::random<Scalar>(); >- RealScalar absdet = internal::abs(m1.diagonal().prod()); >+ RealScalar absdet = abs(m1.diagonal().prod()); > m3 = qr.matrixQ(); // get a unitary > m1 = m3 * m1 * m3; > qr.compute(m1); > VERIFY_IS_APPROX(absdet, qr.absDeterminant()); >- VERIFY_IS_APPROX(internal::log(absdet), qr.logAbsDeterminant()); >+ VERIFY_IS_APPROX(log(absdet), qr.logAbsDeterminant()); > } > > template<typename MatrixType> void qr_verify_assert() > { > MatrixType tmp; > > FullPivHouseholderQR<MatrixType> qr; > VERIFY_RAISES_ASSERT(qr.matrixQR()) >diff --git a/test/real_qz.cpp b/test/real_qz.cpp >--- a/test/real_qz.cpp >+++ b/test/real_qz.cpp >@@ -31,21 +31,21 @@ template<typename MatrixType> void real_ > > RealQZ<MatrixType> qz(A,B); > > VERIFY_IS_EQUAL(qz.info(), Success); > // check for zeros > bool all_zeros = true; > for (Index i=0; i<A.cols(); i++) > for (Index j=0; j<i; j++) { >- if (internal::abs(qz.matrixT()(i,j))!=Scalar(0.0)) >+ if (abs(qz.matrixT()(i,j))!=Scalar(0.0)) > all_zeros = false; >- if (j<i-1 && internal::abs(qz.matrixS()(i,j))!=Scalar(0.0)) >+ if (j<i-1 && abs(qz.matrixS()(i,j))!=Scalar(0.0)) > all_zeros = false; >- if (j==i-1 && j>0 && internal::abs(qz.matrixS()(i,j))!=Scalar(0.0) && internal::abs(qz.matrixS()(i-1,j-1))!=Scalar(0.0)) >+ if (j==i-1 && j>0 && abs(qz.matrixS()(i,j))!=Scalar(0.0) && abs(qz.matrixS()(i-1,j-1))!=Scalar(0.0)) > all_zeros = false; > } > VERIFY_IS_EQUAL(all_zeros, true); > VERIFY_IS_APPROX(qz.matrixQ()*qz.matrixS()*qz.matrixZ(), A); > VERIFY_IS_APPROX(qz.matrixQ()*qz.matrixT()*qz.matrixZ(), B); > VERIFY_IS_APPROX(qz.matrixQ()*qz.matrixQ().adjoint(), MatrixType::Identity(dim,dim)); > VERIFY_IS_APPROX(qz.matrixZ()*qz.matrixZ().adjoint(), MatrixType::Identity(dim,dim)); > } >diff --git a/test/redux.cpp b/test/redux.cpp >--- a/test/redux.cpp >+++ b/test/redux.cpp >@@ -75,51 +75,51 @@ template<typename VectorType> void vecto > RealScalar minc(internal::real(v.coeff(0))), maxc(internal::real(v.coeff(0))); > for(int j = 0; j < i; j++) > { > s += v[j]; > p *= v_for_prod[j]; > minc = (std::min)(minc, internal::real(v[j])); > maxc = (std::max)(maxc, internal::real(v[j])); > } >- VERIFY_IS_MUCH_SMALLER_THAN(internal::abs(s - v.head(i).sum()), Scalar(1)); >+ VERIFY_IS_MUCH_SMALLER_THAN(abs(s - v.head(i).sum()), 
Scalar(1)); > VERIFY_IS_APPROX(p, v_for_prod.head(i).prod()); > VERIFY_IS_APPROX(minc, v.real().head(i).minCoeff()); > VERIFY_IS_APPROX(maxc, v.real().head(i).maxCoeff()); > } > > for(int i = 0; i < size-1; i++) > { > Scalar s(0), p(1); > RealScalar minc(internal::real(v.coeff(i))), maxc(internal::real(v.coeff(i))); > for(int j = i; j < size; j++) > { > s += v[j]; > p *= v_for_prod[j]; > minc = (std::min)(minc, internal::real(v[j])); > maxc = (std::max)(maxc, internal::real(v[j])); > } >- VERIFY_IS_MUCH_SMALLER_THAN(internal::abs(s - v.tail(size-i).sum()), Scalar(1)); >+ VERIFY_IS_MUCH_SMALLER_THAN(abs(s - v.tail(size-i).sum()), Scalar(1)); > VERIFY_IS_APPROX(p, v_for_prod.tail(size-i).prod()); > VERIFY_IS_APPROX(minc, v.real().tail(size-i).minCoeff()); > VERIFY_IS_APPROX(maxc, v.real().tail(size-i).maxCoeff()); > } > > for(int i = 0; i < size/2; i++) > { > Scalar s(0), p(1); > RealScalar minc(internal::real(v.coeff(i))), maxc(internal::real(v.coeff(i))); > for(int j = i; j < size-i; j++) > { > s += v[j]; > p *= v_for_prod[j]; > minc = (std::min)(minc, internal::real(v[j])); > maxc = (std::max)(maxc, internal::real(v[j])); > } >- VERIFY_IS_MUCH_SMALLER_THAN(internal::abs(s - v.segment(i, size-2*i).sum()), Scalar(1)); >+ VERIFY_IS_MUCH_SMALLER_THAN(abs(s - v.segment(i, size-2*i).sum()), Scalar(1)); > VERIFY_IS_APPROX(p, v_for_prod.segment(i, size-2*i).prod()); > VERIFY_IS_APPROX(minc, v.real().segment(i, size-2*i).minCoeff()); > VERIFY_IS_APPROX(maxc, v.real().segment(i, size-2*i).maxCoeff()); > } > > // test empty objects > VERIFY_IS_APPROX(v.head(0).sum(), Scalar(0)); > VERIFY_IS_APPROX(v.tail(0).prod(), Scalar(1)); >diff --git a/test/sparselu.cpp b/test/sparselu.cpp >--- a/test/sparselu.cpp >+++ b/test/sparselu.cpp >@@ -35,9 +35,9 @@ template<typename T> void test_sparselu_ > } > > void test_sparselu() > { > CALL_SUBTEST_1(test_sparselu_T<float>()); > CALL_SUBTEST_2(test_sparselu_T<double>()); > CALL_SUBTEST_3(test_sparselu_T<std::complex<float> >()); > CALL_SUBTEST_4(test_sparselu_T<std::complex<double> >()); >-} >\ No newline at end of file >+} >diff --git a/test/stable_norm.cpp b/test/stable_norm.cpp >--- a/test/stable_norm.cpp >+++ b/test/stable_norm.cpp >@@ -27,16 +27,17 @@ template<typename T> EIGEN_DONT_INLINE T > return x; > } > > template<typename MatrixType> void stable_norm(const MatrixType& m) > { > /* this test covers the following files: > StableNorm.h > */ >+ using std::sqrt; > typedef typename MatrixType::Index Index; > typedef typename MatrixType::Scalar Scalar; > typedef typename NumTraits<Scalar>::Real RealScalar; > > // Check the basic machine-dependent constants. 
> { > int ibeta, it, iemin, iemax; > >@@ -68,31 +69,31 @@ template<typename MatrixType> void stabl > VERIFY_IS_APPROX(vrand.stableNorm(), vrand.norm()); > VERIFY_IS_APPROX(vrand.blueNorm(), vrand.norm()); > VERIFY_IS_APPROX(vrand.hypotNorm(), vrand.norm()); > > RealScalar size = static_cast<RealScalar>(m.size()); > > // test isFinite > VERIFY(!isFinite( std::numeric_limits<RealScalar>::infinity())); >- VERIFY(!isFinite(internal::sqrt(-internal::abs(big)))); >+ VERIFY(!isFinite(sqrt(-abs(big)))); > > // test overflow >- VERIFY(isFinite(internal::sqrt(size)*internal::abs(big))); >- VERIFY_IS_NOT_APPROX(internal::sqrt(copy(vbig.squaredNorm())), internal::abs(internal::sqrt(size)*big)); // here the default norm must fail >- VERIFY_IS_APPROX(vbig.stableNorm(), internal::sqrt(size)*internal::abs(big)); >- VERIFY_IS_APPROX(vbig.blueNorm(), internal::sqrt(size)*internal::abs(big)); >- VERIFY_IS_APPROX(vbig.hypotNorm(), internal::sqrt(size)*internal::abs(big)); >+ VERIFY(isFinite(sqrt(size)*abs(big))); >+ VERIFY_IS_NOT_APPROX(sqrt(copy(vbig.squaredNorm())), abs(sqrt(size)*big)); // here the default norm must fail >+ VERIFY_IS_APPROX(vbig.stableNorm(), sqrt(size)*abs(big)); >+ VERIFY_IS_APPROX(vbig.blueNorm(), sqrt(size)*abs(big)); >+ VERIFY_IS_APPROX(vbig.hypotNorm(), sqrt(size)*abs(big)); > > // test underflow >- VERIFY(isFinite(internal::sqrt(size)*internal::abs(small))); >- VERIFY_IS_NOT_APPROX(internal::sqrt(copy(vsmall.squaredNorm())), internal::abs(internal::sqrt(size)*small)); // here the default norm must fail >- VERIFY_IS_APPROX(vsmall.stableNorm(), internal::sqrt(size)*internal::abs(small)); >- VERIFY_IS_APPROX(vsmall.blueNorm(), internal::sqrt(size)*internal::abs(small)); >- VERIFY_IS_APPROX(vsmall.hypotNorm(), internal::sqrt(size)*internal::abs(small)); >+ VERIFY(isFinite(sqrt(size)*abs(small))); >+ VERIFY_IS_NOT_APPROX(sqrt(copy(vsmall.squaredNorm())), abs(sqrt(size)*small)); // here the default norm must fail >+ VERIFY_IS_APPROX(vsmall.stableNorm(), sqrt(size)*abs(small)); >+ VERIFY_IS_APPROX(vsmall.blueNorm(), sqrt(size)*abs(small)); >+ VERIFY_IS_APPROX(vsmall.hypotNorm(), sqrt(size)*abs(small)); > > // Test compilation of cwise() version > VERIFY_IS_APPROX(vrand.colwise().stableNorm(), vrand.colwise().norm()); > VERIFY_IS_APPROX(vrand.colwise().blueNorm(), vrand.colwise().norm()); > VERIFY_IS_APPROX(vrand.colwise().hypotNorm(), vrand.colwise().norm()); > VERIFY_IS_APPROX(vrand.rowwise().stableNorm(), vrand.rowwise().norm()); > VERIFY_IS_APPROX(vrand.rowwise().blueNorm(), vrand.rowwise().norm()); > VERIFY_IS_APPROX(vrand.rowwise().hypotNorm(), vrand.rowwise().norm()); >diff --git a/test/umeyama.cpp b/test/umeyama.cpp >--- a/test/umeyama.cpp >+++ b/test/umeyama.cpp >@@ -94,17 +94,17 @@ template <typename MatrixType> > void run_test(int dim, int num_elements) > { > typedef typename internal::traits<MatrixType>::Scalar Scalar; > typedef Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> MatrixX; > typedef Matrix<Scalar, Eigen::Dynamic, 1> VectorX; > > // MUST be positive because in any other case det(cR_t) may become negative for > // odd dimensions! 
>- const Scalar c = internal::abs(internal::random<Scalar>()); >+ const Scalar c = abs(internal::random<Scalar>()); > > MatrixX R = randMatrixSpecialUnitary<Scalar>(dim); > VectorX t = Scalar(50)*VectorX::Random(dim,1); > > MatrixX cR_t = MatrixX::Identity(dim+1,dim+1); > cR_t.block(0,0,dim,dim) = c*R; > cR_t.block(0,dim,dim,1) = t; > >@@ -126,17 +126,17 @@ void run_fixed_size_test(int num_element > typedef Matrix<Scalar, Dimension+1, Dimension+1> HomMatrix; > typedef Matrix<Scalar, Dimension, Dimension> FixedMatrix; > typedef Matrix<Scalar, Dimension, 1> FixedVector; > > const int dim = Dimension; > > // MUST be positive because in any other case det(cR_t) may become negative for > // odd dimensions! >- const Scalar c = internal::abs(internal::random<Scalar>()); >+ const Scalar c = abs(internal::random<Scalar>()); > > FixedMatrix R = randMatrixSpecialUnitary<Scalar>(dim); > FixedVector t = Scalar(50)*FixedVector::Random(dim,1); > > HomMatrix cR_t = HomMatrix::Identity(dim+1,dim+1); > cR_t.block(0,0,dim,dim) = c*R; > cR_t.block(0,dim,dim,1) = t; > >diff --git a/unsupported/Eigen/AlignedVector3 b/unsupported/Eigen/AlignedVector3 >--- a/unsupported/Eigen/AlignedVector3 >+++ b/unsupported/Eigen/AlignedVector3 >@@ -162,17 +162,18 @@ template<typename _Scalar> class Aligned > inline Scalar squaredNorm() const > { > eigen_assert(m_coeffs.w()==Scalar(0)); > return m_coeffs.squaredNorm(); > } > > inline Scalar norm() const > { >- return internal::sqrt(squaredNorm()); >+ using std::sqrt; >+ return sqrt(squaredNorm()); > } > > inline AlignedVector3 cross(const AlignedVector3& other) const > { > return AlignedVector3(m_coeffs.cross3(other.m_coeffs)); > } > > template<typename Derived> >diff --git a/unsupported/Eigen/src/FFT/ei_kissfft_impl.h b/unsupported/Eigen/src/FFT/ei_kissfft_impl.h >--- a/unsupported/Eigen/src/FFT/ei_kissfft_impl.h >+++ b/unsupported/Eigen/src/FFT/ei_kissfft_impl.h >@@ -23,16 +23,17 @@ struct kiss_cpx_fft > std::vector<int> m_stageRadix; > std::vector<int> m_stageRemainder; > std::vector<Complex> m_scratchBuf; > bool m_inverse; > > inline > void make_twiddles(int nfft,bool inverse) > { >+ using std::acos; > m_inverse = inverse; > m_twiddles.resize(nfft); > Scalar phinc = (inverse?2:-2)* acos( (Scalar) -1) / nfft; > for (int i=0;i<nfft;++i) > m_twiddles[i] = exp( Complex(0,i*phinc) ); > } > > void factorize(int nfft) >@@ -394,16 +395,17 @@ struct kissfft_impl > pd.factorize(nfft); > } > return pd; > } > > inline > Complex * real_twiddles(int ncfft2) > { >+ using std::acos; > std::vector<Complex> & twidref = m_realTwiddles[ncfft2];// creates new if not there > if ( (int)twidref.size() != ncfft2 ) { > twidref.resize(ncfft2); > int ncfft= ncfft2<<1; > Scalar pi = acos( Scalar(-1) ); > for (int k=1;k<=ncfft2;++k) > twidref[k-1] = exp( Complex(0,-pi * (Scalar(k) / ncfft + Scalar(.5)) ) ); > } >diff --git a/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h b/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h >--- a/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h >+++ b/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h >@@ -111,16 +111,17 @@ class IncompleteCholesky : internal::non > PermutationType m_perm; > > }; > > template<typename Scalar, int _UpLo, typename OrderingType> > template<typename _MatrixType> > void IncompleteCholesky<Scalar,_UpLo, OrderingType>::factorize(const _MatrixType& mat) > { >+ using std::sqrt; > eigen_assert(m_analysisIsOk && "analyzePattern() should be called first"); > > // FIXME Stability: We should probably compute 
the scaling factors and the shifts that are needed to ensure a succesful LLT factorization and an efficient preconditioner. > > // Dropping strategies : Keep only the p largest elements per column, where p is the number of elements in the column of the original matrix. Other strategies will be added > > // Apply the fill-reducing permutation computed in analyzePattern() > if (m_perm.rows() == mat.rows() ) >@@ -177,17 +178,17 @@ void IncompleteCholesky<Scalar,_UpLo, Or > // p is the original number of elements in the column (without the diagonal) > int p = colPtr[j+1] - colPtr[j] - 2 ; > internal::QuickSplit(curCol, irow, p); > if(RealScalar(diag) <= 0) > { //FIXME We can use heuristics (Kershaw, 1978 or above reference ) to get a dynamic shift > m_info = NumericalIssue; > return; > } >- RealScalar rdiag = internal::sqrt(RealScalar(diag)); >+ RealScalar rdiag = sqrt(RealScalar(diag)); > Scalar scal = Scalar(1)/rdiag; > vals[colPtr[j]] = rdiag; > // Insert the largest p elements in the matrix and scale them meanwhile > int cpt = 0; > for (int i = colPtr[j]+1; i < colPtr[j+1]; i++) > { > vals[i] = curCol(cpt) * scal; > rowIdx[i] = irow(cpt); >diff --git a/unsupported/Eigen/src/IterativeSolvers/IterationController.h b/unsupported/Eigen/src/IterativeSolvers/IterationController.h >--- a/unsupported/Eigen/src/IterativeSolvers/IterationController.h >+++ b/unsupported/Eigen/src/IterativeSolvers/IterationController.h >@@ -124,17 +124,18 @@ class IterationController > void setMaxIterations(size_t i) { m_maxiter = i; } > > double rhsNorm() const { return m_rhsn; } > void setRhsNorm(double r) { m_rhsn = r; } > > bool converged() const { return m_res <= m_rhsn * m_resmax; } > bool converged(double nr) > { >- m_res = internal::abs(nr); >+ using std::abs; >+ m_res = abs(nr); > m_resminreach = (std::min)(m_resminreach, m_res); > return converged(); > } > template<typename VectorType> bool converged(const VectorType &v) > { return converged(v.squaredNorm()); } > > bool finished(double nr) > { >diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h b/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h >--- a/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h >+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h >@@ -230,40 +230,41 @@ void MatrixFunction<MatrixType,AtomicTyp > * # The distance between two eigenvalues in different clusters is > * more than separation(). > * The implementation follows Algorithm 4.1 in the paper of Davies > * and Higham. 
> */ > template <typename MatrixType, typename AtomicType> > void MatrixFunction<MatrixType,AtomicType,1>::partitionEigenvalues() > { >+ using std::abs; > const Index rows = m_T.rows(); > VectorType diag = m_T.diagonal(); // contains eigenvalues of A > > for (Index i=0; i<rows; ++i) { > // Find set containing diag(i), adding a new set if necessary > typename ListOfClusters::iterator qi = findCluster(diag(i)); > if (qi == m_clusters.end()) { > Cluster l; > l.push_back(diag(i)); > m_clusters.push_back(l); > qi = m_clusters.end(); > --qi; > } > > // Look for other element to add to the set > for (Index j=i+1; j<rows; ++j) { >- if (internal::abs(diag(j) - diag(i)) <= separation() && std::find(qi->begin(), qi->end(), diag(j)) == qi->end()) { >- typename ListOfClusters::iterator qj = findCluster(diag(j)); >- if (qj == m_clusters.end()) { >- qi->push_back(diag(j)); >- } else { >- qi->insert(qi->end(), qj->begin(), qj->end()); >- m_clusters.erase(qj); >- } >+ if (abs(diag(j) - diag(i)) <= separation() && std::find(qi->begin(), qi->end(), diag(j)) == qi->end()) { >+ typename ListOfClusters::iterator qj = findCluster(diag(j)); >+ if (qj == m_clusters.end()) { >+ qi->push_back(diag(j)); >+ } else { >+ qi->insert(qi->end(), qj->begin(), qj->end()); >+ m_clusters.erase(qj); >+ } > } > } > } > } > > /** \brief Find cluster in #m_clusters containing some value > * \param[in] key Value to find > * \returns Iterator to cluster containing \c key, or >diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h b/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h >--- a/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h >+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h >@@ -120,33 +120,34 @@ void MatrixLogarithmAtomic<MatrixType>:: > } > } > > /** \brief Compute logarithm of triangular matrices with size > 2. > * \details This uses a inverse scale-and-square algorithm. */ > template <typename MatrixType> > void MatrixLogarithmAtomic<MatrixType>::computeBig(const MatrixType& A, MatrixType& result) > { >+ using std::pow; > int numberOfSquareRoots = 0; > int numberOfExtraSquareRoots = 0; > int degree; > MatrixType T = A; > const RealScalar maxNormForPade = maxPadeDegree<= 5? 5.3149729967117310e-1: // single precision > maxPadeDegree<= 7? 2.6429608311114350e-1: // double precision > maxPadeDegree<= 8? 2.32777776523703892094e-1L: // extended precision > maxPadeDegree<=10? 
1.05026503471351080481093652651105e-1L: // double-double > 1.1880960220216759245467951592883642e-1L; // quadruple precision > > while (true) { > RealScalar normTminusI = (T - MatrixType::Identity(T.rows(), T.rows())).cwiseAbs().colwise().sum().maxCoeff(); > if (normTminusI < maxNormForPade) { > degree = getPadeDegree(normTminusI); > int degree2 = getPadeDegree(normTminusI / RealScalar(2)); > if ((degree - degree2 <= 1) || (numberOfExtraSquareRoots == 1)) >- break; >+ break; > ++numberOfExtraSquareRoots; > } > MatrixType sqrtT; > MatrixSquareRootTriangular<MatrixType>(T).compute(sqrtT); > T = sqrtT; > ++numberOfSquareRoots; > } > >diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h b/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h >--- a/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h >+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h >@@ -94,21 +94,22 @@ void MatrixSquareRootQuasiTriangular<Mat > } > > // pre: T is quasi-upper-triangular and sqrtT is a zero matrix of the same size > // post: the diagonal blocks of sqrtT are the square roots of the diagonal blocks of T > template <typename MatrixType> > void MatrixSquareRootQuasiTriangular<MatrixType>::computeDiagonalPartOfSqrt(MatrixType& sqrtT, > const MatrixType& T) > { >+ using std::sqrt; > const Index size = m_A.rows(); > for (Index i = 0; i < size; i++) { > if (i == size - 1 || T.coeff(i+1, i) == 0) { > eigen_assert(T(i,i) > 0); >- sqrtT.coeffRef(i,i) = internal::sqrt(T.coeff(i,i)); >+ sqrtT.coeffRef(i,i) = sqrt(T.coeff(i,i)); > } > else { > compute2x2diagonalBlock(sqrtT, T, i); > ++i; > } > } > } > >@@ -284,27 +285,28 @@ class MatrixSquareRootTriangular > private: > const MatrixType& m_A; > }; > > template <typename MatrixType> > template <typename ResultType> > void MatrixSquareRootTriangular<MatrixType>::compute(ResultType &result) > { >+ using std::sqrt; > // Compute Schur decomposition of m_A > const ComplexSchur<MatrixType> schurOfA(m_A); > const MatrixType& T = schurOfA.matrixT(); > const MatrixType& U = schurOfA.matrixU(); > > // Compute square root of T and store it in upper triangular part of result > // This uses that the square root of triangular matrices can be computed directly. 
> result.resize(m_A.rows(), m_A.cols()); > typedef typename MatrixType::Index Index; > for (Index i = 0; i < m_A.rows(); i++) { >- result.coeffRef(i,i) = internal::sqrt(T.coeff(i,i)); >+ result.coeffRef(i,i) = sqrt(T.coeff(i,i)); > } > for (Index j = 1; j < m_A.cols(); j++) { > for (Index i = j-1; i >= 0; i--) { > typedef typename MatrixType::Scalar Scalar; > // if i = j-1, then segment has length 0 so tmp = 0 > Scalar tmp = (result.row(i).segment(i+1,j-i-1) * result.col(j).segment(i+1,j-i-1)).value(); > // denominator may be zero if original matrix is singular > result.coeffRef(i,j) = (T.coeff(i,j) - tmp) / (result.coeff(i,i) + result.coeff(j,j)); >diff --git a/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h b/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h >--- a/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h >+++ b/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h >@@ -201,16 +201,17 @@ LevenbergMarquardt<FunctorType,Scalar>:: > > return LevenbergMarquardtSpace::NotStarted; > } > > template<typename FunctorType, typename Scalar> > LevenbergMarquardtSpace::Status > LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(FVectorType &x) > { >+ using std::abs; > assert(x.size()==n); // check the caller is not cheating us > > /* calculate the jacobian matrix. */ > Index df_ret = functor.df(x, fjac); > if (df_ret<0) > return LevenbergMarquardtSpace::UserAsked; > if (df_ret>0) > // numerical diff, we evaluated the function df_ret times >@@ -244,17 +245,17 @@ LevenbergMarquardt<FunctorType,Scalar>:: > wa4.applyOnTheLeft(qrfac.householderQ().adjoint()); > qtf = wa4.head(n); > > /* compute the norm of the scaled gradient. */ > gnorm = 0.; > if (fnorm != 0.) > for (Index j = 0; j < n; ++j) > if (wa2[permutation.indices()[j]] != 0.) >- gnorm = (std::max)(gnorm, internal::abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]])); >+ gnorm = (std::max)(gnorm, abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]])); > > /* test for convergence of the gradient norm. */ > if (gnorm <= parameters.gtol) > return LevenbergMarquardtSpace::CosinusTooSmall; > > /* rescale if necessary. */ > if (!useExternalScaling) > diag = diag.cwiseMax(wa2); >@@ -321,27 +322,27 @@ LevenbergMarquardt<FunctorType,Scalar>:: > wa2 = diag.cwiseProduct(x); > fvec = wa4; > xnorm = wa2.stableNorm(); > fnorm = fnorm1; > ++iter; > } > > /* tests for convergence. */ >- if (internal::abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1. && delta <= parameters.xtol * xnorm) >+ if (abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1. && delta <= parameters.xtol * xnorm) > return LevenbergMarquardtSpace::RelativeErrorAndReductionTooSmall; >- if (internal::abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1.) >+ if (abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1.) > return LevenbergMarquardtSpace::RelativeReductionTooSmall; > if (delta <= parameters.xtol * xnorm) > return LevenbergMarquardtSpace::RelativeErrorTooSmall; > > /* tests for termination and stringent tolerances. */ > if (nfev >= parameters.maxfev) > return LevenbergMarquardtSpace::TooManyFunctionEvaluation; >- if (internal::abs(actred) <= NumTraits<Scalar>::epsilon() && prered <= NumTraits<Scalar>::epsilon() && Scalar(.5) * ratio <= 1.) 
>+ if (abs(actred) <= NumTraits<Scalar>::epsilon() && prered <= NumTraits<Scalar>::epsilon() && Scalar(.5) * ratio <= 1.) > return LevenbergMarquardtSpace::FtolTooSmall; > if (delta <= NumTraits<Scalar>::epsilon() * xnorm) > return LevenbergMarquardtSpace::XtolTooSmall; > if (gnorm <= NumTraits<Scalar>::epsilon()) > return LevenbergMarquardtSpace::GtolTooSmall; > > } while (ratio < Scalar(1e-4)); > >@@ -491,17 +492,17 @@ LevenbergMarquardt<FunctorType,Scalar>:: > delta = parameters.factor; > } > > /* compute the norm of the scaled gradient. */ > gnorm = 0.; > if (fnorm != 0.) > for (j = 0; j < n; ++j) > if (wa2[permutation.indices()[j]] != 0.) >- gnorm = (std::max)(gnorm, internal::abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]])); >+ gnorm = (std::max)(gnorm, abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]])); > > /* test for convergence of the gradient norm. */ > if (gnorm <= parameters.gtol) > return LevenbergMarquardtSpace::CosinusTooSmall; > > /* rescale if necessary. */ > if (!useExternalScaling) > diag = diag.cwiseMax(wa2); >@@ -568,27 +569,27 @@ LevenbergMarquardt<FunctorType,Scalar>:: > wa2 = diag.cwiseProduct(x); > fvec = wa4; > xnorm = wa2.stableNorm(); > fnorm = fnorm1; > ++iter; > } > > /* tests for convergence. */ >- if (internal::abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1. && delta <= parameters.xtol * xnorm) >+ if (abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1. && delta <= parameters.xtol * xnorm) > return LevenbergMarquardtSpace::RelativeErrorAndReductionTooSmall; >- if (internal::abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1.) >+ if (abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1.) > return LevenbergMarquardtSpace::RelativeReductionTooSmall; > if (delta <= parameters.xtol * xnorm) > return LevenbergMarquardtSpace::RelativeErrorTooSmall; > > /* tests for termination and stringent tolerances. */ > if (nfev >= parameters.maxfev) > return LevenbergMarquardtSpace::TooManyFunctionEvaluation; >- if (internal::abs(actred) <= NumTraits<Scalar>::epsilon() && prered <= NumTraits<Scalar>::epsilon() && Scalar(.5) * ratio <= 1.) >+ if (abs(actred) <= NumTraits<Scalar>::epsilon() && prered <= NumTraits<Scalar>::epsilon() && Scalar(.5) * ratio <= 1.) 
> return LevenbergMarquardtSpace::FtolTooSmall; > if (delta <= NumTraits<Scalar>::epsilon() * xnorm) > return LevenbergMarquardtSpace::XtolTooSmall; > if (gnorm <= NumTraits<Scalar>::epsilon()) > return LevenbergMarquardtSpace::GtolTooSmall; > > } while (ratio < Scalar(1e-4)); > >diff --git a/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h b/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h >--- a/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h >+++ b/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h >@@ -58,21 +58,23 @@ public: > ValuesAtCompileTime = Functor::ValuesAtCompileTime > }; > > /** > * return the number of evaluation of functor > */ > int df(const InputType& _x, JacobianType &jac) const > { >+ using std::sqrt; >+ using std::abs; > /* Local variables */ > Scalar h; > int nfev=0; > const typename InputType::Index n = _x.size(); >- const Scalar eps = internal::sqrt(((std::max)(epsfcn,NumTraits<Scalar>::epsilon() ))); >+ const Scalar eps = sqrt(((std::max)(epsfcn,NumTraits<Scalar>::epsilon() ))); > ValueType val1, val2; > InputType x = _x; > // TODO : we should do this only if the size is not already known > val1.resize(Functor::values()); > val2.resize(Functor::values()); > > // initialization > switch(mode) { >@@ -84,17 +86,17 @@ public: > // do nothing > break; > default: > assert(false); > }; > > // Function Body > for (int j = 0; j < n; ++j) { >- h = eps * internal::abs(x[j]); >+ h = eps * abs(x[j]); > if (h == 0.) { > h = eps; > } > switch(mode) { > case Forward: > x[j] += h; > Functor::operator()(x, val2); > nfev++; >diff --git a/unsupported/Eigen/src/Polynomials/Companion.h b/unsupported/Eigen/src/Polynomials/Companion.h >--- a/unsupported/Eigen/src/Polynomials/Companion.h >+++ b/unsupported/Eigen/src/Polynomials/Companion.h >@@ -205,16 +205,17 @@ bool companion<_Scalar,_Deg>::balancedR( > return true; } > } > } > > > template< typename _Scalar, int _Deg > > void companion<_Scalar,_Deg>::balance() > { >+ using std::abs; > EIGEN_STATIC_ASSERT( Deg == Dynamic || 1 < Deg, YOU_MADE_A_PROGRAMMING_MISTAKE ); > const Index deg = m_monic.size(); > const Index deg_1 = deg-1; > > bool hasConverged=false; > while( !hasConverged ) > { > hasConverged = true; >diff --git a/unsupported/Eigen/src/Polynomials/PolynomialSolver.h b/unsupported/Eigen/src/Polynomials/PolynomialSolver.h >--- a/unsupported/Eigen/src/Polynomials/PolynomialSolver.h >+++ b/unsupported/Eigen/src/Polynomials/PolynomialSolver.h >@@ -64,20 +64,21 @@ class PolynomialSolverBase > * \param[out] bi_seq : the back insertion sequence (stl concept) > * \param[in] absImaginaryThreshold : the maximum bound of the imaginary part of a complex > * number that is considered as real. 
> * */ > template<typename Stl_back_insertion_sequence> > inline void realRoots( Stl_back_insertion_sequence& bi_seq, > const RealScalar& absImaginaryThreshold = NumTraits<Scalar>::dummy_precision() ) const > { >+ using std::abs; > bi_seq.clear(); > for(Index i=0; i<m_roots.size(); ++i ) > { >- if( internal::abs( m_roots[i].imag() ) < absImaginaryThreshold ){ >+ if( abs( m_roots[i].imag() ) < absImaginaryThreshold ){ > bi_seq.push_back( m_roots[i].real() ); } > } > } > > protected: > template<typename squaredNormBinaryPredicate> > inline const RootType& selectComplexRoot_withRespectToNorm( squaredNormBinaryPredicate& pred ) const > { >@@ -113,23 +114,24 @@ class PolynomialSolverBase > > protected: > template<typename squaredRealPartBinaryPredicate> > inline const RealScalar& selectRealRoot_withRespectToAbsRealPart( > squaredRealPartBinaryPredicate& pred, > bool& hasArealRoot, > const RealScalar& absImaginaryThreshold = NumTraits<Scalar>::dummy_precision() ) const > { >+ using std::abs; > hasArealRoot = false; > Index res=0; > RealScalar abs2(0); > > for( Index i=0; i<m_roots.size(); ++i ) > { >- if( internal::abs( m_roots[i].imag() ) < absImaginaryThreshold ) >+ if( abs( m_roots[i].imag() ) < absImaginaryThreshold ) > { > if( !hasArealRoot ) > { > hasArealRoot = true; > res = i; > abs2 = m_roots[i].real() * m_roots[i].real(); > } > else >@@ -139,37 +141,38 @@ class PolynomialSolverBase > { > abs2 = currAbs2; > res = i; > } > } > } > else > { >- if( internal::abs( m_roots[i].imag() ) < internal::abs( m_roots[res].imag() ) ){ >+ if( abs( m_roots[i].imag() ) < abs( m_roots[res].imag() ) ){ > res = i; } > } > } > return internal::real_ref(m_roots[res]); > } > > > template<typename RealPartBinaryPredicate> > inline const RealScalar& selectRealRoot_withRespectToRealPart( > RealPartBinaryPredicate& pred, > bool& hasArealRoot, > const RealScalar& absImaginaryThreshold = NumTraits<Scalar>::dummy_precision() ) const > { >+ using std::abs; > hasArealRoot = false; > Index res=0; > RealScalar val(0); > > for( Index i=0; i<m_roots.size(); ++i ) > { >- if( internal::abs( m_roots[i].imag() ) < absImaginaryThreshold ) >+ if( abs( m_roots[i].imag() ) < absImaginaryThreshold ) > { > if( !hasArealRoot ) > { > hasArealRoot = true; > res = i; > val = m_roots[i].real(); > } > else >@@ -179,17 +182,17 @@ class PolynomialSolverBase > { > val = curr; > res = i; > } > } > } > else > { >- if( internal::abs( m_roots[i].imag() ) < internal::abs( m_roots[res].imag() ) ){ >+ if( abs( m_roots[i].imag() ) < abs( m_roots[res].imag() ) ){ > res = i; } > } > } > return internal::real_ref(m_roots[res]); > } > > public: > /** >diff --git a/unsupported/Eigen/src/Polynomials/PolynomialUtils.h b/unsupported/Eigen/src/Polynomials/PolynomialUtils.h >--- a/unsupported/Eigen/src/Polynomials/PolynomialUtils.h >+++ b/unsupported/Eigen/src/Polynomials/PolynomialUtils.h >@@ -69,50 +69,52 @@ T poly_eval( const Polynomials& poly, co > * > * <i><b>Precondition:</b></i> > * <dd> the leading coefficient of the input polynomial poly must be non zero </dd> > */ > template <typename Polynomial> > inline > typename NumTraits<typename Polynomial::Scalar>::Real cauchy_max_bound( const Polynomial& poly ) > { >+ using std::abs; > typedef typename Polynomial::Scalar Scalar; > typedef typename NumTraits<Scalar>::Real Real; > > assert( Scalar(0) != poly[poly.size()-1] ); > const Scalar inv_leading_coeff = Scalar(1)/poly[poly.size()-1]; > Real cb(0); > > for( DenseIndex i=0; i<poly.size()-1; ++i ){ >- cb += internal::abs(poly[i]*inv_leading_coeff); } >+ 
cb += abs(poly[i]*inv_leading_coeff); } > return cb + Real(1); > } > > /** \ingroup Polynomials_Module > * \returns a minimum bound for the absolute value of any non zero root of the polynomial. > * \param[in] poly : the vector of coefficients of the polynomial ordered > * by degrees i.e. poly[i] is the coefficient of degree i of the polynomial > * e.g. \f$ 1 + 3x^2 \f$ is stored as a vector \f$ [ 1, 0, 3 ] \f$. > */ > template <typename Polynomial> > inline > typename NumTraits<typename Polynomial::Scalar>::Real cauchy_min_bound( const Polynomial& poly ) > { >+ using std::abs; > typedef typename Polynomial::Scalar Scalar; > typedef typename NumTraits<Scalar>::Real Real; > > DenseIndex i=0; > while( i<poly.size()-1 && Scalar(0) == poly(i) ){ ++i; } > if( poly.size()-1 == i ){ > return Real(1); } > > const Scalar inv_min_coeff = Scalar(1)/poly[i]; > Real cb(1); > for( DenseIndex j=i+1; j<poly.size(); ++j ){ >- cb += internal::abs(poly[j]*inv_min_coeff); } >+ cb += abs(poly[j]*inv_min_coeff); } > return Real(1)/cb; > } > > /** \ingroup Polynomials_Module > * Given the roots of a polynomial compute the coefficients in the > * monomial basis of the monic polynomial with same roots and minimal degree. > * If RootVector is a vector of complexes, Polynomial should also be a vector > * of complexes. >diff --git a/unsupported/test/mpreal_support.cpp b/unsupported/test/mpreal_support.cpp >--- a/unsupported/test/mpreal_support.cpp >+++ b/unsupported/test/mpreal_support.cpp >@@ -1,16 +1,15 @@ > #include "main.h" > #include <Eigen/MPRealSupport> > #include <Eigen/LU> > #include <Eigen/Eigenvalues> > #include <sstream> > > using namespace mpfr; >-using namespace std; > using namespace Eigen; > > void test_mpreal_support() > { > // set precision to 256 bits (double has only 53 bits) > mpreal::set_default_prec(256); > typedef Matrix<mpreal,Eigen::Dynamic,Eigen::Dynamic> MatrixXmp; > >diff --git a/unsupported/test/polynomialsolver.cpp b/unsupported/test/polynomialsolver.cpp >--- a/unsupported/test/polynomialsolver.cpp >+++ b/unsupported/test/polynomialsolver.cpp >@@ -87,16 +87,17 @@ void evalSolver( const POLYNOMIAL& pols > } > > > > > template< int Deg, typename POLYNOMIAL, typename ROOTS, typename REAL_ROOTS > > void evalSolverSugarFunction( const POLYNOMIAL& pols, const ROOTS& roots, const REAL_ROOTS& real_roots ) > { >+ using std::sqrt; > typedef typename POLYNOMIAL::Scalar Scalar; > > typedef PolynomialSolver<Scalar, Deg > PolynomialSolverType; > > PolynomialSolverType psolve; > if( aux_evalSolver<Deg, POLYNOMIAL, PolynomialSolverType>( pols, psolve ) ) > { > //It is supposed that >@@ -110,49 +111,49 @@ void evalSolverSugarFunction( const POLY > typedef typename PolynomialSolverType::RootsType RootsType; > typedef Matrix<Scalar,Deg,1> EvalRootsType; > > //Test realRoots > std::vector< Real > calc_realRoots; > psolve.realRoots( calc_realRoots ); > VERIFY( calc_realRoots.size() == (size_t)real_roots.size() ); > >- const Scalar psPrec = internal::sqrt( test_precision<Scalar>() ); >+ const Scalar psPrec = sqrt( test_precision<Scalar>() ); > > for( size_t i=0; i<calc_realRoots.size(); ++i ) > { > bool found = false; > for( size_t j=0; j<calc_realRoots.size()&& !found; ++j ) > { > if( internal::isApprox( calc_realRoots[i], real_roots[j] ), psPrec ){ > found = true; } > } > VERIFY( found ); > } > > //Test greatestRoot > VERIFY( internal::isApprox( roots.array().abs().maxCoeff(), >- internal::abs( psolve.greatestRoot() ), psPrec ) ); >+ abs( psolve.greatestRoot() ), psPrec ) ); > > //Test smallestRoot > 
VERIFY( internal::isApprox( roots.array().abs().minCoeff(), >- internal::abs( psolve.smallestRoot() ), psPrec ) ); >+ abs( psolve.smallestRoot() ), psPrec ) ); > > bool hasRealRoot; > //Test absGreatestRealRoot > Real r = psolve.absGreatestRealRoot( hasRealRoot ); > VERIFY( hasRealRoot == (real_roots.size() > 0 ) ); > if( hasRealRoot ){ >- VERIFY( internal::isApprox( real_roots.array().abs().maxCoeff(), internal::abs(r), psPrec ) ); } >+ VERIFY( internal::isApprox( real_roots.array().abs().maxCoeff(), abs(r), psPrec ) ); } > > //Test absSmallestRealRoot > r = psolve.absSmallestRealRoot( hasRealRoot ); > VERIFY( hasRealRoot == (real_roots.size() > 0 ) ); > if( hasRealRoot ){ >- VERIFY( internal::isApprox( real_roots.array().abs().minCoeff(), internal::abs( r ), psPrec ) ); } >+ VERIFY( internal::isApprox( real_roots.array().abs().minCoeff(), abs( r ), psPrec ) ); } > > //Test greatestRealRoot > r = psolve.greatestRealRoot( hasRealRoot ); > VERIFY( hasRealRoot == (real_roots.size() > 0 ) ); > if( hasRealRoot ){ > VERIFY( internal::isApprox( real_roots.array().maxCoeff(), r, psPrec ) ); } > > //Test smallestRealRoot
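
[Editorial note, not part of the attachment] Throughout the patch the same idiom is applied: the removed internal::* wrappers are replaced by a local using-declaration (using std::abs; using std::sqrt; ...) followed by an unqualified call, so that argument-dependent lookup can pick up overloads provided for scalar types that live outside namespace std (for example mpfr::mpreal in the mpreal_support test), while built-in types still resolve to the std:: functions. The sketch below only illustrates that lookup pattern under assumed names; custom::MyScalar and its abs() overload are hypothetical stand-ins and do not appear in the patch.

    #include <cmath>
    #include <iostream>

    namespace custom {
      struct MyScalar { double v; };
      // Overload found via argument-dependent lookup when abs() is called
      // unqualified on a MyScalar.
      MyScalar abs(const MyScalar& x) { return MyScalar{ x.v < 0.0 ? -x.v : x.v }; }
    }

    template <typename Scalar>
    Scalar generic_abs(const Scalar& x)
    {
      using std::abs;   // fallback for built-in types (float, double, ...)
      return abs(x);    // unqualified: ADL selects custom::abs for custom::MyScalar
    }

    int main()
    {
      std::cout << generic_abs(-3.5) << "\n";                      // uses std::abs
      std::cout << generic_abs(custom::MyScalar{-2.0}).v << "\n";  // uses custom::abs
    }

The hunk against test/main.h takes the complementary route for the test suite itself, adding file-scope "using std::abs;" and "using std::sqrt;" so the many unqualified calls introduced in the tests keep compiling without touching every call site.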