10 #ifndef EIGEN_CXX11_TENSOR_TENSOR_FFT_H
11 #define EIGEN_CXX11_TENSOR_TENSOR_FFT_H
13 #include "./InternalHeaderCheck.h"
// Promotes an input value to std::complex when the FFT input is real.
// The bool parameter states whether a real->complex upgrade is needed.
template <bool NeedUprade>
struct MakeComplex {
  // Pass-through for the generic (no-upgrade) case.
  template <typename T>
  T operator()(const T& val) const {
    return val;
  }
};

// Real input: wrap it as a complex number with zero imaginary part.
template <>
struct MakeComplex<true> {
  template <typename T>
  std::complex<T> operator()(const T& val) const {
    return std::complex<T>(val, 0);
  }
};

// Already-complex input: return it unchanged.
template <>
struct MakeComplex<false> {
  template <typename T>
  std::complex<T> operator()(const std::complex<T>& val) const {
    return val;
  }
};
46 template <
int ResultType>
struct PartOf {
47 template <
typename T> T operator() (
const T& val)
const {
return val; }
50 template <>
struct PartOf<RealPart> {
51 template <
typename T> T operator() (
const std::complex<T>& val)
const {
return val.real(); }
54 template <>
struct PartOf<ImagPart> {
55 template <
typename T> T operator() (
const std::complex<T>& val)
const {
return val.imag(); }
59 template <
typename FFT,
typename XprType,
int FFTResultType,
int FFTDir>
60 struct traits<TensorFFTOp<FFT, XprType, FFTResultType, FFTDir> > :
public traits<XprType> {
61 typedef traits<XprType> XprTraits;
62 typedef typename NumTraits<typename XprTraits::Scalar>::Real RealScalar;
63 typedef typename std::complex<RealScalar> ComplexScalar;
64 typedef typename XprTraits::Scalar InputScalar;
65 typedef std::conditional_t<FFTResultType == RealPart || FFTResultType == ImagPart, RealScalar, ComplexScalar> OutputScalar;
66 typedef typename XprTraits::StorageKind StorageKind;
67 typedef typename XprTraits::Index
Index;
68 typedef typename XprType::Nested Nested;
69 typedef std::remove_reference_t<Nested> Nested_;
70 static constexpr
int NumDimensions = XprTraits::NumDimensions;
71 static constexpr
int Layout = XprTraits::Layout;
72 typedef typename traits<XprType>::PointerType PointerType;
75 template <
typename FFT,
typename XprType,
int FFTResultType,
int FFTDirection>
76 struct eval<TensorFFTOp<FFT, XprType, FFTResultType, FFTDirection>,
Eigen::Dense> {
77 typedef const TensorFFTOp<FFT, XprType, FFTResultType, FFTDirection>& type;
80 template <
typename FFT,
typename XprType,
int FFTResultType,
int FFTDirection>
81 struct nested<TensorFFTOp<FFT, XprType, FFTResultType, FFTDirection>, 1, typename eval<TensorFFTOp<FFT, XprType, FFTResultType, FFTDirection> >::type> {
82 typedef TensorFFTOp<FFT, XprType, FFTResultType, FFTDirection> type;
87 template <
typename FFT,
typename XprType,
int FFTResultType,
int FFTDir>
88 class TensorFFTOp :
public TensorBase<TensorFFTOp<FFT, XprType, FFTResultType, FFTDir>, ReadOnlyAccessors> {
90 typedef typename Eigen::internal::traits<TensorFFTOp>::Scalar Scalar;
92 typedef typename std::complex<RealScalar> ComplexScalar;
93 typedef std::conditional_t<FFTResultType == RealPart || FFTResultType == ImagPart, RealScalar, ComplexScalar> OutputScalar;
94 typedef OutputScalar CoeffReturnType;
95 typedef typename Eigen::internal::nested<TensorFFTOp>::type Nested;
96 typedef typename Eigen::internal::traits<TensorFFTOp>::StorageKind StorageKind;
97 typedef typename Eigen::internal::traits<TensorFFTOp>::Index
Index;
99 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorFFTOp(
const XprType& expr,
const FFT& fft)
100 : m_xpr(expr), m_fft(fft) {}
103 const FFT& fft()
const {
return m_fft; }
106 const internal::remove_all_t<typename XprType::Nested>& expression()
const {
111 typename XprType::Nested m_xpr;
116 template <
typename FFT,
typename ArgType,
typename Device,
int FFTResultType,
int FFTDir>
117 struct TensorEvaluator<const TensorFFTOp<FFT, ArgType, FFTResultType, FFTDir>, Device> {
118 typedef TensorFFTOp<FFT, ArgType, FFTResultType, FFTDir> XprType;
119 typedef typename XprType::Index Index;
120 static constexpr
int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
121 typedef DSizes<Index, NumDims> Dimensions;
122 typedef typename XprType::Scalar Scalar;
124 typedef typename std::complex<RealScalar> ComplexScalar;
125 typedef typename TensorEvaluator<ArgType, Device>::Dimensions InputDimensions;
126 typedef internal::traits<XprType> XprTraits;
127 typedef typename XprTraits::Scalar InputScalar;
128 typedef std::conditional_t<FFTResultType == RealPart || FFTResultType == ImagPart, RealScalar, ComplexScalar> OutputScalar;
129 typedef OutputScalar CoeffReturnType;
130 typedef typename PacketType<OutputScalar, Device>::type PacketReturnType;
131 static constexpr
int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
132 typedef StorageMemory<CoeffReturnType, Device> Storage;
133 typedef typename Storage::Type EvaluatorPointerType;
135 static constexpr
int Layout = TensorEvaluator<ArgType, Device>::Layout;
140 PreferBlockAccess =
false,
146 typedef internal::TensorBlockNotImplemented TensorBlock;
149 EIGEN_STRONG_INLINE TensorEvaluator(
const XprType& op,
const Device& device) : m_fft(op.fft()), m_impl(op.expression(), device), m_data(NULL), m_device(device) {
150 const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
151 for (
int i = 0; i < NumDims; ++i) {
152 eigen_assert(input_dims[i] > 0);
153 m_dimensions[i] = input_dims[i];
156 if (
static_cast<int>(Layout) ==
static_cast<int>(
ColMajor)) {
158 for (
int i = 1; i < NumDims; ++i) {
159 m_strides[i] = m_strides[i - 1] * m_dimensions[i - 1];
162 m_strides[NumDims - 1] = 1;
163 for (
int i = NumDims - 2; i >= 0; --i) {
164 m_strides[i] = m_strides[i + 1] * m_dimensions[i + 1];
167 m_size = m_dimensions.TotalSize();
170 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const Dimensions& dimensions()
const {
174 EIGEN_STRONG_INLINE
bool evalSubExprsIfNeeded(EvaluatorPointerType data) {
175 m_impl.evalSubExprsIfNeeded(NULL);
180 m_data = (EvaluatorPointerType)m_device.get((CoeffReturnType*)(m_device.allocate_temp(
sizeof(CoeffReturnType) * m_size)));
186 EIGEN_STRONG_INLINE
void cleanup() {
188 m_device.deallocate(m_data);
194 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE CoeffReturnType coeff(Index index)
const {
195 return m_data[index];
198 template <
int LoadMode>
199 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketReturnType
200 packet(Index index)
const {
201 return internal::ploadt<PacketReturnType, LoadMode>(m_data + index);
204 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
205 costPerCoeff(
bool vectorized)
const {
206 return TensorOpCost(
sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize);
209 EIGEN_DEVICE_FUNC EvaluatorPointerType data()
const {
return m_data; }
#ifdef EIGEN_USE_SYCL
  // Binds the placeholder accessors to a SYCL command-group handler.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler& cgh) const {
    m_impl.bind(cgh);
    m_data.bind(cgh);
  }
#endif
218 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void evalToBuf(EvaluatorPointerType data) {
219 const bool write_to_out = internal::is_same<OutputScalar, ComplexScalar>::value;
220 ComplexScalar* buf = write_to_out ? (ComplexScalar*)data : (ComplexScalar*)m_device.allocate(
sizeof(ComplexScalar) * m_size);
222 for (Index i = 0; i < m_size; ++i) {
223 buf[i] = MakeComplex<internal::is_same<InputScalar, RealScalar>::value>()(m_impl.coeff(i));
226 for (
size_t i = 0; i < m_fft.size(); ++i) {
227 Index dim = m_fft[i];
228 eigen_assert(dim >= 0 && dim < NumDims);
229 Index line_len = m_dimensions[dim];
230 eigen_assert(line_len >= 1);
231 ComplexScalar* line_buf = (ComplexScalar*)m_device.allocate(
sizeof(ComplexScalar) * line_len);
232 const bool is_power_of_two = isPowerOfTwo(line_len);
233 const Index good_composite = is_power_of_two ? 0 : findGoodComposite(line_len);
234 const Index log_len = is_power_of_two ? getLog2(line_len) : getLog2(good_composite);
236 ComplexScalar* a = is_power_of_two ? NULL : (ComplexScalar*)m_device.allocate(
sizeof(ComplexScalar) * good_composite);
237 ComplexScalar* b = is_power_of_two ? NULL : (ComplexScalar*)m_device.allocate(
sizeof(ComplexScalar) * good_composite);
238 ComplexScalar* pos_j_base_powered = is_power_of_two ? NULL : (ComplexScalar*)m_device.allocate(
sizeof(ComplexScalar) * (line_len + 1));
239 if (!is_power_of_two) {
266 for (
int j = 0; j < line_len + 1; ++j) {
267 double arg = ((EIGEN_PI * j) * j) / line_len;
268 std::complex<double> tmp(numext::cos(
arg), numext::sin(
arg));
269 pos_j_base_powered[j] =
static_cast<ComplexScalar
>(tmp);
273 for (Index partial_index = 0; partial_index < m_size / line_len; ++partial_index) {
274 const Index base_offset = getBaseOffsetFromIndex(partial_index, dim);
277 const Index stride = m_strides[dim];
279 m_device.memcpy(line_buf, &buf[base_offset], line_len*
sizeof(ComplexScalar));
281 Index offset = base_offset;
282 for (
int j = 0; j < line_len; ++j, offset += stride) {
283 line_buf[j] = buf[offset];
288 if (is_power_of_two) {
289 processDataLineCooleyTukey(line_buf, line_len, log_len);
292 processDataLineBluestein(line_buf, line_len, good_composite, log_len, a, b, pos_j_base_powered);
296 if (FFTDir == FFT_FORWARD && stride == 1) {
297 m_device.memcpy(&buf[base_offset], line_buf, line_len*
sizeof(ComplexScalar));
299 Index offset = base_offset;
300 const ComplexScalar div_factor = ComplexScalar(1.0 / line_len, 0);
301 for (
int j = 0; j < line_len; ++j, offset += stride) {
302 buf[offset] = (FFTDir == FFT_FORWARD) ? line_buf[j] : line_buf[j] * div_factor;
306 m_device.deallocate(line_buf);
307 if (!is_power_of_two) {
308 m_device.deallocate(a);
309 m_device.deallocate(b);
310 m_device.deallocate(pos_j_base_powered);
315 for (Index i = 0; i < m_size; ++i) {
316 data[i] = PartOf<FFTResultType>()(buf[i]);
318 m_device.deallocate(buf);
322 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
static bool isPowerOfTwo(Index x) {
324 return !(x & (x - 1));
328 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
static Index findGoodComposite(Index n) {
330 while (i < 2 * n - 1) i *= 2;
334 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
static Index getLog2(Index m) {
336 while (m >>= 1) log2m++;
341 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void processDataLineCooleyTukey(ComplexScalar* line_buf, Index line_len, Index log_len) {
342 eigen_assert(isPowerOfTwo(line_len));
343 scramble_FFT(line_buf, line_len);
344 compute_1D_Butterfly<FFTDir>(line_buf, line_len, log_len);
348 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void processDataLineBluestein(ComplexScalar* line_buf, Index line_len, Index good_composite, Index log_len, ComplexScalar* a, ComplexScalar* b,
const ComplexScalar* pos_j_base_powered) {
350 Index m = good_composite;
351 ComplexScalar* data = line_buf;
353 for (Index i = 0; i < n; ++i) {
354 if(FFTDir == FFT_FORWARD) {
355 a[i] = data[i] * numext::conj(pos_j_base_powered[i]);
358 a[i] = data[i] * pos_j_base_powered[i];
361 for (Index i = n; i < m; ++i) {
362 a[i] = ComplexScalar(0, 0);
365 for (Index i = 0; i < n; ++i) {
366 if(FFTDir == FFT_FORWARD) {
367 b[i] = pos_j_base_powered[i];
370 b[i] = numext::conj(pos_j_base_powered[i]);
373 for (Index i = n; i < m - n; ++i) {
374 b[i] = ComplexScalar(0, 0);
376 for (Index i = m - n; i < m; ++i) {
377 if(FFTDir == FFT_FORWARD) {
378 b[i] = pos_j_base_powered[m-i];
381 b[i] = numext::conj(pos_j_base_powered[m-i]);
386 compute_1D_Butterfly<FFT_FORWARD>(a, m, log_len);
389 compute_1D_Butterfly<FFT_FORWARD>(b, m, log_len);
391 for (Index i = 0; i < m; ++i) {
396 compute_1D_Butterfly<FFT_REVERSE>(a, m, log_len);
399 for (Index i = 0; i < m; ++i) {
403 for (Index i = 0; i < n; ++i) {
404 if(FFTDir == FFT_FORWARD) {
405 data[i] = a[i] * numext::conj(pos_j_base_powered[i]);
408 data[i] = a[i] * pos_j_base_powered[i];
413 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
static void scramble_FFT(ComplexScalar* data, Index n) {
414 eigen_assert(isPowerOfTwo(n));
416 for (Index i = 1; i < n; ++i){
418 std::swap(data[j-1], data[i-1]);
421 while (m >= 2 && j > m) {
430 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void butterfly_2(ComplexScalar* data) {
431 ComplexScalar tmp = data[1];
432 data[1] = data[0] - data[1];
437 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void butterfly_4(ComplexScalar* data) {
438 ComplexScalar tmp[4];
439 tmp[0] = data[0] + data[1];
440 tmp[1] = data[0] - data[1];
441 tmp[2] = data[2] + data[3];
442 if (Dir == FFT_FORWARD) {
443 tmp[3] = ComplexScalar(0.0, -1.0) * (data[2] - data[3]);
445 tmp[3] = ComplexScalar(0.0, 1.0) * (data[2] - data[3]);
447 data[0] = tmp[0] + tmp[2];
448 data[1] = tmp[1] + tmp[3];
449 data[2] = tmp[0] - tmp[2];
450 data[3] = tmp[1] - tmp[3];
454 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void butterfly_8(ComplexScalar* data) {
455 ComplexScalar tmp_1[8];
456 ComplexScalar tmp_2[8];
458 tmp_1[0] = data[0] + data[1];
459 tmp_1[1] = data[0] - data[1];
460 tmp_1[2] = data[2] + data[3];
461 if (Dir == FFT_FORWARD) {
462 tmp_1[3] = (data[2] - data[3]) * ComplexScalar(0, -1);
464 tmp_1[3] = (data[2] - data[3]) * ComplexScalar(0, 1);
466 tmp_1[4] = data[4] + data[5];
467 tmp_1[5] = data[4] - data[5];
468 tmp_1[6] = data[6] + data[7];
469 if (Dir == FFT_FORWARD) {
470 tmp_1[7] = (data[6] - data[7]) * ComplexScalar(0, -1);
472 tmp_1[7] = (data[6] - data[7]) * ComplexScalar(0, 1);
474 tmp_2[0] = tmp_1[0] + tmp_1[2];
475 tmp_2[1] = tmp_1[1] + tmp_1[3];
476 tmp_2[2] = tmp_1[0] - tmp_1[2];
477 tmp_2[3] = tmp_1[1] - tmp_1[3];
478 tmp_2[4] = tmp_1[4] + tmp_1[6];
480 #define SQRT2DIV2 0.7071067811865476
481 if (Dir == FFT_FORWARD) {
482 tmp_2[5] = (tmp_1[5] + tmp_1[7]) * ComplexScalar(SQRT2DIV2, -SQRT2DIV2);
483 tmp_2[6] = (tmp_1[4] - tmp_1[6]) * ComplexScalar(0, -1);
484 tmp_2[7] = (tmp_1[5] - tmp_1[7]) * ComplexScalar(-SQRT2DIV2, -SQRT2DIV2);
486 tmp_2[5] = (tmp_1[5] + tmp_1[7]) * ComplexScalar(SQRT2DIV2, SQRT2DIV2);
487 tmp_2[6] = (tmp_1[4] - tmp_1[6]) * ComplexScalar(0, 1);
488 tmp_2[7] = (tmp_1[5] - tmp_1[7]) * ComplexScalar(-SQRT2DIV2, SQRT2DIV2);
490 data[0] = tmp_2[0] + tmp_2[4];
491 data[1] = tmp_2[1] + tmp_2[5];
492 data[2] = tmp_2[2] + tmp_2[6];
493 data[3] = tmp_2[3] + tmp_2[7];
494 data[4] = tmp_2[0] - tmp_2[4];
495 data[5] = tmp_2[1] - tmp_2[5];
496 data[6] = tmp_2[2] - tmp_2[6];
497 data[7] = tmp_2[3] - tmp_2[7];
501 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void butterfly_1D_merge(
502 ComplexScalar* data, Index n, Index n_power_of_2) {
506 const RealScalar wtemp = m_sin_PI_div_n_LUT[n_power_of_2];
507 const RealScalar wpi = (Dir == FFT_FORWARD)
508 ? m_minus_sin_2_PI_div_n_LUT[n_power_of_2]
509 : -m_minus_sin_2_PI_div_n_LUT[n_power_of_2];
511 const ComplexScalar wp(wtemp, wpi);
512 const ComplexScalar wp_one = wp + ComplexScalar(1, 0);
513 const ComplexScalar wp_one_2 = wp_one * wp_one;
514 const ComplexScalar wp_one_3 = wp_one_2 * wp_one;
515 const ComplexScalar wp_one_4 = wp_one_3 * wp_one;
516 const Index n2 = n / 2;
517 ComplexScalar w(1.0, 0.0);
518 for (Index i = 0; i < n2; i += 4) {
519 ComplexScalar temp0(data[i + n2] * w);
520 ComplexScalar temp1(data[i + 1 + n2] * w * wp_one);
521 ComplexScalar temp2(data[i + 2 + n2] * w * wp_one_2);
522 ComplexScalar temp3(data[i + 3 + n2] * w * wp_one_3);
525 data[i + n2] = data[i] - temp0;
528 data[i + 1 + n2] = data[i + 1] - temp1;
529 data[i + 1] += temp1;
531 data[i + 2 + n2] = data[i + 2] - temp2;
532 data[i + 2] += temp2;
534 data[i + 3 + n2] = data[i + 3] - temp3;
535 data[i + 3] += temp3;
540 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void compute_1D_Butterfly(
541 ComplexScalar* data, Index n, Index n_power_of_2) {
542 eigen_assert(isPowerOfTwo(n));
544 compute_1D_Butterfly<Dir>(data, n / 2, n_power_of_2 - 1);
545 compute_1D_Butterfly<Dir>(data + n / 2, n / 2, n_power_of_2 - 1);
546 butterfly_1D_merge<Dir>(data, n, n_power_of_2);
548 butterfly_8<Dir>(data);
550 butterfly_4<Dir>(data);
552 butterfly_2<Dir>(data);
556 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Index getBaseOffsetFromIndex(Index index, Index omitted_dim)
const {
559 if (
static_cast<int>(Layout) ==
static_cast<int>(
ColMajor)) {
560 for (
int i = NumDims - 1; i > omitted_dim; --i) {
561 const Index partial_m_stride = m_strides[i] / m_dimensions[omitted_dim];
562 const Index idx = index / partial_m_stride;
563 index -= idx * partial_m_stride;
564 result += idx * m_strides[i];
569 for (Index i = 0; i < omitted_dim; ++i) {
570 const Index partial_m_stride = m_strides[i] / m_dimensions[omitted_dim];
571 const Index idx = index / partial_m_stride;
572 index -= idx * partial_m_stride;
573 result += idx * m_strides[i];
581 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Index getIndexFromOffset(Index base, Index omitted_dim, Index offset)
const {
582 Index result = base + offset * m_strides[omitted_dim] ;
588 const FFT EIGEN_DEVICE_REF m_fft;
589 Dimensions m_dimensions;
590 array<Index, NumDims> m_strides;
591 TensorEvaluator<ArgType, Device> m_impl;
592 EvaluatorPointerType m_data;
593 const Device EIGEN_DEVICE_REF m_device;
597 const RealScalar m_sin_PI_div_n_LUT[32] = {
600 RealScalar(-0.999999999999999),
601 RealScalar(-0.292893218813453),
602 RealScalar(-0.0761204674887130),
603 RealScalar(-0.0192147195967696),
604 RealScalar(-0.00481527332780311),
605 RealScalar(-0.00120454379482761),
606 RealScalar(-3.01181303795779e-04),
607 RealScalar(-7.52981608554592e-05),
608 RealScalar(-1.88247173988574e-05),
609 RealScalar(-4.70619042382852e-06),
610 RealScalar(-1.17654829809007e-06),
611 RealScalar(-2.94137117780840e-07),
612 RealScalar(-7.35342821488550e-08),
613 RealScalar(-1.83835707061916e-08),
614 RealScalar(-4.59589268710903e-09),
615 RealScalar(-1.14897317243732e-09),
616 RealScalar(-2.87243293150586e-10),
617 RealScalar( -7.18108232902250e-11),
618 RealScalar(-1.79527058227174e-11),
619 RealScalar(-4.48817645568941e-12),
620 RealScalar(-1.12204411392298e-12),
621 RealScalar(-2.80511028480785e-13),
622 RealScalar(-7.01277571201985e-14),
623 RealScalar(-1.75319392800498e-14),
624 RealScalar(-4.38298482001247e-15),
625 RealScalar(-1.09574620500312e-15),
626 RealScalar(-2.73936551250781e-16),
627 RealScalar(-6.84841378126949e-17),
628 RealScalar(-1.71210344531737e-17),
629 RealScalar(-4.28025861329343e-18)
633 const RealScalar m_minus_sin_2_PI_div_n_LUT[32] = {
636 RealScalar(-1.00000000000000e+00),
637 RealScalar(-7.07106781186547e-01),
638 RealScalar(-3.82683432365090e-01),
639 RealScalar(-1.95090322016128e-01),
640 RealScalar(-9.80171403295606e-02),
641 RealScalar(-4.90676743274180e-02),
642 RealScalar(-2.45412285229123e-02),
643 RealScalar(-1.22715382857199e-02),
644 RealScalar(-6.13588464915448e-03),
645 RealScalar(-3.06795676296598e-03),
646 RealScalar(-1.53398018628477e-03),
647 RealScalar(-7.66990318742704e-04),
648 RealScalar(-3.83495187571396e-04),
649 RealScalar(-1.91747597310703e-04),
650 RealScalar(-9.58737990959773e-05),
651 RealScalar(-4.79368996030669e-05),
652 RealScalar(-2.39684498084182e-05),
653 RealScalar(-1.19842249050697e-05),
654 RealScalar(-5.99211245264243e-06),
655 RealScalar(-2.99605622633466e-06),
656 RealScalar(-1.49802811316901e-06),
657 RealScalar(-7.49014056584716e-07),
658 RealScalar(-3.74507028292384e-07),
659 RealScalar(-1.87253514146195e-07),
660 RealScalar(-9.36267570730981e-08),
661 RealScalar(-4.68133785365491e-08),
662 RealScalar(-2.34066892682746e-08),
663 RealScalar(-1.17033446341373e-08),
664 RealScalar(-5.85167231706864e-09),
665 RealScalar(-2.92583615853432e-09)
// NOTE(review): the three lines below are documentation tooltips leaked in by
// the code-extraction tool, not source code; kept as comments for reference.
// Namespace containing all symbols from the Eigen library.
// EIGEN_DEFAULT_DENSE_INDEX_TYPE Index
// const Eigen::CwiseUnaryOp< Eigen::internal::scalar_arg_op< typename Derived::Scalar >, const Derived > arg(const Eigen::ArrayBase< Derived > &x)