#ifndef EIGEN_CXX11_TENSOR_TENSOR_SCAN_H
#define EIGEN_CXX11_TENSOR_TENSOR_SCAN_H

#include "./InternalHeaderCheck.h"
namespace Eigen {
namespace internal {

template <typename Op, typename XprType>
struct traits<TensorScanOp<Op, XprType> >
    : public traits<XprType> {
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = XprTraits::NumDimensions;
  static const int Layout = XprTraits::Layout;
  typedef typename XprTraits::PointerType PointerType;
};
template <typename Op, typename XprType>
struct eval<TensorScanOp<Op, XprType>, Eigen::Dense> {
  typedef const TensorScanOp<Op, XprType>& type;
};
template <typename Op, typename XprType>
struct nested<TensorScanOp<Op, XprType>, 1,
              typename eval<TensorScanOp<Op, XprType> >::type> {
  typedef TensorScanOp<Op, XprType> type;
};

}  // end namespace internal
// Expression node for a scan (e.g. cumulative sum or product) along a single axis.
template <typename Op, typename XprType>
class TensorScanOp
    : public TensorBase<TensorScanOp<Op, XprType>, ReadOnlyAccessors> {
 public:
  typedef typename Eigen::internal::traits<TensorScanOp>::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename Eigen::internal::nested<TensorScanOp>::type Nested;
  typedef typename Eigen::internal::traits<TensorScanOp>::StorageKind StorageKind;
  typedef typename Eigen::internal::traits<TensorScanOp>::Index Index;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorScanOp(
      const XprType& expr, const Index& axis, bool exclusive = false, const Op& op = Op())
      : m_expr(expr), m_axis(axis), m_accumulator(op), m_exclusive(exclusive) {}
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  const Index axis() const { return m_axis; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  const XprType& expression() const { return m_expr; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  const Op accumulator() const { return m_accumulator; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  bool exclusive() const { return m_exclusive; }
 protected:
  typename XprType::Nested m_expr;
  const Index m_axis;
  const Op m_accumulator;
  const bool m_exclusive;
};
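// Usage sketch (illustrative, not part of this header): scan expressions are
// normally built through the TensorBase helpers such as cumsum()/cumprod(),
// which wrap TensorScanOp with a sum or product accumulator. Assuming a 3-D
// float tensor `t`, an inclusive and an exclusive cumulative sum along axis 1
// would look roughly like:
//
//   Eigen::Tensor<float, 3> t(2, 3, 4);
//   t.setRandom();
//   Eigen::Tensor<float, 3> inclusive = t.cumsum(1);
//   Eigen::Tensor<float, 3> exclusive = t.cumsum(1, /*exclusive=*/true);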
namespace internal {

// Scalar scan of a single line: walks `size()` coefficients spaced `stride()`
// apart, starting at `offset`, and writes the running reduction into `data`.
template <typename Self>
EIGEN_STRONG_INLINE void ReduceScalar(Self& self, Index offset,
                                      typename Self::CoeffReturnType* data) {
  typename Self::CoeffReturnType accum = self.accumulator().initialize();
  if (self.stride() == 1) {
    if (self.exclusive()) {
      for (Index curr = offset; curr < offset + self.size(); ++curr) {
        data[curr] = self.accumulator().finalize(accum);
        self.accumulator().reduce(self.inner().coeff(curr), &accum);
      }
    } else {
      for (Index curr = offset; curr < offset + self.size(); ++curr) {
        self.accumulator().reduce(self.inner().coeff(curr), &accum);
        data[curr] = self.accumulator().finalize(accum);
      }
    }
  } else {
    if (self.exclusive()) {
      for (Index idx3 = 0; idx3 < self.size(); idx3++) {
        Index curr = offset + idx3 * self.stride();
        data[curr] = self.accumulator().finalize(accum);
        self.accumulator().reduce(self.inner().coeff(curr), &accum);
      }
    } else {
      for (Index idx3 = 0; idx3 < self.size(); idx3++) {
        Index curr = offset + idx3 * self.stride();
        self.accumulator().reduce(self.inner().coeff(curr), &accum);
        data[curr] = self.accumulator().finalize(accum);
      }
    }
  }
}
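// Addressing sketch (example values, not from the source): for a ColMajor
// tensor of shape (4, 3) scanned along axis 1, stride() == 4 and size() == 3,
// so the scan starting at offset 2 reads and writes coefficients 2, 6 and 10;
// with stride() == 1 the fast path above walks a contiguous range instead.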
// Packet (vectorized) variant of ReduceScalar: scans PacketSize adjacent
// lines at once.
template <typename Self>
EIGEN_STRONG_INLINE void ReducePacket(Self& self, Index offset,
                                      typename Self::CoeffReturnType* data) {
  using Scalar = typename Self::CoeffReturnType;
  using Packet = typename Self::PacketReturnType;
  Packet accum = self.accumulator().template initializePacket<Packet>();
  if (self.stride() == 1) {
    if (self.exclusive()) {
      for (Index curr = offset; curr < offset + self.size(); ++curr) {
        internal::pstoreu<Scalar, Packet>(data + curr, self.accumulator().finalizePacket(accum));
        self.accumulator().reducePacket(self.inner().template packet<Unaligned>(curr), &accum);
      }
    } else {
      for (Index curr = offset; curr < offset + self.size(); ++curr) {
        self.accumulator().reducePacket(self.inner().template packet<Unaligned>(curr), &accum);
        internal::pstoreu<Scalar, Packet>(data + curr, self.accumulator().finalizePacket(accum));
      }
    }
  } else {
    if (self.exclusive()) {
      for (Index idx3 = 0; idx3 < self.size(); idx3++) {
        const Index curr = offset + idx3 * self.stride();
        internal::pstoreu<Scalar, Packet>(data + curr, self.accumulator().finalizePacket(accum));
        self.accumulator().reducePacket(self.inner().template packet<Unaligned>(curr), &accum);
      }
    } else {
      for (Index idx3 = 0; idx3 < self.size(); idx3++) {
        const Index curr = offset + idx3 * self.stride();
        self.accumulator().reducePacket(self.inner().template packet<Unaligned>(curr), &accum);
        internal::pstoreu<Scalar, Packet>(data + curr, self.accumulator().finalizePacket(accum));
      }
    }
  }
}
// Default (non-vectorized, non-parallel) block reducer: scans one scalar
// line at a time.
template <typename Self, bool Vectorize, bool Parallel>
struct ReduceBlock {
  EIGEN_STRONG_INLINE void operator()(Self& self, Index idx1,
                                      typename Self::CoeffReturnType* data) {
    for (Index idx2 = 0; idx2 < self.stride(); idx2++) {
      // Calculate the starting offset for the scan.
      Index offset = idx1 + idx2;
      ReduceScalar(self, offset, data);
    }
  }
};
// Specialization for vectorized reduction.
template <typename Self>
struct ReduceBlock<Self, /*Vectorize=*/true, /*Parallel=*/false> {
  EIGEN_STRONG_INLINE void operator()(Self& self, Index idx1,
                                      typename Self::CoeffReturnType* data) {
    using Packet = typename Self::PacketReturnType;
    const int PacketSize = internal::unpacket_traits<Packet>::size;
    Index idx2 = 0;
    for (; idx2 + PacketSize <= self.stride(); idx2 += PacketSize) {
      // Calculate the starting offset for the packet scan.
      Index offset = idx1 + idx2;
      ReducePacket(self, offset, data);
    }
    for (; idx2 < self.stride(); idx2++) {
      // Calculate the starting offset for the remaining scalar scans.
      Index offset = idx1 + idx2;
      ReduceScalar(self, offset, data);
    }
  }
};
// Single-threaded CPU implementation of scan.
template <typename Self, typename Reducer, typename Device,
          bool Vectorize =
              (TensorEvaluator<typename Self::ChildTypeNoConst, Device>::PacketAccess &&
               internal::reducer_traits<Reducer, Device>::PacketAccess)>
struct ScanLauncher {
  void operator()(Self& self, typename Self::CoeffReturnType* data) {
    Index total_size = internal::array_prod(self.dimensions());

    // Scan each slice of the tensor separately: idx1 walks the outer blocks,
    // and ReduceBlock walks the inner offsets, avoiding an integer division.
    for (Index idx1 = 0; idx1 < total_size; idx1 += self.stride() * self.size()) {
      ReduceBlock<Self, Vectorize, /*Parallel=*/false> block_reducer;
      block_reducer(self, idx1, data);
    }
  }
};
#ifdef EIGEN_USE_THREADS

// Round block_size up so that neighboring threads do not write to the same
// cacheline of `data` (avoids false sharing).
EIGEN_STRONG_INLINE Index AdjustBlockSize(Index item_size, Index block_size) {
  EIGEN_CONSTEXPR Index kBlockAlignment = 128;
  const Index items_per_cacheline =
      numext::maxi<Index>(1, kBlockAlignment / item_size);
  return items_per_cacheline * divup(block_size, items_per_cacheline);
}
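// Worked example (illustrative): with item_size = sizeof(float) == 4 and
// kBlockAlignment == 128, items_per_cacheline == 32, so a requested
// block_size of 50 is rounded up to divup(50, 32) * 32 == 64 items, keeping
// each shard's writes on distinct cachelines.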
// Parallel, vectorized block reducer: shards the packets and leftover scalars
// of one inner block across the thread pool device.
template <typename Self>
struct ReduceBlock<Self, /*Vectorize=*/true, /*Parallel=*/true> {
  EIGEN_STRONG_INLINE void operator()(Self& self, Index idx1,
                                      typename Self::CoeffReturnType* data) {
    using Scalar = typename Self::CoeffReturnType;
    using Packet = typename Self::PacketReturnType;
    const int PacketSize = internal::unpacket_traits<Packet>::size;
    Index num_scalars = self.stride();
    Index num_packets = 0;
    if (self.stride() >= PacketSize) {
      num_packets = self.stride() / PacketSize;
      self.device().parallelFor(
          num_packets,
          TensorOpCost(PacketSize * self.size(), PacketSize * self.size(),
                       16 * PacketSize * self.size(), true, PacketSize),
          // Make the shard size large enough that two neighboring threads
          // won't write to the same cacheline of `data`.
          [=](Index blk_size) {
            return AdjustBlockSize(PacketSize * sizeof(Scalar), blk_size);
          },
          [&](Index first, Index last) {
            for (Index packet = first; packet < last; ++packet) {
              const Index idx2 = packet * PacketSize;
              ReducePacket(self, idx1 + idx2, data);
            }
          });
      num_scalars -= num_packets * PacketSize;
    }
    self.device().parallelFor(
        num_scalars, TensorOpCost(self.size(), self.size(), 16 * self.size()),
        // Make the shard size large enough that two neighboring threads
        // won't write to the same cacheline of `data`.
        [=](Index blk_size) {
          return AdjustBlockSize(sizeof(Scalar), blk_size);
        },
        [&](Index first, Index last) {
          for (Index scalar = first; scalar < last; ++scalar) {
            const Index idx2 = num_packets * PacketSize + scalar;
            ReduceScalar(self, idx1 + idx2, data);
          }
        });
  }
};
// Parallel, non-vectorized block reducer: shards the scalar lines of one
// inner block across the thread pool device.
template <typename Self>
struct ReduceBlock<Self, /*Vectorize=*/false, /*Parallel=*/true> {
  EIGEN_STRONG_INLINE void operator()(Self& self, Index idx1,
                                      typename Self::CoeffReturnType* data) {
    using Scalar = typename Self::CoeffReturnType;
    self.device().parallelFor(
        self.stride(), TensorOpCost(self.size(), self.size(), 16 * self.size()),
        // Make the shard size large enough that two neighboring threads
        // won't write to the same cacheline of `data`.
        [=](Index blk_size) {
          return AdjustBlockSize(sizeof(Scalar), blk_size);
        },
        [&](Index first, Index last) {
          for (Index idx2 = first; idx2 < last; ++idx2) {
            ReduceScalar(self, idx1 + idx2, data);
          }
        });
  }
};
// Specialization for multi-threaded execution.
template <typename Self, typename Reducer, bool Vectorize>
struct ScanLauncher<Self, Reducer, ThreadPoolDevice, Vectorize> {
  void operator()(Self& self, typename Self::CoeffReturnType* data) {
    using Scalar = typename Self::CoeffReturnType;
    using Packet = typename Self::PacketReturnType;
    const int PacketSize = internal::unpacket_traits<Packet>::size;
    const Index total_size = internal::array_prod(self.dimensions());
    const Index inner_block_size = self.stride() * self.size();
    bool parallelize_by_outer_blocks = (total_size >= (self.stride() * inner_block_size));

    // For small problems, fall back to the single-threaded launcher.
    if ((parallelize_by_outer_blocks && total_size <= 4096) ||
        (!parallelize_by_outer_blocks && self.stride() < PacketSize)) {
      ScanLauncher<Self, Reducer, DefaultDevice, Vectorize> launcher;
      launcher(self, data);
      return;
    }

    if (parallelize_by_outer_blocks) {
      // Parallelize over the independent outer blocks.
      const Index num_outer_blocks = total_size / inner_block_size;
      self.device().parallelFor(
          num_outer_blocks,
          TensorOpCost(inner_block_size, inner_block_size,
                       16 * PacketSize * inner_block_size, Vectorize,
                       PacketSize),
          [=](Index blk_size) {
            return AdjustBlockSize(inner_block_size * sizeof(Scalar), blk_size);
          },
          [&](Index first, Index last) {
            for (Index idx1 = first; idx1 < last; ++idx1) {
              ReduceBlock<Self, Vectorize, /*Parallel=*/false> block_reducer;
              block_reducer(self, idx1 * inner_block_size, data);
            }
          });
    } else {
      // Parallelize over the packets/scalars inside each inner block.
      ReduceBlock<Self, Vectorize, /*Parallel=*/true> block_reducer;
      for (Index idx1 = 0; idx1 < total_size;
           idx1 += self.stride() * self.size()) {
        block_reducer(self, idx1, data);
      }
    }
  }
};

#endif  // EIGEN_USE_THREADS
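// Heuristic sketch (example values, not from the source): a ColMajor tensor
// of shape (64, 32, 100) scanned along axis 0 has stride == 1, size == 64,
// inner_block_size == 64 and total_size == 204800, so
// parallelize_by_outer_blocks is true and the 3200 independent inner blocks
// are sharded across the thread pool; a scan along the last axis would
// instead parallelize inside each (much longer) inner block.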
#if defined(EIGEN_USE_GPU) && (defined(EIGEN_GPUCC))

// GPU implementation of scan: each GPU thread performs one full scan line.
// This is a simple placeholder rather than a work-efficient parallel scan.
template <typename Self, typename Reducer>
__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void ScanKernel(
    Self self, Index total_size, typename Self::CoeffReturnType* data) {
  // Compute the starting offset as in the CPU version.
  Index val = threadIdx.x + blockIdx.x * blockDim.x;
  Index offset = (val / self.stride()) * self.stride() * self.size() + val % self.stride();

  if (offset + (self.size() - 1) * self.stride() < total_size) {
    // Compute the scan along the axis, starting at the calculated offset.
    typename Self::CoeffReturnType accum = self.accumulator().initialize();
    for (Index idx = 0; idx < self.size(); idx++) {
      Index curr = offset + idx * self.stride();
      if (self.exclusive()) {
        data[curr] = self.accumulator().finalize(accum);
        self.accumulator().reduce(self.inner().coeff(curr), &accum);
      } else {
        self.accumulator().reduce(self.inner().coeff(curr), &accum);
        data[curr] = self.accumulator().finalize(accum);
      }
    }
  }
  __syncthreads();
}
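// Offset arithmetic, illustrated (example values, not from the source): with
// stride == 4 and size == 3, thread val == 5 maps to
// offset = (5 / 4) * 4 * 3 + 5 % 4 == 13, and the loop then touches
// coefficients 13, 17 and 21 (curr = offset + idx * stride).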
template <typename Self, typename Reducer, bool Vectorize>
struct ScanLauncher<Self, Reducer, GpuDevice, Vectorize> {
  void operator()(const Self& self, typename Self::CoeffReturnType* data) {
    Index total_size = internal::array_prod(self.dimensions());
    Index num_blocks = (total_size / self.size() + 63) / 64;
    Index block_size = 64;

    LAUNCH_GPU_KERNEL((ScanKernel<Self, Reducer>), num_blocks, block_size, 0,
                      self.device(), self, total_size, data);
  }
};
#endif  // EIGEN_USE_GPU && (EIGEN_GPUCC)

}  // end namespace internal
// Eval as rvalue
template <typename Op, typename ArgType, typename Device>
struct TensorEvaluator<const TensorScanOp<Op, ArgType>, Device> {

  typedef TensorScanOp<Op, ArgType> XprType;
  typedef typename XprType::Index Index;
  typedef const ArgType ChildTypeNoConst;
  typedef const ArgType ChildType;
  static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  typedef TensorEvaluator<const TensorScanOp<Op, ArgType>, Device> Self;
  typedef StorageMemory<Scalar, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;

  enum {
    IsAligned = false,
    PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
    BlockAccess = false,
    PreferBlockAccess = false,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess = false,
    RawAccess = true
  };

  // Block-based access is not implemented for scan.
  typedef internal::TensorBlockNotImplemented TensorBlock;
  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device),
        m_device(device),
        m_exclusive(op.exclusive()),
        m_accumulator(op.accumulator()),
        m_size(m_impl.dimensions()[op.axis()]),
        m_stride(1), m_consume_dim(op.axis()),
        m_output(NULL) {

    // Accumulating a scalar isn't supported.
    EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
    eigen_assert(op.axis() >= 0 && op.axis() < NumDims);

    // Compute the stride of the scan axis.
    const Dimensions& dims = m_impl.dimensions();
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = 0; i < op.axis(); ++i) {
        m_stride = m_stride * dims[i];
      }
    } else {
      // Index dims with an unsigned type to avoid spurious compiler warnings.
      unsigned int axis = internal::convert_index<unsigned int>(op.axis());
      for (unsigned int i = NumDims - 1; i > axis; --i) {
        m_stride = m_stride * dims[i];
      }
    }
  }
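  // Stride example (illustrative): for a ColMajor tensor with dims (2, 3, 4)
  // scanned along axis 1, m_stride == 2 (product of the dims before the axis)
  // and m_size == 3, so consecutive elements of one scan line are 2
  // coefficients apart in memory.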
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const {
    return m_impl.dimensions();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Index& stride() const {
    return m_stride;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Index& consume_dim() const {
    return m_consume_dim;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Index& size() const {
    return m_size;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Op& accumulator() const {
    return m_accumulator;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool exclusive() const {
    return m_exclusive;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const TensorEvaluator<ArgType, Device>& inner() const {
    return m_impl;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Device& device() const {
    return m_device;
  }
  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) {
    m_impl.evalSubExprsIfNeeded(NULL);
    internal::ScanLauncher<Self, Op, Device> launcher;
    if (data) {
      launcher(*this, data);
      return false;
    }

    const Index total_size = internal::array_prod(dimensions());
    m_output = static_cast<EvaluatorPointerType>(
        m_device.get((Scalar*) m_device.allocate_temp(total_size * sizeof(Scalar))));
    launcher(*this, m_output);
    return true;
  }
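  // Note (describing the usual TensorEvaluator convention rather than anything
  // specific to this file): returning false signals that the result was
  // written directly into the caller-provided buffer `data`, while returning
  // true means the evaluator allocated and owns m_output, which coeff() and
  // packet() read from until cleanup() releases it.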
  template<int LoadMode>
  EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const {
    return internal::ploadt<PacketReturnType, LoadMode>(m_output + index);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EvaluatorPointerType data() const {
    return m_output;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
    return m_output[index];
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool) const {
    return TensorOpCost(sizeof(CoeffReturnType), 0, 0);
  }
  EIGEN_STRONG_INLINE void cleanup() {
    if (m_output) {
      m_device.deallocate_temp(m_output);
      m_output = NULL;
    }
    m_impl.cleanup();
  }

#ifdef EIGEN_USE_SYCL
  // Bind SYCL placeholder accessors to a command group handler.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler& cgh) const {
    m_impl.bind(cgh);
    m_output.bind(cgh);
  }
#endif
 protected:
  TensorEvaluator<ArgType, Device> m_impl;
  const Device EIGEN_DEVICE_REF m_device;
  const bool m_exclusive;
  Op m_accumulator;
  const Index m_size;
  Index m_stride;
  Index m_consume_dim;
  EvaluatorPointerType m_output;
};

}  // end namespace Eigen

#endif  // EIGEN_CXX11_TENSOR_TENSOR_SCAN_H