// Eigen-unsupported 3.4.90 (git rev a4098ac676528a83cfb73d4d26ce1b42ec05f47c)
// TensorGenerator.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H
#define EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

namespace internal {
25template<typename Generator, typename XprType>
26struct traits<TensorGeneratorOp<Generator, XprType> > : public traits<XprType>
27{
28 typedef typename XprType::Scalar Scalar;
29 typedef traits<XprType> XprTraits;
30 typedef typename XprTraits::StorageKind StorageKind;
31 typedef typename XprTraits::Index Index;
32 typedef typename XprType::Nested Nested;
33 typedef typename remove_reference<Nested>::type _Nested;
34 static const int NumDimensions = XprTraits::NumDimensions;
35 static const int Layout = XprTraits::Layout;
36 typedef typename XprTraits::PointerType PointerType;
37};
38
39template<typename Generator, typename XprType>
40struct eval<TensorGeneratorOp<Generator, XprType>, Eigen::Dense>
41{
42 typedef const TensorGeneratorOp<Generator, XprType>& type;
43};
44
45template<typename Generator, typename XprType>
46struct nested<TensorGeneratorOp<Generator, XprType>, 1, typename eval<TensorGeneratorOp<Generator, XprType> >::type>
47{
48 typedef TensorGeneratorOp<Generator, XprType> type;
49};

} // end namespace internal


55template<typename Generator, typename XprType>
56class TensorGeneratorOp : public TensorBase<TensorGeneratorOp<Generator, XprType>, ReadOnlyAccessors>
57{
58 public:
59 typedef typename Eigen::internal::traits<TensorGeneratorOp>::Scalar Scalar;
60 typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
61 typedef typename XprType::CoeffReturnType CoeffReturnType;
62 typedef typename Eigen::internal::nested<TensorGeneratorOp>::type Nested;
63 typedef typename Eigen::internal::traits<TensorGeneratorOp>::StorageKind StorageKind;
64 typedef typename Eigen::internal::traits<TensorGeneratorOp>::Index Index;
65
66 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorGeneratorOp(const XprType& expr, const Generator& generator)
67 : m_xpr(expr), m_generator(generator) {}
68
69 EIGEN_DEVICE_FUNC
70 const Generator& generator() const { return m_generator; }
71
72 EIGEN_DEVICE_FUNC
73 const typename internal::remove_all<typename XprType::Nested>::type&
74 expression() const { return m_xpr; }
75
76 protected:
77 typename XprType::Nested m_xpr;
78 const Generator m_generator;
79};
80
81
82// Eval as rvalue
83template<typename Generator, typename ArgType, typename Device>
84struct TensorEvaluator<const TensorGeneratorOp<Generator, ArgType>, Device>
85{
87 typedef typename XprType::Index Index;
88 typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions;
89 static const int NumDims = internal::array_size<Dimensions>::value;
90 typedef typename XprType::Scalar Scalar;
91 typedef typename XprType::CoeffReturnType CoeffReturnType;
92 typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
93 typedef StorageMemory<CoeffReturnType, Device> Storage;
94 typedef typename Storage::Type EvaluatorPointerType;
95 enum {
96 IsAligned = false,
97 PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
98 BlockAccess = true,
99 PreferBlockAccess = true,
101 CoordAccess = false, // to be implemented
102 RawAccess = false
103 };
104
105 typedef internal::TensorIntDivisor<Index> IndexDivisor;
106
107 //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
108 typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;
109 typedef internal::TensorBlockScratchAllocator<Device> TensorBlockScratch;
110
111 typedef typename internal::TensorMaterializedBlock<CoeffReturnType, NumDims,
112 Layout, Index>
113 TensorBlock;
114 //===--------------------------------------------------------------------===//
115
116 EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
117 : m_device(device), m_generator(op.generator())
118 {
119 TensorEvaluator<ArgType, Device> argImpl(op.expression(), device);
120 m_dimensions = argImpl.dimensions();
121
122 if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
123 m_strides[0] = 1;
124 EIGEN_UNROLL_LOOP
125 for (int i = 1; i < NumDims; ++i) {
126 m_strides[i] = m_strides[i - 1] * m_dimensions[i - 1];
127 if (m_strides[i] != 0) m_fast_strides[i] = IndexDivisor(m_strides[i]);
128 }
129 } else {
130 m_strides[NumDims - 1] = 1;
131 EIGEN_UNROLL_LOOP
132 for (int i = NumDims - 2; i >= 0; --i) {
133 m_strides[i] = m_strides[i + 1] * m_dimensions[i + 1];
134 if (m_strides[i] != 0) m_fast_strides[i] = IndexDivisor(m_strides[i]);
135 }
136 }
137 }
138
139 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
140
141 EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) {
142 return true;
143 }
144 EIGEN_STRONG_INLINE void cleanup() {
145 }
146
147 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
148 {
149 array<Index, NumDims> coords;
150 extract_coordinates(index, coords);
151 return m_generator(coords);
152 }
153
154 template<int LoadMode>
155 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
156 {
157 const int packetSize = PacketType<CoeffReturnType, Device>::size;
158 EIGEN_STATIC_ASSERT((packetSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
159 eigen_assert(index+packetSize-1 < dimensions().TotalSize());
160
161 EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[packetSize];
162 for (int i = 0; i < packetSize; ++i) {
163 values[i] = coeff(index+i);
164 }
165 PacketReturnType rslt = internal::pload<PacketReturnType>(values);
166 return rslt;
167 }
168
169 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
170 internal::TensorBlockResourceRequirements getResourceRequirements() const {
171 const size_t target_size = m_device.firstLevelCacheSize();
172 // TODO(ezhulenev): Generator should have a cost.
173 return internal::TensorBlockResourceRequirements::skewed<Scalar>(
174 target_size);
175 }
176
177 struct BlockIteratorState {
178 Index stride;
179 Index span;
180 Index size;
181 Index count;
182 };
183
184 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock
185 block(TensorBlockDesc& desc, TensorBlockScratch& scratch,
186 bool /*root_of_expr_ast*/ = false) const {
187 static const bool is_col_major =
188 static_cast<int>(Layout) == static_cast<int>(ColMajor);
189
190 // Compute spatial coordinates for the first block element.
191 array<Index, NumDims> coords;
192 extract_coordinates(desc.offset(), coords);
193 array<Index, NumDims> initial_coords = coords;
194
195 // Offset in the output block buffer.
196 Index offset = 0;
197
198 // Initialize output block iterator state. Dimension in this array are
199 // always in inner_most -> outer_most order (col major layout).
200 array<BlockIteratorState, NumDims> it;
201 for (int i = 0; i < NumDims; ++i) {
202 const int dim = is_col_major ? i : NumDims - 1 - i;
203 it[i].size = desc.dimension(dim);
204 it[i].stride = i == 0 ? 1 : (it[i - 1].size * it[i - 1].stride);
205 it[i].span = it[i].stride * (it[i].size - 1);
206 it[i].count = 0;
207 }
208 eigen_assert(it[0].stride == 1);
209
210 // Prepare storage for the materialized generator result.
211 const typename TensorBlock::Storage block_storage =
212 TensorBlock::prepareStorage(desc, scratch);
213
214 CoeffReturnType* block_buffer = block_storage.data();
215
216 static const int packet_size = PacketType<CoeffReturnType, Device>::size;
217
218 static const int inner_dim = is_col_major ? 0 : NumDims - 1;
219 const Index inner_dim_size = it[0].size;
220 const Index inner_dim_vectorized = inner_dim_size - packet_size;
221
222 while (it[NumDims - 1].count < it[NumDims - 1].size) {
223 Index i = 0;
224 // Generate data for the vectorized part of the inner-most dimension.
225 for (; i <= inner_dim_vectorized; i += packet_size) {
226 for (Index j = 0; j < packet_size; ++j) {
227 array<Index, NumDims> j_coords = coords; // Break loop dependence.
228 j_coords[inner_dim] += j;
229 *(block_buffer + offset + i + j) = m_generator(j_coords);
230 }
231 coords[inner_dim] += packet_size;
232 }
233 // Finalize non-vectorized part of the inner-most dimension.
234 for (; i < inner_dim_size; ++i) {
235 *(block_buffer + offset + i) = m_generator(coords);
236 coords[inner_dim]++;
237 }
238 coords[inner_dim] = initial_coords[inner_dim];
239
240 // For the 1d tensor we need to generate only one inner-most dimension.
241 if (NumDims == 1) break;
242
243 // Update offset.
244 for (i = 1; i < NumDims; ++i) {
245 if (++it[i].count < it[i].size) {
246 offset += it[i].stride;
247 coords[is_col_major ? i : NumDims - 1 - i]++;
248 break;
249 }
250 if (i != NumDims - 1) it[i].count = 0;
251 coords[is_col_major ? i : NumDims - 1 - i] =
252 initial_coords[is_col_major ? i : NumDims - 1 - i];
253 offset -= it[i].span;
254 }
255 }
256
257 return block_storage.AsTensorMaterializedBlock();
258 }
259
260 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
261 costPerCoeff(bool) const {
262 // TODO(rmlarsen): This is just a placeholder. Define interface to make
263 // generators return their cost.
264 return TensorOpCost(0, 0, TensorOpCost::AddCost<Scalar>() +
265 TensorOpCost::MulCost<Scalar>());
266 }
267
268 EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; }
269
270#ifdef EIGEN_USE_SYCL
271 // binding placeholder accessors to a command group handler for SYCL
272 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler&) const {}
273#endif
274
275 protected:
276 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
277 void extract_coordinates(Index index, array<Index, NumDims>& coords) const {
278 if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
279 for (int i = NumDims - 1; i > 0; --i) {
280 const Index idx = index / m_fast_strides[i];
281 index -= idx * m_strides[i];
282 coords[i] = idx;
283 }
284 coords[0] = index;
285 } else {
286 for (int i = 0; i < NumDims - 1; ++i) {
287 const Index idx = index / m_fast_strides[i];
288 index -= idx * m_strides[i];
289 coords[i] = idx;
290 }
291 coords[NumDims-1] = index;
292 }
293 }
294
295 const Device EIGEN_DEVICE_REF m_device;
296 Dimensions m_dimensions;
297 array<Index, NumDims> m_strides;
298 array<IndexDivisor, NumDims> m_fast_strides;
299 Generator m_generator;
300};

} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H

// Extraction artifacts (doxygen tooltip text), preserved as comments:
// The tensor base class. Definition: TensorForwardDeclarations.h:58
// Tensor generator class. Definition: TensorGenerator.h:57
// Namespace containing all symbols from the Eigen library.
// EIGEN_DEFAULT_DENSE_INDEX_TYPE Index
// A cost model used to limit the number of threads used for evaluating tensor expression. Definition: TensorEvaluator.h:31