#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONCATENATION_H
#define EIGEN_CXX11_TENSOR_TENSOR_CONCATENATION_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

/** \class TensorConcatenationOp
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Tensor concatenation class.
  */
namespace internal {
template<typename Axis, typename LhsXprType, typename RhsXprType>
struct traits<TensorConcatenationOp<Axis, LhsXprType, RhsXprType> >
{
  // Type promotion to handle the case where the types of the lhs and the rhs are different.
  typedef typename promote_storage_type<typename LhsXprType::Scalar,
                                        typename RhsXprType::Scalar>::ret Scalar;
  typedef typename promote_storage_type<typename traits<LhsXprType>::StorageKind,
                                        typename traits<RhsXprType>::StorageKind>::ret StorageKind;
  typedef typename promote_index_type<typename traits<LhsXprType>::Index,
                                      typename traits<RhsXprType>::Index>::type Index;
  typedef typename LhsXprType::Nested LhsNested;
  typedef typename RhsXprType::Nested RhsNested;
  typedef std::remove_reference_t<LhsNested> LhsNested_;
  typedef std::remove_reference_t<RhsNested> RhsNested_;
  static constexpr int NumDimensions = traits<LhsXprType>::NumDimensions;
  static constexpr int Layout = traits<LhsXprType>::Layout;
  enum { Flags = 0 };
  typedef std::conditional_t<Pointer_type_promotion<typename LhsXprType::Scalar, Scalar>::val,
                             typename traits<LhsXprType>::PointerType,
                             typename traits<RhsXprType>::PointerType> PointerType;
};
template<typename Axis, typename LhsXprType, typename RhsXprType>
struct eval<TensorConcatenationOp<Axis, LhsXprType, RhsXprType>, Eigen::Dense>
{
  typedef const TensorConcatenationOp<Axis, LhsXprType, RhsXprType>& type;
};
template<typename Axis, typename LhsXprType, typename RhsXprType>
struct nested<TensorConcatenationOp<Axis, LhsXprType, RhsXprType>, 1, typename eval<TensorConcatenationOp<Axis, LhsXprType, RhsXprType> >::type>
{
  typedef TensorConcatenationOp<Axis, LhsXprType, RhsXprType> type;
};

}  // end namespace internal
template<typename Axis, typename LhsXprType, typename RhsXprType>
class TensorConcatenationOp : public TensorBase<TensorConcatenationOp<Axis, LhsXprType, RhsXprType>, WriteAccessors>
{
  public:
    typedef TensorBase<TensorConcatenationOp<Axis, LhsXprType, RhsXprType>, WriteAccessors> Base;
    typedef typename internal::traits<TensorConcatenationOp>::Scalar Scalar;
    typedef typename internal::traits<TensorConcatenationOp>::StorageKind StorageKind;
    typedef typename internal::traits<TensorConcatenationOp>::Index Index;
    typedef typename internal::nested<TensorConcatenationOp>::type Nested;
    typedef typename internal::promote_storage_type<typename LhsXprType::CoeffReturnType,
                                                    typename RhsXprType::CoeffReturnType>::ret CoeffReturnType;
    typedef typename NumTraits<Scalar>::Real RealScalar;
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorConcatenationOp(const LhsXprType& lhs, const RhsXprType& rhs, Axis axis)
        : m_lhs_xpr(lhs), m_rhs_xpr(rhs), m_axis(axis) {}

    EIGEN_DEVICE_FUNC
    const internal::remove_all_t<typename LhsXprType::Nested>&
    lhsExpression() const { return m_lhs_xpr; }

    EIGEN_DEVICE_FUNC
    const internal::remove_all_t<typename RhsXprType::Nested>&
    rhsExpression() const { return m_rhs_xpr; }

    EIGEN_DEVICE_FUNC const Axis& axis() const { return m_axis; }

    EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorConcatenationOp)

  protected:
    typename LhsXprType::Nested m_lhs_xpr;
    typename RhsXprType::Nested m_rhs_xpr;
    const Axis m_axis;
};
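// A minimal usage sketch (not part of this file): concatenate() on TensorBase
// builds a TensorConcatenationOp expression; nothing is evaluated until the
// expression is assigned to a tensor. The inputs must agree on every
// dimension except the concatenation axis.
//
//   Eigen::Tensor<float, 3> a(2, 3, 4);
//   Eigen::Tensor<float, 3> b(2, 5, 4);
//   a.setRandom(); b.setRandom();
//   Eigen::Tensor<float, 3> c = a.concatenate(b, 1);  // c is 2 x 8 x 4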
// Eval as rvalue
template<typename Axis, typename LeftArgType, typename RightArgType, typename Device>
struct TensorEvaluator<const TensorConcatenationOp<Axis, LeftArgType, RightArgType>, Device>
{
  typedef TensorConcatenationOp<Axis, LeftArgType, RightArgType> XprType;
  typedef typename XprType::Index Index;
  static constexpr int NumDims = internal::array_size<typename TensorEvaluator<LeftArgType, Device>::Dimensions>::value;
  static constexpr int RightNumDims = internal::array_size<typename TensorEvaluator<RightArgType, Device>::Dimensions>::value;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;
  static constexpr int Layout = TensorEvaluator<LeftArgType, Device>::Layout;
  enum {
    IsAligned         = false,
    PacketAccess      = TensorEvaluator<LeftArgType, Device>::PacketAccess &&
                        TensorEvaluator<RightArgType, Device>::PacketAccess,
    BlockAccess       = false,
    PreferBlockAccess = TensorEvaluator<LeftArgType, Device>::PreferBlockAccess ||
                        TensorEvaluator<RightArgType, Device>::PreferBlockAccess,
    RawAccess         = false
  };

  typedef internal::TensorBlockNotImplemented TensorBlock;
  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_leftImpl(op.lhsExpression(), device), m_rightImpl(op.rhsExpression(), device), m_axis(op.axis())
  {
    EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<LeftArgType, Device>::Layout) ==
                         static_cast<int>(TensorEvaluator<RightArgType, Device>::Layout) ||
                         NumDims == 1), YOU_MADE_A_PROGRAMMING_MISTAKE);
    EIGEN_STATIC_ASSERT((NumDims == RightNumDims), YOU_MADE_A_PROGRAMMING_MISTAKE);
    EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE);

    eigen_assert(0 <= m_axis && m_axis < NumDims);
    const Dimensions& lhs_dims = m_leftImpl.dimensions();
    const Dimensions& rhs_dims = m_rightImpl.dimensions();
    {
      // All dimensions must agree except the concatenation axis, where the
      // output extent is the sum of the two input extents.
      int i = 0;
      for (; i < m_axis; ++i) {
        eigen_assert(lhs_dims[i] > 0);
        eigen_assert(lhs_dims[i] == rhs_dims[i]);
        m_dimensions[i] = lhs_dims[i];
      }
      eigen_assert(lhs_dims[i] > 0);  // Now i == m_axis.
      eigen_assert(rhs_dims[i] > 0);
      m_dimensions[i] = lhs_dims[i] + rhs_dims[i];
      for (++i; i < NumDims; ++i) {
        eigen_assert(lhs_dims[i] > 0);
        eigen_assert(lhs_dims[i] == rhs_dims[i]);
        m_dimensions[i] = lhs_dims[i];
      }
    }

    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_leftStrides[0] = 1;
      m_rightStrides[0] = 1;
      m_outputStrides[0] = 1;

      for (int j = 1; j < NumDims; ++j) {
        m_leftStrides[j] = m_leftStrides[j-1] * lhs_dims[j-1];
        m_rightStrides[j] = m_rightStrides[j-1] * rhs_dims[j-1];
        m_outputStrides[j] = m_outputStrides[j-1] * m_dimensions[j-1];
      }
    } else {
      m_leftStrides[NumDims - 1] = 1;
      m_rightStrides[NumDims - 1] = 1;
      m_outputStrides[NumDims - 1] = 1;

      for (int j = NumDims - 2; j >= 0; --j) {
        m_leftStrides[j] = m_leftStrides[j+1] * lhs_dims[j+1];
        m_rightStrides[j] = m_rightStrides[j+1] * rhs_dims[j+1];
        m_outputStrides[j] = m_outputStrides[j+1] * m_dimensions[j+1];
      }
    }
  }
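  // A worked example (illustrative only): with ColMajor inputs of shape
  // 2 x 3 x 4 and 2 x 5 x 4 concatenated along axis 1, the output shape is
  // 2 x 8 x 4 and the strides computed above are
  //   m_leftStrides   = {1, 2, 6}
  //   m_rightStrides  = {1, 2, 10}
  //   m_outputStrides = {1, 2, 16}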
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType)
  {
    m_leftImpl.evalSubExprsIfNeeded(NULL);
    m_rightImpl.evalSubExprsIfNeeded(NULL);
    return true;
  }

  EIGEN_STRONG_INLINE void cleanup()
  {
    m_leftImpl.cleanup();
    m_rightImpl.cleanup();
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    // Collect dimension-wise indices (subs) from the linear index.
    array<Index, NumDims> subs;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumDims - 1; i > 0; --i) {
        subs[i] = index / m_outputStrides[i];
        index -= subs[i] * m_outputStrides[i];
      }
      subs[0] = index;
    } else {
      for (int i = 0; i < NumDims - 1; ++i) {
        subs[i] = index / m_outputStrides[i];
        index -= subs[i] * m_outputStrides[i];
      }
      subs[NumDims - 1] = index;
    }

    const Dimensions& left_dims = m_leftImpl.dimensions();
    if (subs[m_axis] < left_dims[m_axis]) {
      // The coefficient comes from the left expression.
      Index left_index;
      if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
        left_index = subs[0];
        for (int i = 1; i < NumDims; ++i) {
          left_index += (subs[i] % left_dims[i]) * m_leftStrides[i];
        }
      } else {
        left_index = subs[NumDims - 1];
        for (int i = NumDims - 2; i >= 0; --i) {
          left_index += (subs[i] % left_dims[i]) * m_leftStrides[i];
        }
      }
      return m_leftImpl.coeff(left_index);
    } else {
      // Otherwise shift the axis index past the left extent and read from
      // the right expression.
      subs[m_axis] -= left_dims[m_axis];
      const Dimensions& right_dims = m_rightImpl.dimensions();
      Index right_index;
      if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
        right_index = subs[0];
        for (int i = 1; i < NumDims; ++i) {
          right_index += (subs[i] % right_dims[i]) * m_rightStrides[i];
        }
      } else {
        right_index = subs[NumDims - 1];
        for (int i = NumDims - 2; i >= 0; --i) {
          right_index += (subs[i] % right_dims[i]) * m_rightStrides[i];
        }
      }
      return m_rightImpl.coeff(right_index);
    }
  }
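  // An illustrative walk-through of coeff() (continuing the 2 x 3 x 4 and
  // 2 x 5 x 4 ColMajor example): linear index 10 decomposes against
  // m_outputStrides {1, 2, 16} into subs = {0, 5, 0}. Since subs[1] = 5 is
  // not less than left_dims[1] = 3, the axis index is shifted to 5 - 3 = 2
  // and the coefficient is read from the right input at linear index
  // 0*1 + 2*2 + 0*10 = 4.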
  // This op does not vectorize directly; gather scalar coefficients into an
  // aligned buffer and load a packet from it.
  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    const int packetSize = PacketType<CoeffReturnType, Device>::size;
    EIGEN_STATIC_ASSERT((packetSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(index + packetSize - 1 < dimensions().TotalSize());

    EIGEN_ALIGN_MAX CoeffReturnType values[packetSize];
    for (int i = 0; i < packetSize; ++i) {
      values[i] = coeff(index+i);
    }
    PacketReturnType rslt = internal::pload<PacketReturnType>(values);
    return rslt;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
  costPerCoeff(bool vectorized) const {
    const double compute_cost = NumDims * (2 * TensorOpCost::AddCost<Index>() +
                                           2 * TensorOpCost::MulCost<Index>() +
                                           TensorOpCost::DivCost<Index>() +
                                           TensorOpCost::ModCost<Index>());
    const double lhs_size = m_leftImpl.dimensions().TotalSize();
    const double rhs_size = m_rightImpl.dimensions().TotalSize();
    // Weight each input's per-coefficient cost by the fraction of output
    // coefficients it supplies, then add the index arithmetic done here.
    return (lhs_size / (lhs_size + rhs_size)) * m_leftImpl.costPerCoeff(vectorized) +
           (rhs_size / (lhs_size + rhs_size)) * m_rightImpl.costPerCoeff(vectorized) +
           TensorOpCost(0, 0, compute_cost);
  }
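  // Illustrative weighting (continuing the running example): with 24 left
  // and 40 right coefficients, the blended cost is
  // 0.375 * lhs_cost + 0.625 * rhs_cost, plus the divisions and modulos
  // spent decomposing each output index.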
  EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; }

#ifdef EIGEN_USE_SYCL
  // Binds the placeholder accessors to a SYCL command group handler.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const {
    m_leftImpl.bind(cgh);
    m_rightImpl.bind(cgh);
  }
#endif
  protected:
    Dimensions m_dimensions;
    array<Index, NumDims> m_outputStrides;
    array<Index, NumDims> m_leftStrides;
    array<Index, NumDims> m_rightStrides;
    TensorEvaluator<LeftArgType, Device> m_leftImpl;
    TensorEvaluator<RightArgType, Device> m_rightImpl;
    const Axis m_axis;
};
// Eval as lvalue
template<typename Axis, typename LeftArgType, typename RightArgType, typename Device>
struct TensorEvaluator<TensorConcatenationOp<Axis, LeftArgType, RightArgType>, Device>
  : public TensorEvaluator<const TensorConcatenationOp<Axis, LeftArgType, RightArgType>, Device>
{
  typedef TensorEvaluator<const TensorConcatenationOp<Axis, LeftArgType, RightArgType>, Device> Base;
  typedef TensorConcatenationOp<Axis, LeftArgType, RightArgType> XprType;
  typedef typename Base::Dimensions Dimensions;
  static constexpr int Layout = TensorEvaluator<LeftArgType, Device>::Layout;
  enum {
    IsAligned         = false,
    PacketAccess      = TensorEvaluator<LeftArgType, Device>::PacketAccess &&
                        TensorEvaluator<RightArgType, Device>::PacketAccess,
    BlockAccess       = false,
    PreferBlockAccess = TensorEvaluator<LeftArgType, Device>::PreferBlockAccess ||
                        TensorEvaluator<RightArgType, Device>::PreferBlockAccess,
    RawAccess         = false
  };

  typedef internal::TensorBlockNotImplemented TensorBlock;
  EIGEN_STRONG_INLINE TensorEvaluator(XprType& op, const Device& device)
    : Base(op, device)
  {
    // Writing into a concatenation is only implemented for ColMajor layout.
    EIGEN_STATIC_ASSERT((static_cast<int>(Layout) == static_cast<int>(ColMajor)), YOU_MADE_A_PROGRAMMING_MISTAKE);
  }

  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
  {
    // Collect dimension-wise indices (subs); ColMajor only, per the static
    // assertion in the constructor.
    array<Index, Base::NumDims> subs;
    for (int i = Base::NumDims - 1; i > 0; --i) {
      subs[i] = index / this->m_outputStrides[i];
      index -= subs[i] * this->m_outputStrides[i];
    }
    subs[0] = index;

    const Dimensions& left_dims = this->m_leftImpl.dimensions();
    if (subs[this->m_axis] < left_dims[this->m_axis]) {
      Index left_index = subs[0];
      for (int i = 1; i < Base::NumDims; ++i) {
        left_index += (subs[i] % left_dims[i]) * this->m_leftStrides[i];
      }
      return this->m_leftImpl.coeffRef(left_index);
    } else {
      subs[this->m_axis] -= left_dims[this->m_axis];
      const Dimensions& right_dims = this->m_rightImpl.dimensions();
      Index right_index = subs[0];
      for (int i = 1; i < Base::NumDims; ++i) {
        right_index += (subs[i] % right_dims[i]) * this->m_rightStrides[i];
      }
      return this->m_rightImpl.coeffRef(right_index);
    }
  }
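  // A minimal sketch of why this lvalue path exists (illustrative only):
  // assigning through a concatenation scatters the writes back into the two
  // underlying tensors.
  //
  //   Eigen::Tensor<float, 2> a(2, 3), b(2, 5), c(2, 8);
  //   c.setRandom();
  //   a.concatenate(b, 1) = c;  // fills a with c's first 3 columns,
  //                             // b with the remaining 5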
  template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketReturnType& x)
  {
    const int packetSize = PacketType<CoeffReturnType, Device>::size;
    EIGEN_STATIC_ASSERT((packetSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(index + packetSize - 1 < this->dimensions().TotalSize());

    // Store the packet to an aligned buffer, then scatter coefficient by
    // coefficient through coeffRef().
    EIGEN_ALIGN_MAX CoeffReturnType values[packetSize];
    internal::pstore<CoeffReturnType, PacketReturnType>(values, x);
    for (int i = 0; i < packetSize; ++i) {
      coeffRef(index+i) = values[i];
    }
  }
};

} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_CONCATENATION_H