#ifndef EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H
#define EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H

#include "./InternalHeaderCheck.h"
26template<
typename LhsXprType,
typename RhsXprType>
27struct traits<TensorAssignOp<LhsXprType, RhsXprType> >
29 typedef typename LhsXprType::Scalar Scalar;
30 typedef typename traits<LhsXprType>::StorageKind StorageKind;
31 typedef typename promote_index_type<typename traits<LhsXprType>::Index,
32 typename traits<RhsXprType>::Index>::type
Index;
33 typedef typename LhsXprType::Nested LhsNested;
34 typedef typename RhsXprType::Nested RhsNested;
35 typedef typename remove_reference<LhsNested>::type _LhsNested;
36 typedef typename remove_reference<RhsNested>::type _RhsNested;
37 static const std::size_t NumDimensions = internal::traits<LhsXprType>::NumDimensions;
38 static const int Layout = internal::traits<LhsXprType>::Layout;
39 typedef typename traits<LhsXprType>::PointerType PointerType;
46template<
typename LhsXprType,
typename RhsXprType>
47struct eval<TensorAssignOp<LhsXprType, RhsXprType>,
Eigen::Dense>
49 typedef const TensorAssignOp<LhsXprType, RhsXprType>& type;
52template<
typename LhsXprType,
typename RhsXprType>
53struct nested<TensorAssignOp<LhsXprType, RhsXprType>, 1, typename eval<TensorAssignOp<LhsXprType, RhsXprType> >::type>
55 typedef TensorAssignOp<LhsXprType, RhsXprType> type;
62template<
typename LhsXprType,
typename RhsXprType>
63class TensorAssignOp :
public TensorBase<TensorAssignOp<LhsXprType, RhsXprType> >
66 typedef typename Eigen::internal::traits<TensorAssignOp>::Scalar Scalar;
68 typedef typename LhsXprType::CoeffReturnType CoeffReturnType;
69 typedef typename Eigen::internal::nested<TensorAssignOp>::type Nested;
70 typedef typename Eigen::internal::traits<TensorAssignOp>::StorageKind StorageKind;
71 typedef typename Eigen::internal::traits<TensorAssignOp>::Index
Index;
73 static const int NumDims = Eigen::internal::traits<TensorAssignOp>::NumDimensions;
75 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorAssignOp(LhsXprType& lhs,
const RhsXprType& rhs)
76 : m_lhs_xpr(lhs), m_rhs_xpr(rhs) {}
80 typename internal::remove_all<typename LhsXprType::Nested>::type&
81 lhsExpression()
const {
return *((
typename internal::remove_all<typename LhsXprType::Nested>::type*)&m_lhs_xpr); }
84 const typename internal::remove_all<typename RhsXprType::Nested>::type&
85 rhsExpression()
const {
return m_rhs_xpr; }
88 typename internal::remove_all<typename LhsXprType::Nested>::type& m_lhs_xpr;
89 const typename internal::remove_all<typename RhsXprType::Nested>::type& m_rhs_xpr;
93template<
typename LeftArgType,
typename RightArgType,
typename Device>
94struct TensorEvaluator<const TensorAssignOp<LeftArgType, RightArgType>, Device>
96 typedef TensorAssignOp<LeftArgType, RightArgType> XprType;
97 typedef typename XprType::Index Index;
98 typedef typename XprType::Scalar Scalar;
99 typedef typename XprType::CoeffReturnType CoeffReturnType;
100 typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
101 typedef typename TensorEvaluator<RightArgType, Device>::Dimensions Dimensions;
102 typedef StorageMemory<CoeffReturnType, Device> Storage;
103 typedef typename Storage::Type EvaluatorPointerType;
105 static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
106 static const int NumDims = XprType::NumDims;
109 IsAligned = int(TensorEvaluator<LeftArgType, Device>::IsAligned) &
110 int(TensorEvaluator<RightArgType, Device>::IsAligned),
111 PacketAccess = int(TensorEvaluator<LeftArgType, Device>::PacketAccess) &
112 int(TensorEvaluator<RightArgType, Device>::PacketAccess),
113 BlockAccess = int(TensorEvaluator<LeftArgType, Device>::BlockAccess) &
114 int(TensorEvaluator<RightArgType, Device>::BlockAccess),
115 PreferBlockAccess = int(TensorEvaluator<LeftArgType, Device>::PreferBlockAccess) |
116 int(TensorEvaluator<RightArgType, Device>::PreferBlockAccess),
117 Layout = TensorEvaluator<LeftArgType, Device>::Layout,
118 RawAccess = TensorEvaluator<LeftArgType, Device>::RawAccess
122 typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;
123 typedef internal::TensorBlockScratchAllocator<Device> TensorBlockScratch;
125 typedef typename TensorEvaluator<const RightArgType, Device>::TensorBlock
129 TensorEvaluator(
const XprType& op,
const Device& device) :
130 m_leftImpl(op.lhsExpression(), device),
131 m_rightImpl(op.rhsExpression(), device)
134 (
static_cast<int>(TensorEvaluator<LeftArgType, Device>::Layout) ==
135 static_cast<int>(TensorEvaluator<RightArgType, Device>::Layout)),
136 YOU_MADE_A_PROGRAMMING_MISTAKE);
139 EIGEN_DEVICE_FUNC
const Dimensions& dimensions()
const
144 return m_rightImpl.dimensions();
147 EIGEN_STRONG_INLINE
bool evalSubExprsIfNeeded(EvaluatorPointerType) {
148 eigen_assert(dimensions_match(m_leftImpl.dimensions(), m_rightImpl.dimensions()));
149 m_leftImpl.evalSubExprsIfNeeded(NULL);
154 return m_rightImpl.evalSubExprsIfNeeded(m_leftImpl.data());
157#ifdef EIGEN_USE_THREADS
158 template <
typename EvalSubExprsCallback>
159 EIGEN_STRONG_INLINE
void evalSubExprsIfNeededAsync(
160 EvaluatorPointerType, EvalSubExprsCallback done) {
161 m_leftImpl.evalSubExprsIfNeededAsync(
nullptr, [
this, done](
bool) {
162 m_rightImpl.evalSubExprsIfNeededAsync(
163 m_leftImpl.data(), [done](
bool need_assign) { done(need_assign); });
168 EIGEN_STRONG_INLINE
void cleanup() {
169 m_leftImpl.cleanup();
170 m_rightImpl.cleanup();
173 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void evalScalar(Index i) {
174 m_leftImpl.coeffRef(i) = m_rightImpl.coeff(i);
176 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void evalPacket(Index i) {
178 const int LhsStoreMode = TensorEvaluator<LeftArgType, Device>::IsAligned ?
Aligned :
Unaligned;
179 const int RhsLoadMode = TensorEvaluator<RightArgType, Device>::IsAligned ?
Aligned :
Unaligned;
180 m_leftImpl.template writePacket<LhsStoreMode>(i, m_rightImpl.template packet<RhsLoadMode>(i));
182 EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index)
const
184 return m_leftImpl.coeff(index);
186 template<
int LoadMode>
187 EIGEN_DEVICE_FUNC PacketReturnType packet(Index index)
const
189 return m_leftImpl.template packet<LoadMode>(index);
192 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
193 costPerCoeff(
bool vectorized)
const {
197 TensorOpCost left = m_leftImpl.costPerCoeff(vectorized);
198 return m_rightImpl.costPerCoeff(vectorized) +
200 numext::maxi(0.0, left.bytes_loaded() -
sizeof(CoeffReturnType)),
201 left.bytes_stored(), left.compute_cycles()) +
202 TensorOpCost(0,
sizeof(CoeffReturnType), 0, vectorized, PacketSize);
205 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
206 internal::TensorBlockResourceRequirements getResourceRequirements()
const {
207 return internal::TensorBlockResourceRequirements::merge(
208 m_leftImpl.getResourceRequirements(),
209 m_rightImpl.getResourceRequirements());
212 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void evalBlock(
213 TensorBlockDesc& desc, TensorBlockScratch& scratch) {
214 if (TensorEvaluator<LeftArgType, Device>::RawAccess &&
215 m_leftImpl.data() != NULL) {
218 desc.template AddDestinationBuffer<Layout>(
219 m_leftImpl.data() + desc.offset(),
220 internal::strides<Layout>(m_leftImpl.dimensions()));
223 RightTensorBlock block = m_rightImpl.block(desc, scratch,
true);
225 if (block.kind() != internal::TensorBlockKind::kMaterializedInOutput) {
226 m_leftImpl.writeBlock(desc, block);
233 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void bind(cl::sycl::handler &cgh)
const {
234 m_leftImpl.bind(cgh);
235 m_rightImpl.bind(cgh);
239 EIGEN_DEVICE_FUNC EvaluatorPointerType data()
const {
return m_leftImpl.data(); }
242 TensorEvaluator<LeftArgType, Device> m_leftImpl;
243 TensorEvaluator<RightArgType, Device> m_rightImpl;
#endif // EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H