13 #include "eteq/generated/api.hpp" 17 #ifndef ETEQ_GRADER_HPP 18 #define ETEQ_GRADER_HPP 33 assert(
nullptr != coorder);
35 coorder->forward(dims.begin(), dims.begin());
37 std::fill(bcast.begin(), bcast.end(), 1);
42 bcast[d] = shape.
at(d);
45 revcoord = std::make_shared<CoordMap>(bcast,
false);
47 return make_functor<T>(
teq::Opcode{
"EXTEND",egen::EXTEND}, {
61 auto coorder = child.get_coorder();
62 assert(
nullptr != coorder);
64 coorder->forward(dims.begin(), dims.begin());
71 revcoord = std::make_shared<CoordMap>(order,
true);
73 return make_functor<T>(
teq::Opcode{
"PERMUTE",egen::PERMUTE},{
87 auto coorder = child.get_coorder();
88 assert(
nullptr != coorder);
90 coorder->forward(dims.begin(), dims.begin());
91 std::vector<teq::RankT> red_dims;
96 red_dims.push_back(i);
99 revcoord =
reduce(red_dims);
101 return make_functor<T>(
teq::Opcode{
"REDUCE_SUM",egen::REDUCE_SUM},{
107 template <
typename T>
112 size_t arg_idx)
const override 117 switch ((egen::_GENERATED_OPCODE) opcode.
code_)
123 out = make_constant_scalar<T>(
124 -1,
args[0].get_tensor()->shape());
143 out = (T) 1 / ((T) 2 *
TO_NODE(op));
155 case egen::SIGMOID_GRAD:
156 out =
TO_NODE(op) * ((T) 1 - (T) 2 *
163 case egen::REDUCE_SUM:
170 out = make_constant_scalar<T>(1,
args[0].get_tensor()->shape());
174 out =
TO_NODE(
args[(
size_t)(arg_idx==0)].get_tensor());
191 out = make_constant_scalar<T>(arg_idx == 0 ?
192 1 : -1,
args[0].get_tensor()->shape());
206 case egen::RAND_UNIF:
208 out = make_constant_scalar<T>(0,
args[0].get_tensor()->shape());
210 case egen::REDUCE_PROD:
215 case egen::REDUCE_MAX:
216 case egen::REDUCE_MIN:
228 lhs->shape().at(1)}), {0,2,1}) :
231 rhs->shape().at(0)}), {2,1,0});
234 case egen::CONV_IMG_GRAD:
235 logs::fatal(
"cannot derive CONV_IMG_GRAD");
237 case egen::CONV_KRN_GRAD:
238 logs::fatal(
"cannot derive CONV_KRN_GRAD");
241 logs::fatalf(
"Unknown op %s", opcode.
name_.c_str());
243 return out->get_tensor();
252 switch (opcode.
code_)
266 case egen::SIGMOID_GRAD:
279 case egen::RAND_UNIF:
283 case egen::REDUCE_MAX:
284 case egen::REDUCE_MIN:
285 case egen::REDUCE_PROD:
286 case egen::REDUCE_SUM:
288 op->get_children()[0],
TO_NODE(supcomp_grad), arg_idx);
292 op.get(),
TO_NODE(supcomp_grad), arg_idx);
296 op.get(),
TO_NODE(supcomp_grad), arg_idx);
303 op->get_children()[0].
304 get_tensor()->shape().at(0)
307 std::vector<teq::RankT>{2, 1, 0} :
308 std::vector<teq::RankT>{0, 2, 1}), 2, 1);
313 auto args = op->get_children();
315 args[(size_t)(0 == arg_idx)].get_shaper();
317 args[arg_idx].get_shaper()->reverse());
321 egen::CONV_IMG_GRAD};
326 egen::CONV_KRN_GRAD};
329 fwd_shaper->connect(*rev_shaper));
330 out = make_functor<T>(opcode, {
339 auto& child = op->get_children()[0];
340 child.get_coorder()->forward(
341 slicings.begin(), slicings.begin());
343 teq::DimT dim = child.get_tensor()->shape().at(dimension);
345 teq::DimT right_pad = dim - (left_pad + slicings[1]);
348 std::pair<teq::DimT,teq::DimT>{
349 left_pad, right_pad}, dimension);
355 auto& child = op->get_children()[0];
356 child.get_coorder()->forward(
357 paddings.begin(), paddings.begin());
359 teq::DimT dim = op->shape().at(dimension);
361 teq::DimT extent = dim - paddings[1] - offset;
364 offset, extent, dimension);
375 op->get_children()[0].get_tensor());
376 auto then =
TO_NODE(supcomp_grad);
377 auto otherwise = make_constant_scalar<T>(0, op->shape());
380 std::swap(then, otherwise);
382 out = tenncor::if_then_else(condition, then, otherwise);
385 case egen::CONV_IMG_GRAD:
386 logs::fatal(
"cannot derive CONV_IMG_GRAD");
388 case egen::CONV_KRN_GRAD:
389 logs::fatal(
"cannot derive CONV_KRN_GRAD");
393 logs::fatalf(
"Unknown op %s", opcode.
name_.c_str());
395 return out->get_tensor();
401 return make_constant_scalar<T>(1, shape)->get_tensor();
407 return make_constant_scalar<T>(0, shape)->get_tensor();
421 template <
typename T>
426 root->get_tensor(), target->get_tensor());
432 #endif // ETEQ_GRADER_HPP
std::array< CDimT, rank_cap > CoordT
Definition: shape.hpp:56
const RankT rank_cap
Number of dimensions in a shape/coordinate.
Definition: shape.hpp:47
NodeptrT< T > derive(NodeptrT< T > root, NodeptrT< T > target)
Derive root with respect to target and optimized.
Definition: grader.hpp:422
args
Definition: csv_to_png.py:105
EigenptrT< T > sin(teq::Shape &outshape, const OpArg< T > &in)
Definition: operator.hpp:280
virtual const ArgsT & get_children(void) const =0
Return children nodes as a vector of raw pointers.
NodeptrT< T > reduce_grad(const teq::FuncArg &child, NodeptrT< T > bwd, size_t idx)
Return reduction operator gradient of reduced functor node (bwd)
Definition: grader.hpp:25
Encoding of operation.
Definition: ifunctor.hpp:18
CoordptrT extend(teq::RankT rank, std::vector< teq::DimT > ext)
Return CoordMap wrapper of extension parameters.
CoordptrT get_shaper(void) const
Return shaper coord map.
Definition: funcarg.hpp:67
Interface of iOperation-defined operation node.
Definition: ifunctor.hpp:28
EigenptrT< T > square(teq::Shape &outshape, const OpArg< T > &in)
Definition: operator.hpp:559
Definition: constant.hpp:17
std::vector< FuncArg > ArgsT
Type of functor arguments.
Definition: funcarg.hpp:101
CoordptrT reduce(std::vector< teq::RankT > red_dims)
Return CoordMap wrapper of reduction dimensions.
uint8_t RankT
Type used for shape rank.
Definition: shape.hpp:23
Eigen node version of teq::FuncArg.
Definition: funcarg.hpp:22
TensptrT get_tensor(void) const
Return tensor being mapped.
Definition: funcarg.hpp:61
std::shared_ptr< iCoordMap > CoordptrT
Type of iCoordMap smartpointer.
Definition: coord.hpp:106
NodeptrT< T > extend_grad(teq::iFunctor *fwd, NodeptrT< T > bwd, size_t idx)
Return extension gradient of extended functor node (bwd)
Definition: grader.hpp:80
teq::TensptrT chain_rule(teq::FuncptrT op, const teq::TensptrT &local_der, teq::TensptrT supcomp_grad, size_t arg_idx) const override
Implementation of iGradientBuilder.
Definition: grader.hpp:247
std::string name_
String representation of operation.
Definition: ifunctor.hpp:21
teq::TensptrT get_const_one(teq::Shape shape) const override
Implementation of iGradientBuilder.
Definition: grader.hpp:399
teq::TensptrT add(teq::TensptrT &lhs, teq::TensptrT &rhs) const override
Implementation of iGradientBuilder.
Definition: grader.hpp:411
CoordptrT get_coorder(void) const
Return coord map for coordinates.
Definition: funcarg.hpp:80
std::shared_ptr< CoordMap > CoordptrT
Type of iCoordMap smartpointer.
Definition: coord.hpp:64
Coordinate mapper and tensor pair.
Definition: funcarg.hpp:21
NodeptrT< T > permute_grad(teq::iFunctor *fwd, NodeptrT< T > bwd, size_t idx)
Return permutation gradient of permuted functor node (bwd)
Definition: grader.hpp:54
teq::TensptrT get_const_zero(teq::Shape shape) const override
Implementation of iGradientBuilder.
Definition: grader.hpp:405
Definition: grad_def.hpp:28
EigenptrT< T > sigmoid_grad(teq::Shape &outshape, const OpArg< T > &in)
Definition: operator.hpp:498
EigenptrT< T > sigmoid(teq::Shape &outshape, const OpArg< T > &in)
Definition: operator.hpp:475
size_t code_
Numerical encoding of operation.
Definition: ifunctor.hpp:24
CoordptrT permute(std::vector< teq::RankT > dims)
Return CoordMap wrapper of permute indices.
EigenptrT< T > slice(teq::Shape &outshape, const OpArg< T > &in)
Return Eigen data object representing data slicing of dimensions.
Definition: operator.hpp:157
std::shared_ptr< iTensor > TensptrT
Tensor smart pointer.
Definition: itensor.hpp:51
uint16_t DimT
Type used for shape dimension.
Definition: shape.hpp:31
EigenptrT< T > pad(teq::Shape &outshape, const OpArg< T > &in)
Return Eigen data object representing data zero padding.
Definition: operator.hpp:183
EigenptrT< T > cos(teq::Shape &outshape, const OpArg< T > &in)
Definition: operator.hpp:314
ETEQ implementation of TEQ's Backward Propagation Builder.
Definition: grader.hpp:108
teq::TensptrT local_derivative(teq::FuncptrT op, size_t arg_idx) const override
Implementation of iGradientBuilder.
Definition: grader.hpp:111
EigenptrT< T > log(teq::Shape &outshape, const OpArg< T > &in)
Definition: operator.hpp:402
EigenptrT< T > reduce_sum(teq::Shape &outshape, const OpArg< T > &in)
template< typename T > EigenptrT< T > reduce_prod(teq::Shape &outshape, const OpArg< T > &in)
Return Eigen data object representing reduction where aggregation is sum.
Definition: operator.hpp:94
Functor implementation of operable functor of Eigen operators.
Definition: functor.hpp:25
std::shared_ptr< iNode< T > > NodeptrT
Smart pointer of node.
Definition: inode.hpp:63
std::shared_ptr< iFunctor > FuncptrT
Functor smart pointer.
Definition: ifunctor.hpp:49
FuncArg< T > identity_map(NodeptrT< T > node)
Return FuncArg<T> that identity maps input tensor.
Definition: funcarg.hpp:88
DimT at(RankT idx) const
Return DimT element at idx for any index in range [0:rank_cap)
Definition: shape.hpp:108
TensptrT derive(TensptrT root, TensptrT target) const
Return derivative of root with respect to target.
Definition: grad_def.hpp:54
#define TO_NODE(tens)
Macro for converting tensor to node.
Definition: inode.hpp:106
EigenptrT< T > pow(teq::Shape &outshape, const OpArg< T > &a, const OpArg< T > &b)
Definition: operator.hpp:608