diff --git a/src/relay/op/device_copy.cc b/src/relay/op/device_copy.cc
deleted file mode 100644
index 997eec5a333f..000000000000
--- a/src/relay/op/device_copy.cc
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*!
- *
- * \file src/relay/op/device_copy.cc
- * \brief Crossing device data copy operator.
- *
- * The pattern of this operator is registered as kOpaque. Hence, it could be
- * used as "barrier" to avoid fusing operators belonging to differen devices.
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include "../transforms/infer_layout_utils.h"
-#include "type_relations.h"
-
-namespace tvm {
-namespace relay {
-
-// relay.device_copy
-TVM_REGISTER_NODE_TYPE(DeviceCopyAttrs);
-
-TVM_REGISTER_GLOBAL("relay.op._make.device_copy")
-    .set_body_typed([](Expr data, int src_dev_type, int dst_dev_type) {
-      auto attrs = make_object<DeviceCopyAttrs>();
-      attrs->src_dev_type = src_dev_type;
-      attrs->dst_dev_type = dst_dev_type;
-      static const Op& op = Op::Get("device_copy");
-      return Call(op, {data}, Attrs(attrs), {});
-    });
-
-RELAY_REGISTER_OP("device_copy")
-    .describe(R"code(
-Copy data from one tensor to another. The source and destination might be
-on different devices.
-)code" TVM_ADD_FILELINE)
-    .set_num_inputs(1)
-    .add_argument("data", "Tensor", "The input data.")
-    .set_support_level(10)
-    .add_type_rel("Identity", IdentityRel)
-    .set_attr<TOpPattern>("TOpPattern", kOpaque)
-    .set_attr<TOpIsStateful>("TOpIsStateful", false)
-    .set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
-    .set_attr<FTVMCompute>("FTVMCompute",
-                           [](const Attrs& attrs, const Array<te::Tensor>& inputs,
-                              const Type& out_dtype) -> Array<te::Tensor> {
-                             return {topi::identity(inputs[0])};
-                           });
-
-}  // namespace relay
-}  // namespace tvm
diff --git a/src/relay/op/memory/memory.cc b/src/relay/op/memory/memory.cc
index c0edf467815a..287564ba4f21 100644
--- a/src/relay/op/memory/memory.cc
+++ b/src/relay/op/memory/memory.cc
@@ -22,6 +22,9 @@
  * \brief Operators for manifest shape-aware memory allocation in Relay.
  */
 
+#include "memory.h"
+
+#include
 #include
 #include
 #include
@@ -29,9 +32,12 @@
 #include
 #include
 
+#include
+
 #include "../../transforms/infer_layout_utils.h"
 #include "../op_common.h"
 #include "../type_relations.h"
+#include "tvm/relay/attrs/device_copy.h"
 
 namespace tvm {
 namespace relay {
@@ -42,15 +48,16 @@ TVM_REGISTER_NODE_TYPE(AllocTensorAttrs);
 // The passing value in attrs and args doesn't seem super great.
 // We should consider a better solution, i.e the type relation
 // being able to see the arguments as well?
-TVM_REGISTER_GLOBAL("relay.op.memory._make.alloc_storage") - .set_body_typed([](Expr size, Expr alignment, TVMContext ctx, DataType dtype_hint) { - auto attrs = make_object(); - attrs->dtype = dtype_hint; - attrs->device_id = ctx.device_id; - attrs->device_type = ctx.device_type; - static const Op& op = Op::Get("memory.alloc_storage"); - return Call(op, {size, alignment}, Attrs(attrs), {}); - }); +Expr AllocStorage(Expr size, Expr alignment, TVMContext ctx, DataType dtype_hint) { + auto attrs = make_object(); + attrs->dtype = dtype_hint; + attrs->device_id = ctx.device_id; + attrs->device_type = ctx.device_type; + static const Op& op = Op::Get("memory.alloc_storage"); + return Call(op, {size, alignment}, Attrs(attrs), {}); +} + +TVM_REGISTER_GLOBAL("relay.op.memory._make.alloc_storage").set_body_typed(AllocStorage); bool AllocStorageRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { @@ -90,19 +97,20 @@ RELAY_REGISTER_OP("memory.alloc_storage") return {topi::identity(inputs[0])}; }); -TVM_REGISTER_GLOBAL("relay.op.memory._make.alloc_tensor") - .set_body_typed([](Expr storage, Expr offset, tvm::relay::Expr shape, DataType dtype, - Array assert_shape) { - auto attrs = make_object(); - attrs->dtype = dtype; - if (assert_shape.defined()) { - attrs->assert_shape = assert_shape; - } else { - attrs->const_shape = Downcast(shape); - } - static const Op& op = Op::Get("memory.alloc_tensor"); - return Call(op, {storage, offset, shape}, Attrs(attrs), {}); - }); +Expr AllocTensor(Expr storage, Expr offset, tvm::relay::Expr shape, DataType dtype, + Array assert_shape) { + auto attrs = make_object(); + attrs->dtype = dtype; + if (assert_shape.defined()) { + attrs->assert_shape = assert_shape; + } else { + attrs->const_shape = Downcast(shape); + } + static const Op& op = Op::Get("memory.alloc_tensor"); + return Call(op, {storage, offset, shape}, Attrs(attrs), {}); +} + +TVM_REGISTER_GLOBAL("relay.op.memory._make.alloc_tensor").set_body_typed(AllocTensor); std::vector FromConstShape(Constant konst) { runtime::NDArray shape = konst->data; @@ -299,5 +307,36 @@ TVM_REGISTER_GLOBAL("relay.op.memory._make.ToTupleType") return ToTupleType(t, std::vector(array.begin(), array.end())); }); +// relay.device_copy +TVM_REGISTER_NODE_TYPE(DeviceCopyAttrs); + +Expr DeviceCopy(Expr data, int src_dev_type, int dst_dev_type) { + auto attrs = make_object(); + attrs->src_dev_type = src_dev_type; + attrs->dst_dev_type = dst_dev_type; + static const Op& op = Op::Get("device_copy"); + return Call(op, {data}, Attrs(attrs), {}); +} + +TVM_REGISTER_GLOBAL("relay.op._make.device_copy").set_body_typed(DeviceCopy); + +RELAY_REGISTER_OP("device_copy") + .describe(R"code( +Copy data from one tensor to another. The source and destination might be +on different devices. 
+)code" TVM_ADD_FILELINE) + .set_num_inputs(1) + .add_argument("data", "Tensor", "The input data.") + .set_support_level(10) + .add_type_rel("Identity", IdentityRel) + .set_attr("TOpPattern", kOpaque) + .set_attr("TOpIsStateful", false) + .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout) + .set_attr("FTVMCompute", + [](const Attrs& attrs, const Array& inputs, + const Type& out_dtype) -> Array { + return {topi::identity(inputs[0])}; + }); + } // namespace relay } // namespace tvm diff --git a/src/relay/op/memory/memory.h b/src/relay/op/memory/memory.h new file mode 100644 index 000000000000..6e184507bad5 --- /dev/null +++ b/src/relay/op/memory/memory.h @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/*! + * \file src/relay/op/memory/memory.h + * \brief Operators for memory related operations in Relay. + */ + +#ifndef TVM_RELAY_OP_MEMORY_MEMORY_H_ +#define TVM_RELAY_OP_MEMORY_MEMORY_H_ + +#include + +#include "tvm/relay/expr.h" + +namespace tvm { +namespace relay { + +Expr AllocStorage(Expr size, Expr alignment, TVMContext ctx, DataType dtype_hint); +Expr DeviceCopy(Expr data, int src_dev_type, int dst_dev_type); +Expr AllocTensor(Expr storage, Expr offset, tvm::relay::Expr shape, DataType dtype, + Array assert_shape); +Expr ToTupleType(const Type& ty, const std::vector& exprs); +std::vector FromTupleType(const Type& type, const Expr& expr); +std::vector FlattenTupleType(const Type& type); + +} // namespace relay +} // namespace tvm + +#endif // TVM_RELAY_OP_MEMORY_MEMORY_H_ diff --git a/src/relay/op/tensor/reduce.cc b/src/relay/op/tensor/reduce.cc index 0b198005001b..4fa8aca4f3a9 100644 --- a/src/relay/op/tensor/reduce.cc +++ b/src/relay/op/tensor/reduce.cc @@ -475,7 +475,11 @@ Array ProdCompute(const Attrs& attrs, const Array& input return ReduceCompute(attrs, inputs, out_type, topi::prod); } -RELAY_REGISTER_REDUCE_OP("prod") +TVM_REGISTER_GLOBAL("relay.op._make.prod").set_body_typed(Prod); + +RELAY_REGISTER_OP("prod") + .set_num_inputs(1) + .add_argument("data", "Tensor", "The input tensor.") .describe(R"code(Computes the products of array elements over given axes. Example:: diff --git a/src/relay/op/vm/vm.cc b/src/relay/op/vm/vm.cc index 0fb79206d71d..a74a259a114f 100644 --- a/src/relay/op/vm/vm.cc +++ b/src/relay/op/vm/vm.cc @@ -22,6 +22,8 @@ * \brief Dialect operators for Relay VM. 
*/ +#include "vm.h" + #include #include #include @@ -30,6 +32,8 @@ #include #include +#include + #include "../../transforms/infer_layout_utils.h" #include "../op_common.h" #include "../type_relations.h" @@ -52,20 +56,23 @@ RELAY_REGISTER_OP("vm.shape_of") .set_attr("TNonComputational", true) .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout); -TVM_REGISTER_GLOBAL("relay.op.vm.shape_of").set_body_typed([](Expr expr) { +Expr ShapeOf(Expr expr) { auto attrs = make_object(); attrs->dtype = DataType::Int(64); static const Op& op = Op::Get("vm.shape_of"); return Call(op, {expr}, Attrs(attrs), {}); -}); +} + +TVM_REGISTER_GLOBAL("relay.op.vm.shape_of").set_body_typed(ShapeOf); + +Expr ShapeFunc(Expr func, Expr inputs, Expr outputs, Array is_input) { + static const Op& op = Op::Get("vm.shape_func"); + auto attrs = make_object(); + attrs->is_input = is_input; + return Call(op, {func, inputs, outputs}, Attrs(attrs), {}); +} -TVM_REGISTER_GLOBAL("relay.op.vm.shape_func") - .set_body_typed([](Expr func, Expr inputs, Expr outputs, Array is_input) { - static const Op& op = Op::Get("vm.shape_func"); - auto attrs = make_object(); - attrs->is_input = is_input; - return Call(op, {func, inputs, outputs}, Attrs(attrs), {}); - }); +TVM_REGISTER_GLOBAL("relay.op.vm.shape_func").set_body_typed(ShapeFunc); bool ShapeFuncRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { @@ -162,10 +169,11 @@ bool InvokeTVMOpRel(const Array& types, int num_inputs, const Attrs& attrs return true; } -TVM_REGISTER_GLOBAL("relay.op.vm.invoke_tvm_op") - .set_body_typed([](Expr func, Expr inputs, Expr outputs) { - return Call(Op::Get("vm.invoke_tvm_op"), {func, inputs, outputs}, Attrs()); - }); +Expr InvokeTVMOp(Expr func, Expr inputs, Expr outputs) { + return Call(Op::Get("vm.invoke_tvm_op"), {func, inputs, outputs}, Attrs()); +} + +TVM_REGISTER_GLOBAL("relay.op.vm.invoke_tvm_op").set_body_typed(InvokeTVMOp); RELAY_REGISTER_OP("vm.invoke_tvm_op") .describe(R"code(Invoke an operation compiled by TVM.)code" TVM_ADD_FILELINE) @@ -212,13 +220,14 @@ RELAY_REGISTER_OP("vm.reshape_tensor") .set_attr("TNonComputational", true) .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout); -TVM_REGISTER_GLOBAL("relay.op.vm.reshape_tensor") - .set_body_typed([](Expr data, Expr shape, Array newshape) { - static const Op& op = Op::Get("vm.reshape_tensor"); - auto attrs = make_object(); - attrs->newshape = std::move(newshape); - return Call(op, {data, shape}, Attrs(attrs), {}); - }); +Expr ReshapeTensor(Expr data, Expr shape, Array newshape) { + static const Op& op = Op::Get("vm.reshape_tensor"); + auto attrs = make_object(); + attrs->newshape = std::move(newshape); + return Call(op, {data, shape}, Attrs(attrs), {}); +} + +TVM_REGISTER_GLOBAL("relay.op.vm.reshape_tensor").set_body_typed(ReshapeTensor); } // namespace relay } // namespace tvm diff --git a/src/relay/op/vm/vm.h b/src/relay/op/vm/vm.h new file mode 100644 index 000000000000..802c8100125a --- /dev/null +++ b/src/relay/op/vm/vm.h @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/*! + * \file src/relay/op/vm/vm.h + * \brief Dialect operators for Relay VM. + */ +#ifndef TVM_RELAY_OP_VM_VM_H_ +#define TVM_RELAY_OP_VM_VM_H_ + +#include "tvm/relay/expr.h" + +namespace tvm { +namespace relay { + +Expr InvokeTVMOp(Expr func, Expr inputs, Expr outputs); +Expr ShapeFunc(Expr func, Expr inputs, Expr outputs, Array is_input); +Expr ShapeOf(Expr expr); +Expr ReshapeTensor(Expr data, Expr shape, Array newshape); + +} // namespace relay +} // namespace tvm + +#endif // TVM_RELAY_OP_VM_VM_H_ diff --git a/src/relay/transforms/fold_constant.cc b/src/relay/transforms/fold_constant.cc index 0689263cca77..657d4db993b0 100644 --- a/src/relay/transforms/fold_constant.cc +++ b/src/relay/transforms/fold_constant.cc @@ -82,10 +82,6 @@ class ConstantFolder : public MixedModeMutator { device_copy_op_(Op::Get("device_copy")), shape_of_op_(Op::Get("shape_of")), vm_shape_of_op_(Op::Get("vm.shape_of")), - invoke_tvm_op_(Op::Get("vm.invoke_tvm_op")), - shape_func_op_(Op::Get("vm.shape_func")), - alloc_tensor_op_(Op::Get("memory.alloc_tensor")), - alloc_storage_op_(Op::Get("memory.alloc_storage")), cast_op_(Op::Get("cast")), ndarray_size_op_(Op::Get("ndarray_size")) {} @@ -217,10 +213,6 @@ class ConstantFolder : public MixedModeMutator { const Op& device_copy_op_; const Op& shape_of_op_; const Op& vm_shape_of_op_; - const Op& invoke_tvm_op_; - const Op& shape_func_op_; - const Op& alloc_tensor_op_; - const Op& alloc_storage_op_; const Op& cast_op_; const Op& ndarray_size_op_; diff --git a/src/relay/transforms/memory_alloc.cc b/src/relay/transforms/memory_alloc.cc index 360778e1723b..b8c87909a025 100644 --- a/src/relay/transforms/memory_alloc.cc +++ b/src/relay/transforms/memory_alloc.cc @@ -41,6 +41,8 @@ #include #include "../backend/compile_engine.h" +#include "../op/memory/memory.h" +#include "../op/vm/vm.h" #include "let_list.h" #include "pattern_utils.h" @@ -49,10 +51,6 @@ using namespace tvm::runtime; namespace tvm { namespace relay { -extern Expr ToTupleType(const Type& ty, const std::vector& exprs); -extern std::vector FromTupleType(const Type& type, const Expr& expr); -extern std::vector FlattenTupleType(const Type& type); - using AnalysisResultMap = std::unordered_map; @@ -62,10 +60,8 @@ inline Constant MakeConstant(const std::vector& value) { inline Expr AllocTensor(const Expr& storage, tvm::relay::Expr shape, DataType dtype, Array assert_shape) { - auto f = runtime::Registry::Get("relay.op.memory._make.alloc_tensor"); - CHECK(f != nullptr) << "unable to find alloc_tensor op"; auto offset = MakeConstantScalar(DataType::Int(64), 0); - return (*f)(storage, offset, shape, dtype, assert_shape); + return AllocTensor(storage, offset, shape, dtype, assert_shape); } // A pass to check if the fused op contains only reshape ops. 
@@ -106,18 +102,7 @@ bool IsReshapeOnly(const Expr& expr) {
 class DialectRewriter : public ExprMutator {
  public:
   DialectRewriter(const Target& target_host, const AnalysisResultMap& context_analysis_map)
-      : target_host_(target_host),
-        context_analysis_map_(context_analysis_map),
-        device_copy_(runtime::Registry::Get("relay.op._make.device_copy")),
-        invoke_tvm_(runtime::Registry::Get("relay.op.vm.invoke_tvm_op")),
-        alloc_storage_(runtime::Registry::Get("relay.op.memory._make.alloc_storage")),
-        shape_func_(runtime::Registry::Get("relay.op.vm.shape_func")),
-        shape_of_(runtime::Registry::Get("relay.op.vm.shape_of")),
-        reshape_tensor_(runtime::Registry::Get("relay.op.vm.reshape_tensor")),
-        prod_(runtime::Registry::Get("relay.op._make.prod")),
-        divide_(runtime::Registry::Get("relay.op._make.divide")),
-        add_(runtime::Registry::Get("relay.op._make.add")),
-        multiply_(runtime::Registry::Get("relay.op._make.multiply")) {}
+      : target_host_(target_host), context_analysis_map_(context_analysis_map) {}
 
   // Get the context of an expression.
   TVMContext GetContext(const Expr& expr) const {
@@ -209,7 +194,7 @@ class DialectRewriter : public ExprMutator {
       outs.push_back(out);
     }
     Tuple output(outs);
-    Expr invoke = (*invoke_tvm_)(cn->op, ins, output);
+    Expr invoke = InvokeTVMOp(cn->op, ins, output);
     scope.Push(invoke);
     return ToTupleType(ret_type,
                        std::vector<Expr>(output->fields.begin(), output->fields.end()));
@@ -222,7 +207,7 @@
  private:
   // Insert a device copy node.
   Expr DeviceCopy(const Expr& inp, int src_ctx, int dst_ctx) {
-    return ExprMutator::Mutate((*device_copy_)(inp, src_ctx, dst_ctx));
+    return ExprMutator::Mutate(relay::DeviceCopy(inp, src_ctx, dst_ctx));
   }
 
   // Check if a call invokes a primitive function.
@@ -257,11 +242,11 @@
   Expr ComputeStorageInRelay(const Expr& shape, const TensorType& type) const {
     auto dtype = DataType(type->dtype);
-    Expr els = (*prod_)(shape, Array<Integer>(nullptr), false, false);
+    Expr els = Prod(shape, Array<Integer>(nullptr), false, false);
     Expr num = MakeConstantScalar(DataType::Int(64), dtype.bits() * dtype.lanes());
-    Expr add = (*add_)(num, MakeConstantScalar(DataType::Int(64), 7));
+    Expr add = Add(num, MakeConstantScalar(DataType::Int(64), 7));
     Expr div = MakeConstantScalar(DataType::Int(64), 8);
-    Expr ret = (*multiply_)(els, (*divide_)(add, div));
+    Expr ret = Multiply(els, Divide(add, div));
     return std::move(ret);
   }
 
@@ -290,7 +275,7 @@
     Expr alignment = ComputeAlignment(type->dtype);
     // Run type inference later to get the correct type.
     Var var("storage_" + name_hint, Type(nullptr));
-    Expr value = (*alloc_storage_)(size, alignment, ctx, type->dtype);
+    Expr value = AllocStorage(size, alignment, ctx, type->dtype);
     auto sto = scope->Push(var, value);
 
     // TODO(@jroesch): There is a bug with typing based on the constant shape.
@@ -325,7 +310,7 @@ class DialectRewriter : public ExprMutator {
     if (state == 2) {
       std::vector<Expr> exprs = FromTupleType(ty, arg);
       for (size_t j = 0; j < exprs.size(); ++j) {
-        Expr sh_of = ExprMutator::Mutate((*shape_of_)(exprs[j]));
+        Expr sh_of = ExprMutator::Mutate(ShapeOf(exprs[j]));
         Var in_shape_var("in_shape_" + std::to_string(input_pos + j), Type(nullptr));
         shape_func_ins.push_back(scope->Push(in_shape_var, sh_of));
         input_pos++;
@@ -358,7 +343,7 @@
       alloc = scope->Push(shape_func_out_var, alloc);
       out_shapes.push_back(alloc);
     }
-    auto shape_call = (*shape_func_)(func, Tuple(shape_func_ins), Tuple(out_shapes), is_inputs);
+    auto shape_call = ShapeFunc(func, Tuple(shape_func_ins), Tuple(out_shapes), is_inputs);
     Var shape_func_var("shape_func", Type(nullptr));
     scope->Push(shape_func_var, shape_call);
     return out_shapes;
@@ -378,7 +363,7 @@
       auto size = ComputeStorageInRelay(out_shape, out_type);
       auto alignment = ComputeAlignment(out_type->dtype);
       Var sto_var("storage_" + std::to_string(i), Type(nullptr));
-      auto val = (*alloc_storage_)(size, alignment, func_ctx, out_type->dtype);
+      auto val = AllocStorage(size, alignment, func_ctx, out_type->dtype);
       storages.push_back(scope->Push(sto_var, val));
     }
 
@@ -393,7 +378,7 @@
     }
 
     Tuple tuple_outs(outs);
-    auto invoke = (*invoke_tvm_)(func, ins, tuple_outs);
+    auto invoke = InvokeTVMOp(func, ins, tuple_outs);
     scope->Push(invoke);
     return ToTupleType(ret_type,
                        std::vector<Expr>(tuple_outs->fields.begin(), tuple_outs->fields.end()));
@@ -415,7 +400,7 @@
       }
       shape_expr = MakeConstant(shape);
     }
-    return (*reshape_tensor_)(new_args[0], shape_expr, ret_ty->shape);
+    return ReshapeTensor(new_args[0], shape_expr, ret_ty->shape);
   }
 
  private:
@@ -423,18 +408,6 @@
   AnalysisResultMap context_analysis_map_;
   std::vector<LetList> scopes_;
 
-  // Cache the following ops
-  const PackedFunc* device_copy_;
-  const PackedFunc* invoke_tvm_;
-  const PackedFunc* alloc_storage_;
-  const PackedFunc* shape_func_;
-  const PackedFunc* shape_of_;
-  const PackedFunc* reshape_tensor_;
-  const PackedFunc* prod_;
-  const PackedFunc* divide_;
-  const PackedFunc* add_;
-  const PackedFunc* multiply_;
-
   runtime::DataType compute_dtype_ = runtime::DataType::Int(64);
   TVMContext default_context_{kDLCPU, 0};
 };
diff --git a/src/relay/transforms/pattern_utils.h b/src/relay/transforms/pattern_utils.h
index 8ef86e088193..bc0fcc9f2988 100644
--- a/src/relay/transforms/pattern_utils.h
+++ b/src/relay/transforms/pattern_utils.h
@@ -644,6 +644,10 @@ static inline Expr Sum(Expr data, Array<Integer> axis, bool keepdims, bool exclu
   return MakeReduce(data, axis, keepdims, exclude, "sum");
 }
 
+static inline Expr Prod(Expr data, Array<Integer> axis, bool keepdims, bool exclude) {
+  return MakeReduce(data, axis, keepdims, exclude, "prod");
+}
+
 static inline Expr Reshape(Expr data, Array<Integer> newshape) {
   return MakeReshape(data, newshape);
 }
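
Taken together, the patch replaces string-based registry lookups with ordinary C++ functions: op-construction helpers such as AllocStorage, AllocTensor, DeviceCopy, InvokeTVMOp, ShapeFunc, ShapeOf, ReshapeTensor, and Prod now have declarations in memory.h, vm.h, and pattern_utils.h, and the packed-function globals simply forward to them. A minimal sketch of what this buys a C++ pass is below; the wrapper function and its placement are hypothetical (not part of this diff), and the commented-out "before" style mirrors the registry-lookup code deleted from memory_alloc.cc.

// Hypothetical helper inside a Relay pass (assumed to live under
// src/relay/transforms/, hence the relative include paths).
#include "../op/memory/memory.h"  // relay::AllocStorage, relay::DeviceCopy, ...
#include "../op/vm/vm.h"          // relay::InvokeTVMOp, relay::ShapeOf, ...

namespace tvm {
namespace relay {

Expr CopyToDevice(Expr data, int src_dev_type, int dst_dev_type) {
  // Before this patch: fetch the op maker through the global registry,
  // with only a runtime check that it exists.
  //   const PackedFunc* f = runtime::Registry::Get("relay.op._make.device_copy");
  //   CHECK(f != nullptr) << "unable to find device_copy op";
  //   return (*f)(data, src_dev_type, dst_dev_type);

  // After this patch: a direct call the compiler can type-check and inline.
  return DeviceCopy(data, src_dev_type, dst_dev_type);
}

}  // namespace relay
}  // namespace tvm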
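One detail worth tracing through in DialectRewriter::ComputeStorageInRelay, which is unchanged in behavior here: the size expression it builds is prod(shape) * ((bits * lanes + 7) / 8), i.e. the element count times the per-element size rounded up to whole bytes in integer arithmetic. As a worked example (a float32 tensor of shape (2, 3), not a case taken from the diff): els = 6, num = 32 * 1 = 32, (32 + 7) / 8 = 4, so the computed storage is 6 * 4 = 24 bytes; for a 1-bit type the same (num + 7) / 8 term still rounds up to one full byte per element.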