From 44da26ccaed88a505b4cd7b93ea9e2d4bf9f9da3 Mon Sep 17 00:00:00 2001
From: Masahiro Masuda
Date: Fri, 5 Feb 2021 18:28:39 +0900
Subject: [PATCH 1/9] started moving things to header

---
 src/relay/op/memory/memory.cc         | 39 +++++++++++++++++++++++++++
 src/relay/op/memory/memory.h          | 38 ++++++++++++++++++++++++++
 src/relay/op/vm/vm.cc                 | 11 +++++---
 src/relay/op/vm/vm.h                  | 37 +++++++++++++++++++++++++
 src/relay/transforms/fold_constant.cc |  8 ------
 5 files changed, 121 insertions(+), 12 deletions(-)
 create mode 100644 src/relay/op/memory/memory.h
 create mode 100644 src/relay/op/vm/vm.h

diff --git a/src/relay/op/memory/memory.cc b/src/relay/op/memory/memory.cc
index c0edf467815a..748b596bd23e 100644
--- a/src/relay/op/memory/memory.cc
+++ b/src/relay/op/memory/memory.cc
@@ -22,6 +22,7 @@
  * \brief Operators for manifest shape-aware memory allocation in Relay.
  */
 
+#include
 #include
 #include
 #include
@@ -299,5 +300,43 @@ TVM_REGISTER_GLOBAL("relay.op.memory._make.ToTupleType")
       return ToTupleType(t, std::vector<Expr>(array.begin(), array.end()));
     });
 
+// Expr DeviceCopy(Expr data, int src_dev_type, int dst_dev_type) {
+//   auto attrs = make_object<DeviceCopyAttrs>();
+//   attrs->src_dev_type = src_dev_type;
+//   attrs->dst_dev_type = dst_dev_type;
+//   static const Op& op = Op::Get("device_copy");
+//   return Call(op, {data}, Attrs(attrs), {});
+// }
+
+// // relay.device_copy
+// TVM_REGISTER_NODE_TYPE(DeviceCopyAttrs);
+
+// TVM_REGISTER_GLOBAL("relay.op._make.device_copy")
+//     .set_body_typed([](Expr data, int src_dev_type, int dst_dev_type) {
+//       auto attrs = make_object<DeviceCopyAttrs>();
+//       attrs->src_dev_type = src_dev_type;
+//       attrs->dst_dev_type = dst_dev_type;
+//       static const Op& op = Op::Get("device_copy");
+//       return Call(op, {data}, Attrs(attrs), {});
+//     });

+// RELAY_REGISTER_OP("device_copy")
+//     .describe(R"code(
+// Copy data from one tensor to another. The source and destination might be
+// on different devices.
+// )code" TVM_ADD_FILELINE)
+//     .set_num_inputs(1)
+//     .add_argument("data", "Tensor", "The input data.")
+//     .set_support_level(10)
+//     .add_type_rel("Identity", IdentityRel)
+//     .set_attr<TOpPattern>("TOpPattern", kOpaque)
+//     .set_attr<TOpIsStateful>("TOpIsStateful", false)
+//     .set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
+//     .set_attr<FTVMCompute>("FTVMCompute",
+//                            [](const Attrs& attrs, const Array<te::Tensor>& inputs,
+//                               const Type& out_dtype) -> Array<te::Tensor> {
+//                              return {topi::identity(inputs[0])};
+//                            });
+
 }  // namespace relay
 }  // namespace tvm
diff --git a/src/relay/op/memory/memory.h b/src/relay/op/memory/memory.h
new file mode 100644
index 000000000000..d09f517c0125
--- /dev/null
+++ b/src/relay/op/memory/memory.h
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file src/relay/op/memory/memory.h
+ * \brief Operators for manifest shape-aware memory allocation in Relay.
+ */
+
+#ifndef TVM_RELAY_OP_MEMORY_H_
+#define TVM_RELAY_OP_MEMORY_H_
+
+#include "tvm/relay/expr.h"
+
+namespace tvm {
+namespace relay {
+
+Expr DeviceCopy(Expr data, int src_dev_type, int dst_dev_type);
+
+}  // namespace relay
+}  // namespace tvm
+
+#endif  // TVM_RELAY_OP_MEMORY_H_
diff --git a/src/relay/op/vm/vm.cc b/src/relay/op/vm/vm.cc
index 0fb79206d71d..e48aed3c620a 100644
--- a/src/relay/op/vm/vm.cc
+++ b/src/relay/op/vm/vm.cc
@@ -22,6 +22,8 @@
  * \brief Dialect operators for Relay VM.
  */
 
+#include "vm.h"
+
 #include
 #include
 #include
@@ -162,10 +164,11 @@ bool InvokeTVMOpRel(const Array<Type>& types, int num_inputs, const Attrs& attrs
   return true;
 }
 
-TVM_REGISTER_GLOBAL("relay.op.vm.invoke_tvm_op")
-    .set_body_typed([](Expr func, Expr inputs, Expr outputs) {
-      return Call(Op::Get("vm.invoke_tvm_op"), {func, inputs, outputs}, Attrs());
-    });
+Expr InvokeTVMOp(Expr func, Expr inputs, Expr outputs) {
+  return Call(Op::Get("vm.invoke_tvm_op"), {func, inputs, outputs}, Attrs());
+}
+
+TVM_REGISTER_GLOBAL("relay.op.vm.invoke_tvm_op").set_body_typed(InvokeTVMOp);
 
 RELAY_REGISTER_OP("vm.invoke_tvm_op")
     .describe(R"code(Invoke an operation compiled by TVM.)code" TVM_ADD_FILELINE)
diff --git a/src/relay/op/vm/vm.h b/src/relay/op/vm/vm.h
new file mode 100644
index 000000000000..97ae2808f9b4
--- /dev/null
+++ b/src/relay/op/vm/vm.h
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file src/relay/op/vm/vm.h
+ * \brief Dialect operators for Relay VM.
+ */ +#ifndef TVM_RELAY_OP_VM_H_ +#define TVM_RELAY_OP_VM_H_ + +#include "tvm/relay/expr.h" + +namespace tvm { +namespace relay { + +Expr InvokeTVMOp(Expr func, Expr inputs, Expr outputs); + +} // namespace relay +} // namespace tvm + +#endif // TVM_RELAY_OP_VM_H_ diff --git a/src/relay/transforms/fold_constant.cc b/src/relay/transforms/fold_constant.cc index 0689263cca77..657d4db993b0 100644 --- a/src/relay/transforms/fold_constant.cc +++ b/src/relay/transforms/fold_constant.cc @@ -82,10 +82,6 @@ class ConstantFolder : public MixedModeMutator { device_copy_op_(Op::Get("device_copy")), shape_of_op_(Op::Get("shape_of")), vm_shape_of_op_(Op::Get("vm.shape_of")), - invoke_tvm_op_(Op::Get("vm.invoke_tvm_op")), - shape_func_op_(Op::Get("vm.shape_func")), - alloc_tensor_op_(Op::Get("memory.alloc_tensor")), - alloc_storage_op_(Op::Get("memory.alloc_storage")), cast_op_(Op::Get("cast")), ndarray_size_op_(Op::Get("ndarray_size")) {} @@ -217,10 +213,6 @@ class ConstantFolder : public MixedModeMutator { const Op& device_copy_op_; const Op& shape_of_op_; const Op& vm_shape_of_op_; - const Op& invoke_tvm_op_; - const Op& shape_func_op_; - const Op& alloc_tensor_op_; - const Op& alloc_storage_op_; const Op& cast_op_; const Op& ndarray_size_op_; From e040bff3be916ef3f652e98d1573128c6409e141 Mon Sep 17 00:00:00 2001 From: Masahiro Masuda Date: Fri, 5 Feb 2021 18:31:00 +0900 Subject: [PATCH 2/9] directly call InvokeTVMOp --- src/relay/transforms/memory_alloc.cc | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/relay/transforms/memory_alloc.cc b/src/relay/transforms/memory_alloc.cc index 360778e1723b..bdd311eb7c2e 100644 --- a/src/relay/transforms/memory_alloc.cc +++ b/src/relay/transforms/memory_alloc.cc @@ -41,6 +41,7 @@ #include #include "../backend/compile_engine.h" +#include "../op/vm/vm.h" #include "let_list.h" #include "pattern_utils.h" @@ -109,7 +110,6 @@ class DialectRewriter : public ExprMutator { : target_host_(target_host), context_analysis_map_(context_analysis_map), device_copy_(runtime::Registry::Get("relay.op._make.device_copy")), - invoke_tvm_(runtime::Registry::Get("relay.op.vm.invoke_tvm_op")), alloc_storage_(runtime::Registry::Get("relay.op.memory._make.alloc_storage")), shape_func_(runtime::Registry::Get("relay.op.vm.shape_func")), shape_of_(runtime::Registry::Get("relay.op.vm.shape_of")), @@ -209,7 +209,7 @@ class DialectRewriter : public ExprMutator { outs.push_back(out); } Tuple output(outs); - Expr invoke = (*invoke_tvm_)(cn->op, ins, output); + Expr invoke = InvokeTVMOp(cn->op, ins, output); scope.Push(invoke); return ToTupleType(ret_type, std::vector(output->fields.begin(), output->fields.end())); @@ -393,7 +393,7 @@ class DialectRewriter : public ExprMutator { } Tuple tuple_outs(outs); - auto invoke = (*invoke_tvm_)(func, ins, tuple_outs); + auto invoke = InvokeTVMOp(func, ins, tuple_outs); scope->Push(invoke); return ToTupleType(ret_type, std::vector(tuple_outs->fields.begin(), tuple_outs->fields.end())); @@ -425,7 +425,6 @@ class DialectRewriter : public ExprMutator { // Cache the following ops const PackedFunc* device_copy_; - const PackedFunc* invoke_tvm_; const PackedFunc* alloc_storage_; const PackedFunc* shape_func_; const PackedFunc* shape_of_; From df3cfa1064ca9d1f38b92bb685e3f1b90716b1f3 Mon Sep 17 00:00:00 2001 From: Masahiro Masuda Date: Fri, 5 Feb 2021 18:48:17 +0900 Subject: [PATCH 3/9] done all memory op --- src/relay/op/device_copy.cc | 73 ----------------------- src/relay/op/memory/memory.cc | 87 +++++++++++++--------------- 
 src/relay/op/memory/memory.h         |  6 +-
 src/relay/op/vm/vm.cc                | 36 +++++++-----
 src/relay/op/vm/vm.h                 |  4 +-
 src/relay/transforms/memory_alloc.cc | 27 +++------
 6 files changed, 76 insertions(+), 157 deletions(-)
 delete mode 100644 src/relay/op/device_copy.cc

diff --git a/src/relay/op/device_copy.cc b/src/relay/op/device_copy.cc
deleted file mode 100644
index 997eec5a333f..000000000000
--- a/src/relay/op/device_copy.cc
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*!
- *
- * \file src/relay/op/device_copy.cc
- * \brief Crossing device data copy operator.
- *
- * The pattern of this operator is registered as kOpaque. Hence, it could be
- * used as "barrier" to avoid fusing operators belonging to differen devices.
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include "../transforms/infer_layout_utils.h"
-#include "type_relations.h"
-
-namespace tvm {
-namespace relay {
-
-// relay.device_copy
-TVM_REGISTER_NODE_TYPE(DeviceCopyAttrs);
-
-TVM_REGISTER_GLOBAL("relay.op._make.device_copy")
-    .set_body_typed([](Expr data, int src_dev_type, int dst_dev_type) {
-      auto attrs = make_object<DeviceCopyAttrs>();
-      attrs->src_dev_type = src_dev_type;
-      attrs->dst_dev_type = dst_dev_type;
-      static const Op& op = Op::Get("device_copy");
-      return Call(op, {data}, Attrs(attrs), {});
-    });
-
-RELAY_REGISTER_OP("device_copy")
-    .describe(R"code(
-Copy data from one tensor to another. The source and destination might be
-on different devices.
-)code" TVM_ADD_FILELINE)
-    .set_num_inputs(1)
-    .add_argument("data", "Tensor", "The input data.")
-    .set_support_level(10)
-    .add_type_rel("Identity", IdentityRel)
-    .set_attr<TOpPattern>("TOpPattern", kOpaque)
-    .set_attr<TOpIsStateful>("TOpIsStateful", false)
-    .set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
-    .set_attr<FTVMCompute>("FTVMCompute",
-                           [](const Attrs& attrs, const Array<te::Tensor>& inputs,
-                              const Type& out_dtype) -> Array<te::Tensor> {
-                             return {topi::identity(inputs[0])};
-                           });
-
-}  // namespace relay
-}  // namespace tvm
diff --git a/src/relay/op/memory/memory.cc b/src/relay/op/memory/memory.cc
index 748b596bd23e..0a68f1e58bac 100644
--- a/src/relay/op/memory/memory.cc
+++ b/src/relay/op/memory/memory.cc
@@ -33,6 +33,7 @@
 #include "../../transforms/infer_layout_utils.h"
 #include "../op_common.h"
 #include "../type_relations.h"
+#include "tvm/relay/attrs/device_copy.h"
 
 namespace tvm {
 namespace relay {
@@ -43,15 +44,16 @@ TVM_REGISTER_NODE_TYPE(AllocTensorAttrs);
 
 // The passing value in attrs and args doesn't seem super great.
 // We should consider a better solution, i.e the type relation
 // being able to see the arguments as well?
-TVM_REGISTER_GLOBAL("relay.op.memory._make.alloc_storage")
-    .set_body_typed([](Expr size, Expr alignment, TVMContext ctx, DataType dtype_hint) {
-      auto attrs = make_object<AllocStorageAttrs>();
-      attrs->dtype = dtype_hint;
-      attrs->device_id = ctx.device_id;
-      attrs->device_type = ctx.device_type;
-      static const Op& op = Op::Get("memory.alloc_storage");
-      return Call(op, {size, alignment}, Attrs(attrs), {});
-    });
+Expr AllocStorage(Expr size, Expr alignment, TVMContext ctx, DataType dtype_hint) {
+  auto attrs = make_object<AllocStorageAttrs>();
+  attrs->dtype = dtype_hint;
+  attrs->device_id = ctx.device_id;
+  attrs->device_type = ctx.device_type;
+  static const Op& op = Op::Get("memory.alloc_storage");
+  return Call(op, {size, alignment}, Attrs(attrs), {});
+}
+
+TVM_REGISTER_GLOBAL("relay.op.memory._make.alloc_storage").set_body_typed(AllocStorage);
 
 bool AllocStorageRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                      const TypeReporter& reporter) {
@@ -300,43 +302,36 @@ TVM_REGISTER_GLOBAL("relay.op.memory._make.ToTupleType")
       return ToTupleType(t, std::vector<Expr>(array.begin(), array.end()));
     });
 
-// Expr DeviceCopy(Expr data, int src_dev_type, int dst_dev_type) {
-//   auto attrs = make_object<DeviceCopyAttrs>();
-//   attrs->src_dev_type = src_dev_type;
-//   attrs->dst_dev_type = dst_dev_type;
-//   static const Op& op = Op::Get("device_copy");
-//   return Call(op, {data}, Attrs(attrs), {});
-// }
-
-// // relay.device_copy
-// TVM_REGISTER_NODE_TYPE(DeviceCopyAttrs);
-
-// TVM_REGISTER_GLOBAL("relay.op._make.device_copy")
-//     .set_body_typed([](Expr data, int src_dev_type, int dst_dev_type) {
-//       auto attrs = make_object<DeviceCopyAttrs>();
-//       attrs->src_dev_type = src_dev_type;
-//       attrs->dst_dev_type = dst_dev_type;
-//       static const Op& op = Op::Get("device_copy");
-//       return Call(op, {data}, Attrs(attrs), {});
-//     });
-
-// RELAY_REGISTER_OP("device_copy")
-//     .describe(R"code(
-// Copy data from one tensor to another. The source and destination might be
-// on different devices.
-// )code" TVM_ADD_FILELINE)
-//     .set_num_inputs(1)
-//     .add_argument("data", "Tensor", "The input data.")
-//     .set_support_level(10)
-//     .add_type_rel("Identity", IdentityRel)
-//     .set_attr<TOpPattern>("TOpPattern", kOpaque)
-//     .set_attr<TOpIsStateful>("TOpIsStateful", false)
-//     .set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
-//     .set_attr<FTVMCompute>("FTVMCompute",
-//                            [](const Attrs& attrs, const Array<te::Tensor>& inputs,
-//                               const Type& out_dtype) -> Array<te::Tensor> {
-//                              return {topi::identity(inputs[0])};
-//                            });
+// relay.device_copy
+TVM_REGISTER_NODE_TYPE(DeviceCopyAttrs);
+
+Expr DeviceCopy(Expr data, int src_dev_type, int dst_dev_type) {
+  auto attrs = make_object<DeviceCopyAttrs>();
+  attrs->src_dev_type = src_dev_type;
+  attrs->dst_dev_type = dst_dev_type;
+  static const Op& op = Op::Get("device_copy");
+  return Call(op, {data}, Attrs(attrs), {});
+}
+
+TVM_REGISTER_GLOBAL("relay.op._make.device_copy").set_body_typed(DeviceCopy);
+
+RELAY_REGISTER_OP("device_copy")
+    .describe(R"code(
+Copy data from one tensor to another. The source and destination might be
+on different devices.
+)code" TVM_ADD_FILELINE) + .set_num_inputs(1) + .add_argument("data", "Tensor", "The input data.") + .set_support_level(10) + .add_type_rel("Identity", IdentityRel) + .set_attr("TOpPattern", kOpaque) + .set_attr("TOpIsStateful", false) + .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout) + .set_attr("FTVMCompute", + [](const Attrs& attrs, const Array& inputs, + const Type& out_dtype) -> Array { + return {topi::identity(inputs[0])}; + }); } // namespace relay } // namespace tvm diff --git a/src/relay/op/memory/memory.h b/src/relay/op/memory/memory.h index d09f517c0125..5d13510741db 100644 --- a/src/relay/op/memory/memory.h +++ b/src/relay/op/memory/memory.h @@ -19,7 +19,7 @@ /*! * \file src/relay/op/memory/memory.h - * \brief Operators for manifest shape-aware memory allocation in Relay. + * \brief Operators for memory related operations in Relay. */ #ifndef TVM_RELAY_OP_MEMORY_H_ @@ -30,7 +30,11 @@ namespace tvm { namespace relay { +Expr AllocStorage(Expr size, Expr alignment, TVMContext ctx, DataType dtype_hint); Expr DeviceCopy(Expr data, int src_dev_type, int dst_dev_type); +Expr ToTupleType(const Type& ty, const std::vector& exprs); +std::vector FromTupleType(const Type& type, const Expr& expr); +std::vector FlattenTupleType(const Type& type); } // namespace relay } // namespace tvm diff --git a/src/relay/op/vm/vm.cc b/src/relay/op/vm/vm.cc index e48aed3c620a..e1f44cb51b08 100644 --- a/src/relay/op/vm/vm.cc +++ b/src/relay/op/vm/vm.cc @@ -54,20 +54,23 @@ RELAY_REGISTER_OP("vm.shape_of") .set_attr("TNonComputational", true) .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout); -TVM_REGISTER_GLOBAL("relay.op.vm.shape_of").set_body_typed([](Expr expr) { +Expr ShapeOf(Expr expr) { auto attrs = make_object(); attrs->dtype = DataType::Int(64); static const Op& op = Op::Get("vm.shape_of"); return Call(op, {expr}, Attrs(attrs), {}); -}); +} + +TVM_REGISTER_GLOBAL("relay.op.vm.shape_of").set_body_typed(ShapeOf); + +Expr ShapeFunc(Expr func, Expr inputs, Expr outputs, Array is_input) { + static const Op& op = Op::Get("vm.shape_func"); + auto attrs = make_object(); + attrs->is_input = is_input; + return Call(op, {func, inputs, outputs}, Attrs(attrs), {}); +} -TVM_REGISTER_GLOBAL("relay.op.vm.shape_func") - .set_body_typed([](Expr func, Expr inputs, Expr outputs, Array is_input) { - static const Op& op = Op::Get("vm.shape_func"); - auto attrs = make_object(); - attrs->is_input = is_input; - return Call(op, {func, inputs, outputs}, Attrs(attrs), {}); - }); +TVM_REGISTER_GLOBAL("relay.op.vm.shape_func").set_body_typed(ShapeFunc); bool ShapeFuncRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { @@ -215,13 +218,14 @@ RELAY_REGISTER_OP("vm.reshape_tensor") .set_attr("TNonComputational", true) .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout); -TVM_REGISTER_GLOBAL("relay.op.vm.reshape_tensor") - .set_body_typed([](Expr data, Expr shape, Array newshape) { - static const Op& op = Op::Get("vm.reshape_tensor"); - auto attrs = make_object(); - attrs->newshape = std::move(newshape); - return Call(op, {data, shape}, Attrs(attrs), {}); - }); +Expr ReshapeTensor(Expr data, Expr shape, Array newshape) { + static const Op& op = Op::Get("vm.reshape_tensor"); + auto attrs = make_object(); + attrs->newshape = std::move(newshape); + return Call(op, {data, shape}, Attrs(attrs), {}); +} + +TVM_REGISTER_GLOBAL("relay.op.vm.reshape_tensor").set_body_typed(ReshapeTensor); } // namespace relay } // namespace tvm diff --git a/src/relay/op/vm/vm.h 
b/src/relay/op/vm/vm.h index 97ae2808f9b4..90e7ef6f8c29 100644 --- a/src/relay/op/vm/vm.h +++ b/src/relay/op/vm/vm.h @@ -30,7 +30,9 @@ namespace tvm { namespace relay { Expr InvokeTVMOp(Expr func, Expr inputs, Expr outputs); - +Expr ShapeFunc(Expr func, Expr inputs, Expr outputs, Array is_input); + Expr ShapeOf(Expr expr); + Expr ReshapeTensor(Expr data, Expr shape, Array newshape); } // namespace relay } // namespace tvm diff --git a/src/relay/transforms/memory_alloc.cc b/src/relay/transforms/memory_alloc.cc index bdd311eb7c2e..44c851bfb531 100644 --- a/src/relay/transforms/memory_alloc.cc +++ b/src/relay/transforms/memory_alloc.cc @@ -41,6 +41,7 @@ #include #include "../backend/compile_engine.h" +#include "../op/memory/memory.h" #include "../op/vm/vm.h" #include "let_list.h" #include "pattern_utils.h" @@ -50,10 +51,6 @@ using namespace tvm::runtime; namespace tvm { namespace relay { -extern Expr ToTupleType(const Type& ty, const std::vector& exprs); -extern std::vector FromTupleType(const Type& type, const Expr& expr); -extern std::vector FlattenTupleType(const Type& type); - using AnalysisResultMap = std::unordered_map; @@ -109,11 +106,6 @@ class DialectRewriter : public ExprMutator { DialectRewriter(const Target& target_host, const AnalysisResultMap& context_analysis_map) : target_host_(target_host), context_analysis_map_(context_analysis_map), - device_copy_(runtime::Registry::Get("relay.op._make.device_copy")), - alloc_storage_(runtime::Registry::Get("relay.op.memory._make.alloc_storage")), - shape_func_(runtime::Registry::Get("relay.op.vm.shape_func")), - shape_of_(runtime::Registry::Get("relay.op.vm.shape_of")), - reshape_tensor_(runtime::Registry::Get("relay.op.vm.reshape_tensor")), prod_(runtime::Registry::Get("relay.op._make.prod")), divide_(runtime::Registry::Get("relay.op._make.divide")), add_(runtime::Registry::Get("relay.op._make.add")), @@ -222,7 +214,7 @@ class DialectRewriter : public ExprMutator { private: // Insert a device copy node. Expr DeviceCopy(const Expr& inp, int src_ctx, int dst_ctx) { - return ExprMutator::Mutate((*device_copy_)(inp, src_ctx, dst_ctx)); + return ExprMutator::Mutate(relay::DeviceCopy(inp, src_ctx, dst_ctx)); } // Check if a call invokes a primitive function. @@ -290,7 +282,7 @@ class DialectRewriter : public ExprMutator { Expr alignment = ComputeAlignment(type->dtype); // Run type inference later to get the correct type. Var var("storage_" + name_hint, Type(nullptr)); - Expr value = (*alloc_storage_)(size, alignment, ctx, type->dtype); + Expr value = AllocStorage(size, alignment, ctx, type->dtype); auto sto = scope->Push(var, value); // TODO(@jroesch): There is a bug with typing based on the constant shape. 
@@ -325,7 +317,7 @@ class DialectRewriter : public ExprMutator {
       if (state == 2) {
         std::vector<Expr> exprs = FromTupleType(ty, arg);
         for (size_t j = 0; j < exprs.size(); ++j) {
-          Expr sh_of = ExprMutator::Mutate((*shape_of_)(exprs[j]));
+          Expr sh_of = ExprMutator::Mutate(ShapeOf(exprs[j]));
           Var in_shape_var("in_shape_" + std::to_string(input_pos + j), Type(nullptr));
           shape_func_ins.push_back(scope->Push(in_shape_var, sh_of));
           input_pos++;
@@ -358,7 +350,7 @@ class DialectRewriter : public ExprMutator {
       alloc = scope->Push(shape_func_out_var, alloc);
       out_shapes.push_back(alloc);
     }
-    auto shape_call = (*shape_func_)(func, Tuple(shape_func_ins), Tuple(out_shapes), is_inputs);
+    auto shape_call = ShapeFunc(func, Tuple(shape_func_ins), Tuple(out_shapes), is_inputs);
     Var shape_func_var("shape_func", Type(nullptr));
     scope->Push(shape_func_var, shape_call);
     return out_shapes;
@@ -378,7 +370,7 @@ class DialectRewriter : public ExprMutator {
       auto size = ComputeStorageInRelay(out_shape, out_type);
       auto alignment = ComputeAlignment(out_type->dtype);
       Var sto_var("storage_" + std::to_string(i), Type(nullptr));
-      auto val = (*alloc_storage_)(size, alignment, func_ctx, out_type->dtype);
+      auto val = AllocStorage(size, alignment, func_ctx, out_type->dtype);
       storages.push_back(scope->Push(sto_var, val));
     }
 
@@ -415,7 +407,7 @@ class DialectRewriter : public ExprMutator {
       }
       shape_expr = MakeConstant(shape);
     }
-    return (*reshape_tensor_)(new_args[0], shape_expr, ret_ty->shape);
+    return ReshapeTensor(new_args[0], shape_expr, ret_ty->shape);
   }
 
  private:
@@ -424,11 +416,6 @@ class DialectRewriter : public ExprMutator {
   std::vector<LetList> scopes_;
 
   // Cache the following ops
-  const PackedFunc* device_copy_;
-  const PackedFunc* alloc_storage_;
-  const PackedFunc* shape_func_;
-  const PackedFunc* shape_of_;
-  const PackedFunc* reshape_tensor_;
   const PackedFunc* prod_;
   const PackedFunc* divide_;
   const PackedFunc* add_;

From fe4aad1bb1e55cb6158232fa0ed25b24215e1e67 Mon Sep 17 00:00:00 2001
From: Masahiro Masuda
Date: Fri, 5 Feb 2021 18:54:17 +0900
Subject: [PATCH 4/9] also refactor AllocTensor

---
 src/relay/op/memory/memory.cc        | 27 ++++++++++++++-------------
 src/relay/op/memory/memory.h         |  2 ++
 src/relay/transforms/memory_alloc.cc |  4 +---
 3 files changed, 17 insertions(+), 16 deletions(-)

diff --git a/src/relay/op/memory/memory.cc b/src/relay/op/memory/memory.cc
index 0a68f1e58bac..657ee9c2ac4b 100644
--- a/src/relay/op/memory/memory.cc
+++ b/src/relay/op/memory/memory.cc
@@ -93,19 +93,20 @@ RELAY_REGISTER_OP("memory.alloc_storage")
       return {topi::identity(inputs[0])};
     });
 
-TVM_REGISTER_GLOBAL("relay.op.memory._make.alloc_tensor")
-    .set_body_typed([](Expr storage, Expr offset, tvm::relay::Expr shape, DataType dtype,
-                       Array<IndexExpr> assert_shape) {
-      auto attrs = make_object<AllocTensorAttrs>();
-      attrs->dtype = dtype;
-      if (assert_shape.defined()) {
-        attrs->assert_shape = assert_shape;
-      } else {
-        attrs->const_shape = Downcast<Constant>(shape);
-      }
-      static const Op& op = Op::Get("memory.alloc_tensor");
-      return Call(op, {storage, offset, shape}, Attrs(attrs), {});
-    });
+Expr AllocTensor(Expr storage, Expr offset, tvm::relay::Expr shape, DataType dtype,
+                 Array<IndexExpr> assert_shape) {
+  auto attrs = make_object<AllocTensorAttrs>();
+  attrs->dtype = dtype;
+  if (assert_shape.defined()) {
+    attrs->assert_shape = assert_shape;
+  } else {
+    attrs->const_shape = Downcast<Constant>(shape);
+  }
+  static const Op& op = Op::Get("memory.alloc_tensor");
+  return Call(op, {storage, offset, shape}, Attrs(attrs), {});
+}
+
+TVM_REGISTER_GLOBAL("relay.op.memory._make.alloc_tensor").set_body_typed(AllocTensor);
 
 std::vector<int64_t> FromConstShape(Constant konst) {
   runtime::NDArray shape = konst->data;
diff --git a/src/relay/op/memory/memory.h b/src/relay/op/memory/memory.h
index 5d13510741db..58a21156a63c 100644
--- a/src/relay/op/memory/memory.h
+++ b/src/relay/op/memory/memory.h
@@ -32,6 +32,8 @@ namespace relay {
 
 Expr AllocStorage(Expr size, Expr alignment, TVMContext ctx, DataType dtype_hint);
 Expr DeviceCopy(Expr data, int src_dev_type, int dst_dev_type);
+Expr AllocTensor(Expr storage, Expr offset, tvm::relay::Expr shape, DataType dtype,
+                 Array<IndexExpr> assert_shape);
 Expr ToTupleType(const Type& ty, const std::vector<Expr>& exprs);
 std::vector<Expr> FromTupleType(const Type& type, const Expr& expr);
 std::vector<TensorType> FlattenTupleType(const Type& type);
diff --git a/src/relay/transforms/memory_alloc.cc b/src/relay/transforms/memory_alloc.cc
index 44c851bfb531..c30195afabed 100644
--- a/src/relay/transforms/memory_alloc.cc
+++ b/src/relay/transforms/memory_alloc.cc
@@ -60,10 +60,8 @@ inline Constant MakeConstant(const std::vector<int64_t>& value) {
 
 inline Expr AllocTensor(const Expr& storage, tvm::relay::Expr shape, DataType dtype,
                         Array<IndexExpr> assert_shape) {
-  auto f = runtime::Registry::Get("relay.op.memory._make.alloc_tensor");
-  CHECK(f != nullptr) << "unable to find alloc_tensor op";
   auto offset = MakeConstantScalar(DataType::Int(64), 0);
-  return (*f)(storage, offset, shape, dtype, assert_shape);
+  return AllocTensor(storage, offset, shape, dtype, assert_shape);
 }
 
 // A pass to check if the fused op contains only reshape ops.

From eb4945aeab92d784cde821fe99aa7977f00695f5 Mon Sep 17 00:00:00 2001
From: Masahiro Masuda
Date: Fri, 5 Feb 2021 19:10:01 +0900
Subject: [PATCH 5/9] declare Prod

---
 src/relay/op/tensor/reduce.cc        | 10 +++++++-
 src/relay/op/tensor/tensor.h         | 37 ++++++++++++++++++++++++++++
 src/relay/transforms/memory_alloc.cc |  5 ++--
 3 files changed, 48 insertions(+), 4 deletions(-)
 create mode 100644 src/relay/op/tensor/tensor.h

diff --git a/src/relay/op/tensor/reduce.cc b/src/relay/op/tensor/reduce.cc
index 0b198005001b..c8b35b752da1 100644
--- a/src/relay/op/tensor/reduce.cc
+++ b/src/relay/op/tensor/reduce.cc
@@ -475,7 +475,15 @@ Array<te::Tensor> ProdCompute(const Attrs& attrs, const Array<te::Tensor>& input
   return ReduceCompute(attrs, inputs, out_type, topi::prod);
 }
 
-RELAY_REGISTER_REDUCE_OP("prod")
+Expr Prod(Expr data, Array<Integer> axis, bool keepdims, bool exclude) {
+  return MakeReduce(data, axis, keepdims, exclude, "prod");
+}
+
+TVM_REGISTER_GLOBAL("relay.op._make.prod").set_body_typed(Prod);
+
+RELAY_REGISTER_OP("prod")
+    .set_num_inputs(1)
+    .add_argument("data", "Tensor", "The input tensor.")
     .describe(R"code(Computes the products of array elements over given axes.
 
 Example::
diff --git a/src/relay/op/tensor/tensor.h b/src/relay/op/tensor/tensor.h
new file mode 100644
index 000000000000..999686f6cccd
--- /dev/null
+++ b/src/relay/op/tensor/tensor.h
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file tensor.h
+ * \brief Declarations for tensor operations.
+ */
+#ifndef TVM_RELAY_OP_TENSOR_H_
+#define TVM_RELAY_OP_TENSOR_H_
+
+#include "tvm/relay/expr.h"
+
+namespace tvm {
+namespace relay {
+
+Expr Prod(Expr data, Array<Integer> axis, bool keepdims, bool exclude);
+
+}  // namespace relay
+}  // namespace tvm
+
+#endif  // TVM_RELAY_OP_TENSOR_H_
diff --git a/src/relay/transforms/memory_alloc.cc b/src/relay/transforms/memory_alloc.cc
index c30195afabed..65c9a5cba0a8 100644
--- a/src/relay/transforms/memory_alloc.cc
+++ b/src/relay/transforms/memory_alloc.cc
@@ -43,6 +43,7 @@
 #include "../backend/compile_engine.h"
 #include "../op/memory/memory.h"
 #include "../op/vm/vm.h"
+#include "../op/tensor/tensor.h"
 #include "let_list.h"
 #include "pattern_utils.h"
 
@@ -104,7 +105,6 @@ class DialectRewriter : public ExprMutator {
   DialectRewriter(const Target& target_host, const AnalysisResultMap& context_analysis_map)
       : target_host_(target_host),
         context_analysis_map_(context_analysis_map),
-        prod_(runtime::Registry::Get("relay.op._make.prod")),
         divide_(runtime::Registry::Get("relay.op._make.divide")),
         add_(runtime::Registry::Get("relay.op._make.add")),
         multiply_(runtime::Registry::Get("relay.op._make.multiply")) {}
@@ -247,7 +247,7 @@ class DialectRewriter : public ExprMutator {
 
   Expr ComputeStorageInRelay(const Expr& shape, const TensorType& type) const {
     auto dtype = DataType(type->dtype);
-    Expr els = (*prod_)(shape, Array<Integer>(nullptr), false, false);
+    Expr els = Prod(shape, {}, false, false);
     Expr num = MakeConstantScalar(DataType::Int(64), dtype.bits() * dtype.lanes());
     Expr add = (*add_)(num, MakeConstantScalar(DataType::Int(64), 7));
     Expr div = MakeConstantScalar(DataType::Int(64), 8);

From 22d575e74c8a86e894625a192d85a2d3d90d0bfe Mon Sep 17 00:00:00 2001
From: Masahiro Masuda
Date: Fri, 5 Feb 2021 19:25:47 +0900
Subject: [PATCH 6/9] remove cached func for Add, Multiply, Divide

---
 src/relay/op/memory/memory.cc        |  2 ++
 src/relay/transforms/memory_alloc.cc | 17 ++++-------------
 2 files changed, 6 insertions(+), 13 deletions(-)

diff --git a/src/relay/op/memory/memory.cc b/src/relay/op/memory/memory.cc
index 657ee9c2ac4b..683016663059 100644
--- a/src/relay/op/memory/memory.cc
+++ b/src/relay/op/memory/memory.cc
@@ -22,6 +22,8 @@
  * \brief Operators for manifest shape-aware memory allocation in Relay.
  */
 
+#include "memory.h"
+
 #include
 #include
 #include
diff --git a/src/relay/transforms/memory_alloc.cc b/src/relay/transforms/memory_alloc.cc
index 65c9a5cba0a8..481fd0477630 100644
--- a/src/relay/transforms/memory_alloc.cc
+++ b/src/relay/transforms/memory_alloc.cc
@@ -42,8 +42,8 @@
 #include "../backend/compile_engine.h"
 #include "../op/memory/memory.h"
-#include "../op/vm/vm.h"
 #include "../op/tensor/tensor.h"
+#include "../op/vm/vm.h"
 #include "let_list.h"
 #include "pattern_utils.h"
 
@@ -103,11 +103,7 @@ bool IsReshapeOnly(const Expr& expr) {
 class DialectRewriter : public ExprMutator {
  public:
   DialectRewriter(const Target& target_host, const AnalysisResultMap& context_analysis_map)
-      : target_host_(target_host),
-        context_analysis_map_(context_analysis_map),
-        divide_(runtime::Registry::Get("relay.op._make.divide")),
-        add_(runtime::Registry::Get("relay.op._make.add")),
-        multiply_(runtime::Registry::Get("relay.op._make.multiply")) {}
+      : target_host_(target_host), context_analysis_map_(context_analysis_map) {}
 
   // Get the context of an expression.
   TVMContext GetContext(const Expr& expr) const {
@@ -249,9 +245,9 @@ class DialectRewriter : public ExprMutator {
     auto dtype = DataType(type->dtype);
     Expr els = Prod(shape, {}, false, false);
     Expr num = MakeConstantScalar(DataType::Int(64), dtype.bits() * dtype.lanes());
-    Expr add = (*add_)(num, MakeConstantScalar(DataType::Int(64), 7));
+    Expr add = Add(num, MakeConstantScalar(DataType::Int(64), 7));
     Expr div = MakeConstantScalar(DataType::Int(64), 8);
-    Expr ret = (*multiply_)(els, (*divide_)(add, div));
+    Expr ret = Multiply(els, Divide(add, div));
     return std::move(ret);
   }
 
@@ -413,11 +409,6 @@ class DialectRewriter : public ExprMutator {
   AnalysisResultMap context_analysis_map_;
   std::vector<LetList> scopes_;
 
-  // Cache the following ops
-  const PackedFunc* divide_;
-  const PackedFunc* add_;
-  const PackedFunc* multiply_;
-
   runtime::DataType compute_dtype_ = runtime::DataType::Int(64);
   TVMContext default_context_{kDLCPU, 0};
 };

From 3354c4285ed0b30281e0ab60a16ce94887472456 Mon Sep 17 00:00:00 2001
From: Masahiro Masuda
Date: Fri, 5 Feb 2021 19:40:14 +0900
Subject: [PATCH 7/9] lint fix

---
 src/relay/op/memory/memory.cc                          |  2 ++
 src/relay/op/memory/memory.h                           |  8 +++++---
 src/relay/op/tensor/tensor.h                           |  6 +++---
 src/relay/op/vm/vm.cc                                  |  2 ++
 src/relay/op/vm/vm.h                                   | 10 +++++-----
 src/relay/transforms/memory_alloc.cc                   |  2 +-
 tests/python/frontend/pytorch/test_object_detection.py |  7 ++++++-
 7 files changed, 24 insertions(+), 13 deletions(-)

diff --git a/src/relay/op/memory/memory.cc b/src/relay/op/memory/memory.cc
index 683016663059..287564ba4f21 100644
--- a/src/relay/op/memory/memory.cc
+++ b/src/relay/op/memory/memory.cc
@@ -32,6 +32,8 @@
 #include
 #include
 
+#include
+
 #include "../../transforms/infer_layout_utils.h"
 #include "../op_common.h"
 #include "../type_relations.h"
diff --git a/src/relay/op/memory/memory.h b/src/relay/op/memory/memory.h
index 58a21156a63c..6e184507bad5 100644
--- a/src/relay/op/memory/memory.h
+++ b/src/relay/op/memory/memory.h
@@ -22,8 +22,10 @@
  * \brief Operators for memory related operations in Relay.
  */
 
-#ifndef TVM_RELAY_OP_MEMORY_H_
-#define TVM_RELAY_OP_MEMORY_H_
+#ifndef TVM_RELAY_OP_MEMORY_MEMORY_H_
+#define TVM_RELAY_OP_MEMORY_MEMORY_H_
+
+#include
+
 #include "tvm/relay/expr.h"
 
@@ -41,4 +43,4 @@ std::vector<TensorType> FlattenTupleType(const Type& type);
 }  // namespace relay
 }  // namespace tvm
 
-#endif  // TVM_RELAY_OP_MEMORY_H_
+#endif  // TVM_RELAY_OP_MEMORY_MEMORY_H_
diff --git a/src/relay/op/tensor/tensor.h b/src/relay/op/tensor/tensor.h
index 999686f6cccd..da7463db763f 100644
--- a/src/relay/op/tensor/tensor.h
+++ b/src/relay/op/tensor/tensor.h
@@ -21,8 +21,8 @@
  * \file tensor.h
  * \brief Declarations for tensor operations.
  */
-#ifndef TVM_RELAY_OP_TENSOR_H_
-#define TVM_RELAY_OP_TENSOR_H_
+#ifndef TVM_RELAY_OP_TENSOR_TENSOR_H_
+#define TVM_RELAY_OP_TENSOR_TENSOR_H_
 
 #include "tvm/relay/expr.h"
 
@@ -34,4 +34,4 @@ Expr Prod(Expr data, Array<Integer> axis, bool keepdims, bool exclude);
 }  // namespace relay
 }  // namespace tvm
 
-#endif  // TVM_RELAY_OP_TENSOR_H_
+#endif  // TVM_RELAY_OP_TENSOR_TENSOR_H_
diff --git a/src/relay/op/vm/vm.cc b/src/relay/op/vm/vm.cc
index e1f44cb51b08..a74a259a114f 100644
--- a/src/relay/op/vm/vm.cc
+++ b/src/relay/op/vm/vm.cc
@@ -32,6 +32,8 @@
 #include
 #include
 
+#include
+
 #include "../../transforms/infer_layout_utils.h"
 #include "../op_common.h"
 #include "../type_relations.h"
diff --git a/src/relay/op/vm/vm.h b/src/relay/op/vm/vm.h
index 90e7ef6f8c29..cd98ee6d8cd6 100644
--- a/src/relay/op/vm/vm.h
+++ b/src/relay/op/vm/vm.h
@@ -21,8 +21,8 @@
  * \file src/relay/op/vm/vm.h
  * \brief Dialect operators for Relay VM.
  */
-#ifndef TVM_RELAY_OP_VM_H_
-#define TVM_RELAY_OP_VM_H_
+#ifndef TVM_RELAY_OP_VM_VM_H_
+#define TVM_RELAY_OP_VM_VM_H_
 
 #include "tvm/relay/expr.h"
 
@@ -31,9 +31,9 @@ namespace relay {
 
 Expr InvokeTVMOp(Expr func, Expr inputs, Expr outputs);
-Expr ShapeFunc(Expr func, Expr inputs, Expr outputs, Array<tvm::Integer> is_input);
- Expr ShapeOf(Expr expr);
- Expr ReshapeTensor(Expr data, Expr shape, Array<PrimExpr> newshape);
+Expr ShapeFunc(Expr func, Expr inputs, Expr outputs, Array<tvm::Integer> is_input);
+Expr ShapeOf(Expr expr);
+Expr ReshapeTensor(Expr data, Expr shape, Array<PrimExpr> newshape);
 }  // namespace relay
 }  // namespace tvm
 
-#endif  // TVM_RELAY_OP_VM_H_
+#endif  // TVM_RELAY_OP_VM_VM_H_
diff --git a/src/relay/transforms/memory_alloc.cc b/src/relay/transforms/memory_alloc.cc
index 481fd0477630..1654bf31c695 100644
--- a/src/relay/transforms/memory_alloc.cc
+++ b/src/relay/transforms/memory_alloc.cc
@@ -243,7 +243,7 @@ class DialectRewriter : public ExprMutator {
 
   Expr ComputeStorageInRelay(const Expr& shape, const TensorType& type) const {
     auto dtype = DataType(type->dtype);
-    Expr els = Prod(shape, {}, false, false);
+    Expr els = Prod(shape, Array<Integer>(nullptr), false, false);
     Expr num = MakeConstantScalar(DataType::Int(64), dtype.bits() * dtype.lanes());
     Expr add = Add(num, MakeConstantScalar(DataType::Int(64), 7));
     Expr div = MakeConstantScalar(DataType::Int(64), 8);
diff --git a/tests/python/frontend/pytorch/test_object_detection.py b/tests/python/frontend/pytorch/test_object_detection.py
index a404a88393bc..2a24e442967a 100644
--- a/tests/python/frontend/pytorch/test_object_detection.py
+++ b/tests/python/frontend/pytorch/test_object_detection.py
@@ -34,7 +34,7 @@
 import torch
 import torchvision
 
-in_size = 300
+in_size = 150
 
 
 def process_image(img):
@@ -162,3 +162,8 @@ def compile_and_run_vm(mod, params, data_np, target):
     # Results should be equivalent after rewriting
     for res1, res2 in zip(tvm_res, tvm_res_after_rewrite):
         tvm.testing.assert_allclose(res1.asnumpy(), res2.asnumpy())
+
+    print("ok")
+
+
+test_detection_models()

From 9f462d731576deae716a093df4d5ff2ef6df8d20 Mon Sep 17 00:00:00 2001
From: Masahiro Masuda
Date: Sat, 6 Feb 2021 09:11:58 +0900
Subject: [PATCH 8/9] revert test change

---
 tests/python/frontend/pytorch/test_object_detection.py | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/tests/python/frontend/pytorch/test_object_detection.py b/tests/python/frontend/pytorch/test_object_detection.py
index 2a24e442967a..a404a88393bc 100644
--- a/tests/python/frontend/pytorch/test_object_detection.py
+++ b/tests/python/frontend/pytorch/test_object_detection.py
@@ -34,7 +34,7 @@
 import torch
 import torchvision
 
-in_size = 150
+in_size = 300
 
 
 def process_image(img):
@@ -162,8 +162,3 @@ def compile_and_run_vm(mod, params, data_np, target):
     # Results should be equivalent after rewriting
     for res1, res2 in zip(tvm_res, tvm_res_after_rewrite):
         tvm.testing.assert_allclose(res1.asnumpy(), res2.asnumpy())
-
-    print("ok")
-
-
-test_detection_models()

From 9877ddf3a08e81461e59484f5bd811012c70e20f Mon Sep 17 00:00:00 2001
From: Masahiro Masuda
Date: Sat, 6 Feb 2021 09:19:48 +0900
Subject: [PATCH 9/9] remove tensor.h and declare Prod in pattern_utils.h

---
 src/relay/op/tensor/reduce.cc        |  4 ---
 src/relay/op/tensor/tensor.h         | 37 ----------------------------
 src/relay/op/vm/vm.h                 |  1 +
 src/relay/transforms/memory_alloc.cc |  1 -
 src/relay/transforms/pattern_utils.h |  4 +++
 5 files changed, 5 insertions(+), 42 deletions(-)
 delete mode 100644 src/relay/op/tensor/tensor.h

diff --git a/src/relay/op/tensor/reduce.cc b/src/relay/op/tensor/reduce.cc
index c8b35b752da1..4fa8aca4f3a9 100644
--- a/src/relay/op/tensor/reduce.cc
+++ b/src/relay/op/tensor/reduce.cc
@@ -475,10 +475,6 @@ Array<te::Tensor> ProdCompute(const Attrs& attrs, const Array<te::Tensor>& input
   return ReduceCompute(attrs, inputs, out_type, topi::prod);
 }
 
-Expr Prod(Expr data, Array<Integer> axis, bool keepdims, bool exclude) {
-  return MakeReduce(data, axis, keepdims, exclude, "prod");
-}
-
 TVM_REGISTER_GLOBAL("relay.op._make.prod").set_body_typed(Prod);
 
 RELAY_REGISTER_OP("prod")
diff --git a/src/relay/op/tensor/tensor.h b/src/relay/op/tensor/tensor.h
deleted file mode 100644
index da7463db763f..000000000000
--- a/src/relay/op/tensor/tensor.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
-
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*!
- * \file tensor.h
- * \brief Declarations for tensor operations.
- */
-#ifndef TVM_RELAY_OP_TENSOR_TENSOR_H_
-#define TVM_RELAY_OP_TENSOR_TENSOR_H_
-
-#include "tvm/relay/expr.h"
-
-namespace tvm {
-namespace relay {
-
-Expr Prod(Expr data, Array<Integer> axis, bool keepdims, bool exclude);
-
-}  // namespace relay
-}  // namespace tvm
-
-#endif  // TVM_RELAY_OP_TENSOR_TENSOR_H_
diff --git a/src/relay/op/vm/vm.h b/src/relay/op/vm/vm.h
index cd98ee6d8cd6..802c8100125a 100644
--- a/src/relay/op/vm/vm.h
+++ b/src/relay/op/vm/vm.h
@@ -33,6 +33,7 @@ Expr InvokeTVMOp(Expr func, Expr inputs, Expr outputs);
 Expr ShapeFunc(Expr func, Expr inputs, Expr outputs, Array<tvm::Integer> is_input);
 Expr ShapeOf(Expr expr);
 Expr ReshapeTensor(Expr data, Expr shape, Array<PrimExpr> newshape);
+
 }  // namespace relay
 }  // namespace tvm
 
diff --git a/src/relay/transforms/memory_alloc.cc b/src/relay/transforms/memory_alloc.cc
index 1654bf31c695..b8c87909a025 100644
--- a/src/relay/transforms/memory_alloc.cc
+++ b/src/relay/transforms/memory_alloc.cc
@@ -42,7 +42,6 @@
 #include "../backend/compile_engine.h"
 #include "../op/memory/memory.h"
-#include "../op/tensor/tensor.h"
 #include "../op/vm/vm.h"
 #include "let_list.h"
 #include "pattern_utils.h"
diff --git a/src/relay/transforms/pattern_utils.h b/src/relay/transforms/pattern_utils.h
index 8ef86e088193..bc0fcc9f2988 100644
--- a/src/relay/transforms/pattern_utils.h
+++ b/src/relay/transforms/pattern_utils.h
@@ -644,6 +644,10 @@ static inline Expr Sum(Expr data, Array<Integer> axis, bool keepdims, bool exclu
   return MakeReduce(data, axis, keepdims, exclude, "sum");
 }
 
+static inline Expr Prod(Expr data, Array<Integer> axis, bool keepdims, bool exclude) {
+  return MakeReduce(data, axis, keepdims, exclude, "prod");
+}
+
 static inline Expr Reshape(Expr data, Array<Integer> newshape) {
   return MakeReshape(data, newshape);
 }
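
The net effect of the series on call sites: passes stop caching `const PackedFunc*` handles fetched from the global registry and instead call the helpers now declared in `src/relay/op/memory/memory.h` and `src/relay/op/vm/vm.h` directly. A minimal sketch of the before/after pattern, assuming only the declarations shown in the diffs above; `MakeStorageForTensor` is a hypothetical example wrapper, not part of the patches:

// Sketch only: illustrates the registry-lookup -> direct-call refactoring.
#include "../op/memory/memory.h"

namespace tvm {
namespace relay {

Expr MakeStorageForTensor(Expr size, Expr alignment, TVMContext ctx, DataType dtype) {
  // Before this series, a pass cached a registry handle and went through the
  // PackedFunc calling convention, with no compile-time type checking:
  //   const PackedFunc* alloc_storage =
  //       runtime::Registry::Get("relay.op.memory._make.alloc_storage");
  //   Expr value = (*alloc_storage)(size, alignment, ctx, dtype);
  // After it, the same call node is built with a direct, statically typed call
  // into the helper declared in memory.h:
  return AllocStorage(size, alignment, ctx, dtype);
}

}  // namespace relay
}  // namespace tvm

The Python-facing `TVM_REGISTER_GLOBAL` registrations are kept, but each now simply forwards to the named C++ function (`.set_body_typed(AllocStorage)`), so C++ passes such as `DialectRewriter` and the constant folder no longer pay a registry lookup or depend on registration order.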