From 1ad12917187009bcbb5bf304a542c86eec85f0c5 Mon Sep 17 00:00:00 2001
From: Masahiro Masuda
Date: Fri, 30 Oct 2020 22:13:16 +0900
Subject: [PATCH 1/7] add test
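
The test builds a function returning a nested tuple and runs
GraphPlanMemory over it, with the assertions left commented out for
now. GraphPlanMemory maps each expression to a pair of arrays
(storage ids, device types); a minimal sketch of how the result can
be inspected, mirroring the commented-out checks below (an
illustration, not a stable API):

    smap = relay.backend._backend.GraphPlanMemory(func)
    for expr, (storage_ids, device_types) in smap.items():
        # entries are integer-valued objects exposing .value
        print([s.value for s in storage_ids], [d.value for d in device_types])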
---
 .../relay/test_backend_graph_runtime.py      | 43 ++++++++++++++++---
 1 file changed, 37 insertions(+), 6 deletions(-)

diff --git a/tests/python/relay/test_backend_graph_runtime.py b/tests/python/relay/test_backend_graph_runtime.py
index 1bd551004ad7..916217c4fc64 100644
--- a/tests/python/relay/test_backend_graph_runtime.py
+++ b/tests/python/relay/test_backend_graph_runtime.py
@@ -184,10 +184,41 @@ def unit_numpy(X, W):
     tvm.testing.assert_allclose(out, ref, rtol=1e-5, atol=1e-5)
 
 
+def test_plan_memory_nested_tuples():
+    # it is sufficient to cycle through two memories.
+    x = relay.var("x", shape=(10,))
+    x1 = relay.exp(x)
+    x2 = relay.exp(x)
+    x3 = relay.exp(x)
+    out = relay.Tuple([x1, relay.Tuple([x2, x3])])
+    func = relay.Function([x], out)
+    mod = tvm.IRModule.from_expr(func)
+    mod = relay.transform.InferType()(mod)
+    mod = relay.transform.FuseOps(0)(mod)
+    func = mod["main"]
+    mod = relay.transform.InferType()(mod)
+    smap = relay.backend._backend.GraphPlanMemory(func)
+    # storage_ids = set()
+    # device_types = set()
+    # for k, v in smap.items():
+    #     assert len(v) == 2
+    #     for x in v[0]:
+    #         storage_ids.add(x.value)
+    #     for x in v[1]:
+    #         device_types.add(x.value)
+
+    # # Current rule requires vars have unique storage id
+    # # because we don't do inplace, we will need another
+    # # two alternating temporary space.
+    # assert len(storage_ids) == 4
+    # assert len(device_types) == 1
+
+
 if __name__ == "__main__":
-    test_plan_memory()
-    test_with_params()
-    test_add_op_scalar()
-    test_add_op_tensor()
-    test_add_op_broadcast()
-    test_gru_like()
+    # test_plan_memory()
+    # test_with_params()
+    # test_add_op_scalar()
+    # test_add_op_tensor()
+    # test_add_op_broadcast()
+    # test_gru_like()
+    test_plan_memory_nested_tuples()

From 531aff66339ca01b4b79c53f7ef414c88165f6cc Mon Sep 17 00:00:00 2001
From: Masahiro Masuda
Date: Sat, 31 Oct 2020 10:02:10 +0900
Subject: [PATCH 2/7] test working
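
Rather than asserting on storage ids, the test now compiles the
nested tuple function with the graph runtime and checks the outputs.
The key change is in the TupleNode visitor of graph_plan_memory.cc: a
nested tuple field can yield several storage tokens, so the field
tokens are concatenated instead of being assumed to be exactly one.
The runtime then exposes the nested tuple as a flat, depth-first list
of outputs; a sketch of the expectation (the explicit count assertion
is only added in the next commit):

    # Tuple([x1, Tuple([x2, x3])]) flattens to three outputs: x1, x2, x3
    outs = [mod.get_output(i).asnumpy() for i in range(mod.get_num_outputs())]
    assert len(outs) == 3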
---
 src/relay/backend/graph_plan_memory.cc        |  6 +--
 .../relay/test_backend_graph_runtime.py       | 44 ++++++++-----------
 2 files changed, 21 insertions(+), 29 deletions(-)

diff --git a/src/relay/backend/graph_plan_memory.cc b/src/relay/backend/graph_plan_memory.cc
index bf58c8d5be41..58b54e097b44 100644
--- a/src/relay/backend/graph_plan_memory.cc
+++ b/src/relay/backend/graph_plan_memory.cc
@@ -82,9 +82,9 @@ class StorageAllocaBaseVisitor : public ExprVisitor {
   void VisitExpr_(const TupleNode* op) final {
     std::vector<StorageToken*> fields;
     for (Expr field : op->fields) {
-      auto tok = GetToken(field);
-      ICHECK_EQ(tok.size(), 1U);
-      fields.push_back(tok[0]);
+      VisitExpr(field);
+      auto tokens = GetToken(field);
+      fields.insert(fields.end(), tokens.begin(), tokens.end());
     }
     token_map_[op] = fields;
   }
diff --git a/tests/python/relay/test_backend_graph_runtime.py b/tests/python/relay/test_backend_graph_runtime.py
index 916217c4fc64..7802348b7f5e 100644
--- a/tests/python/relay/test_backend_graph_runtime.py
+++ b/tests/python/relay/test_backend_graph_runtime.py
@@ -184,34 +184,26 @@ def unit_numpy(X, W):
     tvm.testing.assert_allclose(out, ref, rtol=1e-5, atol=1e-5)
 
 
-def test_plan_memory_nested_tuples():
-    # it is sufficient to cycle through two memories.
+def test_compile_nested_tuples():
     x = relay.var("x", shape=(10,))
-    x1 = relay.exp(x)
-    x2 = relay.exp(x)
-    x3 = relay.exp(x)
+    x1 = x + relay.const(1.)
+    x2 = x1 + relay.const(1.)
+    x3 = x2 + relay.const(1.)
     out = relay.Tuple([x1, relay.Tuple([x2, x3])])
     func = relay.Function([x], out)
-    mod = tvm.IRModule.from_expr(func)
-    mod = relay.transform.InferType()(mod)
-    mod = relay.transform.FuseOps(0)(mod)
-    func = mod["main"]
-    mod = relay.transform.InferType()(mod)
-    smap = relay.backend._backend.GraphPlanMemory(func)
-    # storage_ids = set()
-    # device_types = set()
-    # for k, v in smap.items():
-    #     assert len(v) == 2
-    #     for x in v[0]:
-    #         storage_ids.add(x.value)
-    #     for x in v[1]:
-    #         device_types.add(x.value)
-
-    # # Current rule requires vars have unique storage id
-    # # because we don't do inplace, we will need another
-    # # two alternating temporary space.
-    # assert len(storage_ids) == 4
-    # assert len(device_types) == 1
+
+    graph, lib, _ = relay.build(tvm.IRModule.from_expr(func), "llvm")
+    mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0))
+
+    x_data = np.random.uniform(size=(10,)).astype(np.float32)
+    mod.set_input(x=x_data)
+    mod.run()
+
+    ref = x_data + 1
+    for i in range(mod.get_num_outputs()):
+        out = mod.get_output(i).asnumpy()
+        tvm.testing.assert_allclose(out, ref, rtol=1e-5, atol=1e-5)
+        ref = ref + 1
 
 
 if __name__ == "__main__":
@@ -221,4 +213,4 @@ def test_plan_memory_nested_tuples():
     # test_add_op_tensor()
     # test_add_op_broadcast()
     # test_gru_like()
-    test_plan_memory_nested_tuples()
+    test_compile_nested_tuples()

From 5dd5b9408d83e527c1b6d8c7978774ba8a7b5d74 Mon Sep 17 00:00:00 2001
From: Masahiro Masuda
Date: Sun, 1 Nov 2020 07:47:59 +0900
Subject: [PATCH 3/7] uncomment other tests

---
 .../relay/test_backend_graph_runtime.py      | 20 ++++++++++---------
 1 file changed, 11 insertions(+), 9 deletions(-)

diff --git a/tests/python/relay/test_backend_graph_runtime.py b/tests/python/relay/test_backend_graph_runtime.py
index 7802348b7f5e..a92f1df81e47 100644
--- a/tests/python/relay/test_backend_graph_runtime.py
+++ b/tests/python/relay/test_backend_graph_runtime.py
@@ -186,9 +186,9 @@ def unit_numpy(X, W):
 
 def test_compile_nested_tuples():
     x = relay.var("x", shape=(10,))
-    x1 = x + relay.const(1.)
-    x2 = x1 + relay.const(1.)
-    x3 = x2 + relay.const(1.)
+    x1 = x + relay.const(1.0)
+    x2 = x1 + relay.const(1.0)
+    x3 = x2 + relay.const(1.0)
     out = relay.Tuple([x1, relay.Tuple([x2, x3])])
     func = relay.Function([x], out)
 
@@ -199,6 +199,8 @@ def test_compile_nested_tuples():
     mod.set_input(x=x_data)
     mod.run()
 
+    assert mod.get_num_outputs() == 3
+
     ref = x_data + 1
     for i in range(mod.get_num_outputs()):
         out = mod.get_output(i).asnumpy()
@@ -207,10 +209,10 @@ def test_compile_nested_tuples():
 
 
 if __name__ == "__main__":
-    # test_plan_memory()
-    # test_with_params()
-    # test_add_op_scalar()
-    # test_add_op_tensor()
-    # test_add_op_broadcast()
-    # test_gru_like()
+    test_plan_memory()
+    test_with_params()
+    test_add_op_scalar()
+    test_add_op_tensor()
+    test_add_op_broadcast()
+    test_gru_like()
     test_compile_nested_tuples()

From a071aae0cb9e63e6dae39db745ea7485b317fb89 Mon Sep 17 00:00:00 2001
From: Masahiro Masuda
Date: Sun, 1 Nov 2020 14:53:26 +0900
Subject: [PATCH 4/7] remove redundant visit
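
GetToken already visits the expression it is handed before looking up
the token map, so the explicit VisitExpr(field) call introduced in
the previous commit visited every tuple field twice. From memory, the
base-visitor helper is roughly the following (a sketch, not the
verbatim TVM source):

    const std::vector<StorageToken*>& GetToken(const Expr& expr) {
      this->VisitExpr(expr);  // populates token_map_ for the sub-expression
      auto it = token_map_.find(expr.operator->());
      ICHECK(it != token_map_.end());
      return it->second;
    }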
---
 src/relay/backend/graph_plan_memory.cc | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/relay/backend/graph_plan_memory.cc b/src/relay/backend/graph_plan_memory.cc
index 58b54e097b44..15173c2c79db 100644
--- a/src/relay/backend/graph_plan_memory.cc
+++ b/src/relay/backend/graph_plan_memory.cc
@@ -82,7 +82,6 @@ class StorageAllocaBaseVisitor : public ExprVisitor {
   void VisitExpr_(const TupleNode* op) final {
     std::vector<StorageToken*> fields;
     for (Expr field : op->fields) {
-      VisitExpr(field);
       auto tokens = GetToken(field);
       fields.insert(fields.end(), tokens.begin(), tokens.end());
     }

From 3cd95d3a9dc51adff5ee6a05c686481cda2c1faa Mon Sep 17 00:00:00 2001
From: Masahiro Masuda
Date: Mon, 2 Nov 2020 12:29:53 +0900
Subject: [PATCH 5/7] test double nesting
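
A doubly nested output Tuple([x1, Tuple([Tuple([x2, x3]), x4])])
should flatten depth-first into four graph outputs. Since each x_i
adds one to its predecessor, the expected values are simply
(illustration only):

    # depth-first flattening order: x1, x2, x3, x4
    expected = [x_data + 1, x_data + 2, x_data + 3, x_data + 4]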
---
 tests/python/relay/test_backend_graph_runtime.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/tests/python/relay/test_backend_graph_runtime.py b/tests/python/relay/test_backend_graph_runtime.py
index a92f1df81e47..3c42b7b4196f 100644
--- a/tests/python/relay/test_backend_graph_runtime.py
+++ b/tests/python/relay/test_backend_graph_runtime.py
@@ -189,7 +189,8 @@ def test_compile_nested_tuples():
     x1 = x + relay.const(1.0)
     x2 = x1 + relay.const(1.0)
     x3 = x2 + relay.const(1.0)
-    out = relay.Tuple([x1, relay.Tuple([x2, x3])])
+    x4 = x3 + relay.const(1.0)
+    out = relay.Tuple([x1, relay.Tuple([relay.Tuple([x2, x3]), x4])])
     func = relay.Function([x], out)
 
     graph, lib, _ = relay.build(tvm.IRModule.from_expr(func), "llvm")
@@ -199,7 +200,7 @@ def test_compile_nested_tuples():
     mod.set_input(x=x_data)
     mod.run()
 
-    assert mod.get_num_outputs() == 3
+    assert mod.get_num_outputs() == 4
 
     ref = x_data + 1
     for i in range(mod.get_num_outputs()):

From 07dc29ef9eb466ca2485a1e3734e74b0071d65fd Mon Sep 17 00:00:00 2001
From: Masahiro Masuda
Date: Tue, 3 Nov 2020 06:28:54 +0900
Subject: [PATCH 6/7] support nested tuple in CallNode's return type
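
Flattening tuple literals is not enough when a fused function (a
CallNode) itself returns a nested tuple, as in the new
test_compile_nested_tuples_call_output test, so the compile engine,
the memory planner, and the graph-runtime codegen are all taught to
flatten nested tuple types. The diff below only declares
FlattenTupleType in the new header and adds an include to
src/relay/op/memory/memory.cc, so the definition presumably already
lives in that file; a minimal sketch of the recursion it would
perform (not the verbatim TVM source):

    void FlattenTupleTypeAux(const Type& type, std::vector<TensorType>* out) {
      if (const auto* tt = type.as<TensorTypeNode>()) {
        out->push_back(GetRef<TensorType>(tt));
      } else if (const auto* tuple_ty = type.as<TupleTypeNode>()) {
        for (const Type& field : tuple_ty->fields) {
          FlattenTupleTypeAux(field, out);  // recurse into nested tuples
        }
      } else {
        LOG(FATAL) << "Unsupported type: " << type;
      }
    }

    std::vector<TensorType> FlattenTupleType(const Type& type) {
      std::vector<TensorType> out;
      FlattenTupleTypeAux(type, &out);
      return out;
    }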
Revert "support nested tuple in CallNode's return type" This reverts commit 66225eda33f37647cfc11ceb8caa2125dfe88d0d. --- src/relay/backend/compile_engine.cc | 4 +- src/relay/backend/graph_plan_memory.cc | 7 +--- src/relay/backend/graph_runtime_codegen.cc | 12 +++--- src/relay/op/memory/memory.cc | 1 - src/relay/op/memory/utils.h | 39 ------------------ .../relay/test_backend_graph_runtime.py | 41 ++----------------- 6 files changed, 15 insertions(+), 89 deletions(-) delete mode 100644 src/relay/op/memory/utils.h diff --git a/src/relay/backend/compile_engine.cc b/src/relay/backend/compile_engine.cc index 9494e32ab069..767cb6f644de 100644 --- a/src/relay/backend/compile_engine.cc +++ b/src/relay/backend/compile_engine.cc @@ -270,8 +270,10 @@ class ScheduleGetter : public backend::MemoizedExprTranslator> Array VisitExpr_(const TupleNode* op) final { Array fields; for (Expr field : op->fields) { + ICHECK(field->checked_type().as()) << "Only allow Tuple of Tensor"; Array res = VisitExpr(field); - fields.insert(fields.end(), res.begin(), res.end()); + ICHECK_EQ(res.size(), 1); + fields.push_back(res[0]); } return fields; } diff --git a/src/relay/backend/graph_plan_memory.cc b/src/relay/backend/graph_plan_memory.cc index b3f0182a4210..15173c2c79db 100644 --- a/src/relay/backend/graph_plan_memory.cc +++ b/src/relay/backend/graph_plan_memory.cc @@ -28,7 +28,6 @@ #include #include "../../support/arena.h" -#include "../op/memory/utils.h" namespace tvm { namespace relay { @@ -146,10 +145,8 @@ class StorageAllocaInit : protected StorageAllocaBaseVisitor { std::vector tokens; int device_type = node_device_map_.count(GetRef(op)) ? node_device_map_[GetRef(op)]->value : 0; - const Type checked_type = op->checked_type(); - if (checked_type.as()) { - std::vector fields = FlattenTupleType(checked_type); - for (TensorType t : fields) { + if (const auto* tuple_type = op->checked_type().as()) { + for (Type t : tuple_type->fields) { const auto* ttype = t.as(); ICHECK(ttype); StorageToken* token = arena_->make(); diff --git a/src/relay/backend/graph_runtime_codegen.cc b/src/relay/backend/graph_runtime_codegen.cc index 44405b3848e1..e24d18de931c 100644 --- a/src/relay/backend/graph_runtime_codegen.cc +++ b/src/relay/backend/graph_runtime_codegen.cc @@ -32,7 +32,6 @@ #include #include -#include "../op/memory/utils.h" #include "compile_engine.h" #include "utils.h" @@ -274,11 +273,14 @@ class GraphRuntimeCodegen : public backend::MemoizedExprTranslator ret; ShapeVector shape; std::vector dtype; - std::vector fields = FlattenTupleType(checked_type); for (size_t i = 0; i < tuple_type->fields.size(); ++i) { - ret.push_back(GraphNodeRef(node_id, i)); - shape.emplace_back(_ShapeToJSON(fields[i]->shape)); - dtype.emplace_back(DType2String(fields[i]->dtype)); + if (const auto* typ = tuple_type->fields[i].as()) { + ret.push_back(GraphNodeRef(node_id, i)); + shape.emplace_back(_ShapeToJSON(typ->shape)); + dtype.emplace_back(DType2String(typ->dtype)); + } else { + LOG(FATAL) << "type " << checked_type->GetTypeKey() << " not supported"; + } } ICHECK_EQ(node->Type(), kGraphOpNode); auto op_nd = std::dynamic_pointer_cast(node); diff --git a/src/relay/op/memory/memory.cc b/src/relay/op/memory/memory.cc index d19e0bbb49f5..dc5a1ebd3c73 100644 --- a/src/relay/op/memory/memory.cc +++ b/src/relay/op/memory/memory.cc @@ -32,7 +32,6 @@ #include "../../transforms/infer_layout_utils.h" #include "../op_common.h" #include "../type_relations.h" -#include "utils.h" namespace tvm { namespace relay { diff --git a/src/relay/op/memory/utils.h 

From 2171fac9b841b13d3dcb01983b2f36c20dfc69bb Mon Sep 17 00:00:00 2001
From: Masahiro Masuda
Date: Tue, 3 Nov 2020 09:00:46 +0900
Subject: [PATCH 7/7] Revert "support nested tuple in CallNode's return type"

This reverts commit 66225eda33f37647cfc11ceb8caa2125dfe88d0d.
---
 src/relay/backend/compile_engine.cc           |  4 +-
 src/relay/backend/graph_plan_memory.cc        |  7 +---
 src/relay/backend/graph_runtime_codegen.cc    | 12 +++---
 src/relay/op/memory/memory.cc                 |  1 -
 src/relay/op/memory/utils.h                   | 39 ------------------
 .../relay/test_backend_graph_runtime.py       | 41 ++-----------------
 6 files changed, 15 insertions(+), 89 deletions(-)
 delete mode 100644 src/relay/op/memory/utils.h

diff --git a/src/relay/backend/compile_engine.cc b/src/relay/backend/compile_engine.cc
index 9494e32ab069..767cb6f644de 100644
--- a/src/relay/backend/compile_engine.cc
+++ b/src/relay/backend/compile_engine.cc
@@ -270,8 +270,10 @@ class ScheduleGetter : public backend::MemoizedExprTranslator<Array<te::Tensor>> {
   Array<te::Tensor> VisitExpr_(const TupleNode* op) final {
     Array<te::Tensor> fields;
     for (Expr field : op->fields) {
+      ICHECK(field->checked_type().as<TensorTypeNode>()) << "Only allow Tuple of Tensor";
       Array<te::Tensor> res = VisitExpr(field);
-      fields.insert(fields.end(), res.begin(), res.end());
+      ICHECK_EQ(res.size(), 1);
+      fields.push_back(res[0]);
     }
     return fields;
   }
diff --git a/src/relay/backend/graph_plan_memory.cc b/src/relay/backend/graph_plan_memory.cc
index b3f0182a4210..15173c2c79db 100644
--- a/src/relay/backend/graph_plan_memory.cc
+++ b/src/relay/backend/graph_plan_memory.cc
@@ -28,7 +28,6 @@
 #include <tvm/tir/op.h>
 
 #include "../../support/arena.h"
-#include "../op/memory/utils.h"
 
 namespace tvm {
 namespace relay {
@@ -146,10 +145,8 @@ class StorageAllocaInit : protected StorageAllocaBaseVisitor {
     std::vector<StorageToken*> tokens;
     int device_type =
         node_device_map_.count(GetRef<Expr>(op)) ? node_device_map_[GetRef<Expr>(op)]->value : 0;
-    const Type checked_type = op->checked_type();
-    if (checked_type.as<TupleTypeNode>()) {
-      std::vector<TensorType> fields = FlattenTupleType(checked_type);
-      for (TensorType t : fields) {
+    if (const auto* tuple_type = op->checked_type().as<TupleTypeNode>()) {
+      for (Type t : tuple_type->fields) {
         const auto* ttype = t.as<TensorTypeNode>();
         ICHECK(ttype);
         StorageToken* token = arena_->make<StorageToken>();
diff --git a/src/relay/backend/graph_runtime_codegen.cc b/src/relay/backend/graph_runtime_codegen.cc
index 44405b3848e1..e24d18de931c 100644
--- a/src/relay/backend/graph_runtime_codegen.cc
+++ b/src/relay/backend/graph_runtime_codegen.cc
@@ -32,7 +32,6 @@
 #include <string>
 #include <vector>
 
-#include "../op/memory/utils.h"
 #include "compile_engine.h"
 #include "utils.h"
 
@@ -274,11 +273,14 @@ class GraphRuntimeCodegen : public backend::MemoizedExprTranslator<std::vector<GraphNodeRef>> {
       std::vector<GraphNodeRef> ret;
       ShapeVector shape;
       std::vector<std::string> dtype;
-      std::vector<TensorType> fields = FlattenTupleType(checked_type);
       for (size_t i = 0; i < tuple_type->fields.size(); ++i) {
-        ret.push_back(GraphNodeRef(node_id, i));
-        shape.emplace_back(_ShapeToJSON(fields[i]->shape));
-        dtype.emplace_back(DType2String(fields[i]->dtype));
+        if (const auto* typ = tuple_type->fields[i].as<TensorTypeNode>()) {
+          ret.push_back(GraphNodeRef(node_id, i));
+          shape.emplace_back(_ShapeToJSON(typ->shape));
+          dtype.emplace_back(DType2String(typ->dtype));
+        } else {
+          LOG(FATAL) << "type " << checked_type->GetTypeKey() << " not supported";
+        }
       }
       ICHECK_EQ(node->Type(), kGraphOpNode);
       auto op_nd = std::dynamic_pointer_cast<GraphOpNode>(node);
diff --git a/src/relay/op/memory/memory.cc b/src/relay/op/memory/memory.cc
index d19e0bbb49f5..dc5a1ebd3c73 100644
--- a/src/relay/op/memory/memory.cc
+++ b/src/relay/op/memory/memory.cc
@@ -32,7 +32,6 @@
 #include "../../transforms/infer_layout_utils.h"
 #include "../op_common.h"
 #include "../type_relations.h"
-#include "utils.h"
 
 namespace tvm {
 namespace relay {
diff --git a/src/relay/op/memory/utils.h b/src/relay/op/memory/utils.h
deleted file mode 100644
index 85ef2040b696..000000000000
--- a/src/relay/op/memory/utils.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*!
- * \file src/relay/op/memory/utils.h
- * \brief Utilities related to memory allocation
- */
-
-#ifndef TVM_RELAY_OP_MEMORY_UTILS_H_
-#define TVM_RELAY_OP_MEMORY_UTILS_H_
-
-#include <tvm/relay/type.h>
-
-#include <vector>
-
-namespace tvm {
-namespace relay {
-
-std::vector<TensorType> FlattenTupleType(const Type& type);
-
-}  // namespace relay
-}  // namespace tvm
-#endif  // TVM_RELAY_OP_MEMORY_UTILS_H_
diff --git a/tests/python/relay/test_backend_graph_runtime.py b/tests/python/relay/test_backend_graph_runtime.py
index 051a87ba757f..3c42b7b4196f 100644
--- a/tests/python/relay/test_backend_graph_runtime.py
+++ b/tests/python/relay/test_backend_graph_runtime.py
@@ -20,10 +20,8 @@
 from tvm import relay
 from tvm.contrib import graph_runtime
 from tvm.relay.op import add
-from tvm.relay import transform
 import tvm.testing
 
-
 # @tq, @jr should we put this in testing ns?
 def check_rts(expr, args, expected_result, mod=None):
     """
@@ -186,7 +184,7 @@ def unit_numpy(X, W):
     tvm.testing.assert_allclose(out, ref, rtol=1e-5, atol=1e-5)
 
 
-def test_return_nested_tuples():
+def test_compile_nested_tuples():
     x = relay.var("x", shape=(10,))
     x1 = x + relay.const(1.0)
     x2 = x1 + relay.const(1.0)
@@ -195,9 +193,7 @@ def test_compile_nested_tuples():
     out = relay.Tuple([x1, relay.Tuple([relay.Tuple([x2, x3]), x4])])
     func = relay.Function([x], out)
 
-    with tvm.transform.PassContext(opt_level=3):
-        graph, lib, _ = relay.build(tvm.IRModule.from_expr(func), "llvm")
-
+    graph, lib, _ = relay.build(tvm.IRModule.from_expr(func), "llvm")
     mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0))
 
     x_data = np.random.uniform(size=(10,)).astype(np.float32)
@@ -213,36 +209,6 @@ def test_return_nested_tuples():
     ref = ref + 1
 
 
-def test_compile_nested_tuples_call_output():
-    mod = tvm.IRModule()
-    x = relay.var("x", shape=(10, 10))
-    a_split = relay.split(x, 2)
-    a_split_0 = relay.TupleGetItem(a_split.astuple(), 0)
-    a_split_1 = relay.TupleGetItem(a_split.astuple(), 1)
-    tuple_out = relay.Tuple((a_split_0, relay.Tuple([a_split_1])))
-    func0 = relay.Function([x], tuple_out)
-    func0 = func0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
-
-    data = relay.var("x", shape=(10, 10))
-    call = relay.Call(func0, [data])
-    mod["main"] = relay.Function([data], call)
-
-    with tvm.transform.PassContext(opt_level=3):
-        graph, lib, _ = relay.build(mod, "llvm")
-
-    mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0))
-    x_data = np.random.uniform(size=(10, 10)).astype(np.float32)
-    mod.set_input(x=x_data)
-    mod.run()
-
-    assert mod.get_num_outputs() == 2
-
-    ref = np.split(x_data, 2)
-    for i in range(mod.get_num_outputs()):
-        out = mod.get_output(i).asnumpy()
-        tvm.testing.assert_allclose(out, ref[i], rtol=1e-5, atol=1e-5)
-
-
 if __name__ == "__main__":
     test_plan_memory()
     test_with_params()
@@ -250,5 +216,4 @@ def test_compile_nested_tuples_call_output():
     test_add_op_tensor()
     test_add_op_broadcast()
     test_gru_like()
-    test_return_nested_tuples()
-    test_compile_nested_tuples_call_output()
+    test_compile_nested_tuples()