From ec1911f70393d95dade4cb381d5a9c513d4aebb2 Mon Sep 17 00:00:00 2001
From: Ubuntu
Date: Fri, 5 Feb 2021 08:46:28 +0000
Subject: [PATCH 1/7] Dynamic Reshape

---
 python/tvm/relay/op/contrib/tensorrt.py |  2 +-
 tests/python/contrib/test_tensorrt.py   | 34 +++++++++++++++++++++++++
 2 files changed, 35 insertions(+), 1 deletion(-)

diff --git a/python/tvm/relay/op/contrib/tensorrt.py b/python/tvm/relay/op/contrib/tensorrt.py
index db9684d02ac9..34afb89df1d3 100644
--- a/python/tvm/relay/op/contrib/tensorrt.py
+++ b/python/tvm/relay/op/contrib/tensorrt.py
@@ -635,7 +635,7 @@ def reshape_annotate_fn(expr):  # pylint: disable=unused-variable
     if dynamic_reshape:
         # Make sure that the batch dim is unmodified.
         if int(new_shape[0]) < 0:
-            for shape_val, new_shape_val in enumerate(shape[1:], new_shape[1:]):
+            for shape_val, new_shape_val in zip(shape[1:], new_shape[1:]):
                 if not (
                     isinstance(shape_val, int)
                     and isinstance(new_shape_val, int)
diff --git a/tests/python/contrib/test_tensorrt.py b/tests/python/contrib/test_tensorrt.py
index bd8d92eedb4c..a66a5e484d75 100644
--- a/tests/python/contrib/test_tensorrt.py
+++ b/tests/python/contrib/test_tensorrt.py
@@ -631,6 +631,40 @@ def get_graph(x_shape, new_shape):
     run_and_verify_func(get_graph((1, 1, 2, 3), (1, 6)))
 
 
+def test_dynamic_reshape():
+    if skip_codegen_test():
+        return
+
+    def test_run(batches_to_test, x_shape, new_shape):
+        x_data = np.ones([max(batches_to_test)] + list(x_shape)[1:]).astype("float32")
+        result_arr = [{} for _ in range(len(batches_to_test))]
+        for use_trt in [True]:
+            x = relay.var("x", shape=x_shape, dtype="float32")
+            out = relay.reshape(x, new_shape)
+            f = relay.Function([x], out)
+            mod = tvm.IRModule()
+            mod["main"] = f
+            if use_trt:
+                mod, _ = tensorrt.partition_for_tensorrt(mod, params={})
+            print(mod)
+            if not skip_runtime_test():
+                with relay.build_config(opt_level=3):
+                    relay_exec = relay.create_executor("vm", mod=mod, ctx=tvm.cpu(0), target="llvm")
+
+                for i, batch_size in enumerate(batches_to_test):
+                    result_arr[i][use_trt] = relay_exec.evaluate()(x_data[:batch_size, ...])
+                    print(x_data[:batch_size, ...].shape, result_arr[i][use_trt].shape)
+
+    if not skip_runtime_test():
+        for i in range(len(batches_to_test)):
+            assert_result_dict_holds(result_arr[i])
+
+    batches_to_test = [1, 1, 0, 2, 3, 0, 1, 3, 2]
+    x_shape = (relay.Any(), 3, 2, 3)
+    new_shape = (-1, 1, 2, 3)
+    test_run(batches_to_test, x_shape, new_shape)
+
+
 def test_transpose():
     def get_graph(x_shape, order):
         x = relay.var("x", shape=(x_shape), dtype="float32")

From d9d0408a885db2644b6a253b278669dc2f28b789 Mon Sep 17 00:00:00 2001
From: Ubuntu
Date: Fri, 5 Feb 2021 19:03:47 +0000
Subject: [PATCH 2/7] Changes

---
 python/tvm/relay/op/contrib/tensorrt.py | 8 ++++----
 tests/python/contrib/test_tensorrt.py   | 6 +++---
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/python/tvm/relay/op/contrib/tensorrt.py b/python/tvm/relay/op/contrib/tensorrt.py
index 34afb89df1d3..c659ee80cc8e 100644
--- a/python/tvm/relay/op/contrib/tensorrt.py
+++ b/python/tvm/relay/op/contrib/tensorrt.py
@@ -637,15 +637,15 @@ def reshape_annotate_fn(expr):  # pylint: disable=unused-variable
         if int(new_shape[0]) < 0:
             for shape_val, new_shape_val in zip(shape[1:], new_shape[1:]):
                 if not (
-                    isinstance(shape_val, int)
-                    and isinstance(new_shape_val, int)
+                    isinstance(shape_val, (int, tvm.tir.expr.IntImm))
+                    and isinstance(new_shape_val, (int, tvm.tir.expr.IntImm))
                     and int(shape_val) == int(new_shape_val)
                 ):
                     return False
         elif int(new_shape[0]) > 0:
             if not (
-                isinstance(shape[0], int)
-                and isinstance(new_shape[0], int)
+                isinstance(shape[0], (int, tvm.tir.expr.IntImm))
+                and isinstance(new_shape[0], (int, tvm.tir.expr.IntImm))
                 and int(shape[0]) == int(new_shape[0])
             ):
                 return False
diff --git a/tests/python/contrib/test_tensorrt.py b/tests/python/contrib/test_tensorrt.py
index a66a5e484d75..7bb7c4c587f0 100644
--- a/tests/python/contrib/test_tensorrt.py
+++ b/tests/python/contrib/test_tensorrt.py
@@ -646,14 +646,13 @@ def test_run(batches_to_test, x_shape, new_shape):
             mod["main"] = f
             if use_trt:
                 mod, _ = tensorrt.partition_for_tensorrt(mod, params={})
-            print(mod)
             if not skip_runtime_test():
                 with relay.build_config(opt_level=3):
                     relay_exec = relay.create_executor("vm", mod=mod, ctx=tvm.cpu(0), target="llvm")
 
                 for i, batch_size in enumerate(batches_to_test):
                     result_arr[i][use_trt] = relay_exec.evaluate()(x_data[:batch_size, ...])
-                    print(x_data[:batch_size, ...].shape, result_arr[i][use_trt].shape)
+                    # print(x_data[:batch_size, ...].shape, result_arr[i][use_trt].shape)
 
     if not skip_runtime_test():
         for i in range(len(batches_to_test)):
@@ -1265,4 +1264,5 @@ def get_maskrcnn_input(in_size: int) -> np.ndarray:
 
 
 if __name__ == "__main__":
-    pytest.main([__file__])
+    test_dynamic_reshape()
+    # pytest.main([__file__])

From 714c184ff39cbbcd643bdeab3b6b61c810c7407a Mon Sep 17 00:00:00 2001
From: Ubuntu
Date: Fri, 5 Feb 2021 23:55:33 +0000
Subject: [PATCH 3/7] Add test cases

---
 python/tvm/relay/op/contrib/tensorrt.py |  3 +-
 tests/python/contrib/test_tensorrt.py   | 90 ++++++++++++++++++++++---
 2 files changed, 80 insertions(+), 13 deletions(-)

diff --git a/python/tvm/relay/op/contrib/tensorrt.py b/python/tvm/relay/op/contrib/tensorrt.py
index c659ee80cc8e..afdea9712342 100644
--- a/python/tvm/relay/op/contrib/tensorrt.py
+++ b/python/tvm/relay/op/contrib/tensorrt.py
@@ -615,7 +615,6 @@ def layout_transform_annotate_fn(expr):  # pylint: disable=unused-variable
 @_register_external_dynamic_check_func("reshape")
 def reshape_annotate_fn(expr):  # pylint: disable=unused-variable
     """Check if reshape is supported by TensorRT."""
-
     attrs, args = expr.attrs, expr.args
     if args[0].checked_type.dtype != "float32":
         logger.info("Only float32 inputs are supported for TensorRT.")
@@ -629,7 +628,6 @@ def reshape_annotate_fn(expr):  # pylint: disable=unused-variable
     if len(new_shape) == 0 or len(shape) == 0:
         logger.info("reshape: Can't reshape to or from scalar.")
         return False
-
     dynamic_reshape = any([isinstance(x, tvm.tir.expr.Any) for x in shape])
 
     if dynamic_reshape:
@@ -643,6 +641,7 @@ def reshape_annotate_fn(expr):  # pylint: disable=unused-variable
                 ):
                     return False
         elif int(new_shape[0]) > 0:
+            # Currently we only allow dim[0] to be Any, so this branch will always be False
             if not (
                 isinstance(shape[0], (int, tvm.tir.expr.IntImm))
                 and isinstance(new_shape[0], (int, tvm.tir.expr.IntImm))
diff --git a/tests/python/contrib/test_tensorrt.py b/tests/python/contrib/test_tensorrt.py
index 7bb7c4c587f0..9f30c0d259e9 100644
--- a/tests/python/contrib/test_tensorrt.py
+++ b/tests/python/contrib/test_tensorrt.py
@@ -27,6 +27,7 @@
 from tvm.contrib import graph_runtime, utils
 from tvm.runtime.vm import VirtualMachine
 from tvm.relay import Any, GlobalVar, transform
+from tvm.relay.expr_functor import ExprVisitor
 from typing import Dict, Tuple, Union
 from tvm.contrib.download import download
 from tvm.relay.op.contrib import tensorrt
@@ -631,37 +632,104 @@ def get_graph(x_shape, new_shape):
     run_and_verify_func(get_graph((1, 1, 2, 3), (1, 6)))
 
 
+class AreOpsOnGraph(ExprVisitor):
+    """
+    Visits the Graph recursively and checks if it contains ops in the op_list
+    """
+
+    def __init__(self, op_list):
+        ExprVisitor.__init__(self)
+        self.op_list = op_list
+        self.on_graph = False
+
+    def visit_call(self, call):
+        if isinstance(call.op, tvm.tir.op.Op):
+            if str(call.op) in self.op_list:
+                self.on_graph = True
+
+        return super().visit_call(call)
+
+    def are_ops_on_graph(self, subgraph) -> bool:
+        """
+        This function recursively visits the graph and checks if op_list ops are on the graph
+        """
+        self.visit(subgraph)
+        return self.on_graph
+
+
+def are_ops_on_trt(mod, op_list):
+    for subgraph in mod.get_global_vars():
+        name = subgraph.name_hint
+        op_on_trt = False
+        op_on_tvm = True
+        if name == "main":
+            op_on_tvm = AreOpsOnGraph(op_list).are_ops_on_graph(mod[name].body)
+        elif mod[name].attrs and mod[name].attrs["Compiler"] == "tensorrt":
+            op_on_trt = AreOpsOnGraph(op_list).are_ops_on_graph(mod[name].body)
+        elif mod[name].attrs and mod[name].attrs["Compiler"] != "tensorrt":
+            op_on_tvm &= AreOpsOnGraph(op_list).are_ops_on_graph(mod[name].body)
+
+        if not op_on_trt and op_on_tvm:
+            return False
+
+    return True
+
+
 def test_dynamic_reshape():
     if skip_codegen_test():
         return
 
-    def test_run(batches_to_test, x_shape, new_shape):
-        x_data = np.ones([max(batches_to_test)] + list(x_shape)[1:]).astype("float32")
-        result_arr = [{} for _ in range(len(batches_to_test))]
-        for use_trt in [True]:
+    def test_run(x_data_list, x_shape, new_shape, should_offload_to_trt):
+        result_arr = [{} for _ in range(len(x_data_list))]
+        for use_trt in [True, False]:
             x = relay.var("x", shape=x_shape, dtype="float32")
             out = relay.reshape(x, new_shape)
             f = relay.Function([x], out)
             mod = tvm.IRModule()
             mod["main"] = f
             if use_trt:
-                mod, _ = tensorrt.partition_for_tensorrt(mod, params={})
+                mod, _ = tensorrt.partition_for_tensorrt(
+                    mod, params={}, remove_no_mac_subgraphs=False
+                )
+                assert are_ops_on_trt(mod, op_list=["reshape"]) == should_offload_to_trt
             if not skip_runtime_test():
                 with relay.build_config(opt_level=3):
                     relay_exec = relay.create_executor("vm", mod=mod, ctx=tvm.cpu(0), target="llvm")
 
-                for i, batch_size in enumerate(batches_to_test):
-                    result_arr[i][use_trt] = relay_exec.evaluate()(x_data[:batch_size, ...])
-                    # print(x_data[:batch_size, ...].shape, result_arr[i][use_trt].shape)
+                for i, x_data in enumerate(x_data_list):
+                    result_arr[i][use_trt] = relay_exec.evaluate()(x_data)
 
     if not skip_runtime_test():
-        for i in range(len(batches_to_test)):
+        for i in range(len(x_data_list)):
             assert_result_dict_holds(result_arr[i])
 
-    batches_to_test = [1, 1, 0, 2, 3, 0, 1, 3, 2]
+    dim_values = [1, 1, 0, 2, 3, 0, 1, 3, 2]
     x_shape = (relay.Any(), 3, 2, 3)
+    x_data_list = [
+        np.ones([dim_value] + list(x_shape)[1:]).astype("float32") for dim_value in dim_values
+    ]
+    new_shape = (-1, 3, 2, 3)
+    should_offload_to_trt = True
+    test_run(x_data_list, x_shape, new_shape, should_offload_to_trt)
+
+    dim_values = [1, 1, 0, 2, 3, 0, 1, 3, 2]
+    x_shape = (relay.Any(), 3, 2, 3)
+    x_data_list = [
+        np.ones([dim_value] + list(x_shape)[1:]).astype("float32") for dim_value in dim_values
+    ]
     new_shape = (-1, 1, 2, 3)
-    test_run(batches_to_test, x_shape, new_shape)
+    should_offload_to_trt = False
+    test_run(x_data_list, x_shape, new_shape, should_offload_to_trt)
+
+    dim_values = [1, 1, 0, 2, 3, 0, 1, 3, 2]
+    x_shape = (1, relay.Any(), 2, 3)
+    x_data_list = [
+        np.ones(list(x_shape[:1]) + [dim_value] + list(x_shape)[2:]).astype("float32")
+        for dim_value in dim_values
+    ]
+    new_shape = (1, -1, 2, 3)
+    should_offload_to_trt = False
+    test_run(x_data_list, x_shape, new_shape, should_offload_to_trt)
 
 
 def test_transpose():

From 4f7a14fbd24caa06c073e05defa6c80848f8d78d Mon Sep 17 00:00:00 2001
From: Ubuntu
Date: Fri, 5 Feb 2021 23:56:24 +0000
Subject: [PATCH 4/7] Add test cases

---
 tests/python/contrib/test_tensorrt.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/tests/python/contrib/test_tensorrt.py b/tests/python/contrib/test_tensorrt.py
index 9f30c0d259e9..a4a288c237e2 100644
--- a/tests/python/contrib/test_tensorrt.py
+++ b/tests/python/contrib/test_tensorrt.py
@@ -1332,5 +1332,4 @@ def get_maskrcnn_input(in_size: int) -> np.ndarray:
 
 
 if __name__ == "__main__":
-    test_dynamic_reshape()
-    # pytest.main([__file__])
+    pytest.main([__file__])

From 22da405cefea7f87684a881569a15062d03786c2 Mon Sep 17 00:00:00 2001
From: Ubuntu
Date: Mon, 8 Feb 2021 23:37:01 +0000
Subject: [PATCH 5/7] PR Comments

---
 tests/python/contrib/test_tensorrt.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/python/contrib/test_tensorrt.py b/tests/python/contrib/test_tensorrt.py
index a4a288c237e2..7ddc4e762cfd 100644
--- a/tests/python/contrib/test_tensorrt.py
+++ b/tests/python/contrib/test_tensorrt.py
@@ -666,10 +666,10 @@ def are_ops_on_trt(mod, op_list):
             op_on_tvm = AreOpsOnGraph(op_list).are_ops_on_graph(mod[name].body)
         elif mod[name].attrs and mod[name].attrs["Compiler"] == "tensorrt":
             op_on_trt = AreOpsOnGraph(op_list).are_ops_on_graph(mod[name].body)
-        elif mod[name].attrs and mod[name].attrs["Compiler"] != "tensorrt":
+        else:
             op_on_tvm &= AreOpsOnGraph(op_list).are_ops_on_graph(mod[name].body)
 
-        if not op_on_trt and op_on_tvm:
+        if not op_on_trt or op_on_tvm:
             return False
 
     return True

From 9d9ccd2cf998644f5bcf762f5d54d7ac9950a307 Mon Sep 17 00:00:00 2001
From: Ubuntu
Date: Tue, 9 Feb 2021 18:59:30 +0000
Subject: [PATCH 6/7] CI Error

From 8381659c8282ea345dc646168f9df8e573830de3 Mon Sep 17 00:00:00 2001
From: Ubuntu
Date: Tue, 9 Feb 2021 20:56:28 +0000
Subject: [PATCH 7/7] Empty Commit CI Error