diff --git a/src/relay/backend/vm/compiler.cc b/src/relay/backend/vm/compiler.cc
index 832cc0ee3891..ad23e132c486 100644
--- a/src/relay/backend/vm/compiler.cc
+++ b/src/relay/backend/vm/compiler.cc
@@ -978,6 +978,9 @@ void VMCompiler::Lower(IRModule mod, const TargetsMap& targets, const tvm::Targe
 
 transform::Sequential MemoryOpt(tvm::Target host_target, TargetsMap targets) {
   Array<Pass> pass_seqs;
+  // Remove unused functions
+  Array<String> entry_functions{"main"};
+  pass_seqs.push_back(transform::RemoveUnusedFunctions(entry_functions));
   // Manifest the allocations.
   pass_seqs.push_back(transform::ManifestAlloc(host_target, targets));
 
diff --git a/tests/python/relay/test_vm.py b/tests/python/relay/test_vm.py
index 8f518695e6b4..9f861a2e7b54 100644
--- a/tests/python/relay/test_vm.py
+++ b/tests/python/relay/test_vm.py
@@ -29,6 +29,7 @@
 from tvm.contrib import utils
 from tvm import rpc
 import tvm.testing
+from tvm.relay.transform import InferType
 
 
 def check_result(args, expected_result, mod=None):
@@ -186,6 +187,31 @@ def test_multiple_ifs():
     assert res == [1, 0]
 
 
+@tvm.testing.uses_gpu
+def test_unused_function():
+    cond = relay.const(True)
+    mod = tvm.IRModule()
+    then_name = relay.GlobalVar("times_2")
+    # define unused function
+    else_name = relay.GlobalVar("times_3")
+    t1 = relay.TensorType((2, 2), dtype="float32")
+    x1 = relay.var("x1", t1, dtype="float32")
+    x2 = relay.var("x2", t1, dtype="float32")
+    f2 = relay.multiply(x1, relay.const(2.0))
+    f3 = relay.multiply(x2, relay.const(3.0))
+    mod[then_name] = relay.Function([x1], f2)
+    mod[else_name] = relay.Function([x2], f3)
+    mod = InferType()(mod)
+    x3 = relay.var("x3", t1, dtype="float32")
+    # put unused function in else branch
+    f = relay.If(cond, then_name(x3), else_name(x3))
+    mod["main"] = relay.Function([x3], f)
+    x_data = np.random.rand(2, 2).astype("float32")
+    y_data = x_data * 2
+
+    check_result([x_data], y_data, mod=mod)
+
+
 @tvm.testing.uses_gpu
 def test_simple_call():
     mod = tvm.IRModule({})