18 changes: 18 additions & 0 deletions python/tvm/relax/op/builtin/builtin.py
@@ -50,3 +50,21 @@ def alloc_tensor(
        runtime_device_index = PrimValue(runtime_device_index)

    return _ffi_api.alloc_tensor(shape, dtype, runtime_device_index)  # type: ignore


def stop_lift_params(x: Expr) -> Expr:
    """
    Annotate that the consumers of the input tensor should not be lifted
    into the transform_params function by the LiftTransformParams pass.

    Parameters
    ----------
    x : relax.Expr
        The input data.

    Returns
    -------
    result : relax.Expr
        The result tensor, which is the same as the input tensor.
    """
    return _ffi_api.stop_lift_params(x)  # type: ignore
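
For context, a minimal usage sketch of the new annotation together with LiftTransformParams, distilled from the test_stop_lifting test added later in this PR (the module, function, and variable names here are illustrative):

import tvm
from tvm import relax
from tvm.script import relax as R


@tvm.script.ir_module
class Module:
    @R.function
    def main(
        x: R.Tensor((256, 256), "float32"),
        w: R.Tensor((256, 256), "float32"),
    ) -> R.Tensor((256, 256), "float32"):
        R.func_attr({"num_input": 1})
        with R.dataflow():
            # Lifted: runs once, ahead of time, on the weights.
            w_t = R.permute_dims(w, [1, 0])
            # Boundary: consumers of this value stay in main.
            w_t1 = R.builtin.stop_lift_params(w_t)
            w_add = R.add(w_t1, R.const(1, "float32"))
            y = R.matmul(x, w_add)
            R.output(y)
        return y


lifted = relax.transform.LiftTransformParams()(Module)
lifted.show()  # main plus a main_transform_params function (name assumed from the test below)

Everything feeding the annotated value is lifted into a separate *_transform_params function that can be run once ahead of time, while the annotated value's consumers remain in the original function.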
2 changes: 2 additions & 0 deletions python/tvm/script/ir_builder/relax/ir.py
@@ -129,6 +129,7 @@
    zeros_like,
    nn,
)
from tvm.relax.op.builtin import stop_lift_params
from tvm.relax.struct_info import StructInfo
from tvm.relax.utils import args_converter
from tvm.runtime import Object as tvm_Object
@@ -634,6 +635,7 @@ def dtype(value: Union[py_str, DataType]) -> Expr:
"square",
"squeeze",
"sqrt",
"stop_lift_params",
"str",
"strided_slice",
"subtract",
17 changes: 17 additions & 0 deletions src/relax/op/op.cc
@@ -513,5 +513,22 @@ Expr MakeCallTIRDyn(Expr func, Tuple args) {

TVM_REGISTER_GLOBAL("relax.op.vm.call_tir_dyn").set_body_typed(MakeCallTIRDyn);

// builtin stop_lift_params
StructInfo InferStructInfoStopLiftParams(const Call& call, const BlockBuilder& ctx) {
  return InferStructInfoUnaryArith<false>(call, ctx);
}

RELAY_REGISTER_OP("relax.builtin.stop_lift_params")
    .set_num_inputs(1)
    .add_argument("x", "Expr", "The input data")
    .set_attr<FInferStructInfo>("FInferStructInfo", InferStructInfoStopLiftParams);

Expr MakeStopLiftParams(Expr x) {
  static const Op& op = Op::Get("relax.builtin.stop_lift_params");
  return Call(op, {x}, Attrs(), {});
}

TVM_REGISTER_GLOBAL("relax.op.builtin.stop_lift_params").set_body_typed(MakeStopLiftParams);

} // namespace relax
} // namespace tvm
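
As a note on the registration above: the packed function installed by TVM_REGISTER_GLOBAL is what the Python wrapper in builtin.py reaches through _ffi_api, and the same constructor can be fetched by name. A small sketch (variable names are illustrative):

import tvm
from tvm import relax as rx

# Fetch the globally registered constructor for the op.
make_stop_lift_params = tvm.get_global_func("relax.op.builtin.stop_lift_params")
x = rx.Var("x", rx.TensorStructInfo(shape=[4, 5], dtype="float32"))
call = make_stop_lift_params(x)  # same as rx.op.builtin.stop_lift_params(x)
assert isinstance(call, rx.Call)
assert call.op.name == "relax.builtin.stop_lift_params"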
13 changes: 13 additions & 0 deletions src/relax/transform/lift_transform_params.cc
@@ -168,6 +168,12 @@ class LiftTransformParamsPlanner : public ExprVisitor {
    if (!is_in_dataflow_block_) {
      can_lift = false;
    }
    if (const auto* call = binding->value.as<CallNode>()) {
      static const Op& stop_lift_params_op = Op::Get("relax.builtin.stop_lift_params");
      if (call->op.same_as(stop_lift_params_op)) {
        can_lift = false;
      }
    }

    PostOrderVisit(binding->value, [&](const ObjectRef& obj) {
      if (const VarNode* var = obj.as<VarNode>()) {
@@ -268,6 +274,13 @@ class TransformParamsLifter : public ExprMutator {
    if (lift_plan_.lifted_bindings.count(binding->var)) {
      return;
    }
    if (const auto* call = binding->value.as<CallNode>()) {
      static const Op& stop_lift_params_op = Op::Get("relax.builtin.stop_lift_params");
      if (call->op.same_as(stop_lift_params_op)) {
        var_remap_[binding->var->vid] = Downcast<Var>(VisitExpr(call->args[0]));
        return;
      }
    }
    ExprMutator::VisitBinding_(binding);
  }

8 changes: 8 additions & 0 deletions tests/python/relax/test_op_misc.py
@@ -103,5 +103,13 @@ def test_vm_alloc_tensor():
    tvm.ir.assert_structural_equal(alloc.struct_info, R.Tensor([4, 5], "float32"))


def test_builtin_stop_lift_params():
    bb = rx.BlockBuilder()
    x = rx.Var("x", rx.TensorStructInfo(shape=[4, 5], dtype="float32"))
    x1 = rx.op.builtin.stop_lift_params(x)
    x1 = bb.normalize(x1)
    tvm.ir.assert_structural_equal(x1.struct_info, R.Tensor([4, 5], "float32"))


if __name__ == "__main__":
    tvm.testing.main()
48 changes: 48 additions & 0 deletions tests/python/relax/test_transform_lift_transform_params.py
@@ -19,6 +19,7 @@
import tvm.testing
from tvm import relax
from tvm.script import relax as R, tir as T
from tvm.script import ir as I
import numpy as np
import tvm.topi.testing

@@ -392,5 +393,52 @@ def func3(
    tvm.ir.assert_structural_equal(after, Expected)


def test_stop_lifting():
    @tvm.script.ir_module
    class Before:
        @R.function
        def func1(
            x: R.Tensor((256, 256), "float32"),
            w1: R.Tensor((256, 256), "float32"),
        ) -> R.Tensor((256, 256), "float32"):
            R.func_attr({"num_input": 1})
            with R.dataflow():
                w1_t = R.permute_dims(w1, [1, 0])
                w1_t1 = R.builtin.stop_lift_params(w1_t)
                w1_add = R.add(w1_t1, R.const(1, "float32"))
                y = R.matmul(x, w1_add)
                R.output(y)
            return y

    @I.ir_module
    class Expected:
        @R.function
        def func1(
            x: R.Tensor((256, 256), dtype="float32"),
            params: R.Tuple(R.Tensor((256, 256), dtype="float32")),
        ) -> R.Tensor((256, 256), dtype="float32"):
            with R.dataflow():
                lv: R.Tensor((256, 256), dtype="float32") = params[0]
                w1_add: R.Tensor((256, 256), dtype="float32") = R.add(lv, R.const(1, "float32"))
                y: R.Tensor((256, 256), dtype="float32") = R.matmul(x, w1_add, out_dtype="void")
                R.output(y)
            return y

        @R.function
        def func1_transform_params(
            params: R.Tuple(R.Tensor((256, 256), dtype="float32"))
        ) -> R.Tuple(R.Tensor((256, 256), dtype="float32")):
            with R.dataflow():
                lv: R.Tensor((256, 256), dtype="float32") = params[0]
                lv1: R.Tensor((256, 256), dtype="float32") = R.permute_dims(lv, axes=[1, 0])
                gv: R.Tuple(R.Tensor((256, 256), dtype="float32")) = (lv1,)
                R.output(gv)
            return gv

    mod = Before
    after = relax.transform.LiftTransformParams()(mod)
    tvm.ir.assert_structural_equal(after, Expected)


if __name__ == "__main__":
    tvm.testing.main()
10 changes: 10 additions & 0 deletions tests/python/relax/test_tvmscript_parser.py
@@ -1196,6 +1196,16 @@ def foo(x: R.Tensor(("m", "n"), dtype="float32")):
    _check(foo)


def test_builtin_ops():
    @R.function
    def foo(x: R.Tensor(("m", "n"), dtype="float32")):
        tensor = R.builtin.stop_lift_params(x)
        gv = tensor
        return gv

    _check(foo)


def test_prim_value():
    @R.function
    def foo():