diff --git a/python/tvm/relax/block_builder.py b/python/tvm/relax/block_builder.py
index 37866840bd68..330585599d08 100644
--- a/python/tvm/relax/block_builder.py
+++ b/python/tvm/relax/block_builder.py
@@ -35,12 +35,11 @@ class FunctionScope(object):
     """Auxiliary scope for function"""

-    def __init__(self, block_builder, name, params, attrs, is_pure):
+    def __init__(self, block_builder, name, params, attrs):
         self._bb = block_builder
         self._name = name
         self._params = params
         self._attrs = attrs
-        self._is_pure = is_pure

         # Blocks that have been collected within the function
         self._blocks = []
@@ -209,7 +208,6 @@ def function(
         name: str,
         params: Optional[Union[Var, Tuple, List[Var]]] = None,
         attrs: Optional[Dict[str, Object]] = None,
-        pure: bool = True,
         private: bool = False,
     ) -> FunctionScope:
         """Annotate a Relax function.
@@ -227,9 +225,6 @@ def function(
         attrs : Dict[str, Object], optional
             The function attrs

-        pure : bool, optional
-            Whether the function is annotated as pure.
-
         private : bool, optional
             Whether the function is annotated as private.
             If the function is private, it will not have a global symbol attribute.
@@ -259,7 +254,7 @@ def function(
         if not private:
             attrs["global_symbol"] = name

-        return FunctionScope(self, name, params, attrs, is_pure=pure)
+        return FunctionScope(self, name, params, attrs)

     def testing_scope(self, def_vars: List[tir.Var]) -> TestingScope:
         """Start a scope for unit-testing purposes.
@@ -645,7 +640,7 @@ def emit_func_output(

         # do not specify ret_struct_info and let constructor deduce
         # from seqe.struct_info
-        func = rx.Function(self._func._params, seqe, is_pure=self._func._is_pure)
+        func = rx.Function(self._func._params, seqe)
         for key, value in self._func._attrs.items():
             func = func.with_attr(key, value)
         self.end_scope()
diff --git a/python/tvm/relax/frontend/nn/modules.py b/python/tvm/relax/frontend/nn/modules.py
index b61656a2e6bd..1579c5b512c5 100644
--- a/python/tvm/relax/frontend/nn/modules.py
+++ b/python/tvm/relax/frontend/nn/modules.py
@@ -19,7 +19,7 @@
 from typing import List, Optional, Sequence, Union

 from tvm import relax as rx
-from tvm import tir
+from tvm import tir, ir

 from . import op
 from .core import Effect, Module, ModuleList, Parameter, Tensor, get_default_dtype
@@ -599,12 +599,15 @@ def emit_init(self, name_hint: str, bb: rx.BlockBuilder):  # pylint: disable=arg
         init_shape = rx.ShapeExpr([self.init_seq_len] + self.unit_shape)
         return [
             bb.emit(
-                rx.op.call_pure_packed(
-                    "vm.builtin.attention_kv_cache_create",
-                    rx.op.zeros(init_shape, self.dtype),
-                    init_shape,
-                    rx.PrimValue(0),
-                    sinfo_args=rx.ObjectStructInfo(),
+                rx.Call(
+                    ir.Op.get("relax.call_pure_packed"),
+                    args=[
+                        rx.extern("vm.builtin.attention_kv_cache_create"),
+                        rx.op.zeros(init_shape, self.dtype),
+                        init_shape,
+                        rx.PrimValue(0),
+                    ],
+                    sinfo_args=[rx.ObjectStructInfo()],
                 ),
                 name_hint=name_hint,
             )
@@ -672,11 +675,14 @@ def view(self, seq_len: tir.Var) -> Tensor:
         shape = rx.ShapeExpr([seq_len] + self.unit_shape)
         return Tensor(
             _expr=rx.BlockBuilder.current().emit(
-                rx.op.call_pure_packed(
-                    "vm.builtin.attention_kv_cache_view",
-                    self.cache,
-                    shape,
-                    sinfo_args=rx.TensorStructInfo(shape, self.dtype),
+                rx.Call(
+                    ir.Op.get("relax.call_pure_packed"),
+                    args=[
+                        rx.extern("vm.builtin.attention_kv_cache_view"),
+                        self.cache,
+                        shape,
+                    ],
+                    sinfo_args=[rx.TensorStructInfo(shape, self.dtype)],
                 )
             )
         )
@@ -696,12 +702,14 @@ def append(self, new_element: Tensor) -> None:
                 f'but got "{new_element.dtype}"'
             )
         self.cache = rx.BlockBuilder.current().emit(
-            rx.op.call_inplace_packed(
-                "vm.builtin.attention_kv_cache_append",
-                self.cache,
-                new_element._expr,
-                inplace_indices=[0],
-                sinfo_args=rx.ObjectStructInfo(),
+            rx.Call(
+                ir.Op.get("relax.call_pure_packed"),
+                args=[
+                    rx.extern("vm.builtin.attention_kv_cache_append"),
+                    self.cache,
+                    new_element._expr,
+                ],
+                sinfo_args=[rx.ObjectStructInfo()],
             )
         )
diff --git a/python/tvm/script/ir_builder/ir/ir.py b/python/tvm/script/ir_builder/ir/ir.py
index d35d73678b47..0d3523ec7dd7 100644
--- a/python/tvm/script/ir_builder/ir/ir.py
+++ b/python/tvm/script/ir_builder/ir/ir.py
@@ -43,7 +43,7 @@ def decl_function(func_name: str, func_signature: BaseFunc) -> GlobalVar:
     func_name : str
         The function unique name.

-    func_signature: BaseFunc
+    func_signature: Optional[BaseFunc]
         A Function w/o body, which used to specify the function signature
         (i.e. func params and func return type/shape).
@@ -55,11 +55,7 @@ def decl_function(func_name: str, func_signature: BaseFunc) -> GlobalVar:
     gv : GlobalVar
         The corresponding GlobalVar.
     """
-    if not isinstance(func_signature, BaseFunc):
-        raise ValueError(
-            "decl_function expects an instance of BaseFunc, "
-            f"but {func_signature} is of type {type(func_signature)}"
-        )
+
     return _ffi_api.DeclFunction(  # type: ignore[attr-defined] # pylint: disable=no-member
         func_name, func_signature
     )
diff --git a/python/tvm/script/parser/core/entry.py b/python/tvm/script/parser/core/entry.py
index 0c88cacf8a62..9a7430643cd8 100644
--- a/python/tvm/script/parser/core/entry.py
+++ b/python/tvm/script/parser/core/entry.py
@@ -18,20 +18,12 @@
 import inspect
 from typing import Any, Dict, Union

-from ....ir.module import IRModule
 from ...ir_builder import IRBuilder
 from . import doc
 from .diagnostics import Source
 from .error import ParserError
 from .parser import Parser

-WELL_FORMED_ERROR_MESSAGE = (
-    "Program is not well-formed. If this is deliberate, consider "
-    "setting check_well_formed in the top-level decorator to False "
-    "(e.g., @I.ir_module(check_well_formed=False) or "
-    "@R.function(check_well_formed=False))."
-)
-

 def _default_globals() -> Dict[str, Any]:
     import tvm  # pylint: disable=import-outside-toplevel
@@ -51,11 +43,7 @@ def scan_macro(program: Union[Any, str], extra_vars: Dict[str, Any] = None) -> A
     return source, closure_vars


-def parse(
-    program: Union[doc.AST, Any, str],
-    extra_vars: Dict[str, Any] = None,
-    check_well_formed: bool = True,
-) -> Any:
+def parse(program: Union[doc.AST, Any, str], extra_vars: Dict[str, Any] = None) -> Any:
     """Register a method for a operand type, AST operator node and operand index.

     Parameters
@@ -66,9 +54,6 @@ def parse(
     extra_vars : Dict[str, Any]
         The extra variable table for parsing.

-    check_well_formed : bool
-        Whether to check well-formedness after parsing.
-
     Returns
     -------
     func : Any
@@ -92,25 +77,4 @@ def parse(
             parser.parse(extra_vars=extra_vars)
     except ParserError as err:
         parser.report_error(err.node, err.args[0])
-    ret = builder.get()
-    # check well-formedness in both Relax and TIR
-    if check_well_formed:
-        # (C0415 = import-outside-toplevel. It is necessary here to avoid a circular dependency,
-        # since importing Relax imports a dependency on the parser)
-        from ....relax.analysis import well_formed as relax_well_formed  # pylint: disable=C0415
-        from ....tir.analysis import verify_well_formed as tir_well_formed  # pylint: disable=C0415
-
-        check_ret = ret
-        if not isinstance(check_ret, IRModule):
-            check_ret = IRModule.from_expr(ret)
-        source_ast = source.as_ast()
-        if not relax_well_formed(check_ret):
-            parser.report_error(source_ast, err=WELL_FORMED_ERROR_MESSAGE)
-        try:
-            tir_well_formed(check_ret)
-        except Exception as err:  # pylint: disable=broad-exception-caught
-            parser.report_error(
-                source_ast,
-                err=f"{WELL_FORMED_ERROR_MESSAGE}\n\nTraceback: {str(err)}",
-            )
-    return ret
+    return builder.get()
diff --git a/python/tvm/script/parser/ir/entry.py b/python/tvm/script/parser/ir/entry.py
index f91c7701a2eb..5878a1ce55cc 100644
--- a/python/tvm/script/parser/ir/entry.py
+++ b/python/tvm/script/parser/ir/entry.py
@@ -17,17 +17,14 @@
 """The entry point of TVM parser for ir module."""

 import inspect
-from typing import Optional, Type
+from typing import Type

 from tvm.ir import IRModule

 from .._core import parse, utils


-# this formulation allows us to support having @I.ir_module
-# appear as a decorator by itself or to have optional arguments
-# like @I.ir_module(check_well_formed=False)
-def ir_module(mod: Optional[Type] = None, check_well_formed: bool = True) -> IRModule:
+def ir_module(mod: Type) -> IRModule:
     """The parsing method for ir module, by using `@ir_module` as decorator.

     Parameters
@@ -35,30 +32,17 @@ def ir_module(mod: Optional[Type] = None, check_well_formed: bool = True) -> IRM
     mod : Type
         The class to be parsed as ir module.

-    check_well_formed : bool
-        Whether to check well-formedness during parsing.
-
     Returns
     -------
     ir_module : IRModule
         The parsed ir module.
     """
+    if not inspect.isclass(mod):
+        raise TypeError(f"Expect a class, but got: {mod}")

-    def decorator_wrapper(mod):
-        if not inspect.isclass(mod):
-            raise TypeError(f"Expect a class, but got: {mod}")
-        m = parse(mod, utils.inspect_class_capture(mod), check_well_formed=check_well_formed)
-        setattr(m, "__name__", mod.__name__)
-        return m
-
-    if mod is not None:
-        # if there are no optional args given, this will directly invoke the wrapper
-        return decorator_wrapper(mod)
-    else:
-        # if there is a optional arg given, it returns the wrapper function
-        # as a new decorator and applies it
-        setattr(decorator_wrapper, "dispatch_token", "ir")
-        return decorator_wrapper
+    m = parse(mod, utils.inspect_class_capture(mod))
+    setattr(m, "__name__", mod.__name__)
+    return m


 setattr(ir_module, "dispatch_token", "ir")
diff --git a/python/tvm/script/parser/relax/entry.py b/python/tvm/script/parser/relax/entry.py
index a3b391637cb4..a82cbeb16349 100644
--- a/python/tvm/script/parser/relax/entry.py
+++ b/python/tvm/script/parser/relax/entry.py
@@ -52,7 +52,7 @@
 # appear as a decorator by itself or to have optional arguments
 # like @R.function(pure=False)
 def function(
-    f: Optional[FType] = None, pure: bool = True, private: bool = False, check_well_formed=True
+    f: Optional[FType] = None, pure: bool = True, private: bool = False
 ) -> Union[Function, FType]:
     # pylint: disable=unused-argument
     # (pure and private aren't used here, but are used later in parsing)
@@ -66,7 +66,7 @@ def decorator_wrapper(f):
             raise TypeError(f"Expect a function, but got: {f}")
         if utils.is_defined_in_class(orig_stack, f):
             return f
-        return parse(f, utils.inspect_function_capture(f), check_well_formed=check_well_formed)
+        return parse(f, utils.inspect_function_capture(f))

     if f is not None:
         # if there are no optional args given, this will directly invoke the wrapper
diff --git a/python/tvm/script/parser/tir/entry.py b/python/tvm/script/parser/tir/entry.py
index 79eb88dfc102..d2fb070aaab1 100644
--- a/python/tvm/script/parser/tir/entry.py
+++ b/python/tvm/script/parser/tir/entry.py
@@ -26,9 +26,7 @@
 from ..core.parser import Parser, ScriptMacro


-def prim_func(
-    func: Optional[Callable] = None, private: bool = False, check_well_formed=True
-) -> Union[PrimFunc, Callable]:
+def prim_func(func: Optional[Callable] = None, private: bool = False) -> Union[PrimFunc, Callable]:
     """The parsing method for tir prim func, by using `@prim_func` as decorator.

     Parameters
@@ -62,7 +60,7 @@ def decorator_wrapper(func):
             raise TypeError(f"Expect a function, but got: {func}")
         if utils.is_defined_in_class(outer_stack, func):
             return func
-        f = parse(func, utils.inspect_function_capture(func), check_well_formed=check_well_formed)
+        f = parse(func, utils.inspect_function_capture(func))
         setattr(f, "__name__", func.__name__)
         return f
diff --git a/python/tvm/testing/utils.py b/python/tvm/testing/utils.py
index d0ceee4aa2a0..e1b1c654570a 100644
--- a/python/tvm/testing/utils.py
+++ b/python/tvm/testing/utils.py
@@ -527,6 +527,7 @@ def enabled_targets():


 class Feature:
+
     """A feature that may be required to run a test.

     Parameters
@@ -1951,8 +1952,6 @@ def expected(A: T.Buffer(1, "int32")):

     """

-    check_well_formed: bool = True
-
     def __init_subclass__(cls):
         assert len([getattr(cls, name) for name in ["before", "Before"] if hasattr(cls, name)]) <= 1
         assert (
@@ -1996,9 +1995,7 @@ def inner(self):
                     func_dict[name] = method.with_attr("global_symbol", name)
                 else:
                     source_code = "@T.prim_func\n" + textwrap.dedent(inspect.getsource(method))
-                    prim_func = tvm.script.from_source(
-                        source_code, check_well_formed=self.check_well_formed
-                    )
+                    prim_func = tvm.script.from_source(source_code)
                     func_dict[name] = prim_func.with_attr("global_symbol", name)
             return tvm.IRModule(func_dict)
@@ -2007,7 +2004,7 @@ def inner(self):

             def inner(self):
                 # pylint: disable=unused-argument
                 source_code = "@T.prim_func\n" + textwrap.dedent(inspect.getsource(func))
-                return tvm.script.from_source(source_code, check_well_formed=self.check_well_formed)
+                return tvm.script.from_source(source_code)

             return pytest.fixture(inner)
diff --git a/python/tvm/tir/transform/transform.py b/python/tvm/tir/transform/transform.py
index 9f7f92dbed74..c2022b918643 100644
--- a/python/tvm/tir/transform/transform.py
+++ b/python/tvm/tir/transform/transform.py
@@ -19,7 +19,7 @@

 import enum

-from typing import Any, Callable, Optional
+from typing import Callable, Optional

 from . import _ffi_api
 from . import function_pass as _fpass
@@ -323,7 +323,7 @@ def BF16ComputeLegalize():
     return _ffi_api.BF16ComputeLegalize()  # type: ignore


-def FP8ComputeLegalize(target: Any, promote_dtype_str: str = "float32"):
+def FP8ComputeLegalize(promote_dtype_str: str = "float32"):
     """Legalize fp8 compute Ops.

     Parameters
@@ -331,15 +331,12 @@ def FP8ComputeLegalize(target: Any, promote_dtype_str: str = "float32"):
     promote_dtype : str
         The data type we promote fp8 to, options: float16/float32.

-    target : tvm.target.Target
-        The legalization target
-
     Returns
     -------
     fpass : tvm.transform.Pass
         The result pass
     """
-    return _ffi_api.FP8ComputeLegalize(target, promote_dtype_str)  # type: ignore
+    return _ffi_api.FP8ComputeLegalize(promote_dtype_str)  # type: ignore


 def BF16StorageLegalize():
@@ -353,20 +350,15 @@ def BF16StorageLegalize():
     return _ffi_api.BF16StorageLegalize()  # type: ignore


-def FP8StorageLegalize(target: Any):
+def FP8StorageLegalize():
     """Legalize fp8 storage types to u8.

-    Parameters
-    ----------
-    target : tvm.target.Target
-        The legalization target
-
     Returns
     -------
     fpass : tvm.transform.Pass
         The result pass
     """
-    return _ffi_api.FP8StorageLegalize(target)  # type: ignore
+    return _ffi_api.FP8StorageLegalize()  # type: ignore


 def CommonSubexprElimTIR(enable_cse_tir: bool = True, identify_equiv_terms: bool = False):
diff --git a/src/driver/driver_api.cc b/src/driver/driver_api.cc
index e3b4a5a6517c..33b4514e6b29 100644
--- a/src/driver/driver_api.cc
+++ b/src/driver/driver_api.cc
@@ -590,7 +590,6 @@ transform::Sequential MixedModulePassManager(IRModule mixed_mod, Target target)

   mixed_pass_list.push_back(tir::transform::ThreadSync("shared"));
   mixed_pass_list.push_back(tir::transform::ThreadSync("shared.dyn"));
-  mixed_pass_list.push_back(tir::transform::MergeSharedMemoryAllocations());
   mixed_pass_list.push_back(tir::transform::ThreadSync("warp"));
   mixed_pass_list.push_back(tir::transform::InferFragment());
   mixed_pass_list.push_back(tir::transform::LowerThreadAllreduce());
@@ -608,6 +607,9 @@ transform::Sequential MixedModulePassManager(IRModule mixed_mod, Target target)

   mixed_pass_list.push_back(tir::transform::AnnotateDeviceRegions());
   mixed_pass_list.push_back(tir::transform::SplitHostDevice());
+  // MergeSharedMemoryAllocations must be applied after SplitHostDevice
+  // because the merged allocation site is at the beginning of each device function
+  mixed_pass_list.push_back(tir::transform::MergeSharedMemoryAllocations());

   bool unpacked_api = mixed_mod->GetAttr<relay::Executor>(tvm::attr::kExecutor)
                           .value_or(relay::Executor::Create("graph", {}))
diff --git a/src/tir/ir/data_type_rewriter.cc b/src/tir/ir/data_type_rewriter.cc
index 3461597b8e0f..2d2c097be494 100644
--- a/src/tir/ir/data_type_rewriter.cc
+++ b/src/tir/ir/data_type_rewriter.cc
@@ -532,12 +532,6 @@ Stmt IndexDataTypeRewriter::VisitStmt_(const ForNode* op) {
     n->loop_var = new_loop_var;
     n->min = cast(new_loop_var.dtype(), min);
     n->extent = cast(new_loop_var.dtype(), extent);
-    if (op->thread_binding.defined()) {
-      auto old_thread_binding = op->thread_binding.value();
-      auto* ptr = old_thread_binding.CopyOnWrite();
-      ptr->var = old_thread_binding->var.copy_with_dtype(new_loop_var.dtype());
-      n->thread_binding = std::move(Optional<IterVar>(std::move(old_thread_binding)));
-    }
     n->body = new_body;
     return std::move(new_for);
   } else {
diff --git a/src/tir/transforms/default_gpu_schedule.cc b/src/tir/transforms/default_gpu_schedule.cc
index 6d0542257309..6cf7f6e06743 100644
--- a/src/tir/transforms/default_gpu_schedule.cc
+++ b/src/tir/transforms/default_gpu_schedule.cc
@@ -113,8 +113,7 @@ bool IsScheduledOnGPU(const BaseFunc& func) {

   if (target.defined()) {
     int dev_type = target->GetTargetDeviceType();
-    if (!(dev_type == kDLCUDA || dev_type == kDLMetal || dev_type == kDLROCM ||
-          dev_type == kDLWebGPU)) {
+    if (dev_type != kDLCUDA) {
       return false;
     }
   }
diff --git a/tests/python/arith/test_arith_domain_touched.py b/tests/python/arith/test_arith_domain_touched.py
index e8d49316bdd6..1553aabd4e4c 100644
--- a/tests/python/arith/test_arith_domain_touched.py
+++ b/tests/python/arith/test_arith_domain_touched.py
@@ -72,14 +72,15 @@ def test_domain_touched_vector():
     m = tvm.runtime.convert(128)

     @T.prim_func
-    def func(a: T.handle, b: T.handle, n: T.int32):
+    def func(a: T.handle, b: T.handle):
+        n = T.int32()
         A = T.match_buffer(a, (n * m,))
         B = T.match_buffer(b, (n * m,))

         for i in T.serial(n):
             A[i * m : (i + 1) * m : 1] = A[i * m : (i + 1) * m : 1] + B[i * m : (i + 1) * m : 1]

-    a, b = [func.buffer_map[var] for var in func.params[:2]]
+    a, b = [func.buffer_map[var] for var in func.params]

     assert tvm.arith._ffi_api.DomainTouched(func.body, a, True, False)[0].extent.value == 128
     assert tvm.arith._ffi_api.DomainTouched(func.body, a, True, False)[0].extent.value == 128
diff --git a/tests/python/codegen/test_inject_ptx_ldg32.py b/tests/python/codegen/test_inject_ptx_ldg32.py
index 4a6d4c366a61..8e8547c572d0 100644
--- a/tests/python/codegen/test_inject_ptx_ldg32.py
+++ b/tests/python/codegen/test_inject_ptx_ldg32.py
@@ -27,14 +27,13 @@ def vector_add(A: T.Buffer((16), "float32"), B: T.Buffer((32), "float32")) -> No
     tx = T.env_thread("threadIdx.x")
     T.launch_thread(bx, 1)
     T.launch_thread(tx, 32)
-    with T.block():
-        A_local = T.alloc_buffer((32), "float32", scope="local")
+    A_local = T.Buffer((32), "float32", scope="local")

-        with T.block():
-            T.reads(A[0:16])
-            T.writes(A_local[0:32])
-            A_local[tx] = T.if_then_else(tx % 2 == 0, A[tx // 2], T.float32(0), dtype="float32")
-            B[tx] = A_local[tx] + 1.0
+    with T.block():
+        T.reads(A[0:16])
+        T.writes(A_local[0:32])
+        A_local[tx] = T.if_then_else(tx % 2 == 0, A[tx // 2], T.float32(0), dtype="float32")
+        B[tx] = A_local[tx] + 1.0


 @tvm.testing.requires_cuda
diff --git a/tests/python/contrib/test_ethosu/test_copy_compute_reordering.py b/tests/python/contrib/test_ethosu/test_copy_compute_reordering.py
index 6b9702f012ca..1a00e01b6031 100644
--- a/tests/python/contrib/test_ethosu/test_copy_compute_reordering.py
+++ b/tests/python/contrib/test_ethosu/test_copy_compute_reordering.py
@@ -22,9 +22,8 @@
 from tvm.script import tir as T
 from tvm.relay.backend.contrib.ethosu.tir.passes import CopyComputeReordering

-# Uninitialized vars used
 # fmt: off
-@tvm.script.ir_module(check_well_formed=False)
+@tvm.script.ir_module
 class AllOperatorsWithWeights:
     @T.prim_func
     def main() -> None:
@@ -71,9 +70,8 @@ def test_all_operators_with_weights_max_copy_movements_0():


 def test_all_operators_with_weights_max_copy_movements_1():
-    # Uninitialized vars used
     # fmt: off
-    @tvm.script.ir_module(check_well_formed=False)
+    @tvm.script.ir_module
     class ReferenceModule:
         @T.prim_func
         def main() -> None:
@@ -118,9 +116,8 @@ def main() -> None:


 def test_all_operators_with_weights_max_copy_movements_2():
-    # Uninitialized vars used
     # fmt: off
-    @tvm.script.ir_module(check_well_formed=False)
+    @tvm.script.ir_module
     class ReferenceModule:
         @T.prim_func
         def main() -> None:
@@ -164,9 +161,8 @@ def main() -> None:
     tvm.ir.assert_structural_equal(test_mod, reference_mod, True)


-# Uninitialized vars used
 # fmt: off
-@tvm.script.ir_module(check_well_formed=False)
+@tvm.script.ir_module
 class AllOperatorsWithoutWeights:
     @T.prim_func
     def main() -> None:
@@ -187,9 +183,8 @@ def test_all_operators_without_weights(max_copy_movements):
     tvm.ir.assert_structural_equal(test_mod, reference_mod, True)


-# Uninitialized vars used
 # fmt: off
-@tvm.script.ir_module(check_well_formed=False)
+@tvm.script.ir_module
 class OperatorsWithAndWithoutWeights:
     @T.prim_func
     def main() -> None:
@@ -223,9 +218,8 @@ def test_operators_with_and_without_weights_max_copy_movements_0():


 def test_operators_with_and_without_weights_max_copy_movements_1():
-    # Uninitialized vars used
     # fmt: off
-    @tvm.script.ir_module(check_well_formed=False)
+    @tvm.script.ir_module
     class ReferenceModule:
         @T.prim_func
         def main() -> None:
@@ -257,9 +251,8 @@ def main() -> None:


 def test_operators_with_and_without_weights_max_copy_movements_2():
-    # Uninitialized vars used
     # fmt: off
-    @tvm.script.ir_module(check_well_formed=False)
+    @tvm.script.ir_module
     class ReferenceModule:
         @T.prim_func
         def main() -> None:
@@ -290,9 +283,8 @@ def main() -> None:
     tvm.ir.assert_structural_equal(test_mod, reference_mod, True)


-# Uninitialized vars used
 # fmt: off
-@tvm.script.ir_module(check_well_formed=False)
+@tvm.script.ir_module
 class CopyToBufferWithLocalScope:
     @T.prim_func
     def main() -> None:
@@ -332,9 +324,8 @@ def test_copy_to_buffer_with_local_scope_max_copy_movements_0():

 @pytest.mark.parametrize("max_copy_movements", [1, 2])
 def test_copy_to_buffer_with_local_scope_max_copy_movements_n(max_copy_movements):
-    # Uninitialized vars used
     # fmt: off
-    @tvm.script.ir_module(check_well_formed=False)
+    @tvm.script.ir_module
     class ReferenceModule:
         @T.prim_func
         def main() -> None:
@@ -409,9 +400,8 @@ def abs():


 def test_default_max_copy_movements():
-    # Uninitialized vars used
     # fmt: off
-    @tvm.script.ir_module(check_well_formed=False)
+    @tvm.script.ir_module
     class ReferenceModule:
         @T.prim_func
         def main() -> None:
@@ -443,9 +433,8 @@ def main() -> None:


 def test_pass_context_option_max_copy_movements():
-    # Uninitialized vars used
     # fmt: off
-    @tvm.script.ir_module(check_well_formed=False)
+    @tvm.script.ir_module
     class ReferenceModule:
         @T.prim_func
         def main() -> None:
@@ -480,9 +469,8 @@ def main() -> None:


 def test_reordering_based_on_cycles():
-    # Uninitialized vars used
     # fmt: off
-    @tvm.script.ir_module(check_well_formed=False)
+    @tvm.script.ir_module
     class ModuleBefore:
         @T.prim_func
         def main(placeholder: T.Buffer(97156, "int8"), placeholder_encoded: T.Buffer(208, "uint8"), placeholder_encoded_1: T.Buffer(112, "uint8"), placeholder_encoded_2: T.Buffer(96, "uint8"), placeholder_encoded_3: T.Buffer(112, "uint8"), ethosu_write: T.Buffer(43672, "int8")) -> None:
@@ -530,8 +518,7 @@ def main(placeholder: T.Buffer(97156, "int8"), placeholder_encoded: T.Buffer(208
             T.evaluate(T.call_extern("ethosu_depthwise_conv2d", "int8", 103, 106, 4, 103, 0, 106, ethosu_write_5[0], 0, 0, 0, T.float32(0.0057637207210063934), -128, "NHCWB16", 1696, 16, 1, "int8", 103, 106, 4, 103, 0, 106, ethosu_write[0], 0, 0, 0, T.float32(0.0057619437575340271), -128, "NHWC", 424, 4, 1, 3, 2, 1, 1, 2, 2, placeholder_d_global_3[0], 64, 0, placeholder_d_global_3[64], 48, 1, 2, 1, 2, "NONE", 0, 0, "TFL", "NONE", 14, 18, 8, dtype="handle"))

-    # Uninitialized vars used
-    @tvm.script.ir_module(check_well_formed=False)
+    @tvm.script.ir_module
     class ModuleAfter:
         @T.prim_func
         def main(placeholder: T.Buffer(97156, "int8"), placeholder_encoded: T.Buffer(208, "uint8"), placeholder_encoded_1: T.Buffer(112, "uint8"), placeholder_encoded_2: T.Buffer(96, "uint8"), placeholder_encoded_3: T.Buffer(112, "uint8"), ethosu_write: T.Buffer(43672, "int8")) -> None:
@@ -585,9 +572,8 @@ def main(placeholder: T.Buffer(97156, "int8"), placeholder_encoded: T.Buffer(208


 def test_reordering_based_on_cycles_luts_present():
-    # Uninitialized vars used
     # fmt: off
-    @tvm.script.ir_module(check_well_formed=False)
+    @tvm.script.ir_module
     class ModuleBefore:
         @T.prim_func
         def main(placeholder: T.Buffer(97156, "int8"), placeholder_encoded: T.Buffer(208, "uint8"), placeholder_encoded_1: T.Buffer(112, "uint8"), placeholder_1: T.Buffer(256, "int8"), placeholder_encoded_2: T.Buffer(96, "uint8"), placeholder_2: T.Buffer(256, "int8"), placeholder_3: T.Buffer(256, "int8"), ethosu_write: T.Buffer(46200, "int8")) -> None:
@@ -637,8 +623,7 @@ def main(placeholder: T.Buffer(97156, "int8"), placeholder_encoded: T.Buffer(208
             T.evaluate(T.call_extern("ethosu_pooling", "int8", 105, 110, 4, 105, 0, 110, ethosu_write_5[0], 0, 0, 0, T.float32(1), 0, "NHCWB16", 1760, 16, 1, "int8", 105, 110, 4, 105, 0, 110, ethosu_write[0], 0, 0, 0, T.float32(1), 0, "NHWC", 440, 4, 1, "MAX", 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, "TANH", 0, 0, "TFL", "NONE", 4, 64, 8, dtype="handle"))

-    # Uninitialized vars used
-    @tvm.script.ir_module(check_well_formed=False)
+    @tvm.script.ir_module
     class ModuleAfter:
         @T.prim_func
         def main(placeholder: T.Buffer(97156, "int8"), placeholder_encoded: T.Buffer(208, "uint8"), placeholder_encoded_1: T.Buffer(112, "uint8"), placeholder_1: T.Buffer(256, "int8"), placeholder_encoded_2: T.Buffer(96, "uint8"), placeholder_2: T.Buffer(256, "int8"), placeholder_3: T.Buffer(256, "int8"), ethosu_write: T.Buffer(46200, "int8")) -> None:
diff --git a/tests/python/contrib/test_ethosu/test_create_tiles.py b/tests/python/contrib/test_ethosu/test_create_tiles.py
index ac90e3c27839..e4b4067a2977 100644
--- a/tests/python/contrib/test_ethosu/test_create_tiles.py
+++ b/tests/python/contrib/test_ethosu/test_create_tiles.py
@@ -56,6 +56,8 @@ def main(placeholder1: T.Buffer((100,), "int8"), placeholder2: T.Buffer((100,),
             for i3 in T.serial(0, 1):
                 for i4 in T.serial(0, 16):
                     placeholder1[((i1*16) + i4)] = placeholder2[((T.floormod((i1 + 4), 6)*16) + i4)]
+
+        __tvm_meta__ = None
     # fmt: on

     stmt = Module["main"].body
@@ -85,6 +87,8 @@ def main(placeholder1: T.Buffer((100,), "int8"), placeholder2: T.Buffer((100,),
             for i3 in T.serial(0, 6):
                 for i4 in T.serial(0, 16):
                     placeholder1[((i3*16) + i4)] = placeholder2[((T.floormod((i3 + 4), 6)*16) + i4)]
+
+        __tvm_meta__ = None
     # fmt: on

     stmt = Module["main"].body
@@ -114,6 +118,8 @@ def main(placeholder1: T.Buffer((100,), "int8"), placeholder2: T.Buffer((100,),
             for i3 in T.serial(0, 1):
                 for i4 in T.serial(0, 16):
                     placeholder1[((i1*16) + i4)] = placeholder2[((T.floormod((i1 + 4), 6)*8) + i4)]
+
+        __tvm_meta__ = None
     # fmt: on

     stmt = Module["main"].body
@@ -142,6 +148,8 @@ def main(placeholder1: T.Buffer((100,), "int8"), placeholder2: T.Buffer((100,),
             for i2 in T.serial(0, 6):
                 for i3 in T.serial(0, 4):
                     placeholder1[(((i1*24) + (i2*4)) + i3)] = placeholder2[(((((T.floordiv((i1 - 1), 2)*48) + (T.floormod((i1 + 1), 2)*24)) + (i2*4)) + i3) + 96)]
+
+        __tvm_meta__ = None
     # fmt: on

     stmt = Module["main"].body
diff --git a/tests/python/contrib/test_ethosu/test_encode_constants.py b/tests/python/contrib/test_ethosu/test_encode_constants.py
index 8c35a43e47e9..4341f367f0e1 100644
--- a/tests/python/contrib/test_ethosu/test_encode_constants.py
+++ b/tests/python/contrib/test_ethosu/test_encode_constants.py
@@ -32,9 +32,8 @@
 from .infra import make_ethosu_binary_elementwise, make_ethosu_conv2d


-# Uninitialized variables used
 # fmt: off
-@tvm.script.ir_module(check_well_formed=False)
+@tvm.script.ir_module
 class WeightStreamOnlyU55:
     @T.prim_func
     def main(input_placeholder: T.Buffer((1, 16, 16, 32), "int8"), input_ethosu_write: T.Buffer((1, 16, 16, 8), "int8")) -> None:
@@ -61,10 +60,10 @@ def main(input_placeholder: T.Buffer((1, 16, 16, 32), "int8"), input_ethosu_writ
         T.evaluate(T.call_extern("ethosu_copy", buffer7[0], 144, p2[0], dtype="handle"))
         T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[4], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, buffer9[0], 112, T.int8(-1), T.int8(-1), 12, buffer9[112], 32, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
         T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[6], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p2[0], 112, T.int8(-1), T.int8(-1), 12, p2[112], 32, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
+    __tvm_meta__ = None


-# Uninitialized variables used
-@tvm.script.ir_module(check_well_formed=False)
+@tvm.script.ir_module
 class WeightStreamOnlyU65:
     @T.prim_func
     def main(ifm: T.Buffer((1, 16, 16, 32), "int8"), ethosu_write: T.Buffer((1, 16, 16, 8), "int8")):
@@ -90,6 +89,7 @@ def main(ifm: T.Buffer((1, 16, 16, 32), "int8"), ethosu_write: T.Buffer((1, 16,
             T.call_extern("handle", "ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, ifm_1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write_1[2], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p2_global_4_1[0], 80, p2_global_4_1[80], 80, 12, p2_global_4_1[160], 16, p2_global_4_1[176], 16, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0)
             T.call_extern("handle", "ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, ifm_1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write_1[4], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p2_global_5_1[0], 96, p2_global_5_1[96], 80, 12, p2_global_5_1[176], 16, p2_global_5_1[192], 16, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0)
             T.call_extern("handle", "ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, ifm_1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write_1[6], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p2_global_6_1[0], 80, p2_global_6_1[80], 80, 12, p2_global_6_1[160], 16, p2_global_6_1[176], 16, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0)
+    __tvm_meta__ = None
 # fmt: on
@@ -142,16 +142,15 @@ def _get_func():
     func = _get_func()
     mod, consts = _lower_to_tir(func, cascader=_planner)
     script = mod.script()
-    test_mod = tvm.script.from_source(script, check_well_formed=False)
+    test_mod = tvm.script.from_source(script)
     tvm.ir.assert_structural_equal(test_mod["main"], reference_mod["main"], True)

     test_const_size = [value.size for value in list(consts.values())]
     assert reference_const_sizes.sort() == test_const_size.sort()


-# Uninitialized variables used
 # fmt: off
-@tvm.script.ir_module(check_well_formed=False)
+@tvm.script.ir_module
 class RereadWeightsU55:
     @T.prim_func
     def main(input_placeholder: T.Buffer((1, 16, 16, 32), "int8"), input_ethosu_write: T.Buffer((1, 16, 16, 8), "int8")) -> None:
@@ -169,10 +168,10 @@ def main(input_placeholder: T.Buffer((1, 16, 16, 32), "int8"), input_ethosu_writ
         T.evaluate(T.call_extern("ethosu_copy", buffer1[0], 384, p2[0], dtype="handle"))
         T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, ethosu_write[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 304, T.int8(-1), T.int8(-1), 12, p1[304], 80, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
         T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, placeholder[256], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, ethosu_write[64], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p2[0], 304, T.int8(-1), T.int8(-1), 12, p2[304], 80, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
+    __tvm_meta__ = None


-# Uninitialized variables used
-@tvm.script.ir_module(check_well_formed=False)
+@tvm.script.ir_module
 class RereadWeightsU65:
     @T.prim_func
     def main(input_placeholder: T.Buffer((1, 16, 16, 32), "int8"), input_ethosu_write: T.Buffer((1, 16, 16, 8), "int8")) -> None:
@@ -191,6 +190,8 @@ def main(input_placeholder: T.Buffer((1, 16, 16, 32), "int8"), input_ethosu_writ
         T.evaluate(T.call_extern("ethosu_copy", placeholder_encoded_1[0], 464, p2[0], dtype="handle"))
         T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, ethosu_write[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 192, p1[192], 176, 12, p1[368], 48, p1[416], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
         T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, placeholder[256], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, ethosu_write[64], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p2[0], 192, p2[192], 176, 12, p2[368], 48, p2[416], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
+
+    __tvm_meta__ = None
 # fmt: on
@@ -243,16 +244,15 @@ def _get_func():
     func = _get_func()
     mod, consts = _lower_to_tir(func, cascader=_cascader)
     script = mod.script()
-    test_mod = tvm.script.from_source(script, check_well_formed=False)
+    test_mod = tvm.script.from_source(script)
     tvm.ir.assert_structural_equal(test_mod["main"], reference_mod["main"], True)

     test_const_size = [value.size for value in list(consts.values())]
     assert reference_const_sizes.sort() == test_const_size.sort()


-# Uninitialized variables used
 # fmt: off
-@tvm.script.ir_module(check_well_formed=False)
+@tvm.script.ir_module
 class DirectReadOnlyU55:
     @T.prim_func
     def main(input_placeholder: T.Buffer((1, 16, 16, 32), "int8"), input_ethosu_write: T.Buffer((1, 16, 16, 8), "int8")) -> None:
@@ -269,10 +269,10 @@ def main(input_placeholder: T.Buffer((1, 16, 16, 32), "int8"), input_ethosu_writ
         ethosu_write_1 = T.Buffer([4096], "int8", data=ethosu_write_1_data)
         T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 16, 16, 0, 16, ethosu_write_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 256, 16, 1, 1, 1, 1, 1, 1, 1, buffer[0], 592, T.int8(-1), T.int8(-1), 12, buffer_1[0], 160, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
         T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 16, 16, 0, 16, ethosu_write_1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 16, 1, "int8", 16, 16, 8, 16, 0, 16, ethosu_write[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, buffer_2[0], 160, T.int8(-1), T.int8(-1), 12, buffer_3[0], 80, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
+    __tvm_meta__ = None


-# Uninitialized variables used
-@tvm.script.ir_module(check_well_formed=False)
+@tvm.script.ir_module
 class DirectReadOnlyU65:
     @T.prim_func
     def main(input_placeholder: T.Buffer((1, 16, 16, 32), "int8"), input_ethosu_write: T.Buffer((1, 16, 16, 8), "int8")) -> None:
@@ -290,6 +290,7 @@ def main(input_placeholder: T.Buffer((1, 16, 16, 32), "int8"), input_ethosu_writ
         ethosu_write_2 = T.Buffer([4096], "int8", data=ethosu_write_2_data)
         T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 16, 16, 0, 16, ethosu_write_2[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 256, 16, 1, 1, 1, 1, 1, 1, 1, placeholder_encoded[0], 304, placeholder_encoded[304], 304, 12, placeholder_encoded_1[0], 80, placeholder_encoded_1[80], 80, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
         T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 16, 16, 0, 16, ethosu_write_2[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 16, 1, "int8", 16, 16, 8, 16, 0, 16, ethosu_write[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, placeholder_encoded_2[0], 112, placeholder_encoded_2[112], 96, 12, placeholder_encoded_3[0], 48, placeholder_encoded_3[48], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
+    __tvm_meta__ = None
 # fmt: on
@@ -341,16 +342,15 @@ def _get_func():

     mod, consts = _lower_to_tir(func)
     script = mod.script()
-    test_mod = tvm.script.from_source(script, check_well_formed=False)
+    test_mod = tvm.script.from_source(script)
     tvm.ir.assert_structural_equal(test_mod["main"], reference_mod["main"], True)

     test_const_size = [value.size for value in list(consts.values())]
     assert reference_const_sizes.sort() == test_const_size.sort()


-# Uninitialized variables used
 # fmt: off
-@tvm.script.ir_module(check_well_formed=False)
+@tvm.script.ir_module
 class MixedReadU55:
     @T.prim_func
     def main(input_ifm: T.Buffer((1,16,16,32), "int8"), input_ethosu_write: T.Buffer((1,16,16,8), "int8")) -> None:
@@ -380,10 +380,10 @@ def main(input_ifm: T.Buffer((1,16,16,32), "int8"), input_ethosu_write: T.Buffer
         T.evaluate(T.call_extern("ethosu_copy", buffer7[0], 112, p2[0], dtype="handle"))
         T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 16, 16, 0, 16, p3[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 16, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[4], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 80, T.int8(-1), T.int8(-1), 12, p1[80], 32, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
         T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 16, 16, 0, 16, p3[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 16, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[6], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p2[0], 80, T.int8(-1), T.int8(-1), 12, p2[80], 32, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
+    __tvm_meta__ = None


-# Uninitialized variables used
-@tvm.script.ir_module(check_well_formed=False)
+@tvm.script.ir_module
 class MixedReadU65:
     @T.prim_func
     def main(ifm: T.Buffer((1, 16, 16, 32), "int8"), ethosu_write: T.Buffer((1, 16, 16, 8), "int8")):
@@ -414,6 +414,7 @@ def main(ifm: T.Buffer((1, 16, 16, 32), "int8"), ethosu_write: T.Buffer((1, 16,
             T.call_extern("handle", "ethosu_conv2d", "int8", 16, 16, 16, 16, 0, 16, ethosu_write_2[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 16, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write_3[2], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p5_global_4[0], 48, p5_global_4[48], 48, 12, p5_global_4[96], 16, p5_global_4[112], 16, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0)
             T.call_extern("handle", "ethosu_conv2d", "int8", 16, 16, 16, 16, 0, 16, ethosu_write_2[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 16, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write_3[4], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p5_global_5[0], 48, p5_global_5[48], 48, 12, p5_global_5[96], 16, p5_global_5[112], 16, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0)
             T.call_extern("handle", "ethosu_conv2d", "int8", 16, 16, 16, 16, 0, 16, ethosu_write_2[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 16, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write_3[6], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p5_global_6[0], 48, p5_global_6[48], 48, 12, p5_global_6[96], 16, p5_global_6[112], 16, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0)
+    __tvm_meta__ = None
 # fmt: on
@@ -476,7 +477,7 @@ def _get_func():

     mod, consts = _lower_to_tir(func, cascader=_planner)
     script = mod.script()
-    test_mod = tvm.script.from_source(script, check_well_formed=False)
+    test_mod = tvm.script.from_source(script)
     tvm.ir.assert_structural_equal(test_mod["main"], reference_mod["main"], True)

     test_const_size = [value.size for value in list(consts.values())]
diff --git a/tests/python/contrib/test_ethosu/test_merge_constants.py b/tests/python/contrib/test_ethosu/test_merge_constants.py
index 5c5cd960e5d3..624bef00c7f8 100644
--- a/tests/python/contrib/test_ethosu/test_merge_constants.py
+++ b/tests/python/contrib/test_ethosu/test_merge_constants.py
@@ -35,8 +35,7 @@ def check_const_dictionaries(const_dict, new_const_dict):

 def test_only_one_operator():
     # fmt: off
-    # undefined vars used
-    @tvm.script.ir_module(check_well_formed=False)
+    @tvm.script.ir_module
     class InputModule:
         @T.prim_func
         def main(buffer2: T.Buffer((128,), "uint8"), buffer3: T.Buffer((32,), "uint8")) -> None:
@@ -54,8 +53,7 @@ def main(buffer2: T.Buffer((128,), "uint8"), buffer3: T.Buffer((32,), "uint8"))
             T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 128, 12, p4[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))

-    # undefined vars used
-    @tvm.script.ir_module(check_well_formed=False)
+    @tvm.script.ir_module
     class ReferenceModule:
         @T.prim_func
         def main(buffer2: T.Buffer((160,), "uint8")) -> None:
@@ -82,8 +80,7 @@ def main(buffer2: T.Buffer((160,), "uint8")) -> None:

 def test_all_operators_with_weights():
     # fmt: off
-    # undefined vars used
-    @tvm.script.ir_module(check_well_formed=False)
+    @tvm.script.ir_module
     class InputModule:
         @T.prim_func
         def main(buffer2: T.Buffer((128,), "uint8"), buffer3: T.Buffer((32,), "uint8"), buffer4: T.Buffer((112,), "uint8"), buffer5: T.Buffer((32,), "uint8"), buffer6: T.Buffer((112,), "uint8"), buffer7: T.Buffer((32,), "uint8"), buffer8: T.Buffer((112,), "uint8"), buffer9: T.Buffer((32,), "uint8")) -> None:
@@ -122,8 +119,7 @@ def main(buffer2: T.Buffer((128,), "uint8"), buffer3: T.Buffer((32,), "uint8"),
             T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[6], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p7[0], 112, 12, p8[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))

-    # undefined vars used
-    @tvm.script.ir_module(check_well_formed=False)
+    @tvm.script.ir_module
     class ReferenceModule:
         @T.prim_func
         def main(buffer2: T.Buffer((160,), "uint8"), buffer4: T.Buffer((144,), "uint8"), buffer6: T.Buffer((144,), "uint8"), buffer8: T.Buffer((144,), "uint8")) -> None:
@@ -174,8 +170,8 @@ def test_operators_with_and_without_weights():
     # fmt: off
-    # undefined vars used
-    @tvm.script.ir_module(check_well_formed=False)
+    @tvm.script.ir_module
     class InputModule:
         @T.prim_func
         def main(buffer2: T.Buffer((80,), "uint8"), buffer3: T.Buffer((64,), "uint8")) -> None:
@@ -194,8 +189,7 @@ def main(buffer2: T.Buffer((80,), "uint8"), buffer3: T.Buffer((64,), "uint8")) -
             T.evaluate(T.call_extern("ethosu_conv2d", "int8", 214, 114, 2, 214, 0, 114, buffer0[0], 0, 0, 0, T.float32(0.00392157), -128, "NHCWB16", 1824, 16, 1, "int8", 214, 114, 5, 214, 0, 114, buffer6[0], 0, 0, 0, T.float32(0.0174839), -128, "NHCWB16", 1824, 16, 1, 3, 1, 1, 1, 1, 2, p2[0], 80, 0, p3[0], 64, 0, 1, 0, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))

-    # undefined vars used
-    @tvm.script.ir_module(check_well_formed=False)
+    @tvm.script.ir_module
     class ReferenceModule:
         @T.prim_func
         def main(buffer2: T.Buffer((144,), "uint8")) -> None:
@@ -224,8 +218,7 @@ def main(buffer2: T.Buffer((144,), "uint8")) -> None:

 def test_copy_to_buffer_with_local_scope():
     # fmt: off
-    # undefined vars used
-    @tvm.script.ir_module(check_well_formed=False)
+    @tvm.script.ir_module
     class InputModule:
         @T.prim_func
         def main(buffer1: T.Buffer((64,), "uint8"),
@@ -262,8 +255,7 @@ def main(buffer1: T.Buffer((64,), "uint8"),
             T.evaluate(T.call_extern("ethosu_depthwise_conv2d", "int8", 4, 4, 4, 4, 0, 4, buffer9[0], 0, 0, 0, T.float32(0.0078125), 0, "NHCWB16", 64, 16, 1, "int8", 4, 4, 4, 4, 0, 4, buffer8[0], 0, 0, 0, T.float32(0.00372155), -128, "NHWC", 16, 4, 1, 1, 1, 1, 1, 1, 1, p5[0], 16, 0, p6[0], 48, 0, 0, 0, 0, "TANH", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))

-    # undefined vars used
-    @tvm.script.ir_module(check_well_formed=False)
+    @tvm.script.ir_module
     class ReferenceModule:
         @T.prim_func
         def main(buffer1: T.Buffer((64,), "uint8"),
@@ -313,9 +305,8 @@ def main(buffer1: T.Buffer((64,), "uint8"),


 def test_no_copies():
-    # the vars placeholder and ethosu_write are undefined
     # fmt: off
-    @tvm.script.ir_module(check_well_formed=False)
+    @tvm.script.ir_module
     class InputModule:
         @T.prim_func
         def main() -> None:
@@ -329,7 +320,7 @@ def main() -> None:
             T.evaluate(T.call_extern("ethosu_binary_elementwise", "int8", 1, 4, 4, 1, 0, 4, placeholder[0], 0, 0, 0, T.float32(0.00783747), -128, "NHWC", 1, 4, 1, "int8", 1, 4, 1, 1, 0, 4, placeholder[16], 0, 0, 0, T.float32(0.00783747), -128, "NHWC", 1, 1, 1, "int8", 1, 4, 4, 1, 0, 4, ethosu_write_4[0], 0, 0, 0, T.float32(0.00783747), -128, "NHWC", 1, 4, 1, "MAX", 0, "CLIP", -128, 127, "TFL", 1, 4, 4, dtype="handle"))
             T.evaluate(T.call_extern("ethosu_identity", "int8", 1, 4, 4, 1, 0, 4, ethosu_write_4[0], 0, 0, 0, T.float32(1), 0, "NHWC", 1, 4, 1, "int8", 1, 4, 4, 1, 0, 4, ethosu_write[0], 0, 0, 0, T.float32(1), 0, "NHWC", 1, 4, 1, "AVG", 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))

-    @tvm.script.ir_module(check_well_formed=False)
+    @tvm.script.ir_module
     class ReferenceModule:
         @T.prim_func
         def main() -> None:
@@ -354,8 +345,7 @@ def main() -> None:

 def test_copies_to_the_same_buffer():
     # fmt: off
-    # undefined vars used
-    @tvm.script.ir_module(check_well_formed=False)
+    @tvm.script.ir_module
     class InputModule:
         @T.prim_func
         def main(buffer2: T.Buffer((128,), "uint8"), buffer3: T.Buffer((32,), "uint8")) -> None:
@@ -376,8 +366,7 @@ def main(buffer2: T.Buffer((128,), "uint8"), buffer3: T.Buffer((32,), "uint8"))
             T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 128, 12, p4[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))

-    # undefined vars used
-    @tvm.script.ir_module(check_well_formed=False)
+    @tvm.script.ir_module
     class ReferenceModule:
         @T.prim_func
         def main(buffer2: T.Buffer((160,), "uint8")) -> None:
@@ -424,6 +413,7 @@ def main(input_placeholder: T.Buffer((1, 16, 16, 32), "int8"), buffer1: T.Buffer
             T.evaluate(T.call_extern("ethosu_copy", buffer1[0], 368, p1[0], dtype="handle"))
             T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 96, p2[0], dtype="handle"))
             T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, ethosu_write[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 192, p1[192], 176, 12, p2[0], 48, p2[48], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
+        __tvm_meta__ = None

     @tvm.script.ir_module
@@ -440,6 +430,7 @@ def main(input_placeholder: T.Buffer((1,16,16,32), "int8"), buffer1: T.Buffer((4
            p1 = T.Buffer([464], "uint8", data=p1_data)
            T.evaluate(T.call_extern("ethosu_copy", buffer1[0], 464, p1[0], dtype="handle"))
            T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, ethosu_write[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 192, p1[192], 176, 12, p1[368], 48, p1[416], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
+        __tvm_meta__ = None
     # fmt: on

     const_dict = {
@@ -479,6 +470,7 @@ def main(input_placeholder: T.Buffer((1,16,16,32), "int8"), buffer1: T.Buffer((3
             T.evaluate(T.call_extern("ethosu_copy", buffer3[0], 368, p3[0], dtype="handle"))
             T.evaluate(T.call_extern("ethosu_copy", buffer4[0], 96, p4[0], dtype="handle"))
             T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, ethosu_write[2048], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p3[0], 192, p3[192], 176, 12, p4[0], 48, p4[48], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
+        __tvm_meta__ = None

     @tvm.script.ir_module
@@ -499,6 +491,7 @@ def main(input_placeholder: T.Buffer((1,16,16,32), "int8"), buffer1: T.Buffer((4
            T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, ethosu_write[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 192, p1[192], 176, 12, p1[368], 48, p1[416], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
            T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 464, p2[0], dtype="handle"))
            T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, ethosu_write[2048], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p2[0], 192, p2[192], 176, 12, p2[368], 48, p2[416], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
+        __tvm_meta__ = None
     # fmt: on

     const_dict = {
@@ -543,6 +536,7 @@ def main(input_placeholder: T.Buffer((1,16,16,32), "int8"), buffer1: T.Buffer((3
             T.evaluate(T.call_extern("ethosu_copy", buffer3[0], 368, p3[0], dtype="handle"))
             T.evaluate(T.call_extern("ethosu_copy", buffer4[0], 96, p4[0], dtype="handle"))
             T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, ethosu_write[2048], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p3[0], 192, p3[192], 176, 12, p4[0], 48, p4[48], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
+        __tvm_meta__ = None

     @tvm.script.ir_module
@@ -563,6 +557,7 @@ def main(input_placeholder: T.Buffer((1,16,16,32), "int8"), buffer1: T.Buffer((4
            T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, ethosu_write[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 192, p1[192], 176, 12, p1[368], 48, p1[416], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
            T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 464, p2[0], dtype="handle"))
            T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, ethosu_write[2048], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p2[0], 192, p2[192], 176, 12, p2[368], 48, p2[416], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
+        __tvm_meta__ = None
     # fmt: on

     const_dict = {
@@ -607,6 +602,7 @@ def main(input_placeholder: T.Buffer((1,16,16,32), "int8"), buffer1: T.Buffer((3
             T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 368, p2[0], dtype="handle"))
             T.evaluate(T.call_extern("ethosu_copy", buffer4[0], 96, p4[0], dtype="handle"))
             T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, ethosu_write[2048], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p2[0], 192, p2[192], 176, 12, p4[0], 48, p4[48], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
+        __tvm_meta__ = None

     @tvm.script.ir_module
@@ -627,6 +623,7 @@ def main(input_placeholder: T.Buffer((1,16,16,32), "int8"), buffer1: T.Buffer((4
            T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, ethosu_write[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 192, p1[192], 176, 12, p1[368], 48, p1[416], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
            T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 464, p2[0], dtype="handle"))
            T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, ethosu_write[2048], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p2[0], 192, p2[192], 176, 12, p2[368], 48, p2[416], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
+        __tvm_meta__ = None
     # fmt: on

     const_dict = {
@@ -647,8 +644,7 @@ def main(input_placeholder: T.Buffer((1,16,16,32), "int8"), buffer1: T.Buffer((4

 def test_cycle_count():
     # fmt: off
-    # undefined vars used
-    @tvm.script.ir_module(check_well_formed=False)
+    @tvm.script.ir_module
     class InputModule:
         @T.prim_func
         def main(buffer2: T.Buffer((128,), "uint8"), buffer3: T.Buffer((32,), "uint8"), buffer4: T.Buffer((112,), "uint8"), buffer5: T.Buffer((32,), "uint8"), buffer6: T.Buffer((112,), "uint8"), buffer7: T.Buffer((32,), "uint8"), buffer8: T.Buffer((112,), "uint8"), buffer9: T.Buffer((32,), "uint8")) -> None:
@@ -711,7 +707,7 @@ def main(buffer2: T.Buffer((128,), "uint8"), buffer3: T.Buffer((32,), "uint8"),
             T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[6], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p7[0], 112, 12, p8[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))

-    @tvm.script.ir_module(check_well_formed=False)
+    @tvm.script.ir_module
     class ReferenceModule:
         @T.prim_func
         def main(buffer2: T.Buffer((160,), "uint8"), buffer4: T.Buffer((144,), "uint8"), buffer6: T.Buffer((144,), "uint8"), buffer8: T.Buffer((144,), "uint8")) -> None:
diff --git a/tests/python/contrib/test_ethosu/test_remove_concatenates.py b/tests/python/contrib/test_ethosu/test_remove_concatenates.py
index 58cf5f72d7c0..ef034930d7bc 100644
--- a/tests/python/contrib/test_ethosu/test_remove_concatenates.py
+++ b/tests/python/contrib/test_ethosu/test_remove_concatenates.py
@@ -28,8 +28,7 @@

 # fmt: off
-# complains of an undefined buffer
-@tvm.script.ir_module(check_well_formed=False)
+@tvm.script.ir_module
 class ReferenceModule:
     @T.prim_func
     def main(input_placeholder: T.Buffer((1,8,12,16), "int8"), input_placeholder_1: T.Buffer((1,8,10,16), "int8"), input_T_concat: T.Buffer((1,8,32,16), "int8")) -> None:
@@ -55,6 +54,7 @@ def main(input_placeholder: T.Buffer((1,8,12,16), "int8"), input_placeholder_1:
         T.evaluate(T.call_extern("ethosu_conv2d", "int8", 8, 10, 16, 8, 0, 10, T_concat_1[192], 0, 0, 0, T.float32(0.5), 10, "NHWC", 352, 16, 1, "int8", 8, 10, 16, 8, 0, 10, T_concat[352], 0, 0, 0, T.float32(0.25), 14, "NHWC", 512, 16, 1, 3, 3, 1, 1, 1, 1, buffer_2[0], 2992, T.int8(-1), T.int8(-1), 12, buffer_3[0], 160, T.int8(-1), T.int8(-1), 1, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
         T.evaluate(T.call_extern("ethosu_conv2d", "int8", 8, 12, 16, 8, 0, 12, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 192, 16, 1, "int8", 8, 12, 16, 8, 0, 12, T_concat_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 352, 16, 1, 3, 3, 1, 1, 1, 1, buffer_4[0], 2992, T.int8(-1), T.int8(-1), 12, buffer_5[0], 160, T.int8(-1), T.int8(-1), 1, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
         T.evaluate(T.call_extern("ethosu_conv2d", "int8", 8, 22, 16, 8, 0, 22, T_concat_1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 352, 16, 1, "int8", 8, 22, 16, 8, 0, 22, T_concat[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 512, 16, 1, 3, 3, 1, 1, 1, 1, buffer_6[0], 2992, T.int8(-1), T.int8(-1), 12, buffer_7[0], 160, T.int8(-1), T.int8(-1), 1, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
+    __tvm_meta__ = None
 # fmt: on
@@ -75,7 +75,7 @@ def _get_func():
     func = _get_func()
     mod, _ = _lower_to_tir(func)
     script = mod.script()
-    test_mod = tvm.script.from_source(script, check_well_formed=False)
+    test_mod = tvm.script.from_source(script)
     reference_mod = ReferenceModule
     tvm.ir.assert_structural_equal(test_mod["main"], reference_mod["main"], True)
diff --git a/tests/python/contrib/test_ethosu/test_replace_conv2d.py b/tests/python/contrib/test_ethosu/test_replace_conv2d.py
index a8aa4043293f..32d1303e124e 100644
--- a/tests/python/contrib/test_ethosu/test_replace_conv2d.py
+++ b/tests/python/contrib/test_ethosu/test_replace_conv2d.py
@@ -363,9 +363,8 @@ def _visit(stmt):
     assert data[0] == answer, data[0]


-# Undefined variables used
 # fmt: off
-@tvm.script.ir_module(check_well_formed=False)
+@tvm.script.ir_module
 class Conv2dDoubleCascade1:
     @T.prim_func
     def main(input_placeholder_5: T.Buffer((1, 8, 8, 3), "int8"), input_ethosu_write_1: T.Buffer((1, 8, 8, 8), "int8")) -> None:
@@ -384,10 +383,10 @@ def main(input_placeholder_5: T.Buffer((1, 8, 8, 3), "int8"), input_ethosu_write
         T.evaluate(T.call_extern("ethosu_conv2d", "int8", 8, 4, 32, 8, 0, 4, ethosu_write_2[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 128, 32, 1, "int8", 8, 4, 8, 8, 0, 4, ethosu_write_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 64, 8, 1, 1, 1, 1, 1, 1, 1, buffer[0], 304, T.int8(-1), T.int8(-1), 12, buffer_1[0], 80, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
         T.evaluate(T.call_extern("ethosu_conv2d", "int8", 8, 4, 3, 8, 0, 4, placeholder_5[12], 0, 0, 0, T.float32(0.5), 10, "NHWC", 24, 3, 1, "int8", 8, 4, 32, 8, 0, 4, ethosu_write_2[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 32, 1, 1, 1, 1, 1, 1, 1, buffer_3[0], 160, T.int8(-1), T.int8(-1), 12, buffer_2[0], 320, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
         T.evaluate(T.call_extern("ethosu_conv2d", "int8", 8, 4, 32, 8, 0, 4, ethosu_write_2[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 128, 32, 1, "int8", 8, 4, 8, 8, 0, 4, ethosu_write_1[32], 0, 0, 0, T.float32(0.25), 14, "NHWC", 64, 8, 1, 1, 1, 1, 1, 1, 1, buffer[0], 304, T.int8(-1), T.int8(-1), 12, buffer_1[0], 80, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
+    __tvm_meta__ = None


-# undefined variables used
-@tvm.script.ir_module(check_well_formed=False)
+@tvm.script.ir_module
 class Conv2dDoubleCascade2:
     @T.prim_func
     def main(input_placeholder_5: T.Buffer((1, 8, 8, 3), "int8"), input_ethosu_write_1: T.Buffer((1, 8, 8, 8), "int8")) -> None:
@@ -406,10 +405,10 @@ def main(input_placeholder_5: T.Buffer((1, 8, 8, 3), "int8"), input_ethosu_write
         T.evaluate(T.call_extern("ethosu_conv2d", "int8", 5, 8, 32, 5, 0, 8, ethosu_write_2[256], 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 32, 1, "int8", 4, 8, 8, 4, 0, 8, ethosu_write_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 64, 8, 1, 3, 3, 1, 1, 1, 1, buffer_3[0], 2608, T.int8(-1), T.int8(-1), 12, buffer[0], 80, T.int8(-1), T.int8(-1), 1, 1, 0, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
         T.evaluate(T.call_extern("ethosu_conv2d", "int8", 6, 8, 3, 6, 0, 8, placeholder_5[48], 0, 0, 0, T.float32(0.5), 10, "NHWC", 24, 3, 1, "int8", 5, 8, 32, 5, 0, 8, ethosu_write_2[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 256, 32, 1, 3, 3, 1, 1, 1, 1, buffer_2[0], 1312, T.int8(-1), T.int8(-1), 12, buffer_1[0], 320, T.int8(-1), T.int8(-1), 0, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
         T.evaluate(T.call_extern("ethosu_conv2d", "int8", 5, 8, 32, 5, 0, 8, ethosu_write_2[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 32, 1, "int8", 4, 8, 8, 4, 0, 8, ethosu_write_1[256], 0, 0, 0, T.float32(0.25), 14, "NHWC", 64, 8, 1, 3, 3, 1, 1, 1, 1, buffer_3[0], 2608, T.int8(-1), T.int8(-1), 12, buffer[0], 80, T.int8(-1), T.int8(-1), 0, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
+    __tvm_meta__ = None


-# undefined variables used
-@tvm.script.ir_module(check_well_formed=False)
+@tvm.script.ir_module
 class Conv2dDoubleCascade3:
     @T.prim_func
     def main(input_placeholder_5: T.Buffer((1, 16, 16, 3), "int8"), input_ethosu_write_1: T.Buffer((1, 20, 4, 8), "int8")) -> None:
@@ -431,10 +430,10 @@ def main(input_placeholder_5: T.Buffer((1, 16, 16, 3), "int8"), input_ethosu_wri
         T.evaluate(T.call_extern("ethosu_conv2d", "int8", 10, 8, 32, 10, 0, 8, ethosu_write_2[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 32, 1, "int8", 8, 4, 8, 8, 0, 4, ethosu_write_1[256], 0, 0, 0, T.float32(0.25), 14, "NHWC", 32, 8, 1, 2, 3, 2, 1, 2, 1, buffer[0], 1744, T.int8(-1), T.int8(-1), 12, buffer_1[0], 80, T.int8(-1), T.int8(-1), 0, 1, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
         T.evaluate(T.call_extern("ethosu_conv2d", "int8", 4, 16, 3, 4, 0, 16, placeholder_5[576], 0, 0, 0, T.float32(0.5), 10, "NHWC", 48, 3, 1, "int8", 4, 8, 32, 4, 0, 8, ethosu_write_2[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 256, 32, 1, 2, 3, 2, 1, 2, 1, buffer_3[0], 880, T.int8(-1), T.int8(-1), 12, buffer_2[0], 320, T.int8(-1), T.int8(-1), 0, 1, 2, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
         T.evaluate(T.call_extern("ethosu_conv2d", "int8", 4, 8, 32, 4, 0, 8, ethosu_write_2[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 32, 1, "int8", 4, 4, 8, 4, 0, 4, ethosu_write_1[512], 0, 0, 0, T.float32(0.25), 14, "NHWC", 32, 8, 1, 2, 3, 2, 1, 2, 1, buffer[0], 1744, T.int8(-1), T.int8(-1), 12, buffer_1[0], 80, T.int8(-1), T.int8(-1), 0, 1, 2, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
+    __tvm_meta__ = None


-# undefined variables used
-@tvm.script.ir_module(check_well_formed=False)
+@tvm.script.ir_module
 class Conv2dDoubleCascade4:
     @T.prim_func
     def main(input_placeholder_5: T.Buffer((1, 8, 1, 8, 16), "int8"), input_ethosu_write_1: T.Buffer((1, 8, 2, 8, 16), "int8")) -> None:
@@ -453,10 +452,10 @@ def main(input_placeholder_5: T.Buffer((1, 8, 1, 8, 16), "int8"), input_ethosu_w
         T.evaluate(T.call_extern("ethosu_conv2d", "int8", 5, 8, 35, 5, 0, 8, ethosu_write_2[384], 0, 0, 0, T.float32(0.5), 10, "NHCWB16", 384, 16, 128, "int8", 4, 8, 26, 4, 0, 8, ethosu_write_1[0], 0, 0, 0, T.float32(0.25), 14, "NHCWB16", 256, 16, 128, 3, 3, 1, 1, 1, 1, buffer_3[0], 11040, T.int8(-1), T.int8(-1), 12, buffer_2[0], 272, T.int8(-1), T.int8(-1), 1, 1, 0, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
         T.evaluate(T.call_extern("ethosu_conv2d", "int8", 6, 8, 3, 6, 0, 8, placeholder_5[256], 0, 0, 0, T.float32(0.5), 10, "NHCWB16", 128, 16, 1, "int8", 5, 8, 35, 5, 0, 8, ethosu_write_2[0], 0, 0, 0, T.float32(0.25), 14, "NHCWB16", 384, 16, 128, 3, 3, 1, 1, 1, 1, buffer[0], 1456, T.int8(-1), T.int8(-1), 12, buffer_1[0], 352, T.int8(-1), T.int8(-1), 0, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
         T.evaluate(T.call_extern("ethosu_conv2d", "int8", 5, 8, 35, 5, 0, 8, ethosu_write_2[0], 0, 0, 0, T.float32(0.5), 10, "NHCWB16", 384, 16, 128, "int8", 4, 8, 26, 4, 0, 8, ethosu_write_1[1024], 0, 0, 0, T.float32(0.25), 14, "NHCWB16", 256, 16, 128, 3, 3, 1, 1, 1, 1, buffer_3[0], 11040, T.int8(-1), T.int8(-1), 12, buffer_2[0], 272, T.int8(-1), T.int8(-1), 0, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
+    __tvm_meta__ = None


-# undefined variables used
-@tvm.script.ir_module(check_well_formed=False)
+@tvm.script.ir_module
 class Conv2dDoubleCascade5:
     @T.prim_func
     def main(input_placeholder: T.Buffer((1, 8, 8, 3), "int8"), input_ethosu_write: T.Buffer((1, 32, 32, 8), "int8")) -> None:
@@ -475,10 +474,10 @@ def main(input_placeholder: T.Buffer((1, 8, 8, 3), "int8"), input_ethosu_write:
         T.evaluate(T.call_extern("ethosu_conv2d", "int8", 8, 16, 32, 8, 0, 16, ethosu_write_1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 32, 8, 16, 0, 32, ethosu_write[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 256, 8, 1, 1, 1, 1, 1, 1, 1, buffer_2[0], 304, T.int8(-1), T.int8(-1), 12, buffer_3[0], 80, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "ZEROS", 0, 0, 0, dtype="handle"))
         T.evaluate(T.call_extern("ethosu_conv2d", "int8", 4, 8, 3, 4, 0, 8, placeholder[96], 0, 0, 0, T.float32(0.5), 10, "NHWC", 24, 3, 1, "int8", 8, 16, 32, 8, 0, 16, ethosu_write_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 512, 32, 1, 1, 1, 1, 1, 1, 1, buffer[0], 160, T.int8(-1), T.int8(-1), 12, buffer_1[0], 320, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "ZEROS", 0, 0, 0, dtype="handle"))
         T.evaluate(T.call_extern("ethosu_conv2d", "int8", 8, 16, 32, 8, 0, 16, ethosu_write_1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 32, 8, 16, 0, 32, ethosu_write[4096], 0, 0, 0, T.float32(0.25), 14, "NHWC", 256, 8, 1, 1, 1, 1, 1, 1, 1, buffer_2[0], 304, T.int8(-1), T.int8(-1), 12, buffer_3[0], 80, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "ZEROS", 0, 0, 0, dtype="handle"))
+    __tvm_meta__ = None


-# undefined variables used
-@tvm.script.ir_module(check_well_formed=False)
+@tvm.script.ir_module
 class Conv2dDoubleCascade6:
     @T.prim_func
     def main(input_placeholder: T.Buffer((1, 8, 1, 8, 16), "int8"), input_ethosu_write: T.Buffer((1, 32, 2, 32, 16), "int8")) -> None:
@@ -495,6 +494,7 @@ def main(input_placeholder: T.Buffer((1, 8, 1, 8, 16), "int8"), input_ethosu_wri
         ethosu_write_1 = T.Buffer([12288], "int8", data=ethosu_write_1_data)
         T.evaluate(T.call_extern("ethosu_conv2d", "int8", 8, 8, 3, 8, 0, 8, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHCWB16", 128, 16, 1, "int8", 16, 16, 35, 16, 0, 16, ethosu_write_1[0], 0, 0, 0, T.float32(0.25), 14, "NHCWB16", 768, 16, 256, 3, 3, 1, 1, 1, 1, buffer[0], 1456, T.int8(-1), T.int8(-1), 12, buffer_1[0], 352, T.int8(-1), T.int8(-1), 1, 1, 1, 1, "NONE", 0, 0, "TFL", "NEAREST", 0, 0, 0, dtype="handle"))
         T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 35, 16, 0, 16, ethosu_write_1[0], 0, 0, 0, T.float32(0.5), 10, "NHCWB16", 768, 16, 256, "int8", 32, 32, 26, 32, 0, 32, ethosu_write[0], 0, 0, 0, T.float32(0.25), 14, "NHCWB16", 1024, 16, 512, 3, 3, 1, 1, 1, 1, buffer_2[0], 11040, T.int8(-1), T.int8(-1), 12, buffer_3[0], 272, T.int8(-1), T.int8(-1), 1, 1, 1, 1, "NONE", 0, 0, "TFL", "NEAREST", 0, 0, 0, dtype="handle"))
+    __tvm_meta__ = None
 # fmt: on
@@ -640,13 +640,12 @@ def _get_func(
     func = _get_func(*params[:-1])
     mod, _ = _lower_to_tir(func, cascader=total_cascader(params[-1]))
     script = mod.script()
-    mod = tvm.script.from_source(script, check_well_formed=False)
+    mod = tvm.script.from_source(script)
     tvm.ir.assert_structural_equal(mod["main"], reference_mod["main"], True)


-# Undefined vars used
 # fmt: off
-@tvm.script.ir_module(check_well_formed=False)
+@tvm.script.ir_module
 class Conv2dInlineCopy1:
     @T.prim_func
     def main(input_placeholder_3: T.Buffer((1, 10, 12, 8), "int8"), input_ethosu_write_1: T.Buffer((1, 8, 8, 16), "int8")) -> None:
@@ -658,10 +657,10 @@ def main(input_placeholder_3: T.Buffer((1, 10, 12, 8), "int8"), input_ethosu_wri
         ethosu_write_1 = T.Buffer([1024], 'int8', data=input_ethosu_write_1.data)
         # body
         T.evaluate(T.call_extern("ethosu_conv2d", "int8", 8, 8, 4, 8, 0, 8, placeholder_3[120], 0, 0, 0, T.float32(0.5), 10, "NHWC", 96, 8, 1, "int8", 8, 8, 16, 8, 0, 8, ethosu_write_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 16, 1, 3, 3, 1, 1, 1, 1, buffer[0], 848, T.int8(-1), T.int8(-1), 12, buffer_1[0], 160, T.int8(-1), T.int8(-1), 1, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
+    __tvm_meta__ = None


-# Undefined vars used
-@tvm.script.ir_module(check_well_formed=False)
+@tvm.script.ir_module class Conv2dInlineCopy2: @T.prim_func def main(input_placeholder_3: T.Buffer((1, 7, 9, 5), "int8"), input_ethosu_write_1: T.Buffer((1, 3, 5, 16), "int8")) -> None: @@ -673,6 +672,7 @@ def main(input_placeholder_3: T.Buffer((1, 7, 9, 5), "int8"), input_ethosu_write ethosu_write_1 = T.Buffer([240], 'int8', data=input_ethosu_write_1.data) # body T.evaluate(T.call_extern("ethosu_conv2d", "int8", 3, 5, 3, 3, 0, 5, placeholder_3[146], 0, 0, 0, T.float32(0.5), 10, "NHWC", 45, 5, 1, "int8", 3, 5, 16, 3, 0, 5, ethosu_write_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 80, 16, 1, 3, 3, 1, 1, 1, 1, buffer_1[0], 656, T.int8(-1), T.int8(-1), 12, buffer[0], 160, T.int8(-1), T.int8(-1), 1, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle")) + __tvm_meta__ = None # fmt: on @@ -699,13 +699,12 @@ def _get_func(ifm_shape, lower, upper, ofm_channels=16): func = _get_func(*params) mod, _ = _lower_to_tir(func) script = mod.script() - mod = tvm.script.from_source(script, check_well_formed=False) + mod = tvm.script.from_source(script) tvm.ir.assert_structural_equal(mod["main"], reference_mod["main"], True) -# Undefined vars used # fmt: off -@tvm.script.ir_module(check_well_formed=False) +@tvm.script.ir_module class Conv2dInlineReshape1: @T.prim_func def main(input_placeholder_3: T.Buffer((4, 6, 8, 1), "int8"), input_ethosu_write_1: T.Buffer((1, 8, 6, 16), "int8")) -> None: @@ -718,10 +717,10 @@ def main(input_placeholder_3: T.Buffer((4, 6, 8, 1), "int8"), input_ethosu_write # body T.evaluate(T.call_extern("ethosu_conv2d", "int8", 5, 6, 4, 5, 0, 6, placeholder_3[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 24, 4, 1, "int8", 4, 6, 16, 4, 0, 6, ethosu_write_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 96, 16, 1, 3, 3, 1, 1, 1, 1, buffer_1[0], 848, T.int8(-1), T.int8(-1), 12, buffer[0], 160, T.int8(-1), T.int8(-1), 1, 1, 0, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle")) T.evaluate(T.call_extern("ethosu_conv2d", "int8", 5, 6, 4, 5, 0, 6, placeholder_3[72], 0, 0, 0, T.float32(0.5), 10, "NHWC", 24, 4, 1, "int8", 4, 6, 16, 4, 0, 6, ethosu_write_1[384], 0, 0, 0, T.float32(0.25), 14, "NHWC", 96, 16, 1, 3, 3, 1, 1, 1, 1, buffer_1[0], 848, T.int8(-1), T.int8(-1), 12, buffer[0], 160, T.int8(-1), T.int8(-1), 0, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle")) + __tvm_meta__ = None -# undefined vars used -@tvm.script.ir_module(check_well_formed=False) +@tvm.script.ir_module class Conv2dInlineReshape2: @T.prim_func def main(input_placeholder_3: T.Buffer((1, 24, 8), "int8"), input_ethosu_write_1: T.Buffer((1, 8, 6, 16), "int8")) -> None: @@ -734,10 +733,10 @@ def main(input_placeholder_3: T.Buffer((1, 24, 8), "int8"), input_ethosu_write_1 # body T.evaluate(T.call_extern("ethosu_conv2d", "int8", 5, 6, 4, 5, 0, 6, placeholder_3[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 24, 4, 1, "int8", 4, 6, 16, 4, 0, 6, ethosu_write_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 96, 16, 1, 3, 3, 1, 1, 1, 1, buffer_1[0], 848, T.int8(-1), T.int8(-1), 12, buffer[0], 160, T.int8(-1), T.int8(-1), 1, 1, 0, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle")) T.evaluate(T.call_extern("ethosu_conv2d", "int8", 5, 6, 4, 5, 0, 6, placeholder_3[72], 0, 0, 0, T.float32(0.5), 10, "NHWC", 24, 4, 1, "int8", 4, 6, 16, 4, 0, 6, ethosu_write_1[384], 0, 0, 0, T.float32(0.25), 14, "NHWC", 96, 16, 1, 3, 3, 1, 1, 1, 1, buffer_1[0], 848, T.int8(-1), T.int8(-1), 12, buffer[0], 160, T.int8(-1), T.int8(-1), 0, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle")) + __tvm_meta__ = None -# undefined vars used 
-@tvm.script.ir_module(check_well_formed=False) +@tvm.script.ir_module class Conv2dInlineReshape3: @T.prim_func def main(input_placeholder_3: T.Buffer((192, 1), "int8"), input_ethosu_write_1: T.Buffer((1, 8, 6, 16), "int8")) -> None: @@ -750,10 +749,10 @@ def main(input_placeholder_3: T.Buffer((192, 1), "int8"), input_ethosu_write_1: # body T.evaluate(T.call_extern("ethosu_conv2d", "int8", 5, 6, 4, 5, 0, 6, placeholder_3[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 24, 4, 1, "int8", 4, 6, 16, 4, 0, 6, ethosu_write_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 96, 16, 1, 3, 3, 1, 1, 1, 1, buffer_1[0], 848, T.int8(-1), T.int8(-1), 12, buffer[0], 160, T.int8(-1), T.int8(-1), 1, 1, 0, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle")) T.evaluate(T.call_extern("ethosu_conv2d", "int8", 5, 6, 4, 5, 0, 6, placeholder_3[72], 0, 0, 0, T.float32(0.5), 10, "NHWC", 24, 4, 1, "int8", 4, 6, 16, 4, 0, 6, ethosu_write_1[384], 0, 0, 0, T.float32(0.25), 14, "NHWC", 96, 16, 1, 3, 3, 1, 1, 1, 1, buffer_1[0], 848, T.int8(-1), T.int8(-1), 12, buffer[0], 160, T.int8(-1), T.int8(-1), 0, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle")) + __tvm_meta__ = None -# undefined vars used -@tvm.script.ir_module(check_well_formed=False) +@tvm.script.ir_module class Conv2dInlineReshape4: @T.prim_func def main(placeholder_3: T.Buffer((192,), "int8"), input_ethosu_write_1: T.Buffer((1, 8, 6, 16), "int8")) -> None: @@ -765,6 +764,7 @@ def main(placeholder_3: T.Buffer((192,), "int8"), input_ethosu_write_1: T.Buffer # body T.evaluate(T.call_extern("ethosu_conv2d", "int8", 5, 6, 4, 5, 0, 6, placeholder_3[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 24, 4, 1, "int8", 4, 6, 16, 4, 0, 6, ethosu_write_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 96, 16, 1, 3, 3, 1, 1, 1, 1, buffer_1[0], 848, T.int8(-1), T.int8(-1), 12, buffer[0], 160, T.int8(-1), T.int8(-1), 1, 1, 0, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle")) T.evaluate(T.call_extern("ethosu_conv2d", "int8", 5, 6, 4, 5, 0, 6, placeholder_3[72], 0, 0, 0, T.float32(0.5), 10, "NHWC", 24, 4, 1, "int8", 4, 6, 16, 4, 0, 6, ethosu_write_1[384], 0, 0, 0, T.float32(0.25), 14, "NHWC", 96, 16, 1, 3, 3, 1, 1, 1, 1, buffer_1[0], 848, T.int8(-1), T.int8(-1), 12, buffer[0], 160, T.int8(-1), T.int8(-1), 0, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle")) + __tvm_meta__ = None # fmt: on @@ -801,7 +801,7 @@ def _get_func(ifm_shape, reshaped, ifm_layout): func = _get_func(*params) mod, _ = _lower_to_tir(func, cascader=total_cascader((1, 4, 6, 16))) script = mod.script() - mod = tvm.script.from_source(script, check_well_formed=False) + mod = tvm.script.from_source(script) tvm.ir.assert_structural_equal(mod["main"], reference_mod["main"], True) diff --git a/tests/python/contrib/test_ethosu/test_replace_copy.py b/tests/python/contrib/test_ethosu/test_replace_copy.py index ff343517352d..94763c5d3fbf 100644 --- a/tests/python/contrib/test_ethosu/test_replace_copy.py +++ b/tests/python/contrib/test_ethosu/test_replace_copy.py @@ -30,9 +30,8 @@ from .infra import make_ethosu_conv2d -# uninitialized varaibles used # fmt: off -@tvm.script.ir_module(check_well_formed=False) +@tvm.script.ir_module class ReferenceModule: @T.prim_func def main(input_placeholder_3: T.Buffer((1, 16, 16, 32), "int8"), input_ethosu_write_1: T.Buffer((1, 16, 16, 8), "int8")) -> None: @@ -46,6 +45,7 @@ def main(input_placeholder_3: T.Buffer((1, 16, 16, 32), "int8"), input_ethosu_wr placeholder_global = T.Buffer([384], "uint8", data=placeholder_global_data) T.evaluate(T.call_extern("ethosu_copy", 
buffer_1[0], 384, placeholder_global[0], dtype="handle")) T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder_3[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 8, 16, 0, 16, ethosu_write_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, placeholder_global[0], 304, T.int8(-1), T.int8(-1), 12, placeholder_global[304], 80, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle")) + __tvm_meta__ = None # fmt: on @@ -69,14 +69,13 @@ def _get_func(): mod, _ = _lower_to_tir(func, cascader=copy_constants()) script = mod.script() - test_mod = tvm.script.from_source(script, check_well_formed=False) + test_mod = tvm.script.from_source(script) reference_mod = ReferenceModule tvm.ir.assert_structural_equal(test_mod["main"], reference_mod["main"], True) -# Uninitialized variables used # fmt: off -@tvm.script.ir_module(check_well_formed=False) +@tvm.script.ir_module class WeightStream: @T.prim_func def main(input_placeholder_5: T.Buffer((1, 16, 16, 32), "int8"), input_ethosu_write_1: T.Buffer((1, 16, 16, 16), "int8")) -> None: @@ -95,6 +94,7 @@ def main(input_placeholder_5: T.Buffer((1, 16, 16, 32), "int8"), input_ethosu_wr T.evaluate(T.call_extern("ethosu_copy", buffer_2[0], 336, placeholder_d_global_1[0], dtype="handle")) T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder_5[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 10, 16, 0, 16, ethosu_write_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 256, 16, 1, 1, 1, 1, 1, 1, 1, placeholder_d_global[0], 416, T.int8(-1), T.int8(-1), 12, placeholder_d_global[416], 112, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle")) T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder_5[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 6, 16, 0, 16, ethosu_write_1[10], 0, 0, 0, T.float32(0.25), 14, "NHWC", 256, 16, 1, 1, 1, 1, 1, 1, 1, placeholder_d_global_1[0], 272, T.int8(-1), T.int8(-1), 12, placeholder_d_global_1[272], 64, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle")) + __tvm_meta__ = None # fmt: on @@ -129,7 +129,7 @@ def _get_func(): mod, _ = _lower_to_tir(func, cascader=_cascader) script = mod.script() - test_mod = tvm.script.from_source(script, check_well_formed=False) + test_mod = tvm.script.from_source(script) reference_mod = WeightStream tvm.ir.assert_structural_equal(test_mod["main"], reference_mod["main"], True) diff --git a/tests/python/contrib/test_ethosu/test_scheduler.py b/tests/python/contrib/test_ethosu/test_scheduler.py index 0b6f4a2629b7..e7abb707a69c 100644 --- a/tests/python/contrib/test_ethosu/test_scheduler.py +++ b/tests/python/contrib/test_ethosu/test_scheduler.py @@ -211,9 +211,8 @@ def test_schedule_cache_reads(): assert list(sch[cr].iter_var_attrs[iv].pragma_values) == ["ethosu_copy"] -# uninitialized variables used # fmt: off -@tvm.script.ir_module(check_well_formed=False) +@tvm.script.ir_module class DiamondGraphTir: @T.prim_func def main(input_placeholder: T.Buffer((1, 56, 56, 96), "int8"), input_ethosu_write: T.Buffer((1, 56, 56, 24), "int8")) -> None: @@ -235,6 +234,7 @@ def main(input_placeholder: T.Buffer((1, 56, 56, 96), "int8"), input_ethosu_writ T.evaluate(T.call_extern("ethosu_conv2d", "int8", 56, 56, 96, 56, 0, 56, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 5376, 96, 1, "int8", 56, 56, 24, 56, 0, 56, p5[0], 0, 0, 0, 
T.float32(0.25), 14, "NHWC", 1344, 24, 1, 1, 1, 1, 1, 1, 1, p1[0], 2608, T.int8(-1), T.int8(-1), 12, p1[2608], 240, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle")) T.evaluate(T.call_extern("ethosu_conv2d", "int8", 56, 56, 24, 56, 0, 56, p5[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 1344, 24, 1, "int8", 56, 56, 24, 56, 0, 56, p6[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 1344, 24, 1, 1, 1, 1, 1, 1, 1, p2[0], 736, T.int8(-1), T.int8(-1), 12, p2[736], 240, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle")) T.evaluate(T.call_extern("ethosu_binary_elementwise", "int8", 56, 56, 24, 56, 0, 56, p5[0], 0, 0, 0,T.float32(1), 0, "NHWC", 1344, 24, 1, "int8", 56, 56, 24, 56, 0, 56, p6[0], 0, 0, 0, T.float32(1), 0, "NHWC", 1344, 24, 1, "int8", 56, 56, 24, 56, 0, 56, ethosu_write[0], 0, 0, 0, T.float32(1), 0, "NHWC", 1344, 24, 1, "ADD", 0, "NONE", 0, 0, "TFL", 0, 0, 0, 0, 0, 0, dtype="handle")) + __tvm_meta__ = None # fmt: on diff --git a/tests/python/contrib/test_ethosu/test_tir_to_cs_translator.py b/tests/python/contrib/test_ethosu/test_tir_to_cs_translator.py index 69076f5337c8..05d6f71037fa 100644 --- a/tests/python/contrib/test_ethosu/test_tir_to_cs_translator.py +++ b/tests/python/contrib/test_ethosu/test_tir_to_cs_translator.py @@ -29,9 +29,8 @@ # fmt: off -# Undefined vars used """A sample tir test case for translator""" -@tvm.script.ir_module(check_well_formed=False) +@tvm.script.ir_module class SingleEthosUConv2D: @T.prim_func def main(placeholder_3: T.Buffer((8192,), "int8"), ethosu_conv2d_1: T.Buffer((1024,), "int8")) -> None: @@ -45,9 +44,8 @@ def main(placeholder_3: T.Buffer((8192,), "int8"), ethosu_conv2d_1: T.Buffer((10 # fmt: off -# undefined vars used """A sample tir test case with multiple convolutions for translator""" -@tvm.script.ir_module(check_well_formed=False) +@tvm.script.ir_module class MultiEthosUConv2D: @T.prim_func def main(placeholder_6: T.Buffer((192,), "int8"), ethosu_conv2d_1: T.Buffer((512,), "int8")) -> None: @@ -68,9 +66,8 @@ def main(placeholder_6: T.Buffer((192,), "int8"), ethosu_conv2d_1: T.Buffer((512 # fmt: off -# undefined vars used """A sample tir test case with copy operations for translator""" -@tvm.script.ir_module(check_well_formed=False) +@tvm.script.ir_module class MultiEthosUCopy: @T.prim_func def main(placeholder_3: T.Buffer((8192,), "int8"), ethosu_conv2d_1: T.Buffer((2048,), "int8")) -> None: @@ -88,9 +85,8 @@ def main(placeholder_3: T.Buffer((8192,), "int8"), ethosu_conv2d_1: T.Buffer((20 # fmt: off -# undefined vars used """A tir test case with copy operation having a buffer size less than the minimum for a DMA operation""" -@tvm.script.ir_module(check_well_formed=False) +@tvm.script.ir_module class CopyLessMinimal: @T.prim_func def main(ethos_u_0_i0: T.Buffer((1, 4), "int8"), ethosu_write: T.Buffer((1, 4), "int8")): @@ -109,9 +105,8 @@ def main(ethos_u_0_i0: T.Buffer((1, 4), "int8"), ethosu_write: T.Buffer((1, 4), # fmt: off -# undefined vars used """A TIR test module of weight streaming""" -@tvm.script.ir_module(check_well_formed=False) +@tvm.script.ir_module class WeightStreamOnly: @T.prim_func def main(placeholder: T.Buffer((8192,), "int8"), ethosu_write: T.Buffer((2048,), "int8")) -> None: @@ -151,13 +146,13 @@ def main(placeholder: T.Buffer((8192,), "int8"), ethosu_write: T.Buffer((2048,), T.evaluate(T.call_extern("ethosu_copy", buffer_6[0], 112, placeholder_global[0], dtype="handle")) T.evaluate(T.call_extern("ethosu_copy", buffer_7[0], 32, 
placeholder_d_global[0], dtype="handle")) T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[6], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, placeholder_global[0], 112, T.int8(-1), T.int8(-1), 12, placeholder_d_global[0], 32, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle")) + __tvm_meta__ = None # fmt: on # fmt: off -# undefined vars used """A TIR test module of weight streaming and direct reading""" -@tvm.script.ir_module(check_well_formed=False) +@tvm.script.ir_module class MixedRead: @T.prim_func def main(placeholder: T.Buffer((8192,), "int8"), ethosu_write: T.Buffer((2048,), "int8")) -> None: @@ -204,6 +199,7 @@ def main(placeholder: T.Buffer((8192,), "int8"), ethosu_write: T.Buffer((2048,), T.evaluate(T.call_extern("ethosu_copy", buffer_8[0], 80, placeholder_global[0], dtype="handle")) T.evaluate(T.call_extern("ethosu_copy", buffer_9[0], 32, placeholder_d_global[0], dtype="handle")) T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 16, 16, 0, 16, ethosu_write_1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 16, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[6], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, placeholder_global[0], 80, T.int8(-1), T.int8(-1), 12, placeholder_d_global[0], 32, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle")) + __tvm_meta__ = None # fmt: on @@ -562,6 +558,7 @@ def main(placeholder: T.handle, placeholder_1: T.handle, placeholder_2: T.handle ethosu_depthwise_conv2d_1 = T.match_buffer(ethosu_depthwise_conv2d, [126], dtype="int8", elem_offset=0, align=64, offset_factor=1) # body T.evaluate(T.call_extern("ethosu_depthwise_conv2d", "int8", 8, 8, 3, 8, 0, 8, placeholder_3[0], 0, 0, 0, T.float32(0.6), 11, "NHWC", 24, 3, 1, "int8", 6, 7, 3, 6, 0, 7, ethosu_depthwise_conv2d_1[0], 0, 0, 0, T.float32(0.26), 15, "NHWC", 21, 3, 1, 2, 3, 1, 1, 1, 1, placeholder_4[0], 18, 13, placeholder_5[0], 30, 0, 0, 0, 0, "CLIP", 15, 105, "TFL", "NONE", 0, 0, 0, dtype="int8")) + __tvm_meta__ = None # fmt: on @@ -709,8 +706,7 @@ def populate_ethosu_copy_calls(stmt): # fmt: off -# undefined vars used -@tvm.script.ir_module(check_well_formed=False) +@tvm.script.ir_module class MixedConstantDatatypes: @T.prim_func def main(placeholder_4: T.Buffer((2048,), "int8"), ethosu_write_1: T.Buffer((16,), "int8")) -> None: @@ -1043,6 +1039,7 @@ def main(placeholder: T.handle, placeholder_3: T.handle, ethosu_write: T.handle) ethosu_write_2 = T.match_buffer(ethosu_write, [75], dtype="int8", elem_offset=0, align=64, offset_factor=1) # body T.evaluate(T.call_extern("ethosu_pooling", "int8", 5, 9, 3, 5, 0, 9, placeholder_4[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "int8", 5, 5, 3, 5, 0, 5, ethosu_write_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 15, 3, 1, "AVG", 2, 3, 2, 1, 1, 1, 1, 1, 1, 0, "CLIP", 10, 100, "TFL", "NONE", 0, 0, 0, dtype="int8")) + __tvm_meta__ = None # fmt: on @@ -1119,6 +1116,8 @@ def main(placeholder: T.handle, ethosu_write: T.handle) -> None: ) # body T.evaluate(T.call_extern( "ethosu_binary_elementwise", "int8", 5, 9, 3, 5, 0, 9, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "int8", 5, 9, 3, 5, 0, 9, placeholder_2[135], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "int8", 5, 9, 3, 5, 0, 9, ethosu_write_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "ADD", 0, "CLIP", 10, 100, "TFL", 0, 0, 0, 0, 
0, 0, dtype="int8")) + + __tvm_meta__ = None # fmt: on # fmt: off @@ -1133,6 +1132,7 @@ def main(placeholder: T.handle, ethosu_write: T.handle) -> None: ethosu_write_2 = T.match_buffer(ethosu_write, [135], dtype="int8", elem_offset=0, align=64, offset_factor=1) # body T.evaluate(T.call_extern("ethosu_binary_elementwise", "int8", 5, 9, 3, 5, 0, 9, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "int8", 5, 9, 3, 5, 0, 9, placeholder_2[135], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "int8", 5, 9, 3, 5, 0, 9, ethosu_write_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "SUB", 0, "CLIP", 10, 100, "TFL", 0, 0, 0, 0, 0, 0, dtype="int8")) + __tvm_meta__ = None # fmt: on # fmt: off @@ -1147,6 +1147,7 @@ def main(placeholder: T.handle, ethosu_write: T.handle) -> None: ethosu_write_2 = T.match_buffer(ethosu_write, [135], dtype="int8", elem_offset=0, align=64, offset_factor=1) # body T.evaluate(T.call_extern("ethosu_binary_elementwise", "int8", 5, 9, 3, 5, 0, 9, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "int8", 5, 9, 3, 5, 0, 9, placeholder_2[135], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "int8", 5, 9, 3, 5, 0, 9, ethosu_write_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "MUL", 0, "CLIP", 10, 100, "TFL", 0, 0, 0, 0, 0, 0, dtype="int8")) + __tvm_meta__ = None # fmt: on @@ -1162,6 +1163,7 @@ def main(placeholder: T.handle, ethosu_write: T.handle) -> None: ethosu_write_2 = T.match_buffer(ethosu_write, [135], dtype="int8", elem_offset=0, align=64, offset_factor=1) # body T.evaluate(T.call_extern("ethosu_binary_elementwise", "int8", 5, 9, 3, 5, 0, 9, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "int8", 5, 9, 3, 5, 0, 9, placeholder_2[135], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "int8", 5, 9, 3, 5, 0, 9, ethosu_write_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "MIN", 0, "CLIP", 10, 100, "TFL", 0, 0, 0, 0, 0, 0, dtype="int8")) + __tvm_meta__ = None # fmt: on @@ -1177,6 +1179,7 @@ def main(placeholder: T.handle, ethosu_write: T.handle) -> None: ethosu_write_2 = T.match_buffer(ethosu_write, [135], dtype="int8", elem_offset=0, align=64, offset_factor=1) # body T.evaluate(T.call_extern("ethosu_binary_elementwise", "int8", 5, 9, 3, 5, 0, 9, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "int8", 5, 9, 3, 5, 0, 9, placeholder_2[135], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "int8", 5, 9, 3, 5, 0, 9, ethosu_write_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "MAX", 0, "CLIP", 10, 100, "TFL", 0, 0, 0, 0, 0, 0, dtype="int8")) + __tvm_meta__ = None # fmt: on @@ -1192,6 +1195,7 @@ def main(placeholder: T.handle, ethosu_write: T.handle) -> None: ethosu_write_2 = T.match_buffer(ethosu_write, [135], dtype="int32", elem_offset=0, align=64, offset_factor=1) # body T.evaluate(T.call_extern("ethosu_binary_elementwise", "int32", 5, 9, 3, 5, 0, 9, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "int32", 5, 9, 3, 5, 0, 9, placeholder_2[135], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "int32", 5, 9, 3, 5, 0, 9, ethosu_write_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "SHR", 0, "NONE", 0, 0, "TFL", 0, 0, 0, 0, 0, 0, dtype="int32")) + __tvm_meta__ = None # fmt: on @@ -1207,6 +1211,7 @@ def main(placeholder: T.handle, ethosu_write: T.handle) -> None: ethosu_write_2 = T.match_buffer(ethosu_write, [135], dtype="int32", elem_offset=0, align=64, offset_factor=1) # body T.evaluate(T.call_extern("ethosu_binary_elementwise", "int32", 5, 9, 3, 5, 0, 9, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, 
"NHWC", 27, 3, 1, "int32", 5, 9, 3, 5, 0, 9, placeholder_2[135], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "int32", 5, 9, 3, 5, 0, 9, ethosu_write_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "SHL", 0, "CLIP", 10, 100, "TFL", 0, 0, 0, 0, 0, 0, dtype="int32")) + __tvm_meta__ = None # fmt: on @@ -1327,6 +1332,7 @@ def main(placeholder: T.handle, ethosu_write: T.handle) -> None: ethosu_write_2 = T.match_buffer(ethosu_write, [24], dtype="int8", elem_offset=0, align=64, offset_factor=1) # body T.evaluate(T.call_extern("ethosu_binary_elementwise", "int8", 2, 3, 4, 2, 0, 3, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 12, 4, 1, "int8", 1, 3, 1, 1, 0, 3, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 1, 1, 1, "int8", 2, 3, 4, 2, 0, 3, ethosu_write_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 12, 4, 1, "ADD", 1, "CLIP", 10, 100, "TFL", 0, 0, 0, 0, 0, 0, dtype="int8")) + __tvm_meta__ = None # fmt: on # fmt: off @@ -1341,6 +1347,7 @@ def main(placeholder: T.handle, ethosu_write: T.handle) -> None: ethosu_write_2 = T.match_buffer(ethosu_write, [24], dtype="int8", elem_offset=0, align=64, offset_factor=1) # body T.evaluate(T.call_extern("ethosu_binary_elementwise", "int8", 2, 3, 4, 2, 0, 3, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 12, 4, 1, "int8", 1, 3, 1, 1, 0, 3, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 1, 1, 1, "int8", 2, 3, 4, 2, 0, 3, ethosu_write_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 12, 4, 1, "SUB", 1, "CLIP", 10, 100, "TFL", 0, 0, 0, 0, 0, 0, dtype="int8")) + __tvm_meta__ = None # fmt: on # fmt: off @@ -1355,6 +1362,7 @@ def main(placeholder: T.handle, ethosu_write: T.handle) -> None: ethosu_write_2 = T.match_buffer(ethosu_write, [24], dtype="int8", elem_offset=0, align=64, offset_factor=1) # body T.evaluate(T.call_extern("ethosu_binary_elementwise", "int8", 2, 3, 4, 2, 0, 3, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 12, 4, 1, "int8", 1, 3, 1, 1, 0, 3, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 1, 1, 1, "int8", 2, 3, 4, 2, 0, 3, ethosu_write_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 12, 4, 1, "MUL", 1, "CLIP", 10, 100, "TFL", 0, 0, 0, 0, 0, 0, dtype="int8")) + __tvm_meta__ = None # fmt: on @@ -1370,6 +1378,7 @@ def main(placeholder: T.handle, ethosu_write: T.handle) -> None: ethosu_write_2 = T.match_buffer(ethosu_write, [24], dtype="int8", elem_offset=0, align=64, offset_factor=1) # body T.evaluate(T.call_extern("ethosu_binary_elementwise", "int8", 2, 3, 4, 2, 0, 3, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 12, 4, 1, "int8", 1, 3, 1, 1, 0, 3, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 1, 1, 1, "int8", 2, 3, 4, 2, 0, 3, ethosu_write_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 12, 4, 1, "MIN", 1, "CLIP", 10, 100, "TFL", 0, 0, 0, 0, 0, 0, dtype="int8")) + __tvm_meta__ = None # fmt: on @@ -1385,6 +1394,7 @@ def main(placeholder: T.handle, ethosu_write: T.handle) -> None: ethosu_write_2 = T.match_buffer(ethosu_write, [24], dtype="int8", elem_offset=0, align=64, offset_factor=1) # body T.evaluate(T.call_extern("ethosu_binary_elementwise", "int8", 2, 3, 4, 2, 0, 3, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 12, 4, 1, "int8", 1, 3, 1, 1, 0, 3, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 1, 1, 1, "int8", 2, 3, 4, 2, 0, 3, ethosu_write_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 12, 4, 1, "MAX", 1, "CLIP", 10, 100, "TFL", 0, 0, 0, 0, 0, 0, dtype="int8")) + __tvm_meta__ = None # fmt: on @@ -1400,6 +1410,7 @@ def main(placeholder: T.handle, ethosu_write: T.handle) -> None: ethosu_write_2 = 
T.match_buffer(ethosu_write, [24], dtype="int32", elem_offset=0, align=64, offset_factor=1) # body T.evaluate(T.call_extern("ethosu_binary_elementwise", "int32", 2, 3, 4, 2, 0, 3, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 12, 4, 1, "int32", 1, 3, 1, 1, 0, 3, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 1, 1, 1, "int32", 2, 3, 4, 2, 0, 3, ethosu_write_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 12, 4, 1, "SHR", 1, "NONE", 0, 0, "TFL", 0, 0, 0, 0, 0, 0, dtype="int32")) + __tvm_meta__ = None # fmt: on @@ -1415,6 +1426,7 @@ def main(placeholder: T.handle, ethosu_write: T.handle) -> None: ethosu_write_2 = T.match_buffer(ethosu_write, [24], dtype="int32", elem_offset=0, align=64, offset_factor=1) # body T.evaluate(T.call_extern("ethosu_binary_elementwise", "int32", 2, 3, 4, 2, 0, 3, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 12, 4, 1, "int32", 1, 3, 1, 1, 0, 3, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 1, 1, 1, "int32", 2, 3, 4, 2, 0, 3, ethosu_write_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 12, 4, 1, "SHL", 1, "CLIP", 10, 100, "TFL", 0, 0, 0, 0, 0, 0, dtype="int32")) + __tvm_meta__ = None # fmt: on diff --git a/tests/python/contrib/test_ethosu/test_vela_api.py b/tests/python/contrib/test_ethosu/test_vela_api.py index 7f4b5b8c7052..16785e182a49 100644 --- a/tests/python/contrib/test_ethosu/test_vela_api.py +++ b/tests/python/contrib/test_ethosu/test_vela_api.py @@ -123,6 +123,8 @@ def main( ) ) + __tvm_meta__ = None + """Test case 2 with per-channel quantization""" @@ -217,10 +219,11 @@ def main( ) ) + __tvm_meta__ = None + -# Complains of the use of undefined vars # fmt: off -@tvm.script.ir_module(check_well_formed=False) +@tvm.script.ir_module class Module3: @T.prim_func def main(ethos_u_0_i0: T.Buffer((1, 299, 299, 2), "int8"), ethosu_write: T.Buffer((1, 299, 299, 3), "int8")): @@ -236,6 +239,8 @@ def main(ethos_u_0_i0: T.Buffer((1, 299, 299, 2), "int8"), ethosu_write: T.Buffe ethos_u_0_i0_1 = T.Buffer((178802,), "int8", data=ethos_u_0_i0.data) ethosu_write_1 = T.Buffer((268203,), "int8", data=ethosu_write.data) T.call_extern("handle", "ethosu_conv2d", "int8", 299, 299, 2, 299, 0, 299, ethos_u_0_i0_1[0], 0, 0, 0, T.float32(0.0039215683937072754), -128, "NHWC", 598, 2, 1, "int8", 299, 299, 3, 299, 0, 299, ethosu_write_1[0], 0, 0, 0, T.float32(0.025585981085896492), -128, "NHWC", 897, 3, 1, 2, 3, 1, 1, 1, 2, p2_global_1[0], 96, T.int8(-1), T.int8(-1), 0, p2_global_1[96], 32, T.int8(-1), T.int8(-1), 2, 0, 2, 1, "NONE", 0, 0, "TFL", "NONE", 32, 12, 8) + + __tvm_meta__ = None # fmt: on diff --git a/tests/python/contrib/test_hexagon/test_dma_builtin.py b/tests/python/contrib/test_hexagon/test_dma_builtin.py index e1c98ac35650..af82c2b55afd 100644 --- a/tests/python/contrib/test_hexagon/test_dma_builtin.py +++ b/tests/python/contrib/test_hexagon/test_dma_builtin.py @@ -49,7 +49,7 @@ def compute_add_in_vtcm(a: T.handle, b: T.handle, c: T.handle) -> None: T.writes(C[v_ax0]) C[v_ax0] = A[v_ax0] + B[v_ax0] - @R.function(pure=False) + @R.function def main( x: R.Tensor((12800,), data_type), y: R.Tensor((12800,), data_type), diff --git a/tests/python/contrib/test_hexagon/test_relax_2d_buffer_allocation.py b/tests/python/contrib/test_hexagon/test_relax_2d_buffer_allocation.py index ae459dc770d7..40de28cca0a8 100644 --- a/tests/python/contrib/test_hexagon/test_relax_2d_buffer_allocation.py +++ b/tests/python/contrib/test_hexagon/test_relax_2d_buffer_allocation.py @@ -46,7 +46,7 @@ def add( T.writes(output[v_ax0, v_ax1]) output[v_ax0, v_ax1] = arg0[v_ax0, v_ax1] + 
arg1[v_ax0, v_ax1] - @R.function(pure=False) + @R.function def main(x: R.Tensor((2, 2), dtype="float32")): cls = Module # Try allocating 2d storage (2,2) in global.vtcm scope with nd allocator diff --git a/tests/python/dlight/test_benchmark.py b/tests/python/dlight/test_benchmark.py index 695a0e90263d..3153be2cc9b0 100644 --- a/tests/python/dlight/test_benchmark.py +++ b/tests/python/dlight/test_benchmark.py @@ -36,11 +36,9 @@ ) import tvm.testing -# The test function uses an undefined symbolic var in Relax. -# In principle, this should be attached to an argument. # pylint: disable=no-self-argument,invalid-name,line-too-long,no-method-argument # fmt: off -@I.ir_module(check_well_formed=False) +@I.ir_module class Module: @T.prim_func def full1(var_T_full: T.handle): diff --git a/tests/python/integration/test_lower.py b/tests/python/integration/test_lower.py index 1d042610ac07..965ab80bebb2 100644 --- a/tests/python/integration/test_lower.py +++ b/tests/python/integration/test_lower.py @@ -22,8 +22,7 @@ from tvm.script import tir as T -# complains that index_i is defined outside of a block -@T.prim_func(check_well_formed=False) +@T.prim_func def tensorcore_gemm(handle_a: T.handle, handle_b: T.handle, handle_c: T.handle) -> None: # pylint: disable=missing-function-docstring # match buffer diff --git a/tests/python/micro/test_aot_legalize_packed_call.py b/tests/python/micro/test_aot_legalize_packed_call.py index 3e66a96dfb43..6f66f3a43283 100644 --- a/tests/python/micro/test_aot_legalize_packed_call.py +++ b/tests/python/micro/test_aot_legalize_packed_call.py @@ -22,8 +22,7 @@ from tvm.script import tir as T -# complains of an undefined var being used -@tvm.script.ir_module(check_well_formed=False) +@tvm.script.ir_module class Module: @T.prim_func def tvm_test_cpacked( @@ -53,7 +52,7 @@ def tir_packed_call() -> None: ) -@tvm.script.ir_module(check_well_formed=False) +@tvm.script.ir_module class Expected: @T.prim_func def tvm_test_cpacked( diff --git a/tests/python/relax/distributed/test_distributed_transform_lower_distir.py b/tests/python/relax/distributed/test_distributed_transform_lower_distir.py index 54f7fa3c613a..3df65b3ea6ff 100644 --- a/tests/python/relax/distributed/test_distributed_transform_lower_distir.py +++ b/tests/python/relax/distributed/test_distributed_transform_lower_distir.py @@ -136,7 +136,7 @@ def foo( ) return lv3 - @I.ir_module(check_well_formed=False) + @I.ir_module class LoweredMLP: I.module_attrs({"device_num": 10}) I.module_global_infos( @@ -331,7 +331,7 @@ def foo( ) return lv4 - @I.ir_module(check_well_formed=False) + @I.ir_module class LoweredMLPWithTuple: I.module_attrs({"device_num": 10}) I.module_global_infos( diff --git a/tests/python/relax/distributed/test_distributed_transform_propagate_sharding.py b/tests/python/relax/distributed/test_distributed_transform_propagate_sharding.py index e1f45d278d6c..990a7b1557e5 100644 --- a/tests/python/relax/distributed/test_distributed_transform_propagate_sharding.py +++ b/tests/python/relax/distributed/test_distributed_transform_propagate_sharding.py @@ -1370,9 +1370,7 @@ def foo( gv: R.Tensor((1, 256, 4096), dtype="float16") = lv44 return gv - # the below uses global vars that are not yet defined but the definitions - # will be added later - @I.ir_module(check_well_formed=False) + @I.ir_module class ShardedLlamaAttentionLayerTIR: I.module_attrs({"device_num": 10}) I.module_global_infos( diff --git a/tests/python/relax/test_analysis.py b/tests/python/relax/test_analysis.py index 28ca13ad8991..abbe380d4839 100644 --- 
a/tests/python/relax/test_analysis.py +++ b/tests/python/relax/test_analysis.py @@ -97,7 +97,7 @@ def test_binding_block_remove_all_unused(): @tvm.script.ir_module class IdentityUnused: - @R.function(pure=False) + @R.function def main(x: R.Tensor((32, 32), "float32")) -> R.Tensor: with R.dataflow(): lv0 = x @@ -113,7 +113,7 @@ def main(x: R.Tensor((32, 32), "float32")) -> R.Tensor: @tvm.script.ir_module class GroundTruth: - @R.function(pure=False) + @R.function def main(x: R.Tensor((32, 32), "float32")) -> R.Tensor: with R.dataflow(): lv0 = x @@ -157,7 +157,7 @@ def test_binding_block_keep_impure_without_dataflow(): contain side effects. """ - @R.function(private=True, pure=False) + @R.function(private=True) def before(x: R.Tensor((32, 32), "float32")) -> R.Tensor: lv0 = x y = R.call_packed("vm.builtin.copy", lv0, sinfo_args=(R.Tensor((32, 32), "float32"))) @@ -185,7 +185,7 @@ def test_binding_block_keep_pure_func_used_only_for_impure(): it was required to evaluate the packed function. """ - @R.function(private=True, pure=False) + @R.function(private=True) def before(x: R.Tensor((32, 32), "int32")): y = x * R.const(2) z = R.call_packed( @@ -202,7 +202,7 @@ def before(x: R.Tensor((32, 32), "int32")): def test_binding_block_remove_all_unused_func_without_dataflow(): @tvm.script.ir_module class IdentityUnused: - @R.function(pure=False) + @R.function def main(x: R.Tensor((32, 32), "float32")) -> R.Tensor: lv0 = x @@ -217,7 +217,7 @@ def internal_unused_func(A: R.Tensor((32, 32), "float32")) -> R.Tensor: @tvm.script.ir_module class GroundTruth: - @R.function(pure=False) + @R.function def main(x: R.Tensor((32, 32), "float32")) -> R.Tensor: lv0 = x z = R.call_packed("vm.builtin.copy", lv0, sinfo_args=(R.Tensor((32, 32), "float32"))) @@ -229,7 +229,7 @@ def main(x: R.Tensor((32, 32), "float32")) -> R.Tensor: def test_binding_block_fake_unused_remove_all_unused(): @tvm.script.ir_module class IdentityUnused: - @R.function(pure=False) + @R.function def main(x: R.Tensor((32, 32), "float32")) -> R.Tensor: with R.dataflow(): lv0 = x @@ -241,7 +241,7 @@ def main(x: R.Tensor((32, 32), "float32")) -> R.Tensor: @tvm.script.ir_module class GroundTruth: - @R.function(pure=False) + @R.function def main(x: R.Tensor((32, 32), "float32")) -> R.Tensor: with R.dataflow(): lv0 = x @@ -256,7 +256,7 @@ def main(x: R.Tensor((32, 32), "float32")) -> R.Tensor: def test_edge_binding_block_fake_unused_remove_all_unused(): @tvm.script.ir_module class IdentityUnused: - @R.function(pure=False) + @R.function def main(x: R.Tensor((32, 32), "float32")) -> R.Tensor((32, 32), "float32"): z = R.call_packed("vm.builtin.copy", x, sinfo_args=(R.Tensor((32, 32), "float32"))) return x @@ -335,14 +335,14 @@ def expected(x: R.Tensor((32, 32), "float32")) -> R.Tensor: def test_retain_impure_calls_unused_in_binding_block(): """An impure call may have side effects, and must be kept""" - @R.function(pure=False) + @R.function def before(x: R.Tensor((32, 32), "float32")) -> R.Tensor: lv0 = x unused0 = R.call_packed("my_impure_call", x, sinfo_args=R.Tensor((32, 32), dtype="float32")) unused1 = R.call_dps_packed("my_unused_call", (lv0,), R.Tensor((32, 32), dtype="float32")) return lv0 - @R.function(pure=False) + @R.function def expected(x: R.Tensor((32, 32), "float32")) -> R.Tensor: lv0 = x unused0 = R.call_packed("my_impure_call", x, sinfo_args=R.Tensor((32, 32), dtype="float32")) diff --git a/tests/python/relax/test_analysis_estimate_memory_usage.py b/tests/python/relax/test_analysis_estimate_memory_usage.py index ab036aab6141..31419b544d23 
100644 --- a/tests/python/relax/test_analysis_estimate_memory_usage.py +++ b/tests/python/relax/test_analysis_estimate_memory_usage.py @@ -66,7 +66,7 @@ def pad( ): T.evaluate(0) - @R.function(pure=False) + @R.function def main(x: R.Tensor((2, 4), dtype="float32")) -> R.Tensor((10,), dtype="float32"): cls = Module storage: R.Object = R.memory.alloc_storage( diff --git a/tests/python/relax/test_ast_printer.py b/tests/python/relax/test_ast_printer.py index 97ad9f5dd034..2a554f16e23f 100644 --- a/tests/python/relax/test_ast_printer.py +++ b/tests/python/relax/test_ast_printer.py @@ -564,7 +564,7 @@ def foo(x: R.Tensor): # axis is -1 assert "PrimExpr(value=`T.int64(-1)`)" in foo_str - @R.function(pure=False) + @R.function def bar(x: R.Tensor): return R.print(x, format="{}") diff --git a/tests/python/relax/test_dataflow_pattern.py b/tests/python/relax/test_dataflow_pattern.py index 583e2a8d0822..a717e3da043f 100644 --- a/tests/python/relax/test_dataflow_pattern.py +++ b/tests/python/relax/test_dataflow_pattern.py @@ -314,7 +314,7 @@ def test_is_call_tir(): assert is_call_tir("tir_zeros", wildcard(), wildcard()).match(lv2_val, var2val=var2val) -@R.function(pure=False) +@R.function def simple_call_packed( x: R.Tensor((32, 32), "float32"), w: R.Tensor((32, 32), "float32") ) -> R.Tensor: diff --git a/tests/python/relax/test_frontend_nn_modules.py b/tests/python/relax/test_frontend_nn_modules.py index 5ddc10505591..9b357114d351 100644 --- a/tests/python/relax/test_frontend_nn_modules.py +++ b/tests/python/relax/test_frontend_nn_modules.py @@ -297,10 +297,9 @@ def forward( lv1: R.Tensor((n, 32, h - 2, w - 2), dtype="float32") = R.nn.conv2d(x, weight) lv2: R.Tensor((1, 32, 1, 1), dtype="float32") = R.reshape(bias, R.shape([1, 32, 1, 1])) conv2d: R.Tensor((n, 32, h - 2, w - 2), dtype="float32") = R.add(lv1, lv2) - gv1: R.Tuple(R.Tensor((n, 32, h - 2, w - 2), dtype="float32"), R.Tuple(R.Object)) = ( - conv2d, - (_io,), - ) + gv1: R.Tuple( + R.Tensor((n, 32, h - 2, w - 2), dtype="float32"), R.Tuple(R.Object) + ) = conv2d, (_io,) R.output(gv1) return gv1 @@ -464,10 +463,9 @@ def forward( get_timestep_embedding: R.Tensor((3, 10), dtype="float32") = R.astype( lv11, dtype="float32" ) - gv1: R.Tuple(R.Tensor((3, 10), dtype="float32"), R.Tuple(R.Object)) = ( - get_timestep_embedding, - (_io,), - ) + gv1: R.Tuple( + R.Tensor((3, 10), dtype="float32"), R.Tuple(R.Object) + ) = get_timestep_embedding, (_io,) R.output(gv1) return gv1 @@ -491,7 +489,7 @@ def _initialize_effect() -> R.Tuple(R.Object, R.Object): lv, R.shape([8, 2, 4]), R.prim_value(0), - sinfo_args=[R.Object()], + sinfo_args=(R.Object,), ) lv1 = _io, cache gv = lv1 @@ -504,12 +502,8 @@ def forward( ) -> R.Tuple(R.Tensor((4, 2, 4), dtype="float32"), R.Tuple(R.Object, R.Object)): R.func_attr({"num_input": 3}) with R.dataflow(): - lv2: R.Object = R.call_inplace_packed( - "vm.builtin.attention_kv_cache_append", - cache, - x, - inplace_indices=[0], - sinfo_args=[R.Object()], + lv2: R.Object = R.call_pure_packed( + "vm.builtin.attention_kv_cache_append", cache, x, sinfo_args=(R.Object,) ) lv3: R.Tensor((4, 2, 4), dtype="float32") = R.call_pure_packed( "vm.builtin.attention_kv_cache_view", @@ -517,10 +511,9 @@ def forward( R.shape([4, 2, 4]), sinfo_args=(R.Tensor((4, 2, 4), dtype="float32"),), ) - gv1: R.Tuple(R.Tensor((4, 2, 4), dtype="float32"), R.Tuple(R.Object, R.Object)) = ( - lv3, - (_io, lv2), - ) + gv1: R.Tuple( + R.Tensor((4, 2, 4), dtype="float32"), R.Tuple(R.Object, R.Object) + ) = lv3, (_io, lv2) R.output(gv1) return gv1 diff --git 
a/tests/python/relax/test_runtime_builtin_paged_attention_kv_cache_tir.py b/tests/python/relax/test_runtime_builtin_paged_attention_kv_cache_tir.py index c33686d16e77..64887ca5b653 100644 --- a/tests/python/relax/test_runtime_builtin_paged_attention_kv_cache_tir.py +++ b/tests/python/relax/test_runtime_builtin_paged_attention_kv_cache_tir.py @@ -566,8 +566,7 @@ def test_paged_attention_kv_cache_sliding_window(kv_cache_and_config): def kv_cache_transpose_append(head_dim, dtype): - # undefined vars used - @T.prim_func(check_well_formed=False) + @T.prim_func def _kv_cache_transpose_append( var_pages: T.handle, var_k_data: T.handle, @@ -605,8 +604,7 @@ def _kv_cache_transpose_append( def copy_cache(head_dim, dtype): - # undefined vars used - @T.prim_func(check_well_formed=False) + @T.prim_func def _copy_cache( var_pages: T.handle, var_position_map: T.handle, @@ -679,8 +677,7 @@ def _rope( # pylint: disable=too-many-arguments ) return cos + sin - # undefined vars used - @T.prim_func(private=True, check_well_formed=False) + @T.prim_func(private=True) def fused_rope( # pylint: disable=too-many-locals var_qkv: T.handle, var_position_map: T.handle, @@ -855,10 +852,9 @@ def _attention_prefill( tile_z = 8 num_warps = 2 - # undefined vars used # pylint: disable=line-too-long,too-many-arguments,too-many-branches # fmt: off - @T.prim_func(check_well_formed=False) + @T.prim_func def batch_prefill_paged_kv( _0: T.int32, # pylint: disable=unused-argument var_q: T.handle, # [total_len, h_q, d] @@ -1218,10 +1214,9 @@ def _attention_decode( tile_size_per_bdx = TILE_SIZE_PER_BDX if GROUP_SIZE == 1 else 1 log2e = math.log2(math.exp(1)) - # undefined vars used # pylint: disable=line-too-long,too-many-arguments,too-many-branches # fmt: off - @T.prim_func(check_well_formed=False) + @T.prim_func def batch_decode_paged_kv( _0: T.int32, # pylint: disable=unused-argument Q_handle: T.handle, @@ -1462,10 +1457,9 @@ def _attention_prefill_ragged( tile_z = 8 num_warps = 2 - # undefined vars used # fmt: off - @T.prim_func(check_well_formed=False) - def batch_prefill_ragged_kv( # pylint: disable=too-many-arguments,too-many-branches + @T.prim_func + def batch_prefill_ragged_kv( # pylint: disable=too-many-arguments,too-many-branches var_q: T.handle, # [total_len, h_q, d] var_q_indptr: T.handle, # [batch_size + 1] var_k: T.handle, # [total_len, h_kv, d] @@ -1781,8 +1775,7 @@ def _merge_state_inplace( bdy //= 2 gdy = num_heads // bdy - # undefined vars used - @T.prim_func(check_well_formed=False) + @T.prim_func def merge_state_inplace( v: T.handle, s: T.handle, diff --git a/tests/python/relax/test_transform_normalize.py b/tests/python/relax/test_transform_normalize.py index 335ca7c70a12..f37df4d07969 100644 --- a/tests/python/relax/test_transform_normalize.py +++ b/tests/python/relax/test_transform_normalize.py @@ -571,11 +571,11 @@ def test_remove_usage_of_void_type_variables(): relax.VarBinding(x, R.assert_op(R.const(True, "bool"))), ] seq = relax.SeqExpr([relax.BindingBlock(bindings)], x) - before = relax.Function([], seq, ret_struct_info=R.Tuple([]), is_pure=False) + before = relax.Function([], seq, ret_struct_info=R.Tuple([])) after = relax.transform.Normalize()(tvm.IRModule({"main": before}))["main"] - @R.function(private=True, pure=False) + @R.function(private=True) def expected(): x = R.assert_op(R.const(True, "bool")) return R.tuple() diff --git a/tests/python/relax/test_transform_normalize_global_var.py b/tests/python/relax/test_transform_normalize_global_var.py index 0dddab02edcf..0a26ffc8e6f6 100644 --- 
a/tests/python/relax/test_transform_normalize_global_var.py +++ b/tests/python/relax/test_transform_normalize_global_var.py @@ -28,7 +28,7 @@ @pytest.mark.skip_well_formed_check_before_transform def test_normalize_relax_function(): - @I.ir_module(check_well_formed=False) + @I.ir_module class Before: @R.function(private=True) def f(): @@ -62,7 +62,7 @@ def f1(): @pytest.mark.skip_well_formed_check_before_transform def test_normalize_tir_function(): - @I.ir_module(check_well_formed=False) + @I.ir_module class Before: @T.prim_func(private=True) def f(x: T.Buffer((1,), "int32")): diff --git a/tests/python/relax/test_transform_operator_specific_normalization.py b/tests/python/relax/test_transform_operator_specific_normalization.py index beb1ee85946a..4ee17166452f 100644 --- a/tests/python/relax/test_transform_operator_specific_normalization.py +++ b/tests/python/relax/test_transform_operator_specific_normalization.py @@ -74,12 +74,11 @@ def test_normalization_suppressed_for_tvmscript(custom_op): """FNormalize isn't applied when parsing TVMScript TVMScript should be able to produce un-normalized Relax IR for - specifying test cases if the well-formed check is disabled, - and to ensure that no changes occur when performing a round-trip - through TVMScript. + specifying test cases, and to ensure that no changes occur when + performing a round-trip through TVMScript. """ - @R.function(check_well_formed=False) + @R.function def func(A: R.Tensor): return relax.Call(custom_op, [A]) @@ -96,7 +95,7 @@ def func(A: R.Tensor): def test_normalization_applied_during_cpp_mutator(custom_op): """FNormalize is applied by relax::ExprMutator subclasses""" - @I.ir_module(check_well_formed=False) + @I.ir_module class Before: @R.function def main(A: R.Tensor): @@ -117,7 +116,7 @@ def main(A: R.Tensor): def test_normalization_applied_during_python_mutator(custom_op): """FNormalize is applied by relax.ExprMutator subclasses""" - @R.function(private=True, check_well_formed=False) + @R.function(private=True) def before(A: R.Tensor): return relax.Call(custom_op, [A]) @@ -156,7 +155,7 @@ def test_un_normalized_call_node_is_ill_formed(custom_op, define_normalization): FNormalize has no corresponding check applied. """ - @I.ir_module(check_well_formed=False) + @I.ir_module class Module: @R.function def main(A: R.Tensor): @@ -172,7 +171,7 @@ def main(A: R.Tensor): def test_normalize_to_inline_tuple_for_call_tir(custom_op): """FNormalize in-lines the argument tuple for R.call_tir""" - @I.ir_module(check_well_formed=False) + @I.ir_module class Before: @R.function def main(A: R.Tensor([16], "float32")): @@ -220,7 +219,7 @@ def test_normalize_argument_to_inline_tuple_for_call_tir(custom_op): argument tuple is provided as a relax function argument. """ - @I.ir_module(check_well_formed=False) + @I.ir_module class Before: @R.function def main(args: R.Tuple([R.Tensor([16], "float32")])): @@ -262,9 +261,9 @@ def multiply_by_two(A: T.Buffer(16, "float32"), B: T.Buffer(16, "float32")): def test_normalize_to_inline_tuple_for_call_tir_inplace(custom_op): """FNormalize in-lines the argument tuple for R.call_tir_inplace""" - # The CallTIRInplaceAttrs is difficult to construct in the Python - # API, so it is more convenient to declare the expected one first - # and reuse its attributes + # The CallTIRInplaceAttrs cannot be constructed from the Python + # API. Therefore, the Expected output is declared first, so that + # its attributes can be reused for the non-normalized Before.
@I.ir_module class Expected: @R.function @@ -285,7 +284,7 @@ def multiply_by_two(A: T.Buffer(16, "float32")): inplace_attrs = Expected["main"].body.blocks[0].bindings[1].value.attrs - @I.ir_module(check_well_formed=False) + @I.ir_module class Before: @R.function def main(A: R.Tensor([16], "float32")): @@ -313,9 +312,9 @@ def multiply_by_two(A: T.Buffer(16, "float32")): def test_normalize_to_inline_tuple_for_call_tir_with_grad(custom_op): """FNormalize in-lines the argument tuple for R.call_tir_with_grad""" - # The CallTIRWithGradAttrs is difficult to construct in the Python - # API, so it is more convenient to declare the expected one first - # and reuse its attributes + # The CallTIRWithGradAttrs cannot be constructed from the Python + # API. Therefore, the Expected output is declared first, so that + # its attributes can be reused for the non-normalized Before. @I.ir_module class Expected: @R.function @@ -343,7 +342,7 @@ def f_grad( with_grad_attrs = Expected["main"].body.blocks[0].bindings[1].value.attrs - @I.ir_module(check_well_formed=False) + @I.ir_module class Before: @R.function def main(A: R.Tensor([16], "float32")): diff --git a/tests/python/relax/test_transform_rewrite_cuda_graph.py b/tests/python/relax/test_transform_rewrite_cuda_graph.py index 91b3fce2640a..dc115939a7e4 100644 --- a/tests/python/relax/test_transform_rewrite_cuda_graph.py +++ b/tests/python/relax/test_transform_rewrite_cuda_graph.py @@ -709,7 +709,7 @@ def main(): def test_static_args(): @I.ir_module class Before: - @R.function(pure=False) + @R.function def main(): storage0 = R.memory.alloc_storage(R.shape([8]), 0, "global", "float32") alloc0 = R.memory.alloc_tensor(storage0, 0, R.shape([8]), "float32") @@ -734,7 +734,7 @@ def cuda_graph_capture(alloc0: R.Tensor((8,), dtype="float32")) -> R.Tuple: gv: R.Tuple = R.tuple() return gv - @R.function(pure=False) + @R.function def main() -> R.Tuple: cls = Expected gv: R.Tuple(R.Object) = R.call_builtin_with_ctx( diff --git a/tests/python/relax/test_tvmscript_parser.py b/tests/python/relax/test_tvmscript_parser.py index 3f806de28dbd..48d087c18a20 100644 --- a/tests/python/relax/test_tvmscript_parser.py +++ b/tests/python/relax/test_tvmscript_parser.py @@ -821,14 +821,14 @@ def foo(x: R.Tensor((32, 32), "float32")) -> R.Tensor((32, 32), "float32"): def test_call_packed(): - @R.function(pure=False) + @R.function def foo(x: R.Tensor((32, 32), "float32")) -> R.Tensor: z = R.call_packed("vm.builtin.copy", x, sinfo_args=R.Tensor((32, 32), "float32")) return z x = relax.Var("x", R.Tensor((32, 32), "float32")) bb = relax.BlockBuilder() - with bb.function("foo", (x), pure=False): + with bb.function("foo", (x)): z = bb.emit( relax.Call( relax.ExternFunc("vm.builtin.copy"), @@ -843,14 +843,14 @@ def foo(x: R.Tensor((32, 32), "float32")) -> R.Tensor: def test_call_packed_without_sinfo_args(): - @R.function(pure=False) + @R.function def foo(x: R.Object) -> R.Object: z = R.call_packed("test", x) return z x = relax.Var("x", R.Object()) bb = relax.BlockBuilder() - with bb.function("foo", (x), pure=False): + with bb.function("foo", (x)): z = bb.emit( relax.Call( relax.ExternFunc("test"), @@ -865,7 +865,7 @@ def foo(x: R.Object) -> R.Object: def test_annotation(): - @R.function(pure=False) + @R.function def foo( x: R.Tensor((32, "m"), "float32"), y: R.Tensor(("m",), "float32"), @@ -1576,7 +1576,7 @@ def foo(x: R.Tensor(("m", "n"), dtype="float32")): def test_prim_value(): - @R.function(pure=False) + @R.function def foo(): gv = R.call_packed("test", 1, sinfo_args=R.Tensor((32, 32),
"float32")) return gv @@ -1585,7 +1585,7 @@ def foo(): def test_string_imm(): - @R.function(pure=False) + @R.function def foo(): gv = R.call_packed("test", "hello", sinfo_args=R.Tensor((32, 32), "float32")) return gv @@ -1594,7 +1594,7 @@ def foo(): def test_datatype_imm(): - @R.function(pure=False) + @R.function def foo(): gv = R.call_packed("test", R.dtype("float32"), sinfo_args=R.Tensor((32, 32), "float32")) return gv diff --git a/tests/python/relax/test_vm_alloc_storage_with_scope.py b/tests/python/relax/test_vm_alloc_storage_with_scope.py index 17ae449a5d6a..ca1802b1f527 100644 --- a/tests/python/relax/test_vm_alloc_storage_with_scope.py +++ b/tests/python/relax/test_vm_alloc_storage_with_scope.py @@ -44,7 +44,7 @@ def add( T.writes(output[v_ax0, v_ax1]) output[v_ax0, v_ax1] = arg0[v_ax0, v_ax1] + arg1[v_ax0, v_ax1] - @R.function(pure=False) + @R.function def main(x: R.Tensor((2, 2), dtype="float32")): cls = Module storage = R.vm.alloc_storage( diff --git a/tests/python/relax/test_vm_codegen_only.py b/tests/python/relax/test_vm_codegen_only.py index a93eb8350ce2..0d461f0713c2 100644 --- a/tests/python/relax/test_vm_codegen_only.py +++ b/tests/python/relax/test_vm_codegen_only.py @@ -42,7 +42,7 @@ def codegen(mod, target, exec_mode="bytecode"): def test_vm_copy(exec_mode): @tvm.script.ir_module class TestVMMove: - @R.function(pure=False) + @R.function def foo(x: R.Tensor((3, 4), "float32")): R.func_attr({"global_symbol": "foo"}) z = R.call_packed("vm.builtin.copy", x, sinfo_args=(R.Tensor((3, 4), dtype="float32"))) @@ -61,7 +61,7 @@ def foo(x: R.Tensor((3, 4), "float32")): def test_vm_to_device(exec_mode): @tvm.script.ir_module class TestVMToDevice: - @R.function(pure=False) + @R.function def foo(x: R.Tensor((3, 4), "float32")): R.func_attr({"global_symbol": "foo"}) # Copy x to the first cpu: device_type=1 and device_id=0. 
@@ -110,7 +110,7 @@ def main(x: R.Tensor(ndim=2, dtype="float32")) -> R.Tensor(ndim=2, dtype="float3 def test_vm_exec_serialize_export_library(exec_mode): @tvm.script.ir_module class TestVMMove: - @R.function(pure=False) + @R.function def foo(x: R.Tensor((3, 4), "float32")): R.func_attr({"global_symbol": "foo"}) z = R.call_packed("vm.builtin.copy", x, sinfo_args=(R.Tensor((3, 4), dtype="float32"))) @@ -133,7 +133,7 @@ def foo(x: R.Tensor((3, 4), "float32")): def test_if_cond(exec_mode): @tvm.script.ir_module class TestVMCompileIf: - @R.function(pure=False) + @R.function def ife(cond: R.Tensor((), "bool"), x: R.Tensor((3, 4), "float32")) -> R.Tensor: R.func_attr({"global_symbol": "ife"}) if cond: @@ -183,7 +183,7 @@ def main(x: R.Tensor(ndim=2, dtype="float32")): def test_vm_const_as_call_arg(exec_mode): @tvm.script.ir_module class TestVMConstAsCallArg: - @R.function(pure=False) + @R.function def main(x: R.Tensor(ndim=2, dtype="float32")): R.func_attr({"global_symbol": "main"}) a = R.call_packed( @@ -219,7 +219,7 @@ def test_shape_check_builtin(exec_mode): @tvm.script.ir_module class TestVMShapeCheck: - @R.function(pure=False) + @R.function def main(x: R.Tensor(["n", "m"], "float32")) -> R.Shape(ndim=3): R.func_attr({"global_symbol": "main"}) n = T.int64() @@ -338,7 +338,7 @@ def main(): def test_vm_builtin_reshape(exec_mode): @tvm.script.ir_module class TestVMBuiltinReshape: - @R.function(pure=False) + @R.function def main(x: R.Tensor((3, 4), "float32")): R.func_attr({"global_symbol": "main"}) y = R.call_packed( @@ -383,8 +383,7 @@ def full1(T_full: T.Buffer((T.int64(4),), "float32")): T.writes(T_full[v_ax0]) T_full[v_ax0] = T.float32(1) - # PrimFuncs called directly are treated as impure - @R.function(pure=False) + @R.function def main() -> R.Tensor((4,), dtype="float32"): R.func_attr({"global_symbol": "main"}) cls = TestKillObject @@ -426,7 +425,7 @@ def main() -> R.Tensor((4,), dtype="float32"): def test_preserve_trivial_bindings(exec_mode): @I.ir_module class mod: - @R.function(pure=False) + @R.function def main(): callback = R.ExternFunc("test.vm.check_if_defined") diff --git a/tests/python/relax/test_vm_codegen_tir.py b/tests/python/relax/test_vm_codegen_tir.py index 21e192955b93..d82715a3946f 100644 --- a/tests/python/relax/test_vm_codegen_tir.py +++ b/tests/python/relax/test_vm_codegen_tir.py @@ -34,7 +34,7 @@ def get_tir_mod(mod): def test_add(): @tvm.script.ir_module class Before: - @R.function(pure=False) + @R.function def foo(x: R.Tensor): R.func_attr({"global_symbol": "foo"}) z = R.call_packed("test.vm.add", x, x, sinfo_args=(R.Tensor)) @@ -71,7 +71,7 @@ def shape_func(H: T.Buffer(T.int64(4), "int64")): # generated compute function H[T.int64(0)] = H[T.int64(0)] + T.int64(1) - @R.function(pure=False) + @R.function def foo(x: R.Tensor): R.func_attr({"global_symbol": "foo"}) _ = Before.shape_func(x) @@ -104,7 +104,7 @@ def __vmtir__foo(ctx_ptr: T.handle, r: T.handle, c: T.handle, f: T.handle): def test_if_cond(): @tvm.script.ir_module class Before: - @R.function(pure=False) + @R.function def ife(cond: R.Tensor((), "bool"), x: R.Tensor) -> R.Tensor: R.func_attr({"global_symbol": "ife"}) if cond: @@ -191,7 +191,7 @@ def __vmtir__main(ctx_ptr: T.handle, r: T.handle, c: T.handle, f: T.handle): def test_const_call(): @tvm.script.ir_module class Before: - @R.function(pure=False) + @R.function def main(x: R.Tensor): R.func_attr({"global_symbol": "main"}) y = R.const([1, 2]) diff --git a/tests/python/relax/test_vm_cuda_graph.py b/tests/python/relax/test_vm_cuda_graph.py index 
6a20b6b1f892..8406b9df15d3 100644 --- a/tests/python/relax/test_vm_cuda_graph.py +++ b/tests/python/relax/test_vm_cuda_graph.py @@ -27,7 +27,7 @@ @I.ir_module class Module: - @R.function(pure=False) + @R.function def main(x: R.Tensor((16, 16), dtype="float32")) -> R.Tensor((16, 16), dtype="float32"): cls = Module R.func_attr({"global_symbol": "main"}) @@ -63,7 +63,7 @@ def cuda_graph_alloc() -> R.Tuple(R.Object, R.Object): gv: R.Tuple(R.Object, R.Object) = (storage, storage1) return gv - @R.function(pure=False) + @R.function def cuda_graph_capture(alloc: R.Tensor((16, 16), dtype="float32"), storage1: R.Object, storage: R.Object) -> R.Tuple(R.Tensor((16, 16), dtype="float32")): cls = Module R.func_attr({"global_symbol": "cuda_graph_capture"}) diff --git a/tests/python/tir-analysis/test_tir_analysis_identify_memcpy.py b/tests/python/tir-analysis/test_tir_analysis_identify_memcpy.py index 8510a66d308d..b69d3aea3ea3 100644 --- a/tests/python/tir-analysis/test_tir_analysis_identify_memcpy.py +++ b/tests/python/tir-analysis/test_tir_analysis_identify_memcpy.py @@ -32,7 +32,6 @@ class BaseTest: """Utility class for defining unit tests for memcpy""" def __init_subclass__(cls): - cls.check_well_formed = True # CompareBeforeAfter has a member var cls.func = tvm.testing.CompareBeforeAfter._normalize_before(cls.func) cls.expected = pytest.fixture(cls.expected) diff --git a/tests/python/tir-analysis/test_tir_analysis_oob.py b/tests/python/tir-analysis/test_tir_analysis_oob.py index c4d520881797..7c8ceed36e10 100644 --- a/tests/python/tir-analysis/test_tir_analysis_oob.py +++ b/tests/python/tir-analysis/test_tir_analysis_oob.py @@ -43,7 +43,8 @@ def bad_store_loop(A: T.Buffer((2, 3), "float32"), B: T.Buffer((3, 2), "float32" @T.prim_func -def unknown_bounds(A: T.Buffer((2, 3), "float32"), B: T.Buffer((3, 2), "float32"), N: T.int32): +def unknown_bounds(A: T.Buffer((2, 3), "float32"), B: T.Buffer((3, 2), "float32")): + N = T.int32() for i in range(3): B[0, N] = A[1, i] diff --git a/tests/python/tir-analysis/test_tir_analysis_verify_well_formed.py b/tests/python/tir-analysis/test_tir_analysis_verify_well_formed.py index 629549721e4a..a1b3bee1b282 100644 --- a/tests/python/tir-analysis/test_tir_analysis_verify_well_formed.py +++ b/tests/python/tir-analysis/test_tir_analysis_verify_well_formed.py @@ -43,7 +43,7 @@ def element_wise( def test_fail_use_out_loop_var(): - @T.prim_func(check_well_formed=False) + @T.prim_func def element_wise( A: T.Buffer((128, 128), "float32"), B: T.Buffer((128, 128), "float32"), @@ -60,7 +60,7 @@ def element_wise( def test_error_for_out_of_scope_usage(): """A variable may not be used after its scope ends""" - @T.prim_func(check_well_formed=False) + @T.prim_func def func(): i = T.int32() with T.LetStmt(42, var=i): @@ -76,7 +76,7 @@ def func(): def test_error_for_nested_rebind_usage(): """A variable may not be re-defined within the initial scope""" - @T.prim_func(check_well_formed=False) + @T.prim_func def func(): i = T.int32() with T.LetStmt(42, var=i): @@ -92,7 +92,7 @@ def func(): def test_error_for_repeated_binding(): """A variable may not be re-defined after the scope ends""" - @T.prim_func(check_well_formed=False) + @T.prim_func def func(): i = T.int32() with T.LetStmt(42, var=i): @@ -109,7 +109,7 @@ def test_error_for_cross_function_reuse(): i = tvm.tir.Var("i", "int32") - @I.ir_module(check_well_formed=False) + @I.ir_module class mod: @T.prim_func def func1(): @@ -175,7 +175,7 @@ def test_reuse_of_env_thread_across_functions_is_ill_formed(): threadIdx_x = 
tvm.tir.Var("threadIdx_x", "int32") - @I.ir_module(check_well_formed=False) + @I.ir_module class mod: @T.prim_func def kernel_1(A: T.Buffer([256], "float32")): diff --git a/tests/python/tir-base/test_tir_renew_defs.py b/tests/python/tir-base/test_tir_renew_defs.py index 7fe8d7c679fa..22f7b65ca17b 100644 --- a/tests/python/tir-base/test_tir_renew_defs.py +++ b/tests/python/tir-base/test_tir_renew_defs.py @@ -82,9 +82,7 @@ def _get_block(f): def test_match_buffer(): - # well-formed checker complains about multiple definitions for variable A0_s1, - # likely stemming from strides=[s, s] - @T.prim_func(check_well_formed=False) + @T.prim_func # A and B should be remapped def func_match_buffer(A: T.Buffer((128, 128), "float32"), B: T.Buffer((128, 128), "float32")): with T.block("root"): diff --git a/tests/python/tir-base/test_tir_specialize.py b/tests/python/tir-base/test_tir_specialize.py index 042288723376..fd2843f743be 100644 --- a/tests/python/tir-base/test_tir_specialize.py +++ b/tests/python/tir-base/test_tir_specialize.py @@ -67,9 +67,7 @@ def matmul_m_128(a: T.handle, b: T.handle, c: T.handle) -> None: C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk] -# x is considered undefined because it appears as part of x*8, -# but not on its own -@T.prim_func(check_well_formed=False) +@T.prim_func def matmul_m_8x(a: T.handle, b: T.handle, c: T.handle) -> None: x = T.int32() m = T.int32() @@ -279,9 +277,7 @@ def before(A: T.Buffer([16, 16], "float32"), B: T.Buffer([16, 16], "float32")): for i in range(256): B_flat[i] = A_flat[i] * 2.0 - # well-formed checker complains about multiple nested definitions of B_flat - # since it appears in the buffer map twice - @T.prim_func(private=True, check_well_formed=False) + @T.prim_func(private=True) def expected(A: T.Buffer([16, 16], "float32"), B_handle: T.handle): B = T.match_buffer(B_handle, [16, 16], "float32", data=A.data) A_flat = T.decl_buffer([256], "float32", data=A.data) diff --git a/tests/python/tir-schedule/test_tir_schedule_rfactor.py b/tests/python/tir-schedule/test_tir_schedule_rfactor.py index a15bd3d9137b..37e68fa21a0e 100644 --- a/tests/python/tir-schedule/test_tir_schedule_rfactor.py +++ b/tests/python/tir-schedule/test_tir_schedule_rfactor.py @@ -951,8 +951,7 @@ def argmax_split_body_bufferstore_value_not_var( argmax_v1[i] = v_argmax_v1 -# v_unbound is unbound -@T.prim_func(check_well_formed=False) +@T.prim_func def argmax_split_body_bufferstore_value_unbound_var( idx: T.Buffer((128, 128), "int32"), val: T.Buffer((128, 128), "float32"), diff --git a/tests/python/tir-transform/test_tir_transform_common_subexpr_elim.py b/tests/python/tir-transform/test_tir_transform_common_subexpr_elim.py index e64d3c74932b..7be1038ce5d4 100644 --- a/tests/python/tir-transform/test_tir_transform_common_subexpr_elim.py +++ b/tests/python/tir-transform/test_tir_transform_common_subexpr_elim.py @@ -349,34 +349,34 @@ def test_no_normalization_without_commoning(): # Part for testing the commoning with equivalences # ------------------------------------------------- @T.prim_func -def func_distributivity( - B: T.Buffer((50,), "int32"), i1: T.int32, i2: T.int32, x: T.int32, y: T.int32, z: T.int32 -) -> None: +def func_distributivity(i1: T.int32, i2: T.int32, x: T.int32, y: T.int32, z: T.int32) -> None: + B = T.Buffer((50,), "int32") B[i1] = x * (y + z) B[i2] = x * y + x * z @T.prim_func def func_distributivity_expected( - B: T.Buffer((50,), "int32"), i1: T.int32, i2: T.int32, x: T.int32, y: T.int32, z: T.int32 + i1: T.int32, i2: T.int32, x: T.int32, y: T.int32, z: T.int32 
) -> None: + B = T.Buffer((50,), "int32") with T.LetStmt(x * y + x * z) as cse_var_1: B[i1] = cse_var_1 B[i2] = cse_var_1 @T.prim_func -def func_associativity( - B: T.Buffer((50,), "int32"), i1: T.int32, i2: T.int32, x: T.int32, y: T.int32, z: T.int32 -) -> None: +def func_associativity(i1: T.int32, i2: T.int32, x: T.int32, y: T.int32, z: T.int32) -> None: + B = T.Buffer((50,), "int32") B[i1] = (x + y) + z B[i2] = x + (y + z) @T.prim_func def func_associativity_expected( - B: T.Buffer((50,), "int32"), i1: T.int32, i2: T.int32, x: T.int32, y: T.int32, z: T.int32 + i1: T.int32, i2: T.int32, x: T.int32, y: T.int32, z: T.int32 ) -> None: + B = T.Buffer((50,), "int32") with T.LetStmt((x + y) + z) as cse_var_1: B[i1] = cse_var_1 B[i2] = cse_var_1 @@ -460,7 +460,6 @@ def test_deterministic_cse(): ["PR", 3, 0, "auto_unroll_max_step$512"], ["AN", 1, 3, 2], ["AN", 3, 21, 2], \ ["AN", 6, 6, 2]]]], "r": [[0.0331129], 0, 0.900362, 1647464342], "v": "v0.6"}\n' - # The workload associated with the log @auto_scheduler.register_workload def conv2d_layer(N, H, W, CO, CI, KH, KW, stride, padding): diff --git a/tests/python/tir-transform/test_tir_transform_convert_blocks_to_opaque.py b/tests/python/tir-transform/test_tir_transform_convert_blocks_to_opaque.py index f920a46ba57e..8fbbaf59bb58 100644 --- a/tests/python/tir-transform/test_tir_transform_convert_blocks_to_opaque.py +++ b/tests/python/tir-transform/test_tir_transform_convert_blocks_to_opaque.py @@ -15,7 +15,6 @@ # specific language governing permissions and limitations # under the License. import tvm -import tvm.testing from tvm import tir, te from tvm.script import tir as T @@ -85,7 +84,6 @@ def test_lower_te(): class TestErrorIfPredicateUsesBlockVariables(tvm.testing.CompareBeforeAfter): transform = tvm.tir.transform.ConvertBlocksToOpaque() - check_well_formed = False def before(A: T.Buffer(8, "int32")): for i in T.serial(8): diff --git a/tests/python/tir-transform/test_tir_transform_convert_ssa.py b/tests/python/tir-transform/test_tir_transform_convert_ssa.py index ec768ba74f7b..644ab3b624ef 100644 --- a/tests/python/tir-transform/test_tir_transform_convert_ssa.py +++ b/tests/python/tir-transform/test_tir_transform_convert_ssa.py @@ -327,8 +327,7 @@ class TestDeDuplicateThreadIdxAcrossMultipleFunctions(BaseBeforeAfter): def before(self): threadIdx_x = tvm.tir.Var("threadIdx_x", "int32") - # threadIdx_x is defined outside - @I.ir_module(check_well_formed=False) + @I.ir_module class mod: @T.prim_func def kernel_1(A: T.Buffer([256], "float32")): @@ -390,8 +389,7 @@ def before(self): tvm.ir.Range(0, 256), threadIdx_x, tvm.tir.IterVar.ThreadIndex, "threadIdx.x" ) - # complaints of multiple definitions for threadIdx_x - @I.ir_module(check_well_formed=False) + @I.ir_module class mod: @T.prim_func def kernel_1(A: T.Buffer([256], "float32")): @@ -406,7 +404,7 @@ def kernel_2(A: T.Buffer([256], "float32")): return mod def expected(self): - @I.ir_module(check_well_formed=False) + @I.ir_module class mod: @T.prim_func def kernel_1(A: T.Buffer([256], "float32")): @@ -447,8 +445,7 @@ def before(self): tvm.ir.Range(0, 256), threadIdx_x, tvm.tir.IterVar.ThreadIndex, "threadIdx.x" ) - # complaints of multiple definitions of threadIdx_x - @I.ir_module(check_well_formed=False) + @I.ir_module class mod: @T.prim_func def kernel_1(A: T.Buffer([256], "float32")): diff --git a/tests/python/tir-transform/test_tir_transform_fp8_legalize.py b/tests/python/tir-transform/test_tir_transform_fp8_legalize.py index 6e44b53d0cae..f5786808a6f3 100644 --- 
a/tests/python/tir-transform/test_tir_transform_fp8_legalize.py +++ b/tests/python/tir-transform/test_tir_transform_fp8_legalize.py @@ -17,7 +17,6 @@ import tvm import tvm.script import tvm.testing -from tvm.target import Target from tvm.script import tir as T # pylint: disable=no-member,invalid-name,unused-variable @@ -205,20 +204,18 @@ def main(Aptr: T.handle("uint8"), Bptr: T.handle("uint8"), Dptr: T.handle("uint8 def test_fp8_compute_legalize(dtype, promote_dtype): - target = Target("cuda") before = get_before(dtype) expected = get_after_compute_legalize(dtype, promote_dtype) # run the transform twice to ensure we can afford to deal # with this repeative optimizations - after = tvm.tir.transform.FP8ComputeLegalize(target, promote_dtype)(before) - after = tvm.tir.transform.FP8ComputeLegalize(target, promote_dtype)(after) + after = tvm.tir.transform.FP8ComputeLegalize(promote_dtype)(before) + after = tvm.tir.transform.FP8ComputeLegalize(promote_dtype)(after) tvm.ir.assert_structural_equal(after, expected) def test_fp8_storage_legalize(dtype, promote_dtype): - target = Target("cuda") before = get_after_compute_legalize(dtype, promote_dtype) - after = tvm.tir.transform.FP8StorageLegalize(target)(before) + after = tvm.tir.transform.FP8StorageLegalize()(before) expected = get_after_storage_legalize(dtype, promote_dtype) tvm.ir.assert_structural_equal(after, expected) diff --git a/tests/python/tir-transform/test_tir_transform_inject_rolling_buffer.py b/tests/python/tir-transform/test_tir_transform_inject_rolling_buffer.py index c1c8141f70a7..b7bd6cb46fd6 100644 --- a/tests/python/tir-transform/test_tir_transform_inject_rolling_buffer.py +++ b/tests/python/tir-transform/test_tir_transform_inject_rolling_buffer.py @@ -199,109 +199,49 @@ def test_mixed_buffers(make_rolling): _verify_schedule(sch, [A], pool_c) +# fmt: off @tvm.script.ir_module class PreRollingBuffer: @T.prim_func - def main( - A: T.handle, - tensor: T.handle, - tensor_2: T.Buffer( - [1, 10, 12, 16], - dtype="int8", - elem_offset=0, - align=64, - offset_factor=1, - ), - ) -> None: + def main(A: T.handle, tensor: T.handle) -> None: # function attr dict - T.func_attr( - { - "from_legacy_te_schedule": True, - "global_symbol": "main", - "tir.noalias": True, - } - ) - A_1 = T.match_buffer( - A, [1, 12, 14, 16], dtype="int8", elem_offset=0, align=64, offset_factor=1 - ) - tensor_1 = T.match_buffer( - tensor, [1, 8, 8, 16], dtype="int8", elem_offset=0, align=64, offset_factor=1 - ) + T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True}) + # buffer definition + tensor_2 = T.Buffer([1, 10, 12, 16], dtype="int8", elem_offset=0, align=64, offset_factor=1) + A_1 = T.match_buffer(A, [1, 12, 14, 16], dtype="int8", elem_offset=0, align=64, offset_factor=1) + tensor_1 = T.match_buffer(tensor, [1, 8, 8, 16], dtype="int8", elem_offset=0, align=64, offset_factor=1) # body T.realize(tensor_1[0:1, 0:8, 0:8, 0:16], "") for ax1_outer in T.serial(0, 2): - T.realize(tensor_2[0:1, (ax1_outer * 4) : ((ax1_outer * 4) + 6), 0:12, 0:16], "") + T.realize(tensor_2[0:1, (ax1_outer*4):((ax1_outer*4) + 6), 0:12, 0:16], "") T.attr(tensor_2, "rolling_buffer_scope", True) for ax1 in T.serial(0, 6): for ax2 in T.serial(0, 12): for ax3 in T.serial(0, 16): - tensor_2[0, (ax1 + (ax1_outer * 4)), ax2, ax3] = T.int8(0) + tensor_2[0, (ax1 + (ax1_outer*4)), ax2, ax3] = T.int8(0) for dh in T.serial(0, 3): for dw in T.serial(0, 3): - tensor_2[0, (ax1 + (ax1_outer * 4)), ax2, ax3] = T.max( - tensor_2[0, (ax1 + (ax1_outer * 4)), ax2, ax3], - 
A_1[0, ((ax1 + (ax1_outer * 4)) + dh), (ax2 + dw), ax3], - ) + tensor_2[0, (ax1 + (ax1_outer*4)), ax2, ax3] = T.max(tensor_2[0, (ax1 + (ax1_outer*4)), ax2, ax3], A_1[0, ((ax1 + (ax1_outer*4)) + dh), (ax2 + dw), ax3]) for ax1_inner in T.serial(0, 4): for ax2_inner in T.serial(0, 8): for ax3_inner in T.serial(0, 16): - tensor_1[ - 0, - (ax1_inner + (ax1_outer * 4)), - ax2_inner, - ax3_inner, - ] = T.int8(0) + tensor_1[0, (ax1_inner + (ax1_outer*4)), ax2_inner, ax3_inner] = T.int8(0) for dh_1 in T.serial(0, 3): for dw_1 in T.serial(0, 5): - tensor_1[ - 0, - (ax1_inner + (ax1_outer * 4)), - ax2_inner, - ax3_inner, - ] = T.max( - tensor_1[ - 0, - (ax1_inner + (ax1_outer * 4)), - ax2_inner, - ax3_inner, - ], - tensor_2[ - 0, - ((ax1_inner + (ax1_outer * 4)) + dh_1), - (ax2_inner + dw_1), - ax3_inner, - ], - ) + tensor_1[0, (ax1_inner + (ax1_outer*4)), ax2_inner, ax3_inner] = T.max(tensor_1[0, (ax1_inner + (ax1_outer*4)), ax2_inner, ax3_inner], tensor_2[0, ((ax1_inner + (ax1_outer*4)) + dh_1), (ax2_inner + dw_1), ax3_inner]) + __tvm_meta__ = None @tvm.script.ir_module class PostRollingBuffer: @T.prim_func - def main( - A: T.handle, - tensor: T.handle, - tensor_2: T.Buffer( - [1, 10, 12, 16], - dtype="int8", - elem_offset=0, - align=64, - offset_factor=1, - ), - ) -> None: + def main(A: T.handle, tensor: T.handle) -> None: # function attr dict - T.func_attr( - { - "from_legacy_te_schedule": True, - "global_symbol": "main", - "tir.noalias": True, - } - ) - A_1 = T.match_buffer( - A, [1, 12, 14, 16], dtype="int8", elem_offset=0, align=64, offset_factor=1 - ) - tensor_1 = T.match_buffer( - tensor, [1, 8, 8, 16], dtype="int8", elem_offset=0, align=64, offset_factor=1 - ) + T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True}) + # buffer definition + tensor_2 = T.Buffer([1, 10, 12, 16], dtype="int8", elem_offset=0, align=64, offset_factor=1) + A_1 = T.match_buffer(A, [1, 12, 14, 16], dtype="int8", elem_offset=0, align=64, offset_factor=1) + tensor_1 = T.match_buffer(tensor, [1, 8, 8, 16], dtype="int8", elem_offset=0, align=64, offset_factor=1) # body T.realize(tensor_1[0:1, 0:8, 0:8, 0:16], "") T.realize(tensor_2[0:1, 0:6, 0:12, 0:16], "") @@ -309,51 +249,21 @@ def main( for ax1 in T.serial(0, 6): for ax2 in T.serial(0, 12): for ax3 in T.serial(0, 16): - if T.likely(((ax1_outer < 1) or (ax1 >= 2)), dtype="bool"): - tensor_2[ - 0, - T.floormod((ax1 + (ax1_outer * 4)), 6), - ax2, - ax3, - ] = T.int8(0) + if T.likely(((ax1_outer < 1) or (ax1 >= 2)), dtype='bool') : + tensor_2[0, T.floormod((ax1 + (ax1_outer*4)), 6), ax2, ax3] = T.int8(0) for dh in T.serial(0, 3): for dw in T.serial(0, 3): - if T.likely(((ax1_outer < 1) or (ax1 >= 2)), dtype="bool"): - tensor_2[ - 0, T.floormod((ax1 + (ax1_outer * 4)), 6), ax2, ax3 - ] = T.max( - tensor_2[ - 0, T.floormod((ax1 + (ax1_outer * 4)), 6), ax2, ax3 - ], - A_1[0, ((ax1 + (ax1_outer * 4)) + dh), (ax2 + dw), ax3], - ) + if T.likely(((ax1_outer < 1) or (ax1 >= 2)), dtype='bool'): + tensor_2[0, T.floormod((ax1 + (ax1_outer*4)), 6), ax2, ax3] = T.max(tensor_2[0, T.floormod((ax1 + (ax1_outer*4)), 6), ax2, ax3], A_1[0, ((ax1 + (ax1_outer*4)) + dh), (ax2 + dw), ax3]) for ax1_inner in T.serial(0, 4): for ax2_inner in T.serial(0, 8): for ax3_inner in T.serial(0, 16): - tensor_1[ - 0, - (ax1_inner + (ax1_outer * 4)), - ax2_inner, - ax3_inner, - ] = T.int8(0) + tensor_1[0, (ax1_inner + (ax1_outer*4)), ax2_inner, ax3_inner] = T.int8(0) for dh_1 in T.serial(0, 3): for dw_1 in T.serial(0, 5): - tensor_1[ - 0, - (ax1_inner + (ax1_outer * 
4)), - ax2_inner, - ax3_inner, - ] = T.max( - tensor_1[ - 0, (ax1_inner + (ax1_outer * 4)), ax2_inner, ax3_inner - ], - tensor_2[ - 0, - T.floormod(((ax1_inner + (ax1_outer * 4)) + dh_1), 6), - (ax2_inner + dw_1), - ax3_inner, - ], - ) + tensor_1[0, (ax1_inner + (ax1_outer*4)), ax2_inner, ax3_inner] = T.max(tensor_1[0, (ax1_inner + (ax1_outer*4)), ax2_inner, ax3_inner], tensor_2[0, T.floormod(((ax1_inner + (ax1_outer*4)) + dh_1), 6), (ax2_inner + dw_1), ax3_inner]) + __tvm_meta__ = None +# fmt: on def test_rolling_buffer_ir_transform(): diff --git a/tests/python/tir-transform/test_tir_transform_lower_cross_thread_reduction.py b/tests/python/tir-transform/test_tir_transform_lower_cross_thread_reduction.py index 35b4d55ea51d..aa55b25f1668 100644 --- a/tests/python/tir-transform/test_tir_transform_lower_cross_thread_reduction.py +++ b/tests/python/tir-transform/test_tir_transform_lower_cross_thread_reduction.py @@ -116,8 +116,7 @@ def no_normal_reduction(a: T.handle, b: T.handle) -> None: B[vi] = B[vi] + A[vi, vk] -# complains that k is defined outside of a block -@T.prim_func(check_well_formed=False) +@T.prim_func def lowered_no_normal_reduction(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, [128, 128], dtype="float32") B = T.match_buffer(b, [128], dtype="float32") @@ -163,8 +162,7 @@ def two_bound_loops(a: T.handle, b: T.handle) -> None: B[vi] = B[vi] + A[vi, vk] -# complains that ko is defined outside of a block -@T.prim_func(check_well_formed=False) +@T.prim_func def lowered_two_bound_loops(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, [128, 128], dtype="float32") B = T.match_buffer(b, [128], dtype="float32") @@ -901,8 +899,7 @@ def reducer_max(a: T.handle, b: T.handle) -> None: B[vi] = T.max(B[vi], A[vi, vk]) -# complains that k is defined outside of a block -@T.prim_func(check_well_formed=False) +@T.prim_func def lowered_reducer_max(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, [128, 128], dtype="float32") B = T.match_buffer(b, [128], dtype="float32") @@ -945,8 +942,7 @@ def zero_rank_buffer(a: T.handle, b: T.handle) -> None: B[()] = B[()] + A[vk] -# complains that k is defined outside of a block -@T.prim_func(check_well_formed=False) +@T.prim_func def lowered_zero_rank_buffer(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, [128], dtype="float32") B = T.match_buffer(b, [], dtype="float32") @@ -1576,8 +1572,7 @@ def thread_broadcast_1(A: T.Buffer((256, 256), "float32"), B: T.Buffer((256,), " B[vi] = temp_local[vi] + T.float32(1) -# complains that k is defined outside of a block -@T.prim_func(check_well_formed=False) +@T.prim_func def lowered_thread_broadcast_1(A: T.Buffer((256, 256), "float32"), B: T.Buffer((256,), "float32")): temp_local = T.alloc_buffer((256,), scope="local") cross_thread_temp_local = T.alloc_buffer((1,), strides=(1,), scope="local") @@ -1750,8 +1745,7 @@ def no_thread_broadcast(A: T.Buffer((256, 256), "float32"), B: T.Buffer((256, 25 B[vi, vj] = A[vi, vj] + temp_2_local[0] -# complains that k is defined outside of a block -@T.prim_func(check_well_formed=False) +@T.prim_func def lowered_no_thread_broadcast( A: T.Buffer((256, 256), "float32"), B: T.Buffer((256, 256), "float32") ): diff --git a/tests/python/tir-transform/test_tir_transform_lower_match_buffer.py b/tests/python/tir-transform/test_tir_transform_lower_match_buffer.py index 410269ffae5c..7dc164496501 100644 --- a/tests/python/tir-transform/test_tir_transform_lower_match_buffer.py +++ b/tests/python/tir-transform/test_tir_transform_lower_match_buffer.py @@ -466,8 +466,7 
@@ def fail_match_store(a: T.handle) -> None: sub_A[()] = 1 -# well-formed checker complains about redefinition of a stride variable -@T.prim_func(check_well_formed=False) +@T.prim_func def fail_buffer_bind(a: T.handle) -> None: A = T.match_buffer(a, (8, 8)) for i, j in T.grid(8, 2): @@ -480,8 +479,7 @@ def fail_buffer_bind(a: T.handle) -> None: sub_A[i, j * 4 + jj] = 1 -# well-formed checker complains about redefinition of a stride variable -@T.prim_func(check_well_formed=False) +@T.prim_func def fail_match_func_param(a: T.handle, m: T.handle, n: T.handle) -> None: A = T.match_buffer(a, (8, 8)) for i, j in T.grid(8, 2): diff --git a/tests/python/tir-transform/test_tir_transform_merge_dynamic_shared_memory_allocations.py b/tests/python/tir-transform/test_tir_transform_merge_dynamic_shared_memory_allocations.py index 8661843d39c1..efe2944aaa48 100644 --- a/tests/python/tir-transform/test_tir_transform_merge_dynamic_shared_memory_allocations.py +++ b/tests/python/tir-transform/test_tir_transform_merge_dynamic_shared_memory_allocations.py @@ -456,7 +456,7 @@ def func( class TestSimpleAllocNoReuse(tvm.testing.CompareBeforeAfter): """Test alloc and free within the same scope.""" - transform = tvm.tir.transform.MergeSharedMemoryAllocations() + transform = tvm.tir.transform.MergeDynamicSharedMemoryAllocations() def before(self): @T.prim_func @@ -485,7 +485,7 @@ def func(): class TestSimpleAllocReuse(tvm.testing.CompareBeforeAfter): """Test alloc and free within the same scope with a reuse chance.""" - transform = tvm.tir.transform.MergeSharedMemoryAllocations() + transform = tvm.tir.transform.MergeDynamicSharedMemoryAllocations() def before(self): @T.prim_func diff --git a/tests/python/tir-transform/test_tir_transform_simplify.py b/tests/python/tir-transform/test_tir_transform_simplify.py index f7887bc61137..6bad817c4955 100644 --- a/tests/python/tir-transform/test_tir_transform_simplify.py +++ b/tests/python/tir-transform/test_tir_transform_simplify.py @@ -142,8 +142,6 @@ class BaseBeforeAfter(tvm.testing.CompareBeforeAfter): apply_constraints_to_boolean_branches = False propagate_knowns_to_prove_conditional = False propagate_knowns_to_simplify_expressions = False - # from base class - check_well_formed = False def transform(self): def inner(mod): @@ -652,8 +650,7 @@ class TestRemoveTransitivelyProvableCondition(BaseBeforeAfter): def before(self, test_case): priors, postulate, _ = test_case - # well formed checker complains of undefined variables in condition - @T.prim_func(check_well_formed=False) + @T.prim_func def func(A: T.Buffer(1, "bool")): if priors: A[0] = postulate @@ -669,8 +666,7 @@ def expected(self, test_case): if provable: - # well formed checker complains of undefined variables in condition - @T.prim_func(check_well_formed=False) + @T.prim_func def func(A: T.Buffer(1, "bool")): if priors: A[0] = True @@ -680,8 +676,7 @@ def func(A: T.Buffer(1, "bool")): else: postulate = analyzer.canonical_simplify(postulate) - # well formed checker complains of undefined variables in condition - @T.prim_func(check_well_formed=False) + @T.prim_func def func(A: T.Buffer(1, "bool")): if priors: A[0] = postulate @@ -1039,8 +1034,7 @@ class TestMostRestrictiveConditional(BaseBeforeAfter): def before(self, test_case): priors, expr_before, _ = test_case - # well formed checker complains of undefined variables in condition - @T.prim_func(check_well_formed=False) + @T.prim_func def func(A: T.Buffer(1, "bool")): if priors: A[0] = expr_before @@ -1051,8 +1045,7 @@ def func(A: T.Buffer(1, "bool")): def 
expected(self, test_case): priors, _, expr_after = test_case - # well formed checker complains of undefined variables in condition - @T.prim_func(check_well_formed=False) + @T.prim_func def func(A: T.Buffer(1, "bool")): if priors: A[0] = expr_after diff --git a/tests/python/tir-transform/test_tir_transform_storage_flatten.py b/tests/python/tir-transform/test_tir_transform_storage_flatten.py index 8ddfbb5adfd3..f09645462366 100644 --- a/tests/python/tir-transform/test_tir_transform_storage_flatten.py +++ b/tests/python/tir-transform/test_tir_transform_storage_flatten.py @@ -153,7 +153,7 @@ def main(): @T.prim_func def tir_func(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, [2, 2]) - B = T.match_buffer(b, [2, 2]) + B = T.match_buffer(a, [2, 2]) A[0, 1] = B[1, 1] diff --git a/tests/python/tir-usmp/test_tir_usmp_algo.py b/tests/python/tir-usmp/test_tir_usmp_algo.py index b9cfde485633..265e6fe5d5d5 100644 --- a/tests/python/tir-usmp/test_tir_usmp_algo.py +++ b/tests/python/tir-usmp/test_tir_usmp_algo.py @@ -359,6 +359,7 @@ def run_model(input: T.handle, output: T.handle) -> None: T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract", input, T.lookup_param("p0", dtype="handle"), sid_9, dtype="int32")) T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast", sid_9, T.lookup_param("p1", dtype="handle"), T.lookup_param("p2", dtype="handle"), sid_8, dtype="int32")) T.evaluate(T.call_extern("tvmgen_default_fused_nn_max_pool2d_cast", sid_8, output, dtype="int32")) + __tvm_meta__ = None # fmt: on @@ -529,6 +530,7 @@ def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast(place Conv2dOutput[ff] = Conv2dOutput[ff] + T.cast(PaddedInput[ax0_ax1_fused_ax2_fused * 64 + rc], "int32") * T.cast(placeholder_8[rc * 64 + ff], "int32") for ax3_inner_1 in T.serial(0, 64): T_cast_3[ax0_ax1_fused_ax2_fused * 64 + ax3_inner_1] = T.cast(T.cast(T.max(T.min(T.q_multiply_shift(Conv2dOutput[ax3_inner_1] + placeholder_9[ax3_inner_1], 1843106743, 31, -6, dtype="int32"), 255), 0), "uint8"), "int16") + __tvm_meta__ = None # fmt: on diff --git a/tests/python/tir-usmp/test_tir_usmp_analysis_extract_bufferinfo.py b/tests/python/tir-usmp/test_tir_usmp_analysis_extract_bufferinfo.py index f8da0ef9f42d..662f86479c09 100644 --- a/tests/python/tir-usmp/test_tir_usmp_analysis_extract_bufferinfo.py +++ b/tests/python/tir-usmp/test_tir_usmp_analysis_extract_bufferinfo.py @@ -18,7 +18,6 @@ import sys import tvm -import tvm.testing from tvm import tir, script from tvm.ir import Range from tvm.script import tir as T @@ -172,6 +171,7 @@ def run_model(input: T.handle, output: T.handle) -> None: T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract", input, T.lookup_param("p0", dtype="handle"), sid_9, dtype="int32")) T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast", sid_9, T.lookup_param("p1", dtype="handle"), T.lookup_param("p2", dtype="handle"), sid_8, dtype="int32")) T.evaluate(T.call_extern("tvmgen_default_fused_nn_max_pool2d_cast", sid_8, output, dtype="int32")) + __tvm_meta__ = None # fmt: on @@ -245,6 +245,7 @@ def run_model(input: T.handle, output: T.handle) -> None: T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_1", input, T.lookup_param("p5", dtype="handle"), T.lookup_param("p6", dtype="handle"), output, dtype="int32")) +__tvm_meta__ = None # fmt: on @@ -285,6 +286,7 @@ def run_model(input: T.handle, output: T.handle) -> None: 
T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_1", input, T.lookup_param("p5", dtype="handle"), T.lookup_param("p6", dtype="handle"), output, dtype="int32")) +__tvm_meta__ = None # fmt: on @@ -651,6 +653,7 @@ def run_model(input: T.handle, output: T.handle) -> None: T.evaluate(T.call_extern("tvmgen_default_fused_nn_max_pool2d_cast_1", sid_4, sid_32, dtype="int32")) T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_fixed_point_multiply_cli_4464294615199028320__2", sid_32, T.lookup_param("p17", dtype="handle"), T.lookup_param("p18", dtype="handle"), sid_31, dtype="int32")) T.evaluate(T.call_extern("tvmgen_default_fused_concatenate", sid_2, sid_19, sid_25, sid_31, output, dtype="int32")) + __tvm_meta__ = None # fmt: on diff --git a/tests/python/tir-usmp/test_tir_usmp_transform_convert_pool_allocations_to_offsets.py b/tests/python/tir-usmp/test_tir_usmp_transform_convert_pool_allocations_to_offsets.py index 9e9fea7c8152..03929c5436be 100644 --- a/tests/python/tir-usmp/test_tir_usmp_transform_convert_pool_allocations_to_offsets.py +++ b/tests/python/tir-usmp/test_tir_usmp_transform_convert_pool_allocations_to_offsets.py @@ -509,6 +509,7 @@ def __tvm_main__(input: T.handle, global_workspace_0_var: T.handle("uint8"), out T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_1", sid_8_let, T.lookup_param("p5", dtype="handle"), T.lookup_param("p6", dtype="handle"), sid_7_let, global_workspace_0_buffer_var.data, dtype="int32")) T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_15934180698220515269_", sid_7_let, T.lookup_param("p7", dtype="handle"), T.lookup_param("p8", dtype="handle"), sid_6_let, global_workspace_0_buffer_var.data, dtype="int32")) T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_4200876283395191415_", sid_2_let, T.lookup_param("p1", dtype="handle"), T.lookup_param("p2", dtype="handle"), sid_6_let, output, global_workspace_0_buffer_var.data, dtype="int32")) + __tvm_meta__ = None # fmt: on diff --git a/tests/python/tir-usmp/test_tir_usmp_utils.py b/tests/python/tir-usmp/test_tir_usmp_utils.py index 635c9a760f87..0fece9dcd263 100644 --- a/tests/python/tir-usmp/test_tir_usmp_utils.py +++ b/tests/python/tir-usmp/test_tir_usmp_utils.py @@ -91,6 +91,7 @@ def tvmgen_default_run_model(input: T.handle, output: T.handle) -> None: T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract", input, T.lookup_param("p0", dtype="handle"), sid_9, dtype="int32")) T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast", sid_9, T.lookup_param("p1", dtype="handle"), T.lookup_param("p2", dtype="handle"), sid_8, dtype="int32")) T.evaluate(T.call_extern("tvmgen_default_fused_nn_max_pool2d_cast", sid_8, output, dtype="int32")) + __tvm_meta__ = None # fmt: on diff --git a/tests/python/tvmscript/test_tvmscript_parser_tir.py b/tests/python/tvmscript/test_tvmscript_parser_tir.py index 074603681f34..b2b534064605 100644 --- a/tests/python/tvmscript/test_tvmscript_parser_tir.py +++ b/tests/python/tvmscript/test_tvmscript_parser_tir.py @@ -272,7 +272,7 @@ def test_tir_starred_for_loop(): @T.prim_func(private=True) def starred(a: T.handle, b: T.handle): A = T.match_buffer(a, [*dims, 128], "int32") - B = T.match_buffer(b, dims, "int32") + B = T.match_buffer(a, dims, "int32") for *spatial, reduction in 
T.grid(*A.shape): with T.block("reduce"): with T.init(): @@ -282,7 +282,7 @@ def starred(a: T.handle, b: T.handle): @T.prim_func(private=True) def non_starred(a: T.handle, b: T.handle): A = T.match_buffer(a, [128, 128, 128], "int32") - B = T.match_buffer(b, [128, 128], "int32") + B = T.match_buffer(a, [128, 128], "int32") for i, j, k in T.grid(128, 128, 128): with T.block("reduce"): with T.init(): diff --git a/tests/python/tvmscript/test_tvmscript_roundtrip.py b/tests/python/tvmscript/test_tvmscript_roundtrip.py index 73bf200bb22a..85526f871bf1 100644 --- a/tests/python/tvmscript/test_tvmscript_roundtrip.py +++ b/tests/python/tvmscript/test_tvmscript_roundtrip.py @@ -27,9 +27,8 @@ def opt_gemm_normalize(): - @tvm.script.ir_module(check_well_formed=False) + @tvm.script.ir_module class Module: - # packedB is treated as undefined @T.prim_func def mmult(A: T.handle, B: T.handle, C: T.handle) -> None: # function attr dict @@ -181,9 +180,8 @@ def main(inputs: T.Buffer((64, 2, 4), "float32")) -> None: def opt_gemm_mod_host(): - @tvm.script.ir_module(check_well_formed=False) + @tvm.script.ir_module class Module: - # packedB is treated as undefined @T.prim_func def mmult( args: T.handle, @@ -480,7 +478,7 @@ def mmult( def opt_conv_tensorcore_normalize(): - @T.prim_func(check_well_formed=False) + @T.prim_func def func(A: T.handle, W: T.handle, Conv: T.handle) -> None: # function attr dict T.func_attr({"global_symbol": "default_function", "tir.noalias": True}) @@ -1600,7 +1598,7 @@ def func( ( ( ( - 1 <= (T.floordiv(bz, 14) + kh) + (1 <= (T.floordiv(bz, 14) + kh)) and ((T.floordiv(bz, 14) + kh) < 15) ) and (1 <= (ax2 + T.floormod(bz, 14))) @@ -2911,8 +2909,7 @@ def constant_folding(a: T.handle) -> None: def simplify_bracket(): - # uninitialized variables - @T.prim_func(check_well_formed=False) + @T.prim_func def simplify_bracket() -> None: a = T.int32() b = T.int32() @@ -3027,8 +3024,7 @@ def comm_reducer_multiple_reduce_groups(a: T.handle, b: T.handle) -> None: def multiple_commreducer(): - # normal_reduce_temp0 is treated as uninitialized value - @T.prim_func(check_well_formed=False) + @T.prim_func def multiple_commreducer() -> None: normal_reduce_temp0 = T.Buffer([1], dtype="float32", strides=[1], scope="local") normal_reduce_temp1 = T.Buffer([1], dtype="float32", strides=[1], scope="local") @@ -3048,8 +3044,7 @@ def multiple_commreducer() -> None: def func_div_mod(): - # not well-formed: free variables - @T.prim_func(check_well_formed=False) + @T.prim_func def func_div_mod(): a = T.int32() b = T.int32() @@ -3062,7 +3057,7 @@ def func_div_mod(): def test_div_mod(): func = func_div_mod() - rt_func = tvm.script.from_source(func.script(), check_well_formed=False) + rt_func = tvm.script.from_source(func.script()) tvm.ir.assert_structural_equal(func, rt_func, True) assert isinstance(func.body[0].value, tvm.tir.FloorDiv) @@ -3225,8 +3220,7 @@ def ctpop(A: T.Buffer((16,), "uint8"), B: T.Buffer((16,), "uint8")) -> None: def parse_bufferslice_as_range_bound(): - # apparently the use of i in the "outer" block when it is defined outside of a block is wrong - @T.prim_func(check_well_formed=False) + @T.prim_func def segment_sum( A_ptr: T.handle, B_ptr: T.handle, indptr_ptr: T.handle, n: T.int32, m: T.int32 ) -> None: @@ -3491,8 +3485,7 @@ def func() -> None: def bool_cast(): - # uninitialized var - @T.prim_func(check_well_formed=False) + @T.prim_func def func() -> None: a = T.bool() T.evaluate(T.bool(T.int32(0))) @@ -3615,8 +3608,7 @@ def func(): def let_stmt_value(): - # uninitialized var - 
@T.prim_func(check_well_formed=False) + @T.prim_func def func(): y = T.int32() with T.LetStmt(y) as x: @@ -3662,8 +3654,7 @@ def main(a: T.handle, b: T.handle): def merge_shape_var_def(): - # uninitialized vars - @T.prim_func(check_well_formed=False) + @T.prim_func def main(A: T.handle, B: T.handle): T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True}) m, n = T.int32(), T.int32() @@ -3881,8 +3872,8 @@ def undefined_data_ptr_in_decl_buffer(): Allocate/DeclBuffer pair, performing a round-trip through TVMScript should not introduce an Allocate node. """ - # uninitialized var - @T.prim_func(check_well_formed=False) + + @T.prim_func def func(): data_ptr = T.handle("float32") buf = T.decl_buffer(shape=[1], dtype="float32", data=data_ptr) @@ -3892,8 +3883,7 @@ def func(): def undefined_shape_in_decl_buffer(): - # uninitialized var - @T.prim_func(check_well_formed=False) + @T.prim_func def func(): size = T.int32() buf = T.decl_buffer(shape=[size], dtype="float32") @@ -3903,8 +3893,7 @@ def func(): def undefined_stride_in_decl_buffer(): - # uninitialized var - @T.prim_func(check_well_formed=False) + @T.prim_func def func(): stride = T.int32() buf = T.decl_buffer(shape=[1], dtype="float32", strides=[stride]) @@ -3914,8 +3903,7 @@ def func(): def undefined_elem_offset_in_decl_buffer(): - # uninitialized var - @T.prim_func(check_well_formed=False) + @T.prim_func def func(): elem_offset = T.int32() buf = T.decl_buffer(shape=[1], dtype="float32", elem_offset=elem_offset) @@ -4174,9 +4162,7 @@ def func(A: R.Object): def test_roundtrip(ir_generator): original = ir_generator() - after_roundtrip = tvm.script.from_source( - original.script(show_meta=True), check_well_formed=False - ) + after_roundtrip = tvm.script.from_source(original.script(show_meta=True)) tvm.ir.assert_structural_equal(original, after_roundtrip, True)