diff --git a/include/tvm/ir/memory_pools.h b/include/tvm/ir/memory_pools.h
index 3422c1fe719b..ee07841de412 100644
--- a/include/tvm/ir/memory_pools.h
+++ b/include/tvm/ir/memory_pools.h
@@ -220,6 +220,14 @@ class PoolInfoProperties : public ObjectRef {
 
 /* \brief Represents RW memory area */
 struct WorkspacePoolInfoNode : public PoolInfoNode {
+  void VisitAttrs(tvm::AttrVisitor* v) { PoolInfoNode::VisitAttrs(v); }
+
+  bool SEqualReduce(const WorkspacePoolInfoNode* other, SEqualReducer equal) const {
+    return PoolInfoNode::SEqualReduce(other, equal);
+  }
+
+  void SHashReduce(SHashReducer hash_reduce) const { PoolInfoNode::SHashReduce(hash_reduce); }
+
   static constexpr const char* _type_key = "ir.WorkspacePoolInfo";
   TVM_DECLARE_FINAL_OBJECT_INFO(WorkspacePoolInfoNode, PoolInfoNode);
 };
@@ -275,6 +283,22 @@ class ConstantInfo : public ObjectRef {
  * data from constant_info_array */
 struct ConstantPoolInfoNode : public PoolInfoNode {
   Array<ConstantInfo> constant_info_array;
+
+  void VisitAttrs(tvm::AttrVisitor* v) {
+    PoolInfoNode::VisitAttrs(v);
+    v->Visit("constant_info_array", &constant_info_array);
+  }
+
+  bool SEqualReduce(const ConstantPoolInfoNode* other, SEqualReducer equal) const {
+    return PoolInfoNode::SEqualReduce(other, equal) &&
+           equal(constant_info_array, other->constant_info_array);
+  }
+
+  void SHashReduce(SHashReducer hash_reduce) const {
+    PoolInfoNode::SHashReduce(hash_reduce);
+    hash_reduce(constant_info_array);
+  }
+
   static constexpr const char* _type_key = "ir.ConstantPoolInfo";
   TVM_DECLARE_FINAL_OBJECT_INFO(ConstantPoolInfoNode, PoolInfoNode);
 };
diff --git a/python/tvm/testing/aot.py b/python/tvm/testing/aot.py
index 583286bf273a..a87e61666d35 100644
--- a/python/tvm/testing/aot.py
+++ b/python/tvm/testing/aot.py
@@ -286,8 +286,8 @@ def _emit_main_workspace_pool_structs(main_file, workspace_pool_names, mod_name)
         f"struct {_mangle_name(mod_name, 'workspace_pools')} "
         f"{_mangle_name(mod_name, 'workspace_pools')} = {{"
     )
-    for workspace_pool_name in workspace_pool_names:
-        main_file.write(f"\t.{workspace_pool_name} = {workspace_pool_name},\n")
+    for pool_name, ref_prefix in workspace_pool_names.items():
+        main_file.write(f"\t.{pool_name} = {ref_prefix}{pool_name},\n")
     main_file.write("};\n")
 
 
@@ -507,19 +510,27 @@ def _create_main(
         compiled_model.executor_factory.executor_codegen_metadata
     )
     devices = compiled_model.executor_factory.get_devices()
-    workspace_pool_names = None
+    workspace_pool_names = {}
     if executor_codegen_metadata.pool_inputs:
-        workspace_pool_names = [
-            allocated_pool.pool_info.pool_name
+        workspace_pool_names = {
+            allocated_pool.pool_info.pool_name: "&"
+            if isinstance(
+                allocated_pool.pool_info, tvm.ir.memory_pools.ConstantPoolInfo
+            )
+            else ""
             for allocated_pool in dict(executor_codegen_metadata.pool_inputs).values()
             if not allocated_pool.pool_info.is_internal
-        ]
+        }
     _emit_main_device_structs(main_file, devices, model.name)
     if not use_workspace_io:
         _emit_main_workspace_pool_structs(main_file, workspace_pool_names, model.name)
         _emit_main_data_structs(main_file, model.inputs, model.outputs, model.name)
         _emit_main_c_interface_call(
-            main_file, devices, workspace_pool_names, model.name, use_workspace_io
+            main_file,
+            devices,
+            list(workspace_pool_names.keys()),
+            model.name,
+            use_workspace_io,
         )
     else:
         _emit_main_fake_packed_values(main_file)
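The harness-side effect of the aot.py change: workspace_pool_names is now a dict mapping each pool name to a reference prefix, "&" for constant pools and "" otherwise, and the struct emitter pastes that prefix in front of the pool symbol. A minimal C sketch of the generated main-file code this produces, assuming the default module name and the pool names used by the tests later in this patch (the struct member types are invented for illustration; the real ones come from the generated interface header):

    #include <stdint.h>

    /* Hypothetical interface-header struct; real member types come from
     * the generated tvmgen_default.h. */
    struct tvmgen_default_workspace_pools {
      uint8_t* my_memory_pool_1;
      const void* my_const_pool_1;
    };

    static uint8_t my_memory_pool_1[128];          /* RW workspace pool */
    static const uint8_t my_const_pool_1[8] = {0}; /* RO constant pool */

    /* A workspace pool decays to a pointer on its own; a constant pool is
     * emitted with the "&" prefix taken from the dict: */
    struct tvmgen_default_workspace_pools tvmgen_default_workspace_pools = {
        .my_memory_pool_1 = my_memory_pool_1,
        .my_const_pool_1 = &my_const_pool_1,
    };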
diff --git a/src/runtime/crt/microtvm_rpc_server/rpc_server.cc b/src/runtime/crt/microtvm_rpc_server/rpc_server.cc
index 1e5f625998ab..cd2fb03ed7f9 100644
--- a/src/runtime/crt/microtvm_rpc_server/rpc_server.cc
+++ b/src/runtime/crt/microtvm_rpc_server/rpc_server.cc
@@ -119,8 +119,6 @@ class MicroRPCServer {
         rpc_server_{&io_},
         is_running_{true} {}
 
-  void* operator new(size_t count, void* ptr) { return ptr; }
-
   void Initialize() {
     uint8_t initial_session_nonce = Session::kInvalidNonce;
     tvm_crt_error_t error =
diff --git a/src/target/source/codegen_params.cc b/src/target/source/codegen_params.cc
index b052727e5d2e..cd2bcd769c04 100644
--- a/src/target/source/codegen_params.cc
+++ b/src/target/source/codegen_params.cc
@@ -53,7 +53,8 @@ static int ComputeNumElementsPerRow(int one_element_size_bytes, int indent_chars
 }
 
 template <typename T, typename Enable = std::enable_if<std::is_integral<T>::value>>
-void PrintIntegralArray(void* data, size_t num_elements, int indent_chars, std::ostream& os) {
+void PrintIntegralArray(void* data, size_t num_elements, int indent_chars, std::ostream& os,
+                        const std::string& eol) {
   int one_element_size_bytes = (sizeof(T) / 4) + (2 /* "0x" */) + (2 /* ", " */);
   if (std::is_signed<T>::value) {
     one_element_size_bytes += 1;  // sign character
@@ -97,17 +98,18 @@ void PrintIntegralArray(void* data, size_t num_elements, int indent_chars, std::
       os << ", ";
     }
     if ((i % elements_per_row) == elements_per_row - 1) {
-      os << "\n";
+      os << eol;
    }
  }
 
   if ((num_elements % elements_per_row) != 0) {
-    os << "\n";
+    os << eol;
   }
 }
 
 template <typename T, typename Enable = std::enable_if<std::is_floating_point<T>::value>>
-void PrintFloatingPointArray(void* data, size_t num_elements, int indent_chars, std::ostream& os) {
+void PrintFloatingPointArray(void* data, size_t num_elements, int indent_chars, std::ostream& os,
+                             const std::string& eol) {
   // Floats and doubles are printed as hex but casted.
   int one_element_size_bytes = (sizeof(T) / 4) + (2 /* "0x" */) + (2 /* ", " */) + 1 /* sign */ +
                                1 /* decimal point */ + 1 /* exponent sign */;
@@ -149,16 +151,17 @@ void PrintFloatingPointArray(void* data, size_t num_elements, int indent_chars,
       os << ", ";
     }
     if ((i % elements_per_row) == elements_per_row - 1) {
-      os << "\n";
+      os << eol;
     }
   }
 
   if ((num_elements % elements_per_row) != 0) {
-    os << "\n";
+    os << eol;
   }
 }
 
-void NDArrayDataToC(::tvm::runtime::NDArray arr, int indent_chars, std::ostream& os) {
+void NDArrayDataToC(::tvm::runtime::NDArray arr, int indent_chars, std::ostream& os,
+                    const std::string& eol) {
   auto arr_type = arr.DataType();
   CHECK_EQ(arr_type.lanes(), 1) << "CodegenParams: only support generating 1-lane parameters; saw "
                                 << arr_type.lanes();
@@ -180,13 +183,13 @@ void NDArrayDataToC(::tvm::runtime::NDArray arr, int indent_chars, std::ostream&
           << "CodegenParams: only support generating 8-, 16-, 32-, or 64-bit integer params; saw "
           << arr_type.bits() << "-bit array";
       if (arr_type.bits() == 8) {
-        PrintIntegralArray<int8_t>(arr->data, num_elements, indent_chars, os);
+        PrintIntegralArray<int8_t>(arr->data, num_elements, indent_chars, os, eol);
       } else if (arr_type.bits() == 16) {
-        PrintIntegralArray<int16_t>(arr->data, num_elements, indent_chars, os);
+        PrintIntegralArray<int16_t>(arr->data, num_elements, indent_chars, os, eol);
       } else if (arr_type.bits() == 32) {
-        PrintIntegralArray<int32_t>(arr->data, num_elements, indent_chars, os);
+        PrintIntegralArray<int32_t>(arr->data, num_elements, indent_chars, os, eol);
      } else if (arr_type.bits() == 64) {
-        PrintIntegralArray<int64_t>(arr->data, num_elements, indent_chars, os);
+        PrintIntegralArray<int64_t>(arr->data, num_elements, indent_chars, os, eol);
      } else {
        CHECK(false) << "should not get here";
      }
@@ -199,13 +202,13 @@ void NDArrayDataToC(::tvm::runtime::NDArray arr, int indent_chars, std::ostream&
           << arr_type.bits() << "-bit array";
 
       if (arr_type.bits() == 8) {
-        PrintIntegralArray<uint8_t>(arr->data, num_elements, indent_chars, os);
+        PrintIntegralArray<uint8_t>(arr->data, num_elements, indent_chars, os, eol);
       } else if (arr_type.bits() == 16) {
-        PrintIntegralArray<uint16_t>(arr->data, num_elements, indent_chars, os);
+        PrintIntegralArray<uint16_t>(arr->data, num_elements, indent_chars, os, eol);
       } else if (arr_type.bits() == 32) {
-        PrintIntegralArray<uint32_t>(arr->data, num_elements, indent_chars, os);
+        PrintIntegralArray<uint32_t>(arr->data, num_elements, indent_chars, os, eol);
       } else if (arr_type.bits() == 64) {
-        PrintIntegralArray<uint64_t>(arr->data, num_elements, indent_chars, os);
+        PrintIntegralArray<uint64_t>(arr->data, num_elements, indent_chars, os, eol);
       } else {
         CHECK(false) << "should not get here";
       }
@@ -216,11 +219,11 @@ void NDArrayDataToC(::tvm::runtime::NDArray arr, int indent_chars, std::ostream&
       os.setf(std::ios::left, std::ios::adjustfield);
       if (arr_type.bits() == 16) {
         // NOTE: print types not widely supported by C as uint16_t.
-        PrintIntegralArray<uint16_t>(arr->data, num_elements, indent_chars, os);
+        PrintIntegralArray<uint16_t>(arr->data, num_elements, indent_chars, os, eol);
       } else if (arr_type.bits() == 32) {
-        PrintFloatingPointArray<float>(arr->data, num_elements, indent_chars, os);
+        PrintFloatingPointArray<float>(arr->data, num_elements, indent_chars, os, eol);
       } else if (arr_type.bits() == 64) {
-        PrintFloatingPointArray<double>(arr->data, num_elements, indent_chars, os);
+        PrintFloatingPointArray<double>(arr->data, num_elements, indent_chars, os, eol);
       } else {
         CHECK(false) << "CodegenParams: only support 32- or 64-bit floating point; saw "
                      << arr_type.bits() << "-bit array";
@@ -233,7 +236,7 @@ void NDArrayDataToC(::tvm::runtime::NDArray arr, int indent_chars, std::ostream&
       CHECK(arr_type.bits() == 16)
          << "CodegenParams: only support generating 16-bit bfloat params; saw "
          << arr_type.bits() << "-bit array";
-      PrintIntegralArray<uint16_t>(arr->data, num_elements, indent_chars, os);
+      PrintIntegralArray<uint16_t>(arr->data, num_elements, indent_chars, os, eol);
       break;
     }
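The eol parameter threaded through these printers only changes the row terminator. A small C illustration under assumed values (example_pool and EXAMPLE_POOL_DATA are invented names): with the default "\n" the emitted rows drop straight into an array initializer, while "\\\n" ends each row with a line continuation so the same body can become the replacement text of a single #define, which is what the interface header emitter below needs.

    #include <stdint.h>

    /* eol = "\n" (default): rows form a plain initializer body. */
    static const uint8_t example_pool_a[4] = {
        0x01, 0x02, 0x03, 0x04
    };

    /* eol = "\\\n": each row ends in a backslash, so the body survives
     * inside a macro definition. */
    #define EXAMPLE_POOL_DATA \
        0x01, 0x02, 0x03, 0x04
    static const uint8_t example_pool_b[4] = {EXAMPLE_POOL_DATA};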
diff --git a/src/target/source/codegen_params.h b/src/target/source/codegen_params.h
index cc126c767c58..6df800ed1721 100644
--- a/src/target/source/codegen_params.h
+++ b/src/target/source/codegen_params.h
@@ -27,6 +27,7 @@
 #include <tvm/runtime/ndarray.h>
 
 #include <iostream>
+#include <string>
 
 namespace tvm {
 namespace codegen {
@@ -44,7 +45,9 @@ namespace codegen {
  * \param indent_chars Number of chars to indent
 * \param os Output stream where the array data should be written.
+ * \param eol End-of-line sequence to emit after each row of array data.
  */
-void NDArrayDataToC(::tvm::runtime::NDArray arr, int indent_chars, std::ostream& os);
+void NDArrayDataToC(::tvm::runtime::NDArray arr, int indent_chars, std::ostream& os,
+                    const std::string& eol = "\n");
 
 }  // namespace codegen
 }  // namespace tvm
diff --git a/src/target/source/interface_c.cc b/src/target/source/interface_c.cc
index 19b37fe21c3a..fef81c9bd69f 100644
--- a/src/target/source/interface_c.cc
+++ b/src/target/source/interface_c.cc
@@ -29,9 +29,11 @@
 #include <tvm/runtime/registry.h>
 #include <tvm/tir/usmp/utils.h>
 
+#include <algorithm>
 #include <string>
 
 #include "../../relay/backend/name_transforms.h"
+#include "codegen_params.h"
 
 namespace tvm {
 namespace codegen {
@@ -90,8 +92,13 @@ class InterfaceCNode : public runtime::ModuleNode {
     for (const tir::usmp::AllocatedPoolInfo pool : pools_) {
       String pool_name = pool->pool_info->pool_name;
       Integer pool_size = pool->allocated_size;
-      EmitIntegerValueMacro(code, SanitizeName(pool_name) + " size",
-                            SanitizeName(pool_name) + "_WORKSPACE_POOL_SIZE", pool_size->value);
+      if (const auto* pool_info = pool->pool_info.as<ConstantPoolInfoNode>()) {
+        EmitConstantPool(code, SanitizeName(pool_name) + " initialization data", pool_info);
+      } else {
+        EmitIntegerValueMacro(code, SanitizeName(pool_name) + " size",
+                              SanitizeName(pool_name) + _macro_workspace_pool_size_postfix,
+                              pool_size->value);
+      }
     }
 
     EmitLowerHeaderGuard(code);
@@ -103,6 +110,10 @@ class InterfaceCNode : public runtime::ModuleNode {
   }
 
  private:
+  constexpr static const char* _macro_workspace_pool_size_postfix = "_WORKSPACE_POOL_SIZE";
+  constexpr static const char* _macro_constant_pool_size_postfix = "_CONSTANT_POOL_SIZE";
+  constexpr static const char* _macro_constant_pool_data_postfix = "_CONSTANT_POOL_DATA";
+
   void EmitUpperHeaderGuard(std::stringstream& code_stream) {
     std::string header_guard_name = ToCConstantStyle(PrefixGeneratedName({module_name_, "H"}));
     code_stream << "#ifndef " << header_guard_name << "_\n"
@@ -152,6 +163,43 @@ class InterfaceCNode : public runtime::ModuleNode {
     code_stream << "#define " << macro_name_prefixed << " " << macro_value << "\n";
   }
 
+  void EmitConstantPool(std::stringstream& code_, const std::string& brief_description,
+                        const ConstantPoolInfoNode* pool_info) {
+    EmitBrief(code_, brief_description);
+    std::string name_prefixed =
+        ToCConstantStyle(PrefixGeneratedName({module_name_, SanitizeName(pool_info->pool_name)}));
+
+    if (pool_info->constant_info_array.size() > 0) {
+      std::vector<ConstantInfo> const_info_vec(pool_info->constant_info_array.begin(),
+                                               pool_info->constant_info_array.end());
+      std::sort(const_info_vec.begin(), const_info_vec.end(),
+                [](const ConstantInfo& a, const ConstantInfo& b) {
+                  return a->byte_offset->value < b->byte_offset->value;
+                });
+      int64_t accumulated_pool_len =
+          const_info_vec.back()->byte_offset +
+          runtime::GetDataSize(*const_info_vec.back()->data.operator->());
+      const auto& accumulated_pool = runtime::NDArray::Empty(
+          {accumulated_pool_len}, DataType::UInt(8), const_info_vec.back()->data->device);
+      for (const auto& const_info : const_info_vec) {
+        const auto& data = const_info->data;
+        const auto& offs = const_info->byte_offset;
+        data.CopyToBytes(static_cast<uint8_t*>(accumulated_pool->data) + offs,
+                         runtime::GetDataSize(*data.operator->()));
+      }
+
+      code_ << "#define " << name_prefixed << _macro_constant_pool_size_postfix << " "
+            << accumulated_pool_len << "\n";
+      code_ << "#define " << name_prefixed << _macro_constant_pool_data_postfix << " \\\n";
+      codegen::NDArrayDataToC(accumulated_pool, 4, code_, "\\\n");
+      code_ << '\n';
+
+    } else {
+      LOG(FATAL) << "No constant data in constant pool found "
+                 << PrettyPrint(GetRef<PoolInfo>(pool_info));
+    }
+  }
+
   void EmitRunFunction(std::stringstream& code_stream) {
     std::string run_function = ToCVariableStyle(PrefixGeneratedName({module_name_, "run"}));
     std::string inputs_struct = ToCVariableStyle(PrefixGeneratedName({module_name_, "inputs"}));
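Tying the pieces together: for the two constants used by the C++ test further down (an int32 at byte offset 0 and a float64 at byte offset 16), EmitConstantPool computes the pool size as the last offset plus the last constant's size, 16 + 8 = 24 bytes, and emits roughly the following header section (byte values illustrative; gaps between constants appear as filler bytes of the accumulated pool):

    /*!
     * \brief my_constant_pool initialization data
     */
    #define TVMGEN_ULTIMATE_CAT_SPOTTER_MY_CONSTANT_POOL_CONSTANT_POOL_SIZE 24
    #define TVMGEN_ULTIMATE_CAT_SPOTTER_MY_CONSTANT_POOL_CONSTANT_POOL_DATA \
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00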
diff --git a/src/target/source/source_module.cc b/src/target/source/source_module.cc
index 41269cab64de..6495c39ef140 100644
--- a/src/target/source/source_module.cc
+++ b/src/target/source/source_module.cc
@@ -292,7 +292,7 @@ class CSourceCrtMetadataModuleNode : public runtime::ModuleNode {
   }
 
   void GenerateConstantBuffer(const ConstantPoolInfoNode* pool_info, size_t allocated_size) {
-    size_t offset = 0;
+    size_t ord = 0;
     if (pool_info->constant_info_array.size() > 0) {
       // Pool is RO, form an initialized struct
       code_ << "__attribute__((section(\".rodata.tvm\"), ";
@@ -312,8 +312,8 @@ class CSourceCrtMetadataModuleNode : public runtime::ModuleNode {
                                               std::multiplies<int64_t>());
         code_ << "  ";
         codegen_c_base_.PrintType(data.DataType(), code_);
-        code_ << " " << const_info->name_hint << "[" << num_elements
-              << "] __attribute__((packed, aligned(" << metadata_->constant_alignment << ")));";
+        code_ << " " << const_info->name_hint << "[" << num_elements << "] __attribute__(("
+              << (ord++ ? "packed, " : "") << "aligned(" << metadata_->constant_alignment << ")));";
         code_ << " // " << num_elements * data.DataType().bytes()
               << " bytes, aligned offset: " << offs << "\n";
       }
@@ -326,7 +326,7 @@ class CSourceCrtMetadataModuleNode : public runtime::ModuleNode {
        code_ << "  },\n";
      }
      code_ << "};";
-     code_ << "// of total size " << allocated_size << " bytes, aligned: " << offset << " bytes\n";
+     code_ << "// of total size " << allocated_size << " bytes\n";
    } else {
      LOG(FATAL) << "No constant data in constant pool found "
                 << PrettyPrint(GetRef<PoolInfo>(pool_info));
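For the internal-pool path in source_module.cc, the two fixes are that only members after the first are marked packed (so the leading member keeps the pool's base alignment) and that the trailer reporting the leftover offset variable is dropped. A hypothetical rendering of the generated struct with two constants and a 16-byte alignment (names and values invented):

    #include <stdint.h>

    __attribute__((section(".rodata.tvm"), aligned(16))) static struct my_const_pool {
      int32_t const1[1] __attribute__((aligned(16)));          // 4 bytes, aligned offset: 0
      int32_t const2[4] __attribute__((packed, aligned(16)));  // 16 bytes, aligned offset: 16
    } my_const_pool = {
        {0x2a},
        {0x01, 0x02, 0x03, 0x04},
    };  // of total size 32 bytes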
diff --git a/tests/cpp/target/source/interface_c_test.cc b/tests/cpp/target/source/interface_c_test.cc
index 4fb9df3d0557..d575bfeaf0c7 100644
--- a/tests/cpp/target/source/interface_c_test.cc
+++ b/tests/cpp/target/source/interface_c_test.cc
@@ -24,6 +24,7 @@
 #include <tvm/runtime/ndarray.h>
 #include <tvm/tir/usmp/utils.h>
 
+using ::testing::ContainsRegex;
 using ::testing::HasSubstr;
 
 namespace tvm {
@@ -126,6 +127,48 @@ TEST(InterfaceAPI, ContainsRunFunctionWithWorkspacePools) {
   ASSERT_THAT(header_source, HasSubstr(run_function.str()));
 }
 
+TEST(InterfaceAPI, ContainsRunFunctionWithWorkspaceAndConstantPools) {
+  std::stringstream run_function;
+
+  run_function << "/*!\n"
+               << " * \\brief entrypoint function for TVM module \"ultimate_cat_spotter\"\n"
+               << " * \\param inputs Input tensors for the module \n"
+               << " * \\param outputs Output tensors for the module \n"
+               << " * \\param workspace_pools Workspace memory pool pointers for the module \n"
+               << " */\n"
+               << "int32_t tvmgen_ultimate_cat_spotter_run(\n"
+               << "  struct tvmgen_ultimate_cat_spotter_inputs* inputs,\n"
+               << "  struct tvmgen_ultimate_cat_spotter_outputs* outputs,\n"
+               << "  struct tvmgen_ultimate_cat_spotter_workspace_pools* workspace_pools\n"
+               << ");\n";
+
+  PoolInfo pool_info = WorkspacePoolInfo("my_memory_pool", {});
+  PoolInfo const_info = ConstantPoolInfo(
+      "my_constant_pool", {},
+      {{"const1", 0, runtime::NDArray::Empty({1}, DataType::Int(32), {kDLCPU, 0})},
+       {"const2", 16, runtime::NDArray::Empty({1}, DataType::Float(64), {kDLCPU, 0})}});
+  tir::usmp::AllocatedPoolInfo allocated_pool_info =
+      tir::usmp::AllocatedPoolInfo(pool_info, 100000);
+  tir::usmp::AllocatedPoolInfo allocated_const_info =
+      tir::usmp::AllocatedPoolInfo(const_info, 100000);
+  runtime::Module test_module =
+      InterfaceCCreate("ultimate_cat_spotter", {"input"}, {"output"},
+                       {allocated_pool_info, allocated_const_info}, {}, {}, 0);
+  std::string header_source = test_module->GetSource();
+
+  ASSERT_THAT(header_source, HasSubstr(run_function.str()));
+  ASSERT_THAT(
+      header_source,
+      HasSubstr("#define TVMGEN_ULTIMATE_CAT_SPOTTER_MY_CONSTANT_POOL_CONSTANT_POOL_SIZE 24"));
+  ASSERT_THAT(
+      header_source,
+      ContainsRegex(
+          "#define TVMGEN_ULTIMATE_CAT_SPOTTER_MY_CONSTANT_POOL_CONSTANT_POOL_DATA \\\\\\\n    "
+          "0x\\w\\w, 0x\\w\\w, 0x\\w\\w, 0x\\w\\w, 0x\\w\\w, 0x\\w\\w, 0x\\w\\w, 0x\\w\\w, "
+          "0x\\w\\w, 0x\\w\\w, 0x\\w\\w, 0x\\w\\w, 0x\\w\\w, "
+          "0x\\w\\w, 0x\\w\\w, 0x\\w\\w, \\\\\\\n    0x\\w\\w, 0x\\w\\w, 0x\\w\\w, 0x\\w\\w, "
+          "0x\\w\\w, 0x\\w\\w, 0x\\w\\w, 0x\\w\\w\\\\\\\n"));
+}
+
 TEST(InterfaceAPI, ContainsRunFunctionWithWorkspacePoolsAndDevices) {
   std::stringstream run_function;
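The Python tests below splice the generated macros into the AOT test-runner prologue; after the C preprocessor runs, the application-side pool definitions come out roughly as follows (module name "default" assumed, matching the helper functions in the test file; the TVMGEN_* and TVM_RUNTIME_* macros come from the generated interface header and the TVM runtime headers):

    /* The workspace pool is an uninitialized .bss.noinit buffer sized by the
     * *_WORKSPACE_POOL_SIZE macro; the constant pool is a .rodata.tvm buffer
     * initialized from the *_CONSTANT_POOL_DATA byte list. */
    __attribute__((section(".bss.noinit"), aligned(TVM_RUNTIME_ALLOC_ALIGNMENT_BYTES)))
    static uint8_t my_memory_pool_1[TVMGEN_DEFAULT_MY_MEMORY_POOL_1_WORKSPACE_POOL_SIZE];
    __attribute__((section(".rodata.tvm"), aligned(TVM_RUNTIME_CONST_ALLOC_ALIGNMENT_BYTES)))
    static uint8_t my_const_pool_1[TVMGEN_DEFAULT_MY_CONST_POOL_1_CONSTANT_POOL_SIZE] = {
        TVMGEN_DEFAULT_MY_CONST_POOL_1_CONSTANT_POOL_DATA
    };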
diff --git a/tests/python/relay/aot/test_crt_aot_usmp.py b/tests/python/relay/aot/test_crt_aot_usmp.py
index a2f9ee5eb0f7..0d3426dceeaf 100644
--- a/tests/python/relay/aot/test_crt_aot_usmp.py
+++ b/tests/python/relay/aot/test_crt_aot_usmp.py
@@ -18,16 +18,22 @@
 from collections import OrderedDict
 import re
-
 import numpy as np
 import pytest
 
 import tvm
 from tvm import relay
+from tvm.relay import testing  # pylint: disable=W0611
 from tvm.relay import transform
 from tvm.relay.op.annotation import compiler_begin, compiler_end
 from tvm.relay.backend import Executor, Runtime
-from tvm import WorkspaceMemoryPools, WorkspacePoolInfo, PoolInfoProperties
+from tvm import (
+    WorkspaceMemoryPools,
+    ConstantMemoryPools,
+    WorkspacePoolInfo,
+    ConstantPoolInfo,
+    PoolInfoProperties,
+)
 from tvm.micro import model_library_format as mlf
 from tvm.micro.testing.aot_test_utils import parametrize_aot_options
 from tvm.testing.aot import (
@@ -335,13 +341,31 @@ def test_tflite_model_u1_usecase(model_url, usmp_algo, workspace_size, constant_
 def _get_workspace_size_define_macro(pool_name: str, model_name="default") -> str:
     """This function converts pool names to compiler generated
-    workspace pool size macros"""
+    pool size macros"""
 
     prefix = "TVMGEN_" + model_name.upper() + "_"
     postfix = "_WORKSPACE_POOL_SIZE"
     return prefix + pool_name.upper() + postfix
 
 
+def _get_constant_size_define_macro(pool_name: str, model_name="default") -> str:
+    """This function converts pool names to compiler generated
+    constant pool size macros"""
+
+    prefix = "TVMGEN_" + model_name.upper() + "_"
+    postfix = "_CONSTANT_POOL_SIZE"
+    return prefix + pool_name.upper() + postfix
+
+
+def _get_constant_data_define_macro(pool_name: str, model_name="default") -> str:
+    """This function converts pool names to compiler generated
+    constant pool data macros"""
+
+    prefix = "TVMGEN_" + model_name.upper() + "_"
+    postfix = "_CONSTANT_POOL_DATA"
+    return prefix + pool_name.upper() + postfix
+
+
 def _add_module_prefix(suffix: str, model_name="default") -> str:
     """A helper function create struct types"""
     return "tvmgen_" + model_name + "_" + suffix
@@ -399,6 +423,169 @@ def test_tflite_model_u3_usecase_single_external_pool(model_url, usmp_algo):
     )
 
 
+@pytest.mark.parametrize(
+    "usmp_algo",
+    [("greedy_by_size"), ("hill_climb")],
+)
+def test_tflite_model_u3_usecase_conv2d_var_cons(usmp_algo):
+    """This checks for inference using workspace and constant pools placed in the application"""
+
+    mod = tvm.parser.fromtext(
+        """\
+        #[version = "0.0.5"]
+        def @main(%data : Tensor[(1, 3, 64, 64), uint8], %weight : Tensor[(3, 3, 5, 5), int8]) {
+            %1 = nn.conv2d(
+                 %data,
+                 %weight,
+                 padding=[2, 2],
+                 channels=3,
+                 kernel_size=[5, 5],
+                 data_layout="NCHW",
+                 kernel_layout="OIHW",
+                 out_dtype="int32");
+            %2 = cast(nn.max_pool2d(%1, pool_size=[3, 3]), dtype="int8");
+            %3 = nn.conv2d(
+                 %2,
+                 %weight,
+                 padding=[2, 2],
+                 channels=3,
+                 kernel_size=[5, 5],
+                 data_layout="NCHW",
+                 kernel_layout="OIHW",
+                 out_dtype="int32");
+            %4 = nn.max_pool2d(%3, pool_size=[3, 3]);
+            %4
+        }
+        """
+    )
+
+    main_func = mod["main"]
+    shape_dict = {p.name_hint: p.checked_type.concrete_shape for p in main_func.params}
+    type_dict = {p.name_hint: p.checked_type.dtype for p in main_func.params}
+
+    weight_data = np.random.randint(1, 255, shape_dict["weight"]).astype(type_dict["weight"])
+    input_data = np.ones(shape_dict["data"]).astype(type_dict["data"])
+    params = {"weight": weight_data}
+    inputs = {"data": input_data}
+
+    use_unpacked_api = True
+    interface_api = "c"
+
+    target = tvm.target.Target("c")
+    workspace_mem_pools = WorkspaceMemoryPools(
+        [
+            WorkspacePoolInfo(
+                "my_memory_pool_1", [target], PoolInfoProperties(size_hint_bytes=8500000)
+            ),
+        ]
+    )
+
+    constant_mem_pools = ConstantMemoryPools(
+        [
+            ConstantPoolInfo("my_const_pool_1", [target], []),
+        ]
+    )
+
+    test_runner = AOTTestRunner(
+        pass_config={"tir.usmp.enable": True, "tir.usmp.algorithm": usmp_algo},
+        prologue=f"""
+        __attribute__((section(".bss.noinit"), aligned(TVM_RUNTIME_ALLOC_ALIGNMENT_BYTES)))
+        static uint8_t my_memory_pool_1[{_get_workspace_size_define_macro("my_memory_pool_1")}];
+        __attribute__((section(".rodata.tvm"), aligned(TVM_RUNTIME_CONST_ALLOC_ALIGNMENT_BYTES)))
+        static uint8_t my_const_pool_1[{_get_constant_size_define_macro("my_const_pool_1")}] = {{ {_get_constant_data_define_macro("my_const_pool_1")} }};
+        """,
+    )
+
+    output_list = generate_ref_data(mod, inputs, params)
+
+    compiled_test_mods = compile_models(
+        AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
+        interface_api=interface_api,
+        use_unpacked_api=use_unpacked_api,
+        pass_config=test_runner.pass_config,
+        workspace_memory_pools=workspace_mem_pools,
+        constant_memory_pools=constant_mem_pools,
+        target=target,
+    )
+
+    for compiled_model in compiled_test_mods:
+        _check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)
+
+    run_and_check(
+        models=compiled_test_mods,
+        runner=test_runner,
+        interface_api=interface_api,
+    )
+
+
+@pytest.mark.parametrize(
+    "model_url, usmp_algo",
+    [
+        (MOBILENET_V1_URL, "greedy_by_size"),
+    ],
+)
+def test_tflite_model_u3_usecase_var_cons_ext_pools(model_url, usmp_algo):
+    """This checks for inference using one external workspace pool and one
+    external constant pool placed in the application"""
+    pytest.importorskip("tflite")
+
+    import tvm.relay.testing.tf as tf_testing  # pylint: disable=import-outside-toplevel
+
+    use_unpacked_api = True
+    interface_api = "c"
+
+    target = tvm.target.Target("c")
+    workspace_mem_pools = WorkspaceMemoryPools(
+        [
+            WorkspacePoolInfo(
+                "my_memory_pool_1", [target], PoolInfoProperties(size_hint_bytes=8500000)
+            ),
+        ]
+    )
+
+    constant_mem_pools = ConstantMemoryPools(
+        [
+            ConstantPoolInfo("my_const_pool_1", [target], []),
+        ]
+    )
+
+    test_runner = AOTTestRunner(
+        pass_config={"tir.usmp.enable": True, "tir.usmp.algorithm": usmp_algo},
+        prologue=f"""
+        __attribute__((section(".bss.noinit"), aligned(TVM_RUNTIME_ALLOC_ALIGNMENT_BYTES)))
+        static uint8_t my_memory_pool_1[{_get_workspace_size_define_macro("my_memory_pool_1")}];
+        __attribute__((section(".rodata.tvm"), aligned(TVM_RUNTIME_CONST_ALLOC_ALIGNMENT_BYTES)))
+        static uint8_t my_const_pool_1[{_get_constant_size_define_macro("my_const_pool_1")}] = {{ {_get_constant_data_define_macro("my_const_pool_1")} }};
+        """,
+    )
+
+    tflite_model_file = tf_testing.get_workload_official(
+        model_url[0],
+        model_url[1],
+    )
+    mod, inputs, params = create_relay_module_and_inputs_from_tflite_file(tflite_model_file)
+    output_list = generate_ref_data(mod, inputs, params)
+
+    compiled_test_mods = compile_models(
+        AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
+        interface_api=interface_api,
+        use_unpacked_api=use_unpacked_api,
+        pass_config=test_runner.pass_config,
+        workspace_memory_pools=workspace_mem_pools,
+        constant_memory_pools=constant_mem_pools,
+        target=target,
+    )
+
+    for compiled_model in compiled_test_mods:
+        _check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)
+
+    run_and_check(
+        models=compiled_test_mods,
+        runner=test_runner,
+        interface_api=interface_api,
+    )
+
+
 @pytest.mark.parametrize(
     "model_url, usmp_algo",
     [
diff --git a/tests/python/unittest/test_crt.py b/tests/python/unittest/test_crt.py
index 84bb17bf7d44..b11f7a5fac5e 100644
--- a/tests/python/unittest/test_crt.py
+++ b/tests/python/unittest/test_crt.py
@@ -15,16 +15,14 @@
 # specific language governing permissions and limitations
 # under the License.
 
+import numpy as np
 import os
 import pathlib
-import pytest
 import shutil
-import json
+import pytest
 
 pytest.importorskip("pty")
-import sys
-import numpy as np
 import pytest
 
 import tvm
@@ -34,9 +32,6 @@
 from tvm.relay.backend import Runtime
 from tvm.relay.backend import Executor
 
-from tvm.topi.utils import get_const_tuple
-from tvm.topi.testing import conv2d_nchw_python
-
 BUILD = True
 DEBUG = False
@@ -234,6 +229,84 @@ def do_test():
     do_test()
 
 
+enable_usmp, expect_exception = tvm.testing.parameters((True, True), (False, False))
+
+
+@tvm.testing.requires_micro
+def test_aot_executor_usmp_const_pool(enable_usmp, expect_exception):
+    """Test the AOT executor with microTVM using USMP.
+    The test should fail when a const pool is supplied to the executor,
+    as const pools are currently not supported.
+    """
+    ws_root = pathlib.Path(os.path.dirname(__file__) + "/micro-workspace-usmp")
+    if ws_root.exists():
+        shutil.rmtree(ws_root)
+    temp_dir = tvm.contrib.utils.tempdir(ws_root.resolve())
+    relay_mod = tvm.parser.fromtext(
+        """
+      #[version = "0.0.5"]
+      def @main(%a : Tensor[(1, 2), uint8], %b : Tensor[(1, 2), uint8], %c : Tensor[(1, 2), uint8]) {
+          %0 = %a + %b;
+          %1 = %0 + %c;
+          %1
+      }"""
+    )
+
+    runtime = Runtime("crt", {"system-lib": True})
+    executor = Executor("aot")
+    main_func = relay_mod["main"]
+    type_dict = {p.name_hint: p.checked_type.dtype for p in main_func.params}
+    B_np = np.array([[4, 7]], dtype="uint8").astype(type_dict["b"])
+    C_np = np.array([[8, 9]], dtype="uint8").astype(type_dict["c"])
+    params = {"c": C_np}
+    with tvm.transform.PassContext(
+        opt_level=3, config={"tir.disable_vectorize": True, "tir.usmp.enable": enable_usmp}
+    ):
+        factory = tvm.relay.build(
+            relay_mod,
+            target=TARGET,
+            runtime=runtime,
+            executor=executor,
+            params=params,
+        )
+
+    def do_test():
+        try:
+            aot_executor = tvm.runtime.executor.aot_executor.AotModule(
+                sess._rpc.get_function("tvm.aot_executor.create")(
+                    sess.get_system_lib(), sess.device, "default"
+                )
+            )
+        except tvm._ffi.base.TVMError as e:
+            if expect_exception:
+                return
+            else:
+                raise e
+
+        assert aot_executor.get_input_index("a") == 0
+        assert aot_executor.get_input_index("b") == 1
+
+        assert aot_executor.get_num_inputs() == 2
+        assert aot_executor.get_num_outputs() == 1
+
+        A_np = np.array([[2, 3]], dtype="uint8")
+        B_np = np.array([[4, 7]], dtype="uint8")
+
+        A_data = aot_executor.get_input("a").copyfrom(A_np)
+        B_data = aot_executor.get_input("b").copyfrom(B_np)
+        aot_executor.run()
+
+        out = aot_executor.get_output(0)
+        assert (out.numpy() == np.array([14, 19])).all()
+
+        B_np_new = np.array([[5, 8]])
+        aot_executor.set_input("b", B_np_new)
+        assert (B_data.numpy() == B_np_new).all()
+
+    with _make_session(temp_dir, factory) as sess:
+        do_test()
+
+
 @tvm.testing.requires_micro
 def test_std_math_functions():
     """Verify that standard math functions can be used."""