From e88b33bb9ef9d11564618c272353951fb08429cd Mon Sep 17 00:00:00 2001 From: Mehrdad Hessar Date: Fri, 8 Jul 2022 12:56:25 -0700 Subject: [PATCH 1/4] Add aot tutorial init --- docs/conf.py | 1 + .../how_to/work_with_microtvm/micro_aot.py | 162 ++++++++++++++++++ tests/scripts/task_python_microtvm.sh | 2 + 3 files changed, 165 insertions(+) create mode 100644 gallery/how_to/work_with_microtvm/micro_aot.py diff --git a/docs/conf.py b/docs/conf.py index eb292e56400e..82b0d2962338 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -318,6 +318,7 @@ def git_describe_version(original_version): "micro_tflite.py", "micro_ethosu.py", "micro_tvmc.py", + "micro_aot.py", ], } diff --git a/gallery/how_to/work_with_microtvm/micro_aot.py b/gallery/how_to/work_with_microtvm/micro_aot.py new file mode 100644 index 000000000000..a1f61ffa06fb --- /dev/null +++ b/gallery/how_to/work_with_microtvm/micro_aot.py @@ -0,0 +1,162 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +""" +.. _tutorial-micro-AoT: + +microTVM Host-Driven AoT +=========================== +**Authors**: +`Mehrdad Hessar `_, +`Alan MacDonald `_ + +This tutorial is showcasing microTVM host-driven AoT compilation with +a TFLite model. This tutorial can be executed on a X86 CPU using C runtime (CRT) +or on Zephyr plarform on a microcontroller that supports Zephyr platform. +""" + +import numpy as np +import pathlib +import json +import os + +import tvm +from tvm import relay +from tvm.relay.backend import Executor, Runtime +from tvm.contrib.download import download_testdata + +###################################################################### +# Import a TFLite model +# --------------------- +# +# To begin with, download and import a TFLite model from TinyMLPerf models. +# +# **Note:** By default this tutorial runs on X86 CPU using CRT, if you would like to run on Zephyr platform +# you need to export `TVM_MICRO_USE_HW` environment variable. 
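+#
+# For example, to exercise the Zephyr path you could run something like the
+# following before starting the tutorial (shown here with the `qemu_x86`
+# simulator board; a physical Zephyr board name can be set instead)::
+#
+#     export TVM_MICRO_USE_HW=1
+#     export TVM_MICRO_BOARD=qemu_x86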
+# +use_physical_hw = bool(os.getenv("TVM_MICRO_USE_HW")) +MODEL_URL = "https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/model/keyword_spotting_quant.tflite" +MODEL_PATH = download_testdata(MODEL_URL, "keyword_spotting_quant.tflite", module="model") +SAMPLE_URL = "https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/data/keyword_spotting_int8_6.pyc.npy" +SAMPLE_PATH = download_testdata(SAMPLE_URL, "keyword_spotting_int8_6.pyc.npy", module="data") + +tflite_model_buf = open(MODEL_PATH, "rb").read() +try: + import tflite + + tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0) +except AttributeError: + import tflite.Model + + tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0) + +input_shape = (1, 49, 10, 1) +INPUT_NAME = "input_1" +relay_mod, params = relay.frontend.from_tflite( + tflite_model, shape_dict={INPUT_NAME: input_shape}, dtype_dict={INPUT_NAME: "int8"} +) + +###################################################################### +# Defining the target +# ------------------- +# +# Now we need to define the target, runtime and executor. In this tutorial, we focused on +# using AOT host driven executor. We use the host micro target which is for running a model +# on X86 CPU using CRT runtime or running a model with Zephyr platform on qemu_x86 simulator +# board. In the case of a physical microcontoller, we get the target model for the physical +# board (E.g. nucleo_f746zg) and pass it to `tvm.target.target.micro` to create a full +# micro target. +# +RUNTIME = Runtime("crt", {"system-lib": True}) +TARGET = tvm.target.target.micro("host") +EXECUTOR = Executor("aot") + +if use_physical_hw: + boards_file = pathlib.Path(tvm.micro.get_microtvm_template_projects("zephyr")) / "boards.json" + with open(boards_file) as f: + boards = json.load(f) + BOARD = os.getenv("TVM_MICRO_BOARD", default="nucleo_f746zg") + TARGET = tvm.target.target.micro(boards[BOARD]["model"]) + +###################################################################### +# Compile the model +# ----------------- +# +# Now, we compile the model for the target: +# +with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}): + module = tvm.relay.build( + relay_mod, target=TARGET, params=params, runtime=RUNTIME, executor=EXECUTOR + ) + +###################################################################### +# Create a microTVM project +# ----------------------- +# +# Now that we have the comipled model as an IRModule, we need to create a project +# with the compiled model in microTVM. To do this, we use Project API. We have defined +# CRT and Zephyr microTVM template projects which are used for X86 CPU and Zephyr platforms +# respectively. +# +template_project_path = pathlib.Path(tvm.micro.get_microtvm_template_projects("crt")) +project_options = {} # You can use options to provide platform-specific options through TVM. + +if use_physical_hw: + template_project_path = pathlib.Path(tvm.micro.get_microtvm_template_projects("zephyr")) + project_options = {"project_type": "host_driven", "zephyr_board": BOARD} + +temp_dir = tvm.contrib.utils.tempdir() +generated_project_dir = temp_dir / "project" +project = tvm.micro.generate_project( + template_project_path, module, generated_project_dir, project_options +) + +###################################################################### +# Build, flash and execute the model +# ----------------------- +# Next, we build the microTVM project and flash it. 
Flash step is specific to +# physical microcontrollers and it is skipped if it is using CRT runtime or running +# on Zephyr simulator. Next, we define the labels for the model output and execute +# the model with a sample with expected value of 6 (label: left). +# +project.build() +project.flash() + +labels = [ + "_silence_", + "_unknown_", + "yes", + "no", + "up", + "down", + "left", + "right", + "on", + "off", + "stop", + "go", +] +with tvm.micro.Session(project.transport()) as session: + aot_executor = tvm.runtime.executor.aot_executor.AotModule(session.create_aot_executor()) + sample = np.load(SAMPLE_PATH) + aot_executor.get_input(INPUT_NAME).copyfrom(sample) + aot_executor.run() + result = aot_executor.get_output(0).numpy() + print(f"Label is `{labels[np.argmax(result)]}` with index `{np.argmax(result)}`") +# +# Output: +# Label is `left` with index `6` +# diff --git a/tests/scripts/task_python_microtvm.sh b/tests/scripts/task_python_microtvm.sh index e057883776bb..13a910eb2b20 100755 --- a/tests/scripts/task_python_microtvm.sh +++ b/tests/scripts/task_python_microtvm.sh @@ -44,6 +44,7 @@ run_pytest ctypes python-microtvm-common-due tests/micro/common --platform=ardu # Tutorials python3 gallery/how_to/work_with_microtvm/micro_tflite.py python3 gallery/how_to/work_with_microtvm/micro_autotune.py +python3 gallery/how_to/work_with_microtvm/micro_aot.py ./gallery/how_to/work_with_microtvm/micro_tvmc.sh # Tutorials running with Zephyr @@ -51,5 +52,6 @@ export TVM_MICRO_USE_HW=1 export TVM_MICRO_BOARD=qemu_x86 python3 gallery/how_to/work_with_microtvm/micro_tflite.py python3 gallery/how_to/work_with_microtvm/micro_autotune.py +python3 gallery/how_to/work_with_microtvm/micro_aot.py run_pytest ctypes python-relay-strategy-arm_cpu tests/python/relay/strategy/arm_cpu --enable-corstone300-tests From 12b8db7af7bd95ffc70c28dc4fd9dd53907820dc Mon Sep 17 00:00:00 2001 From: Mehrdad Hessar Date: Mon, 25 Jul 2022 14:24:13 -0700 Subject: [PATCH 2/4] fix lint --- gallery/how_to/work_with_microtvm/micro_aot.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/gallery/how_to/work_with_microtvm/micro_aot.py b/gallery/how_to/work_with_microtvm/micro_aot.py index a1f61ffa06fb..44a627c040b7 100644 --- a/gallery/how_to/work_with_microtvm/micro_aot.py +++ b/gallery/how_to/work_with_microtvm/micro_aot.py @@ -28,6 +28,12 @@ or on Zephyr plarform on a microcontroller that supports Zephyr platform. """ +# sphinx_gallery_start_ignore +from tvm import testing + +testing.utils.install_request_hook(depth=3) +# sphinx_gallery_end_ignore + import numpy as np import pathlib import json From a276ffc4360923a0e6b6c6ba343df33612a8e381 Mon Sep 17 00:00:00 2001 From: Mehrdad Hessar Date: Tue, 26 Jul 2022 10:57:38 -0700 Subject: [PATCH 3/4] address comments --- .../how_to/work_with_microtvm/micro_aot.py | 44 ++++++++++++------- 1 file changed, 28 insertions(+), 16 deletions(-) diff --git a/gallery/how_to/work_with_microtvm/micro_aot.py b/gallery/how_to/work_with_microtvm/micro_aot.py index 44a627c040b7..88a5d0d9583d 100644 --- a/gallery/how_to/work_with_microtvm/micro_aot.py +++ b/gallery/how_to/work_with_microtvm/micro_aot.py @@ -24,8 +24,10 @@ `Alan MacDonald `_ This tutorial is showcasing microTVM host-driven AoT compilation with -a TFLite model. This tutorial can be executed on a X86 CPU using C runtime (CRT) -or on Zephyr plarform on a microcontroller that supports Zephyr platform. +a TFLite model. AoTExecutor reduces the overhead of parsing graph at runtime +compared to GraphExecutor. 
Also, we can have better memory management using Ahead +of time compilation. This tutorial can be executed on a x86 CPU using C runtime (CRT) +or on Zephyr platform on a microcontroller that supports Zephyr platform. """ # sphinx_gallery_start_ignore @@ -48,9 +50,11 @@ # Import a TFLite model # --------------------- # -# To begin with, download and import a TFLite model from TinyMLPerf models. +# To begin with, download and import a Keyword Spotting TFLite model. +# This model is originally from `MLPerf Tiny repository `_. +# To test this model, we use samples from `KWS dataset provided by Google `_. # -# **Note:** By default this tutorial runs on X86 CPU using CRT, if you would like to run on Zephyr platform +# **Note:** By default this tutorial runs on x86 CPU using CRT, if you would like to run on Zephyr platform # you need to export `TVM_MICRO_USE_HW` environment variable. # use_physical_hw = bool(os.getenv("TVM_MICRO_USE_HW")) @@ -81,20 +85,27 @@ # # Now we need to define the target, runtime and executor. In this tutorial, we focused on # using AOT host driven executor. We use the host micro target which is for running a model -# on X86 CPU using CRT runtime or running a model with Zephyr platform on qemu_x86 simulator -# board. In the case of a physical microcontoller, we get the target model for the physical -# board (E.g. nucleo_f746zg) and pass it to `tvm.target.target.micro` to create a full +# on x86 CPU using CRT runtime or running a model with Zephyr platform on qemu_x86 simulator +# board. In the case of a physical microcontroller, we get the target model for the physical +# board (E.g. nucleo_l4r5zi) and pass it to `tvm.target.target.micro` to create a full # micro target. # + +# Use the C runtime (crt) and enable static linking by setting system-lib to True RUNTIME = Runtime("crt", {"system-lib": True}) + +# Simulate a microcontroller on the host machine. Uses the main() from `src/runtime/crt/host/main.cc `_. +# To use physical hardware, replace "host" with something matching your hardware. TARGET = tvm.target.target.micro("host") + +# Use the AOT executor rather than graph or vm executors. Don't use unpacked API or C calling style. EXECUTOR = Executor("aot") if use_physical_hw: boards_file = pathlib.Path(tvm.micro.get_microtvm_template_projects("zephyr")) / "boards.json" with open(boards_file) as f: boards = json.load(f) - BOARD = os.getenv("TVM_MICRO_BOARD", default="nucleo_f746zg") + BOARD = os.getenv("TVM_MICRO_BOARD", default="nucleo_l4r5zi") TARGET = tvm.target.target.micro(boards[BOARD]["model"]) ###################################################################### @@ -110,11 +121,11 @@ ###################################################################### # Create a microTVM project -# ----------------------- +# ------------------------- # -# Now that we have the comipled model as an IRModule, we need to create a project -# with the compiled model in microTVM. To do this, we use Project API. We have defined -# CRT and Zephyr microTVM template projects which are used for X86 CPU and Zephyr platforms +# Now that we have the compiled model as an IRModule, we need to create a firmware project +# to use the compiled model with microTVM. To do this, we use Project API. We have defined +# CRT and Zephyr microTVM template projects which are used for x86 CPU and Zephyr platforms # respectively. 
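+# The `project_options` dictionary below lets you pass platform-specific settings
+# through TVM: for the CRT template we can leave it empty, while the Zephyr template
+# needs at least the project type and the target board.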
# template_project_path = pathlib.Path(tvm.micro.get_microtvm_template_projects("crt")) @@ -132,11 +143,12 @@ ###################################################################### # Build, flash and execute the model -# ----------------------- +# ---------------------------------- # Next, we build the microTVM project and flash it. Flash step is specific to -# physical microcontrollers and it is skipped if it is using CRT runtime or running -# on Zephyr simulator. Next, we define the labels for the model output and execute -# the model with a sample with expected value of 6 (label: left). +# physical microcontrollers and it is skipped if it is simulating a microcontroller +# via the host main.cc or if a Zephyr emulated board is selected as the target. +# Next, we define the labels for the model output and execute the model with a +# sample with expected value of 6 (label: left). # project.build() project.flash() From 83124d909c185e5191730ef5cfa2ec094aa437cc Mon Sep 17 00:00:00 2001 From: Mehrdad Hessar Date: Tue, 26 Jul 2022 13:19:44 -0700 Subject: [PATCH 4/4] address comments --- gallery/how_to/work_with_microtvm/micro_aot.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/gallery/how_to/work_with_microtvm/micro_aot.py b/gallery/how_to/work_with_microtvm/micro_aot.py index 88a5d0d9583d..9a177559e116 100644 --- a/gallery/how_to/work_with_microtvm/micro_aot.py +++ b/gallery/how_to/work_with_microtvm/micro_aot.py @@ -25,9 +25,9 @@ This tutorial is showcasing microTVM host-driven AoT compilation with a TFLite model. AoTExecutor reduces the overhead of parsing graph at runtime -compared to GraphExecutor. Also, we can have better memory management using Ahead +compared to GraphExecutor. Also, we can have better memory management using ahead of time compilation. This tutorial can be executed on a x86 CPU using C runtime (CRT) -or on Zephyr platform on a microcontroller that supports Zephyr platform. +or on Zephyr platform on a microcontroller/board supported by Zephyr. """ # sphinx_gallery_start_ignore @@ -125,7 +125,7 @@ # # Now that we have the compiled model as an IRModule, we need to create a firmware project # to use the compiled model with microTVM. To do this, we use Project API. We have defined -# CRT and Zephyr microTVM template projects which are used for x86 CPU and Zephyr platforms +# CRT and Zephyr microTVM template projects which are used for x86 CPU and Zephyr boards # respectively. # template_project_path = pathlib.Path(tvm.micro.get_microtvm_template_projects("crt"))
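
For reference, with the CI hooks above in place the new tutorial can also be run
locally in roughly the same way (a sketch, assuming a TVM build with microTVM and
Zephyr support is already on PYTHONPATH):

    # Host-driven AoT on x86 using the C runtime (CRT)
    python3 gallery/how_to/work_with_microtvm/micro_aot.py

    # The same tutorial on the Zephyr qemu_x86 simulator board
    export TVM_MICRO_USE_HW=1
    export TVM_MICRO_BOARD=qemu_x86
    python3 gallery/how_to/work_with_microtvm/micro_aot.py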