Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 3 additions & 2 deletions .ci/scripts/gather_test_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
from typing import Any

from examples.models import MODEL_NAME_TO_MODEL
from examples.recipes.xnnpack_optimization import MODEL_NAME_TO_OPTIONS
from examples.recipes.xnnpack import MODEL_NAME_TO_OPTIONS

BUILD_TOOLS = [
"buck2",
Expand Down Expand Up @@ -51,7 +51,8 @@ def export_models_for_ci() -> None:
for name in MODEL_NAME_TO_MODEL.keys():
quantization_configs = {
False,
name in MODEL_NAME_TO_OPTIONS and MODEL_NAME_TO_OPTIONS[name].quantization,
name in MODEL_NAME_TO_OPTIONS
and MODEL_NAME_TO_OPTIONS[name].xnnpack_quantization,
}
delegation_configs = {
False,
Expand Down
24 changes: 12 additions & 12 deletions .ci/scripts/test.sh
Original file line number Diff line number Diff line change
Expand Up @@ -53,11 +53,11 @@ build_cmake_executor_runner() {
}

test_model() {
"${PYTHON_EXECUTABLE}" -m examples.export.export_example --model_name="${MODEL_NAME}"
"${PYTHON_EXECUTABLE}" -m examples.export.portable --model_name="${MODEL_NAME}"

# Run test model
if [[ "${BUILD_TOOL}" == "buck2" ]]; then
buck2 run //examples/executor_runner:executor_runner -- --model_path "./${MODEL_NAME}.pte"
buck2 run //examples/runtime/portable:executor_runner -- --model_path "./${MODEL_NAME}.pte"
elif [[ "${BUILD_TOOL}" == "cmake" ]]; then
if [[ ! -f ${CMAKE_OUTPUT_DIR}/executor_runner ]]; then
build_cmake_executor_runner
Expand Down Expand Up @@ -92,24 +92,24 @@ test_model_with_xnnpack() {

# Quantization-only
if [[ ${WITH_QUANTIZATION} == true ]] && [[ ${WITH_DELEGATION} == false ]]; then
bash examples/quantization/test_quantize.sh "${BUILD_TOOL}" "${MODEL_NAME}"
bash examples/quantization/quant_flow/test_quantize.sh "${BUILD_TOOL}" "${MODEL_NAME}"
exit 0
fi

# Delegation
if [[ ${WITH_QUANTIZATION} == true ]]; then
SUFFIX="q8"
"${PYTHON_EXECUTABLE}" -m examples.backend.xnnpack_examples --model_name="${MODEL_NAME}" --delegate --quantize
"${PYTHON_EXECUTABLE}" -m examples.recipes.xnnpack.aot_compiler --model_name="${MODEL_NAME}" --delegate --quantize
else
SUFFIX="fp32"
"${PYTHON_EXECUTABLE}" -m examples.backend.xnnpack_examples --model_name="${MODEL_NAME}" --delegate
"${PYTHON_EXECUTABLE}" -m examples.recipes.xnnpack.aot_compiler --model_name="${MODEL_NAME}" --delegate
fi

OUTPUT_MODEL_PATH="${MODEL_NAME}_xnnpack_${SUFFIX}.pte"

# Run test model
if [[ "${BUILD_TOOL}" == "buck2" ]]; then
buck2 run //examples/backend:xnn_executor_runner -- --model_path "${OUTPUT_MODEL_PATH}"
buck2 run //examples/runtime/xnnpack:xnn_executor_runner -- --model_path "${OUTPUT_MODEL_PATH}"
elif [[ "${BUILD_TOOL}" == "cmake" ]]; then
if [[ ! -f ${CMAKE_OUTPUT_DIR}/backends/xnnpack/xnn_executor_runner ]]; then
build_cmake_xnn_executor_runner
Expand All @@ -123,15 +123,15 @@ test_model_with_xnnpack() {

test_demo_backend_delegation() {
echo "Testing demo backend delegation on AddMul"
"${PYTHON_EXECUTABLE}" -m examples.export.export_and_delegate --option "composite"
"${PYTHON_EXECUTABLE}" -m examples.export.export_and_delegate --option "partition"
"${PYTHON_EXECUTABLE}" -m examples.export.export_and_delegate --option "whole"
"${PYTHON_EXECUTABLE}" -m examples.recipes.export_and_delegate --option "composite"
"${PYTHON_EXECUTABLE}" -m examples.recipes.export_and_delegate --option "partition"
"${PYTHON_EXECUTABLE}" -m examples.recipes.export_and_delegate --option "whole"

# Run test model
if [[ "${BUILD_TOOL}" == "buck2" ]]; then
buck2 run //examples/executor_runner:executor_runner -- --model_path "./composite_model.pte"
buck2 run //examples/executor_runner:executor_runner -- --model_path "./partition_lowered_model.pte"
buck2 run //examples/executor_runner:executor_runner -- --model_path "./whole.pte"
buck2 run //examples/runtime/portable:executor_runner -- --model_path "./composite_model.pte"
buck2 run //examples/runtime/portable:executor_runner -- --model_path "./partition_lowered_model.pte"
buck2 run //examples/runtime/portable:executor_runner -- --model_path "./whole.pte"
elif [[ "${BUILD_TOOL}" == "cmake" ]]; then
if [[ ! -f ${CMAKE_OUTPUT_DIR}/executor_runner ]]; then
build_cmake_executor_runner
Expand Down
2 changes: 1 addition & 1 deletion .ci/scripts/utils.sh
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ install_flatc_from_source() {

build_executorch_runner_buck2() {
# Build executorch runtime with retry as this step is flaky on macos CI
retry buck2 build //examples/executor_runner:executor_runner
retry buck2 build //examples/runtime/portable:executor_runner
}

build_executorch_runner_cmake() {
Expand Down
4 changes: 2 additions & 2 deletions build/cmake_deps.toml
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ excludes = [

[targets.executor_runner]
buck_targets = [
"//examples/executor_runner:executor_runner",
"//examples/runtime/portable:executor_runner",
]
filters = [
".cpp$",
Expand Down Expand Up @@ -93,7 +93,7 @@ deps = [

[targets.xnn_executor_runner]
buck_targets = [
"//examples/backend:xnn_executor_runner",
"//examples/runtime/xnnpack:xnn_executor_runner",
]
filters = [
".cpp$",
Expand Down
10 changes: 5 additions & 5 deletions docs/source/getting-started-setup.md
Original file line number Diff line number Diff line change
Expand Up @@ -105,17 +105,17 @@ how to do it, we will generate an ExecuTorch program file from an `nn.Module`.
You can generate an ExecuTorch program by using a sample script or by using
the Python interpreter.

We have created the `export_example.py` script that demonstrates a simple model
We have created the `portable.py` script that demonstrates a simple model
export to flatbuffer. This script is available
in the [pytorch/executorch](https://github.com/pytorch/executorch/tree/main/examples/export)
repository.

To generate a sample program, complete the following steps:

1. Run the `export_example.py` script:
1. Run the `portable.py` script:

```bash
python3 -m examples.export.export_example --model_name="add"
python3 -m examples.export.portable --model_name="add"
```

:::{dropdown} Output
Expand Down Expand Up @@ -193,7 +193,7 @@ Complete the following steps:

3. Build a binary:
```bash
/tmp/buck2 build //examples/executor_runner:executor_runner --show-output
/tmp/buck2 build //examples/runtime/portable:executor_runner --show-output
```

:::{dropdown} Output
Expand Down Expand Up @@ -227,7 +227,7 @@ the `buck run` command to run our program.
* To run the `add.pte` program:

```bash
/tmp/buck2 run //examples/executor_runner:executor_runner -- --model_path add.pte
/tmp/buck2 run //examples/runtime/portable:executor_runner -- --model_path add.pte
```

:::{dropdown} Sample Output
Expand Down
10 changes: 5 additions & 5 deletions docs/website/docs/tutorials/00_setting_up_executorch.md
Original file line number Diff line number Diff line change
Expand Up @@ -58,10 +58,10 @@ corresponding version of the repo.
Via python script:
```bash
# Creates the file `add.pte`
python3 -m examples.export.export_example --model_name="add"
python3 -m examples.export.portable --model_name="add"

# Creates the delegated program `composite_model.pte`, other options are "whole" and "partition"
python3 -m examples.export.export_and_delegate --option "composite"
python3 -m examples.recipes.export_and_delegate --option "composite"
```

Or via python interpreter:
Expand Down Expand Up @@ -98,7 +98,7 @@ You may want to copy the `buck2` binary into your `$PATH` so you can run it as `
`executor_runner` is an example wrapper around executorch runtime which includes all the operators and backends

```bash
/tmp/buck2 build //examples/executor_runner:executor_runner --show-output
/tmp/buck2 build //examples/runtime/portable:executor_runner --show-output
```

The `--show-output` flag will print the path to the executable if you want to run it directly.
Expand All @@ -112,10 +112,10 @@ conda install -c conda-forge lld

```bash
# add.pte is the program generated from portable.py during AOT Setup Step 3
/tmp/buck2 run //examples/executor_runner:executor_runner -- --model_path add.pte
/tmp/buck2 run //examples/runtime/portable:executor_runner -- --model_path add.pte

# To run a delegated model
/tmp/buck2 run //examples/executor_runner:executor_runner -- --model_path composite_model.pte
/tmp/buck2 run //examples/runtime/portable:executor_runner -- --model_path composite_model.pte
```

or execute the binary directly from the `--show-output` path shown when building.
Expand Down
2 changes: 1 addition & 1 deletion docs/website/docs/tutorials/profiling.md
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ Here is an example of what an ExecuTorch run + profile + post-processing workflow
This runs the sample program with profiling enabled
```bash
cd executorch
buck2 run -c executorch.prof_enabled=true examples/executor_runner:executor_runner -- --model_path add.pte
buck2 run -c executorch.prof_enabled=true examples/runtime/portable:executor_runner -- --model_path add.pte
```
Run the post-processing CLI tool that calls into the same APIs listed above and prints out the profiling results in a tabulated format in the terminal.

Expand Down
20 changes: 10 additions & 10 deletions examples/README.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# Examples

This dir contains scripts and other helper utilities to illustrate an end-to-end workflow to run a torch.nn.module on the ExecuTorch runtime.
This dir contains scripts and other helper utilities to illustrate an end-to-end workflow to run a torch.nn.module on the ExecuTorch runtime.
It also includes a list of modules, from a simple `Add` to a full model like `MobileNetv2` and `MobileNetv3`, with more to come.


Expand All @@ -10,12 +10,12 @@ examples
|── backend # Contains examples for exporting delegate models and running them using custom executor runners
├── custom_ops # Contains examples to register custom operators into PyTorch as well as register its kernels into ExecuTorch runtime
├── example_quantizer_and_delegate # Contains examples to fully lower a MobileNetV2 model to the example backend with an example quantizer
├── executor_runner # This is an example C++ wrapper around the ET runtime
├── export # Python helper scripts to illustrate export workflow
├── export # Contains scripts to illustrate export workflow in portable mode
├── ios_demo_apps # Contains iOS demo apps
├── models # Contains a set of simple to PyTorch models
├── models # Contains a set of out-of-box PyTorch models
├── quantization # Contains examples of quantization workflow
├── arm # Contains examples of the Arm TOSA and Ethos-U NPU flows
├── recipes # Contains recipes for a set of demos
├── runtime # Contains examples of C++ wrapper around the ET runtime
└── README.md # This file
```

Expand All @@ -31,18 +31,18 @@ and executing previously exported binary file(s).
1. Following the setup guide in [Setting up ExecuTorch from GitHub](/docs/website/docs/tutorials/00_setting_up_executorch.md)
you should be able to get the basic development environment for ExecuTorch working.

2. Using the script `export/export_example.py` generate a model binary file by selecting a
2. Using the example script `export/portable.py`, generate a model binary file by selecting a
model name from the list of available models in the `models` dir.


```bash
cd executorch # To the top level dir

# To get a list of example models
python3 -m examples.export.export_example -h
python3 -m examples.export.portable -h

# To generate a specific pte model
python3 -m examples.export.export_example --model_name="mv2" # for MobileNetv2
python3 -m examples.export.portable --model_name="mv2" # for MobileNetv2

# This should generate ./mv2.pte file, if successful.
```
Expand All @@ -52,7 +52,7 @@ Use `-h` (or `--help`) to see all the supported models.
3. Once we have the model binary (pte) file, let's run it with the ExecuTorch runtime using the `executor_runner`.

```bash
buck2 run examples/executor_runner:executor_runner -- --model_path mv2.pte
buck2 run examples/runtime/portable:executor_runner -- --model_path mv2.pte
```

## Quantization
Expand Down Expand Up @@ -87,7 +87,7 @@ buck2 run executorch/examples/quantization:example -- --help
A quantized model can be run via executor_runner, similar to a floating point model, as shown above:

```bash
buck2 run examples/executor_runner:executor_runner -- --model_path mv2.pte
buck2 run examples/runtime/portable:executor_runner -- --model_path mv2.pte
```

Note that, running quantized model, requires various quantized/dequantize operators, available in [quantized kernel lib](/kernels/quantized).
Expand Down
44 changes: 0 additions & 44 deletions examples/backend/targets.bzl

This file was deleted.

2 changes: 1 addition & 1 deletion examples/custom_ops/custom_ops_1.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@

"""Example of showcasing registering custom operator through torch library API."""
import torch
from examples.export.export_example import export_to_exec_prog, save_pte_program
from examples.export.utils import export_to_exec_prog, save_pte_program

from executorch.exir import EdgeCompileConfig
from torch.library import impl, Library
Expand Down
2 changes: 1 addition & 1 deletion examples/custom_ops/custom_ops_2.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@
import argparse

import torch
from examples.export.export_example import export_to_exec_prog, save_pte_program
from examples.export.utils import export_to_exec_prog, save_pte_program
from executorch.exir import EdgeCompileConfig


Expand Down
4 changes: 2 additions & 2 deletions examples/custom_ops/test_custom_ops.sh
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ test_buck2_custom_op_1() {
# should save file custom_ops_1.pte

echo 'Running executor_runner'
buck2 run //examples/executor_runner:executor_runner \
buck2 run //examples/runtime/portable:executor_runner \
--config=executorch.register_custom_op=1 -- --model_path="./${model_name}.pte"
# should give correct result

Expand Down Expand Up @@ -58,7 +58,7 @@ test_buck2_custom_op_2() {
${PYTHON_EXECUTABLE} -m "examples.custom_ops.${model_name}" --so_library="$SO_LIB"
# should save file custom_ops_2.pte

buck2 run //examples/executor_runner:executor_runner \
buck2 run //examples/runtime/portable:executor_runner \
--config=executorch.register_custom_op=2 -- --model_path="./${model_name}.pte"
# should give correct result
echo "Removing ${model_name}.pte"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@

from ..models import MODEL_NAME_TO_MODEL
from ..models.model_factory import EagerModelFactory

from .utils import export_to_exec_prog, save_pte_program


Expand Down
2 changes: 1 addition & 1 deletion examples/ios_demo_apps/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ and UI looks like

![](./executorch_mobilenet_ui.png)

Step 1. Export a MobileNetV2 model following example follwing executorch/examples/export/export_example.py. Instead of export mv2 directly, add a softmax at the end
Step 1. Export a MobileNetV2 model following the example in executorch/examples/export/portable.py. Instead of exporting mv2 directly, add a softmax at the end
```python
class MobileNetV2Wrapper(torch.nn.Module):
def __init__(self):
Expand Down
26 changes: 26 additions & 0 deletions examples/quantization/quant_flow/TARGETS
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
# Buck build targets for the quantization flow example.
load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")

# Runnable example binary demonstrating the quantization flow.
runtime.python_binary(
name = "example",
main_src = "example.py",
# Preload the quantized-kernel AOT library so its quantized ops are
# available when the example runs.
preload_deps = ["//executorch/kernels/quantized:aot_lib"],
deps = [
":quant_utils",
"//caffe2:torch",
"//executorch/examples/export:lib",
"//executorch/examples/recipes/xnnpack:models",
],
)

# Shared quantization helpers (utils.py), reusable by other example targets.
runtime.python_library(
name = "quant_utils",
srcs = [
"utils.py",
],
# Visible to every target under //executorch/examples/.
visibility = [
"//executorch/examples/...",
],
deps = [
"//caffe2:torch",
],
)
Loading