From 8bd7c0a0b07527f6e322d885293dac7dbc7d128e Mon Sep 17 00:00:00 2001
From: Chris Hoge
Date: Tue, 19 Oct 2021 01:12:41 +0000
Subject: [PATCH] Fix direct and broken links

Updates links to use references instead of direct links, fixing broken
links and making all internal docs links more resilient to refactoring.
---
 docs/dev/how_to/relay_add_op.rst               | 18 ++++++++----------
 docs/how_to/deploy/arm_compute_lib.rst         |  6 +++---
 docs/reference/api/python/tir.rst              |  2 ++
 docs/topic/vta/install.rst                     | 10 ++++------
 .../deploy_prequantized_tflite.py              | 10 +++++-----
 .../work_with_schedules/schedule_primitives.py |  2 ++
 gallery/tutorial/autotvm_relay_x86.py          | 12 +++++-------
 gallery/tutorial/install.py                    |  4 ++--
 gallery/tutorial/intro_topi.py                 |  2 ++
 gallery/tutorial/tensor_expr_get_started.py    |  2 +-
 gallery/tutorial/tvmc_command_line_driver.py   |  8 +++-----
 vta/tutorials/README.txt                       |  2 ++
 12 files changed, 39 insertions(+), 39 deletions(-)

diff --git a/docs/dev/how_to/relay_add_op.rst b/docs/dev/how_to/relay_add_op.rst
index f9ade45f0800..2a8c771dc63d 100644
--- a/docs/dev/how_to/relay_add_op.rst
+++ b/docs/dev/how_to/relay_add_op.rst
@@ -190,18 +190,16 @@ useful for fusing operators. ``kOpaque`` tells TVM to not bother trying to fuse

 While we've now defined the interface for our operations we still need to
 define how to perform the actual calculations for cumulative sum and product.

-Writing this code is outside the scope of the tutorial. For now, we assume
-we have a well tested implementation for the operation's compute. For
-more details on how to do this, we recommend looking up the tutorials
-on `tensor expressions`_, `TVM's operator inventory (topi)`_ and looking at the
-example cumulative sum and product implementations found in `python/tvm/topi/scan.py`_
-and the gpu versions in `python/tvm/topi/cuda/scan.py`_. In the case of our cumulative
-sum and product operations we write things directly in `TIR`_ which is the
+Writing this code is outside the scope of the tutorial. For now, we assume we
+have a well-tested implementation for the operation's compute. For more details
+on how to do this, we recommend looking up the tutorials on :ref:`tensor
+expressions <tutorial-tensor-expr-get-started>`, :ref:`TVM's operator inventory
+(topi) <tutorial-topi>` and looking at the example cumulative sum and product
+implementations found in `python/tvm/topi/scan.py`_ and the GPU versions in
+`python/tvm/topi/cuda/scan.py`_. In the case of our cumulative sum and product
+operations, we write things directly in :ref:`TIR <api-python-tir>`, which is the
 representation where tensor expressions and topi will lower into.

-.. _tensor expressions: https://tvm.apache.org/docs/tutorials/get_started/tensor_expr_get_started.html
-.. _TVM's operator inventory (topi): https://tvm.apache.org/docs/tutorials/topi/intro_topi.html
-.. _TIR: https://tvm.apache.org/docs/dev/index.html?highlight=tir#tvm-tir
 .. _python/tvm/topi/scan.py: https://github.com/apache/tvm/blob/main/python/tvm/topi/scan.py
 .. _python/tvm/topi/cuda/scan.py: https://github.com/apache/tvm/blob/main/python/tvm/topi/cuda/scan.py
diff --git a/docs/how_to/deploy/arm_compute_lib.rst b/docs/how_to/deploy/arm_compute_lib.rst
index 6fb531a0a8f6..831438273cca 100644
--- a/docs/how_to/deploy/arm_compute_lib.rst
+++ b/docs/how_to/deploy/arm_compute_lib.rst
@@ -142,9 +142,9 @@ Export the module.

     lib.export_library(lib_path, cc=cross_compile)

-Run Inference. This must be on an Arm device. If compiling on x86 device and running on AArch64,
-consider using the RPC mechanism. Tutorials for using the RPC mechanism:
-https://tvm.apache.org/docs/tutorials/get_started/cross_compilation_and_rpc.html
+Run Inference. This must be on an Arm device. If compiling on an x86 device and
+running on AArch64, consider using the RPC mechanism. See the :ref:`tutorial for
+using the RPC mechanism <tutorial-cross-compilation-and-rpc>`.

 .. code:: python

diff --git a/docs/reference/api/python/tir.rst b/docs/reference/api/python/tir.rst
index b0b8f1cff5fb..2152be69ea6f 100644
--- a/docs/reference/api/python/tir.rst
+++ b/docs/reference/api/python/tir.rst
@@ -15,6 +15,8 @@
 specific language governing permissions and limitations
 under the License.

+.. _api-python-tir:
+
 tvm.tir
 -------
 .. automodule:: tvm.tir
diff --git a/docs/topic/vta/install.rst b/docs/topic/vta/install.rst
index 2248975b61b1..e4b309ea9b61 100644
--- a/docs/topic/vta/install.rst
+++ b/docs/topic/vta/install.rst
@@ -30,8 +30,8 @@ We present three installation guides, each extending on the previous one:
 VTA Simulator Installation
 --------------------------

-You need `TVM installed <https://tvm.apache.org/docs/install/index.html>`_ on your machine.
-For a quick and easy start, checkout the `Docker Guide <https://tvm.apache.org/docs/install/docker.html>`_.
+You need :ref:`TVM installed <installation>` on your machine. For a quick and
+easy start, check out the :ref:`Docker Guide <docker-images>`.

 You'll need to set the following paths to use VTA:

@@ -65,7 +65,7 @@ To ensure that you've properly installed the VTA python package, run the followi

     python /vta/tests/python/integration/test_benchmark_topi_conv2d.py

-You are invited to try out our `VTA programming tutorials <https://tvm.apache.org/docs/vta/tutorials/index.html>`_.
+You are invited to try out our :ref:`VTA programming tutorials <vta-tutorials>`.

 **Note**: You'll notice that for every convolution layer, the throughput gets reported in GOPS. These numbers are actually the computational throughput that the simulator achieves, by evaluating the convolutions in software.

@@ -222,9 +222,7 @@ The performance metrics measured on the Pynq board will be reported for each con

 **Tip**: You can track progress of the FPGA programming and the runtime rebuilding steps by looking at the RPC server's logging messages in your Pynq ``ssh`` session.

-You can also try out our `VTA programming tutorials <https://tvm.apache.org/docs/vta/tutorials/index.html>`_.
-
-
+You can also try out our :ref:`VTA programming tutorials <vta-tutorials>`.

 Intel DE10 FPGA Setup
 ---------------------
diff --git a/gallery/how_to/deploy_models/deploy_prequantized_tflite.py b/gallery/how_to/deploy_models/deploy_prequantized_tflite.py
index 7bbb06bdf801..830e2ab07466 100644
--- a/gallery/how_to/deploy_models/deploy_prequantized_tflite.py
+++ b/gallery/how_to/deploy_models/deploy_prequantized_tflite.py
@@ -255,8 +255,8 @@ def run_tvm(lib):
 # * Set the environment variable TVM_NUM_THREADS to the number of physical cores
 # * Choose the best target for your hardware, such as "llvm -mcpu=skylake-avx512" or
 #   "llvm -mcpu=cascadelake" (more CPUs with AVX512 would come in the future)
-# * Perform autotuning - `Auto-tuning a convolution network for x86 CPU
-#   <https://tvm.apache.org/docs/tutorials/autotvm/tune_relay_x86.html>`_.
-# * To get best inference performance on ARM CPU, change target argument according to your
-#   device and follow `Auto-tuning a convolution network for ARM CPU
-#   <https://tvm.apache.org/docs/tutorials/autotvm/tune_relay_arm.html>`_.
+# * Perform autotuning - :ref:`Auto-tuning a convolution network for x86 CPU
+#   <tune_relay_x86>`.
+# * To get best inference performance on ARM CPU, change target argument
+#   according to your device and follow :ref:`Auto-tuning a convolution
+#   network for ARM CPU <tune_relay_arm>`.
diff --git a/gallery/how_to/work_with_schedules/schedule_primitives.py b/gallery/how_to/work_with_schedules/schedule_primitives.py
index ade79f69707f..65fdeda57c3b 100644
--- a/gallery/how_to/work_with_schedules/schedule_primitives.py
+++ b/gallery/how_to/work_with_schedules/schedule_primitives.py
@@ -15,6 +15,8 @@
 # specific language governing permissions and limitations
 # under the License.
 """
+.. _schedule_primitives:
+
 Schedule Primitives in TVM
 ==========================
 **Author**: `Ziheng Jiang <https://github.com/ZihengJiang>`_
diff --git a/gallery/tutorial/autotvm_relay_x86.py b/gallery/tutorial/autotvm_relay_x86.py
index 67faec4505a6..8b9c45c2a859 100644
--- a/gallery/tutorial/autotvm_relay_x86.py
+++ b/gallery/tutorial/autotvm_relay_x86.py
@@ -81,10 +81,9 @@
 #
 # .. note:: Working with Other Model Formats
 #
-# TVM supports many popular model formats. A list can be found in the `Compile
-# Deep Learning Models
-# <https://tvm.apache.org/docs/tutorials/index.html#compile-deep-learning-models>`_
-# section of the TVM Documentation.
+# TVM supports many popular model formats. A list can be found in the
+# :ref:`Compile Deep Learning Models <tutorial-frontend>` section of the TVM
+# Documentation.

 model_url = "".join(
     [
@@ -150,9 +149,8 @@
 #
 # Specifying the correct target can have a huge impact on the performance of
 # the compiled module, as it can take advantage of hardware features
-# available on the target. For more information, please refer to `Auto-tuning
-# a convolutional network for x86 CPU
-# <https://tvm.apache.org/docs/tutorials/autotvm/tune_relay_x86.html>`_.
+# available on the target. For more information, please refer to
+# :ref:`Auto-tuning a convolutional network for x86 CPU <tune_relay_x86>`.
 # We recommend identifying which CPU you are running, along with optional
 # features, and set the target appropriately. For example, for some
 # processors ``target = "llvm -mcpu=skylake"``, or ``target = "llvm
diff --git a/gallery/tutorial/install.py b/gallery/tutorial/install.py
index b69b8b493a4f..67ce093b9d7f 100644
--- a/gallery/tutorial/install.py
+++ b/gallery/tutorial/install.py
@@ -35,8 +35,8 @@
 # allow you to enable specific features such as GPU support, microcontroller
 # support (microTVM), and a debugging runtime, and other features. You will also
 # want to install from source if you want to actively contribute to the TVM
-# project. The full instructions are on the `Install TVM From Source
-# <https://tvm.apache.org/docs/install/from_source.html>`_ page.
+# project. The full instructions are on the :ref:`Install TVM From Source
+# <install-from-source>` page.

 ################################################################################
 # Installing From Binary Packages
diff --git a/gallery/tutorial/intro_topi.py b/gallery/tutorial/intro_topi.py
index 8138e4718cd9..dad8c53bf4ae 100644
--- a/gallery/tutorial/intro_topi.py
+++ b/gallery/tutorial/intro_topi.py
@@ -15,6 +15,8 @@
 # specific language governing permissions and limitations
 # under the License.
 """
+.. _tutorial-topi:
+
 Introduction to TOPI
 ====================
 **Author**: `Ehsan M. Kermani <https://github.com/ehsanmok>`_
diff --git a/gallery/tutorial/tensor_expr_get_started.py b/gallery/tutorial/tensor_expr_get_started.py
index 310d6bdbfee4..fda332cb63ba 100644
--- a/gallery/tutorial/tensor_expr_get_started.py
+++ b/gallery/tutorial/tensor_expr_get_started.py
@@ -512,7 +512,7 @@ def evaluate_addition(func, target, optimization, log):
 # before it moves on to the next stage.
 #
 # A complete description of these primitives can be found in the
-# [Schedule Primitives](https://tvm.apache.org/docs/tutorials/language/schedule_primitives.html) docs page.
+# :ref:`Schedule Primitives <schedule_primitives>` docs page.

 ################################################################################
 # Example 2: Manually Optimizing Matrix Multiplication with TE
diff --git a/gallery/tutorial/tvmc_command_line_driver.py b/gallery/tutorial/tvmc_command_line_driver.py
index ea3254054ecf..7a0b97895e4f 100644
--- a/gallery/tutorial/tvmc_command_line_driver.py
+++ b/gallery/tutorial/tvmc_command_line_driver.py
@@ -154,11 +154,9 @@
 # Specifying the correct target (option ``--target``) can have a huge
 # impact on the performance of the compiled module, as it can take
 # advantage of hardware features available on the target. For more
-# information, please refer to `Auto-tuning a convolutional network
-# for x86 CPU <https://tvm.apache.org/docs/tutorials/autotvm/tune_relay_x86.html>`_.
-# We recommend identifying which CPU you are running, along with optional features,
-# and set the target appropriately.
-#
+# information, please refer to :ref:`Auto-tuning a convolutional network for
+# x86 CPU <tune_relay_x86>`. We recommend identifying which CPU you are
+# running, along with optional features, and setting the target appropriately.

 ################################################################################
 # Running the Model from The Compiled Module with TVMC
diff --git a/vta/tutorials/README.txt b/vta/tutorials/README.txt
index 3d3858b111ba..c1ff4ca0444d 100644
--- a/vta/tutorials/README.txt
+++ b/vta/tutorials/README.txt
@@ -1,3 +1,5 @@
+.. _vta-tutorials:
+
 VTA Tutorials
 =============
 This page contains tutorials about VTA and how to use TVM/Relay to target VTA.
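
Note on the pattern this patch applies throughout: the target document defines an
explicit Sphinx anchor with ``.. _label:`` immediately before a section title, and
every other document links to it by label via ``:ref:`` rather than by a hard-coded
URL. A minimal reStructuredText sketch (the ``tutorial-topi`` anchor is one this
patch actually adds; the referring snippet is a hypothetical example, not a file in
the patch)::

    .. The target file defines the anchor just before the section it labels

    .. _tutorial-topi:

    Introduction to TOPI
    ====================

    .. Any other document can then link by label instead of by URL

    See :ref:`TVM's operator inventory (topi) <tutorial-topi>` for details.

Because Sphinx resolves labels at build time and emits a warning when a target is
missing, these references keep working, or fail loudly, when pages are moved or
renamed, which is what makes them durable to refactoring.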