From 7bcd0198a3f103c29e0c3e6e234609093cc38bdb Mon Sep 17 00:00:00 2001 From: driazati Date: Tue, 10 Jan 2023 16:35:08 -0800 Subject: [PATCH 1/3] Use sphinx reset functions, delete linter --- docs/conf.py | 6 + gallery/how_to/compile_models/from_coreml.py | 5 - gallery/how_to/compile_models/from_darknet.py | 6 - gallery/how_to/compile_models/from_keras.py | 5 - gallery/how_to/compile_models/from_mxnet.py | 5 - gallery/how_to/compile_models/from_oneflow.py | 5 - gallery/how_to/compile_models/from_onnx.py | 5 - gallery/how_to/compile_models/from_paddle.py | 5 - gallery/how_to/compile_models/from_pytorch.py | 6 - .../how_to/compile_models/from_tensorflow.py | 6 - gallery/how_to/compile_models/from_tflite.py | 6 - .../deploy_models/deploy_model_on_adreno.py | 6 - .../deploy_models/deploy_model_on_android.py | 5 - .../deploy_models/deploy_model_on_nano.py | 6 - .../deploy_models/deploy_model_on_rasp.py | 6 - .../deploy_object_detection_pytorch.py | 6 - .../deploy_models/deploy_prequantized.py | 5 - .../deploy_prequantized_tflite.py | 5 - .../how_to/deploy_models/deploy_quantized.py | 5 - gallery/how_to/deploy_models/deploy_sparse.py | 5 - .../deploy_models/deploy_ssd_gluoncv.py | 5 - .../extend_tvm/bring_your_own_datatypes.py | 5 - .../extend_tvm/low_level_custom_pass.py | 5 - gallery/how_to/extend_tvm/use_pass_infra.py | 5 - .../how_to/extend_tvm/use_pass_instrument.py | 5 - .../optimize_operators/opt_conv_cuda.py | 6 - .../optimize_operators/opt_conv_tensorcore.py | 6 - gallery/how_to/optimize_operators/opt_gemm.py | 5 - .../tune_conv2d_layer_cuda.py | 6 - .../tune_network_arm.py | 5 - .../tune_network_cuda.py | 5 - .../tune_network_mali.py | 5 - .../tune_network_x86.py | 5 - .../tune_sparse_x86.py | 5 - .../tune_with_autotvm/tune_conv2d_cuda.py | 6 - .../tune_with_autotvm/tune_relay_arm.py | 5 - .../tune_with_autotvm/tune_relay_cuda.py | 6 - .../tune_relay_mobile_gpu.py | 5 - .../tune_with_autotvm/tune_relay_x86.py | 5 - .../how_to/work_with_microtvm/micro_aot.py | 5 
- .../work_with_microtvm/micro_autotune.py | 5 - .../how_to/work_with_microtvm/micro_ethosu.py | 5 - .../work_with_microtvm/micro_pytorch.py | 5 - .../work_with_microtvm/micro_reference_vm.py | 6 - .../how_to/work_with_microtvm/micro_tflite.py | 5 - .../work_with_pytorch/using_as_torch.py | 6 - .../using_optimized_torch.py | 6 - gallery/how_to/work_with_relay/build_gcn.py | 5 - .../work_with_relay/using_external_lib.py | 5 - .../using_pipeline_executor.py | 10 -- .../how_to/work_with_relay/using_relay_viz.py | 5 - .../how_to/work_with_schedules/extern_op.py | 5 - .../how_to/work_with_schedules/intrin_math.py | 7 +- .../how_to/work_with_schedules/reduction.py | 5 - gallery/how_to/work_with_schedules/scan.py | 5 - .../schedule_primitives.py | 5 - gallery/how_to/work_with_schedules/tedd.py | 5 - .../how_to/work_with_schedules/tensorize.py | 5 - .../work_with_schedules/tuple_inputs.py | 5 - gallery/tutorial/auto_scheduler_matmul_x86.py | 5 - gallery/tutorial/autotvm_matmul_x86.py | 5 - gallery/tutorial/autotvm_relay_x86.py | 5 - gallery/tutorial/cross_compilation_and_rpc.py | 5 - gallery/tutorial/install.py | 6 - gallery/tutorial/intro_topi.py | 6 - gallery/tutorial/introduction.py | 5 - gallery/tutorial/relay_quick_start.py | 6 - gallery/tutorial/tensor_expr_get_started.py | 5 - gallery/tutorial/tensor_ir_blitz_course.py | 6 - gallery/tutorial/tvmc_command_line_driver.py | 5 - gallery/tutorial/tvmc_python.py | 5 - gallery/tutorial/uma.py | 6 - tests/lint/check_request_hook.py | 161 ------------------ tests/scripts/task_lint.sh | 3 - 74 files changed, 7 insertions(+), 546 deletions(-) delete mode 100644 tests/lint/check_request_hook.py diff --git a/docs/conf.py b/docs/conf.py index 18c634c05d05..08fbedb8ffca 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -223,6 +223,10 @@ def rewrite_generic_admonition(match): return text +def install_request_hook(gallery_conf, fname): + testing.utils.install_request_hook(depth=3) + + INSTALL_TVM_DEV = f"""\ %%shell # Installs the 
latest dev build of TVM from PyPI. If you wish to build @@ -431,6 +435,7 @@ def jupyter_notebook(script_blocks, gallery_conf, target_dir, real_func): "topic/vta/tutorials", ] + subsection_order = ExplicitOrder( str(p) for p in [ @@ -563,6 +568,6 @@ def force_gc(gallery_conf, fname): "expected_failing_examples": [], - "reset_modules": ("matplotlib", "seaborn", force_gc), + "reset_modules": ("matplotlib", "seaborn", force_gc, install_request_hook), "promote_jupyter_magic": True, } autodoc_default_options = { diff --git a/gallery/how_to/compile_models/from_coreml.py b/gallery/how_to/compile_models/from_coreml.py index 4d0eea2d8d52..b54329920b8d 100644 --- a/gallery/how_to/compile_models/from_coreml.py +++ b/gallery/how_to/compile_models/from_coreml.py @@ -34,11 +34,6 @@ https://github.com/apple/coremltools """ -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore import tvm from tvm import te import tvm.relay as relay diff --git a/gallery/how_to/compile_models/from_darknet.py b/gallery/how_to/compile_models/from_darknet.py index 8397efa63b97..ef0a8583777f 100644 --- a/gallery/how_to/compile_models/from_darknet.py +++ b/gallery/how_to/compile_models/from_darknet.py @@ -32,12 +32,6 @@ """ -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore - # numpy and matplotlib import numpy as np import matplotlib.pyplot as plt diff --git a/gallery/how_to/compile_models/from_keras.py b/gallery/how_to/compile_models/from_keras.py index ac961ca16ad0..1159088773a1 100644 --- a/gallery/how_to/compile_models/from_keras.py +++ b/gallery/how_to/compile_models/from_keras.py @@ -35,12 +35,7 @@ https://keras.io/#installation """ -# sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore import tvm from tvm import te import tvm.relay as relay diff --git 
a/gallery/how_to/compile_models/from_mxnet.py b/gallery/how_to/compile_models/from_mxnet.py index cfd66ecdb74c..97b9a6b2dc89 100644 --- a/gallery/how_to/compile_models/from_mxnet.py +++ b/gallery/how_to/compile_models/from_mxnet.py @@ -33,12 +33,7 @@ https://mxnet.apache.org/versions/master/install/index.html """ -# sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore # some standard imports import mxnet as mx import tvm diff --git a/gallery/how_to/compile_models/from_oneflow.py b/gallery/how_to/compile_models/from_oneflow.py index 0925c9fe81ce..036c5c07a59f 100644 --- a/gallery/how_to/compile_models/from_oneflow.py +++ b/gallery/how_to/compile_models/from_oneflow.py @@ -37,12 +37,7 @@ Currently, TVM supports OneFlow 0.7.0. Other versions may be unstable. """ -# sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore import os, math from matplotlib import pyplot as plt import numpy as np diff --git a/gallery/how_to/compile_models/from_onnx.py b/gallery/how_to/compile_models/from_onnx.py index 980091d391bd..c1f9be72c54c 100644 --- a/gallery/how_to/compile_models/from_onnx.py +++ b/gallery/how_to/compile_models/from_onnx.py @@ -32,11 +32,6 @@ https://github.com/onnx/onnx """ -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore import onnx import numpy as np import tvm diff --git a/gallery/how_to/compile_models/from_paddle.py b/gallery/how_to/compile_models/from_paddle.py index 199547b814a4..5e78c8c3b06c 100644 --- a/gallery/how_to/compile_models/from_paddle.py +++ b/gallery/how_to/compile_models/from_paddle.py @@ -31,11 +31,6 @@ https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/pip/linux-pip.html """ -# sphinx_gallery_start_ignore 
-from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore import tarfile import paddle import numpy as np diff --git a/gallery/how_to/compile_models/from_pytorch.py b/gallery/how_to/compile_models/from_pytorch.py index 064ed70e4645..14c264b9f4ac 100644 --- a/gallery/how_to/compile_models/from_pytorch.py +++ b/gallery/how_to/compile_models/from_pytorch.py @@ -41,12 +41,6 @@ be unstable. """ -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore - import tvm from tvm import relay diff --git a/gallery/how_to/compile_models/from_tensorflow.py b/gallery/how_to/compile_models/from_tensorflow.py index b85b9e669a20..741d98109450 100644 --- a/gallery/how_to/compile_models/from_tensorflow.py +++ b/gallery/how_to/compile_models/from_tensorflow.py @@ -29,12 +29,6 @@ Please refer to https://www.tensorflow.org/install """ -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore - # tvm, relay import tvm from tvm import te diff --git a/gallery/how_to/compile_models/from_tflite.py b/gallery/how_to/compile_models/from_tflite.py index a248346c2971..226e67c82e89 100644 --- a/gallery/how_to/compile_models/from_tflite.py +++ b/gallery/how_to/compile_models/from_tflite.py @@ -56,12 +56,6 @@ # Utils for downloading and extracting zip files # ---------------------------------------------- -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore - import os diff --git a/gallery/how_to/deploy_models/deploy_model_on_adreno.py b/gallery/how_to/deploy_models/deploy_model_on_adreno.py index 8d25e50b56b1..c120c5339b62 100644 --- a/gallery/how_to/deploy_models/deploy_model_on_adreno.py +++ b/gallery/how_to/deploy_models/deploy_model_on_adreno.py @@ -120,12 +120,6 @@ # ----------------- # As an example we would use classical cat 
image from ImageNet -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore - from PIL import Image from tvm.contrib.download import download_testdata from matplotlib import pyplot as plt diff --git a/gallery/how_to/deploy_models/deploy_model_on_android.py b/gallery/how_to/deploy_models/deploy_model_on_android.py index 4bf86e2981a1..2e5d916cd6f2 100644 --- a/gallery/how_to/deploy_models/deploy_model_on_android.py +++ b/gallery/how_to/deploy_models/deploy_model_on_android.py @@ -25,11 +25,6 @@ This is an example of using Relay to compile a keras model and deploy it on Android device. """ -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore import os import numpy as np diff --git a/gallery/how_to/deploy_models/deploy_model_on_nano.py b/gallery/how_to/deploy_models/deploy_model_on_nano.py index 3d8a4a796f8c..9e2f6cdc4c1e 100644 --- a/gallery/how_to/deploy_models/deploy_model_on_nano.py +++ b/gallery/how_to/deploy_models/deploy_model_on_nano.py @@ -25,13 +25,7 @@ it on Jetson Nano. """ -# sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore - import tvm from tvm import te import tvm.relay as relay diff --git a/gallery/how_to/deploy_models/deploy_model_on_rasp.py b/gallery/how_to/deploy_models/deploy_model_on_rasp.py index ab5374d93dbf..de4ed9aff074 100644 --- a/gallery/how_to/deploy_models/deploy_model_on_rasp.py +++ b/gallery/how_to/deploy_models/deploy_model_on_rasp.py @@ -26,12 +26,6 @@ it on Raspberry Pi. 
""" -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore - import tvm from tvm import te import tvm.relay as relay diff --git a/gallery/how_to/deploy_models/deploy_object_detection_pytorch.py b/gallery/how_to/deploy_models/deploy_object_detection_pytorch.py index ffde042e2b88..8400e82b4215 100644 --- a/gallery/how_to/deploy_models/deploy_object_detection_pytorch.py +++ b/gallery/how_to/deploy_models/deploy_object_detection_pytorch.py @@ -40,12 +40,6 @@ be unstable. """ -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore - import tvm from tvm import relay from tvm import relay diff --git a/gallery/how_to/deploy_models/deploy_prequantized.py b/gallery/how_to/deploy_models/deploy_prequantized.py index fdb4de289d91..b93ed5e4dacb 100644 --- a/gallery/how_to/deploy_models/deploy_prequantized.py +++ b/gallery/how_to/deploy_models/deploy_prequantized.py @@ -28,11 +28,6 @@ Once loaded, we can run compiled, quantized models on any hardware TVM supports. 
""" -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore ################################################################################# # First, necessary imports diff --git a/gallery/how_to/deploy_models/deploy_prequantized_tflite.py b/gallery/how_to/deploy_models/deploy_prequantized_tflite.py index 494b4a9e219b..2d0e225dce39 100644 --- a/gallery/how_to/deploy_models/deploy_prequantized_tflite.py +++ b/gallery/how_to/deploy_models/deploy_prequantized_tflite.py @@ -42,11 +42,6 @@ """ -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore ############################################################################### # Necessary imports diff --git a/gallery/how_to/deploy_models/deploy_quantized.py b/gallery/how_to/deploy_models/deploy_quantized.py index 24c7ce3331f5..f1b45dd7c158 100644 --- a/gallery/how_to/deploy_models/deploy_quantized.py +++ b/gallery/how_to/deploy_models/deploy_quantized.py @@ -27,11 +27,6 @@ Relay, quantize the Relay model and then perform the inference. """ -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore import tvm from tvm import te diff --git a/gallery/how_to/deploy_models/deploy_sparse.py b/gallery/how_to/deploy_models/deploy_sparse.py index b9a26e0d3053..c90a3b566e7a 100644 --- a/gallery/how_to/deploy_models/deploy_sparse.py +++ b/gallery/how_to/deploy_models/deploy_sparse.py @@ -70,11 +70,6 @@ sparse speed using fake weights to see the benefit of structured sparsity. 
""" -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore ############################################################################### # Load Required Modules diff --git a/gallery/how_to/deploy_models/deploy_ssd_gluoncv.py b/gallery/how_to/deploy_models/deploy_ssd_gluoncv.py index f39244a2eb03..af15a9337c25 100644 --- a/gallery/how_to/deploy_models/deploy_ssd_gluoncv.py +++ b/gallery/how_to/deploy_models/deploy_ssd_gluoncv.py @@ -24,11 +24,6 @@ We will use GluonCV pre-trained SSD model and convert it to Relay IR """ -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore import tvm from tvm import te diff --git a/gallery/how_to/extend_tvm/bring_your_own_datatypes.py b/gallery/how_to/extend_tvm/bring_your_own_datatypes.py index bbd207dbac8b..f5ff89717c1a 100644 --- a/gallery/how_to/extend_tvm/bring_your_own_datatypes.py +++ b/gallery/how_to/extend_tvm/bring_your_own_datatypes.py @@ -52,11 +52,6 @@ ctypes.CDLL('my-datatype-lib.so', ctypes.RTLD_GLOBAL) """ -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore ###################### # A Simple TVM Program diff --git a/gallery/how_to/extend_tvm/low_level_custom_pass.py b/gallery/how_to/extend_tvm/low_level_custom_pass.py index 0f99c72cee9c..50634116ce8e 100644 --- a/gallery/how_to/extend_tvm/low_level_custom_pass.py +++ b/gallery/how_to/extend_tvm/low_level_custom_pass.py @@ -41,11 +41,6 @@ """ -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore import tvm from tvm import te import numpy as np diff --git a/gallery/how_to/extend_tvm/use_pass_infra.py b/gallery/how_to/extend_tvm/use_pass_infra.py index a41a26fc0b1e..f82cf40029d4 100644 --- a/gallery/how_to/extend_tvm/use_pass_infra.py +++ 
b/gallery/how_to/extend_tvm/use_pass_infra.py @@ -40,11 +40,6 @@ The same approach can be used for tir as well. """ -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore import numpy as np import tvm diff --git a/gallery/how_to/extend_tvm/use_pass_instrument.py b/gallery/how_to/extend_tvm/use_pass_instrument.py index 3079e2f0e763..fd965cdf973a 100644 --- a/gallery/how_to/extend_tvm/use_pass_instrument.py +++ b/gallery/how_to/extend_tvm/use_pass_instrument.py @@ -34,11 +34,6 @@ passes. Please also refer to the :ref:`pass-infra`. """ -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore import tvm import tvm.relay as relay from tvm.relay.testing import resnet diff --git a/gallery/how_to/optimize_operators/opt_conv_cuda.py b/gallery/how_to/optimize_operators/opt_conv_cuda.py index 33e5d9855361..bff8cba2be77 100644 --- a/gallery/how_to/optimize_operators/opt_conv_cuda.py +++ b/gallery/how_to/optimize_operators/opt_conv_cuda.py @@ -30,13 +30,7 @@ """ -# sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore - ################################################################ # Preparation and Algorithm # ------------------------- diff --git a/gallery/how_to/optimize_operators/opt_conv_tensorcore.py b/gallery/how_to/optimize_operators/opt_conv_tensorcore.py index 5734f064f0dc..5a2ab716c0e7 100644 --- a/gallery/how_to/optimize_operators/opt_conv_tensorcore.py +++ b/gallery/how_to/optimize_operators/opt_conv_tensorcore.py @@ -51,13 +51,7 @@ # We use stride size 1 and padding size 1 for the convolution. In the example, we use # NHWCnc memory layout.The following code defines the convolution algorithm in TVM. 
-# sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore - import tvm from tvm import te import numpy as np diff --git a/gallery/how_to/optimize_operators/opt_gemm.py b/gallery/how_to/optimize_operators/opt_gemm.py index 249a4e26e918..7ca423281570 100644 --- a/gallery/how_to/optimize_operators/opt_gemm.py +++ b/gallery/how_to/optimize_operators/opt_gemm.py @@ -48,11 +48,6 @@ Intel i7-4770HQ CPU. The cache line size should be 64 bytes for all the x86 CPUs. """ -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore ################################################################################################ # Preparation and Baseline diff --git a/gallery/how_to/tune_with_autoscheduler/tune_conv2d_layer_cuda.py b/gallery/how_to/tune_with_autoscheduler/tune_conv2d_layer_cuda.py index 7964694e68c0..873bb78859f6 100644 --- a/gallery/how_to/tune_with_autoscheduler/tune_conv2d_layer_cuda.py +++ b/gallery/how_to/tune_with_autoscheduler/tune_conv2d_layer_cuda.py @@ -37,13 +37,7 @@ __name__ == "__main__":` block. """ -# sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore - import os import numpy as np diff --git a/gallery/how_to/tune_with_autoscheduler/tune_network_arm.py b/gallery/how_to/tune_with_autoscheduler/tune_network_arm.py index 09a1d0cea520..a109acba0695 100644 --- a/gallery/how_to/tune_with_autoscheduler/tune_network_arm.py +++ b/gallery/how_to/tune_with_autoscheduler/tune_network_arm.py @@ -46,11 +46,6 @@ __name__ == "__main__":` block. 
""" -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore import numpy as np import os diff --git a/gallery/how_to/tune_with_autoscheduler/tune_network_cuda.py b/gallery/how_to/tune_with_autoscheduler/tune_network_cuda.py index a430411fd9ee..670996410359 100644 --- a/gallery/how_to/tune_with_autoscheduler/tune_network_cuda.py +++ b/gallery/how_to/tune_with_autoscheduler/tune_network_cuda.py @@ -44,11 +44,6 @@ __name__ == "__main__":` block. """ -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore import numpy as np diff --git a/gallery/how_to/tune_with_autoscheduler/tune_network_mali.py b/gallery/how_to/tune_with_autoscheduler/tune_network_mali.py index 8ac0b235d72e..e72e261e4bc2 100644 --- a/gallery/how_to/tune_with_autoscheduler/tune_network_mali.py +++ b/gallery/how_to/tune_with_autoscheduler/tune_network_mali.py @@ -44,11 +44,6 @@ __name__ == "__main__":` block. """ -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore import numpy as np diff --git a/gallery/how_to/tune_with_autoscheduler/tune_network_x86.py b/gallery/how_to/tune_with_autoscheduler/tune_network_x86.py index f8caba075de3..6eb1b79bfe0a 100644 --- a/gallery/how_to/tune_with_autoscheduler/tune_network_x86.py +++ b/gallery/how_to/tune_with_autoscheduler/tune_network_x86.py @@ -45,11 +45,6 @@ __name__ == "__main__":` block. 
""" -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore import numpy as np diff --git a/gallery/how_to/tune_with_autoscheduler/tune_sparse_x86.py b/gallery/how_to/tune_with_autoscheduler/tune_sparse_x86.py index 0a2ddbd1bd81..3d810b25feb2 100644 --- a/gallery/how_to/tune_with_autoscheduler/tune_sparse_x86.py +++ b/gallery/how_to/tune_with_autoscheduler/tune_sparse_x86.py @@ -35,11 +35,6 @@ __name__ == "__main__":` block. """ -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore import os diff --git a/gallery/how_to/tune_with_autotvm/tune_conv2d_cuda.py b/gallery/how_to/tune_with_autotvm/tune_conv2d_cuda.py index a73b97525f12..784749705b05 100644 --- a/gallery/how_to/tune_with_autotvm/tune_conv2d_cuda.py +++ b/gallery/how_to/tune_with_autotvm/tune_conv2d_cuda.py @@ -48,13 +48,7 @@ # # Now return to python code. Import packages. -# sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore - import logging import sys import numpy as np diff --git a/gallery/how_to/tune_with_autotvm/tune_relay_arm.py b/gallery/how_to/tune_with_autotvm/tune_relay_arm.py index a8f66d9b08a1..0cb02c036fd7 100644 --- a/gallery/how_to/tune_with_autotvm/tune_relay_arm.py +++ b/gallery/how_to/tune_with_autotvm/tune_relay_arm.py @@ -62,11 +62,6 @@ # # Now return to python code. Import packages. 
-# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore import os diff --git a/gallery/how_to/tune_with_autotvm/tune_relay_cuda.py b/gallery/how_to/tune_with_autotvm/tune_relay_cuda.py index 7cb6cb8dd3f9..6ef71956ef4b 100644 --- a/gallery/how_to/tune_with_autotvm/tune_relay_cuda.py +++ b/gallery/how_to/tune_with_autotvm/tune_relay_cuda.py @@ -59,13 +59,7 @@ # # Now return to python code. Import packages. -# sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore - import os import numpy as np diff --git a/gallery/how_to/tune_with_autotvm/tune_relay_mobile_gpu.py b/gallery/how_to/tune_with_autotvm/tune_relay_mobile_gpu.py index d73e46448b7d..dd0a3a9837ac 100644 --- a/gallery/how_to/tune_with_autotvm/tune_relay_mobile_gpu.py +++ b/gallery/how_to/tune_with_autotvm/tune_relay_mobile_gpu.py @@ -60,11 +60,6 @@ # # Now return to python code. Import packages. -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore import os diff --git a/gallery/how_to/tune_with_autotvm/tune_relay_x86.py b/gallery/how_to/tune_with_autotvm/tune_relay_x86.py index 2ba597d1da19..a44c30bb89f9 100644 --- a/gallery/how_to/tune_with_autotvm/tune_relay_x86.py +++ b/gallery/how_to/tune_with_autotvm/tune_relay_x86.py @@ -29,11 +29,6 @@ __name__ == "__main__":` block. """ -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore import os import numpy as np diff --git a/gallery/how_to/work_with_microtvm/micro_aot.py b/gallery/how_to/work_with_microtvm/micro_aot.py index 8646b6d7ecfa..81109b2965ef 100644 --- a/gallery/how_to/work_with_microtvm/micro_aot.py +++ b/gallery/how_to/work_with_microtvm/micro_aot.py @@ -35,11 +35,6 @@ # .. 
include:: ../../../../gallery/how_to/work_with_microtvm/install_dependencies.rst # -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore import os diff --git a/gallery/how_to/work_with_microtvm/micro_autotune.py b/gallery/how_to/work_with_microtvm/micro_autotune.py index 3dd4cab6c9af..9edb9ae75e7f 100644 --- a/gallery/how_to/work_with_microtvm/micro_autotune.py +++ b/gallery/how_to/work_with_microtvm/micro_autotune.py @@ -32,11 +32,6 @@ # .. include:: ../../../../gallery/how_to/work_with_microtvm/install_dependencies.rst # -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore # You can skip the following two sections (installing Zephyr and CMSIS-NN) if the following flag is False. # Installing Zephyr takes ~20 min. diff --git a/gallery/how_to/work_with_microtvm/micro_ethosu.py b/gallery/how_to/work_with_microtvm/micro_ethosu.py index e80860dc0ce6..f257507bb5a5 100644 --- a/gallery/how_to/work_with_microtvm/micro_ethosu.py +++ b/gallery/how_to/work_with_microtvm/micro_ethosu.py @@ -37,11 +37,6 @@ TVM to offload operators to the Ethos(TM)-U55 where possible. """ -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore ################################################################################ # Obtaining TVM diff --git a/gallery/how_to/work_with_microtvm/micro_pytorch.py b/gallery/how_to/work_with_microtvm/micro_pytorch.py index f7f0c9209a87..370e4d7e804b 100644 --- a/gallery/how_to/work_with_microtvm/micro_pytorch.py +++ b/gallery/how_to/work_with_microtvm/micro_pytorch.py @@ -34,11 +34,6 @@ # .. 
include:: ../../../../gallery/how_to/work_with_microtvm/install_dependencies.rst # -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore import pathlib import torch diff --git a/gallery/how_to/work_with_microtvm/micro_reference_vm.py b/gallery/how_to/work_with_microtvm/micro_reference_vm.py index 80ab0edf8fae..3121bca353a5 100644 --- a/gallery/how_to/work_with_microtvm/micro_reference_vm.py +++ b/gallery/how_to/work_with_microtvm/micro_reference_vm.py @@ -157,9 +157,3 @@ """ - -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore diff --git a/gallery/how_to/work_with_microtvm/micro_tflite.py b/gallery/how_to/work_with_microtvm/micro_tflite.py index cbdf6cd6f4ca..86e5d6b4b1ae 100644 --- a/gallery/how_to/work_with_microtvm/micro_tflite.py +++ b/gallery/how_to/work_with_microtvm/micro_tflite.py @@ -30,11 +30,6 @@ # .. include:: ../../../../gallery/how_to/work_with_microtvm/install_dependencies.rst # -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore import os diff --git a/gallery/how_to/work_with_pytorch/using_as_torch.py b/gallery/how_to/work_with_pytorch/using_as_torch.py index e2351a0d7c65..46bd2df98076 100644 --- a/gallery/how_to/work_with_pytorch/using_as_torch.py +++ b/gallery/how_to/work_with_pytorch/using_as_torch.py @@ -32,12 +32,6 @@ """ -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore - # Import PyTorch, as well as necessary libraries import torch import torch.nn.functional as F diff --git a/gallery/how_to/work_with_pytorch/using_optimized_torch.py b/gallery/how_to/work_with_pytorch/using_optimized_torch.py index baf80541b964..7a8b3d89843a 100644 --- a/gallery/how_to/work_with_pytorch/using_optimized_torch.py +++ 
b/gallery/how_to/work_with_pytorch/using_optimized_torch.py @@ -31,13 +31,7 @@ """ -# sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore - # Import PyTorch import torch import torch.nn as nn diff --git a/gallery/how_to/work_with_relay/build_gcn.py b/gallery/how_to/work_with_relay/build_gcn.py index e6106dd95b84..16a87fb0f15f 100644 --- a/gallery/how_to/work_with_relay/build_gcn.py +++ b/gallery/how_to/work_with_relay/build_gcn.py @@ -125,11 +125,6 @@ def evaluate(data, logits): dimension of model output (Number of classes) """ -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore dataset = "cora" g, data = load_dataset(dataset) diff --git a/gallery/how_to/work_with_relay/using_external_lib.py b/gallery/how_to/work_with_relay/using_external_lib.py index c018ee13c724..38f5b2d460ba 100644 --- a/gallery/how_to/work_with_relay/using_external_lib.py +++ b/gallery/how_to/work_with_relay/using_external_lib.py @@ -32,11 +32,6 @@ To begin with, we import Relay and TVM. """ -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore import tvm from tvm import te import numpy as np diff --git a/gallery/how_to/work_with_relay/using_pipeline_executor.py b/gallery/how_to/work_with_relay/using_pipeline_executor.py index 87516d656d70..8f6136865607 100755 --- a/gallery/how_to/work_with_relay/using_pipeline_executor.py +++ b/gallery/how_to/work_with_relay/using_pipeline_executor.py @@ -107,11 +107,6 @@ def @main(%data_n_0: Tensor[(1, 16, 8, 8), float16] /* ty=Tensor[(1, 16, 8, 8), """ -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore ######################################### # Build the subgraph with cutlass target. 
@@ -188,11 +183,6 @@ def cutlass_build(mod, target, params=None, target_host=None, mod_name="default" |mod0.output(0)-> mod1.data_n_0 """ -# sphinx_gallery_start_ignore -from tvm import testing - -# testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore ############################## # Build the pipeline executor. # ---------------------------- diff --git a/gallery/how_to/work_with_relay/using_relay_viz.py b/gallery/how_to/work_with_relay/using_relay_viz.py index ae22fe20e1f2..ce874ca48508 100644 --- a/gallery/how_to/work_with_relay/using_relay_viz.py +++ b/gallery/how_to/work_with_relay/using_relay_viz.py @@ -43,11 +43,6 @@ For more details, please refer to :py:mod:`tvm.contrib.relay_viz`. """ -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore from typing import ( Dict, Union, diff --git a/gallery/how_to/work_with_schedules/extern_op.py b/gallery/how_to/work_with_schedules/extern_op.py index ad741a08d54c..9026eb016c56 100644 --- a/gallery/how_to/work_with_schedules/extern_op.py +++ b/gallery/how_to/work_with_schedules/extern_op.py @@ -32,11 +32,6 @@ from __future__ import absolute_import, print_function -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore import tvm from tvm import te import numpy as np diff --git a/gallery/how_to/work_with_schedules/intrin_math.py b/gallery/how_to/work_with_schedules/intrin_math.py index 5a8732abd776..5a35ae1cbd8e 100644 --- a/gallery/how_to/work_with_schedules/intrin_math.py +++ b/gallery/how_to/work_with_schedules/intrin_math.py @@ -30,12 +30,7 @@ """ from __future__ import absolute_import, print_function - -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignoreimport numpy as np +import numpy as np import tvm from tvm import te diff --git 
a/gallery/how_to/work_with_schedules/reduction.py b/gallery/how_to/work_with_schedules/reduction.py index c084c45d3839..4e71731a4e1b 100644 --- a/gallery/how_to/work_with_schedules/reduction.py +++ b/gallery/how_to/work_with_schedules/reduction.py @@ -28,12 +28,7 @@ from __future__ import absolute_import, print_function -# sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore import tvm import tvm.testing from tvm import te diff --git a/gallery/how_to/work_with_schedules/scan.py b/gallery/how_to/work_with_schedules/scan.py index d523d5b9959d..c19e21ff1e72 100644 --- a/gallery/how_to/work_with_schedules/scan.py +++ b/gallery/how_to/work_with_schedules/scan.py @@ -25,12 +25,7 @@ from __future__ import absolute_import, print_function -# sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore import tvm import tvm.testing from tvm import te diff --git a/gallery/how_to/work_with_schedules/schedule_primitives.py b/gallery/how_to/work_with_schedules/schedule_primitives.py index af67ed1527a0..a5c542df548b 100644 --- a/gallery/how_to/work_with_schedules/schedule_primitives.py +++ b/gallery/how_to/work_with_schedules/schedule_primitives.py @@ -29,11 +29,6 @@ from __future__ import absolute_import, print_function -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore import tvm from tvm import te import numpy as np diff --git a/gallery/how_to/work_with_schedules/tedd.py b/gallery/how_to/work_with_schedules/tedd.py index 7cb24f433587..7d7f8f149002 100644 --- a/gallery/how_to/work_with_schedules/tedd.py +++ b/gallery/how_to/work_with_schedules/tedd.py @@ -38,11 +38,6 @@ """ -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# 
sphinx_gallery_end_ignore import tvm from tvm import te from tvm import topi diff --git a/gallery/how_to/work_with_schedules/tensorize.py b/gallery/how_to/work_with_schedules/tensorize.py index 45eaf349f37b..63ba8299033c 100644 --- a/gallery/how_to/work_with_schedules/tensorize.py +++ b/gallery/how_to/work_with_schedules/tensorize.py @@ -35,11 +35,6 @@ from __future__ import absolute_import, print_function -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore import tvm from tvm import te import tvm.testing diff --git a/gallery/how_to/work_with_schedules/tuple_inputs.py b/gallery/how_to/work_with_schedules/tuple_inputs.py index 86ec8b2d196b..edf82ddca75b 100644 --- a/gallery/how_to/work_with_schedules/tuple_inputs.py +++ b/gallery/how_to/work_with_schedules/tuple_inputs.py @@ -28,11 +28,6 @@ from __future__ import absolute_import, print_function -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore import tvm from tvm import te import numpy as np diff --git a/gallery/tutorial/auto_scheduler_matmul_x86.py b/gallery/tutorial/auto_scheduler_matmul_x86.py index 98fd95c33878..14f8040bf851 100644 --- a/gallery/tutorial/auto_scheduler_matmul_x86.py +++ b/gallery/tutorial/auto_scheduler_matmul_x86.py @@ -38,11 +38,6 @@ __name__ == "__main__":` block. """ -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore import numpy as np import tvm diff --git a/gallery/tutorial/autotvm_matmul_x86.py b/gallery/tutorial/autotvm_matmul_x86.py index f074c454bde4..a2e355c8ca8f 100644 --- a/gallery/tutorial/autotvm_matmul_x86.py +++ b/gallery/tutorial/autotvm_matmul_x86.py @@ -64,11 +64,6 @@ # # Now return to python code. Begin by importing the required packages. 
-# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore import logging import sys diff --git a/gallery/tutorial/autotvm_relay_x86.py b/gallery/tutorial/autotvm_relay_x86.py index b7dfbe28f462..b7e9cebb5d6a 100644 --- a/gallery/tutorial/autotvm_relay_x86.py +++ b/gallery/tutorial/autotvm_relay_x86.py @@ -42,11 +42,6 @@ how to use them through the Python API. """ -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore ################################################################################ # TVM is a deep learning compiler framework, with a number of different modules diff --git a/gallery/tutorial/cross_compilation_and_rpc.py b/gallery/tutorial/cross_compilation_and_rpc.py index feab28fa11c1..c7e302693de7 100644 --- a/gallery/tutorial/cross_compilation_and_rpc.py +++ b/gallery/tutorial/cross_compilation_and_rpc.py @@ -93,11 +93,6 @@ # # Here we will declare a simple kernel on the local machine: -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore import numpy as np diff --git a/gallery/tutorial/install.py b/gallery/tutorial/install.py index b864dbfa85f4..0eb3ccc94c06 100644 --- a/gallery/tutorial/install.py +++ b/gallery/tutorial/install.py @@ -48,9 +48,3 @@ # Check out `TLCPack `_ to learn more. Note that the # third party binary packages could contain additional licensing terms for # the hardware drivers that are bundled with it. 
- -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore diff --git a/gallery/tutorial/intro_topi.py b/gallery/tutorial/intro_topi.py index f2a4db608646..6647dd190e20 100644 --- a/gallery/tutorial/intro_topi.py +++ b/gallery/tutorial/intro_topi.py @@ -26,13 +26,7 @@ In this tutorial, we will see how TOPI can save us from writing boilerplate code in TVM. """ -# sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore - import tvm import tvm.testing from tvm import te diff --git a/gallery/tutorial/introduction.py b/gallery/tutorial/introduction.py index 908a8e52c751..8d1f0e2699b2 100644 --- a/gallery/tutorial/introduction.py +++ b/gallery/tutorial/introduction.py @@ -45,11 +45,6 @@ #. :doc:`Compiling Deep Learning Models for GPUs ` """ -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore ################################################################################ # An Overview of TVM and Model Optimization diff --git a/gallery/tutorial/relay_quick_start.py b/gallery/tutorial/relay_quick_start.py index e59f0107f943..830b30232e9b 100644 --- a/gallery/tutorial/relay_quick_start.py +++ b/gallery/tutorial/relay_quick_start.py @@ -26,13 +26,7 @@ Notice that you need to build TVM with cuda and llvm enabled. 
""" -# sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore - ###################################################################### # Overview for Supported Hardware Backend of TVM # ---------------------------------------------- diff --git a/gallery/tutorial/tensor_expr_get_started.py b/gallery/tutorial/tensor_expr_get_started.py index 11186d2f1458..ba7e0c027023 100644 --- a/gallery/tutorial/tensor_expr_get_started.py +++ b/gallery/tutorial/tensor_expr_get_started.py @@ -39,11 +39,6 @@ features of TVM. """ -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore ################################################################################ # Example 1: Writing and Scheduling Vector Addition in TE for CPU diff --git a/gallery/tutorial/tensor_ir_blitz_course.py b/gallery/tutorial/tensor_ir_blitz_course.py index dc75a3fb9452..a46bd77803d9 100644 --- a/gallery/tutorial/tensor_ir_blitz_course.py +++ b/gallery/tutorial/tensor_ir_blitz_course.py @@ -29,13 +29,7 @@ """ -# sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore - import tvm from tvm.ir.module import IRModule from tvm.script import tir as T diff --git a/gallery/tutorial/tvmc_command_line_driver.py b/gallery/tutorial/tvmc_command_line_driver.py index 3f4413e848ce..27302b721bc1 100644 --- a/gallery/tutorial/tvmc_command_line_driver.py +++ b/gallery/tutorial/tvmc_command_line_driver.py @@ -41,11 +41,6 @@ capabilities, and set the stage for understanding how TVM works. 
""" -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore ################################################################################ # Using TVMC diff --git a/gallery/tutorial/tvmc_python.py b/gallery/tutorial/tvmc_python.py index 417f8ad88747..a92c3af626f0 100644 --- a/gallery/tutorial/tvmc_python.py +++ b/gallery/tutorial/tvmc_python.py @@ -36,11 +36,6 @@ Let's start editing the python file in your favorite text editor. """ -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore ################################################################################ # Step 0: Imports diff --git a/gallery/tutorial/uma.py b/gallery/tutorial/uma.py index ea38813a7ace..5380aa116fcb 100644 --- a/gallery/tutorial/uma.py +++ b/gallery/tutorial/uma.py @@ -41,12 +41,6 @@ # integrated into TVM using UMA. # -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3) -# sphinx_gallery_end_ignore - ###################################################################### # Vanilla diff --git a/tests/lint/check_request_hook.py b/tests/lint/check_request_hook.py deleted file mode 100644 index 925af5597c12..000000000000 --- a/tests/lint/check_request_hook.py +++ /dev/null @@ -1,161 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -import argparse -import fnmatch -import re -from pathlib import Path -from typing import List, Optional - - -REPO_ROOT = Path(__file__).resolve().parent.parent.parent - -EXPECTED_HOOK = """ -# sphinx_gallery_start_ignore -from tvm import testing - -testing.utils.install_request_hook(depth=3)\ -# sphinx_gallery_end_ignore -""" - -# Extra sphinx-gallery config options may be passed inside the ignore block before the hook. This -# is a workaround that can be removed once sphinx-gallery #1059 merges and the version is updated. -EXPECTED_REGEX = re.compile( - r""" -\# sphinx_gallery_start_ignore -(?:.*\n)*from tvm import testing - -testing\.utils\.install_request_hook\(depth=3\)\ -\# sphinx_gallery_end_ignore -""".rstrip(), - re.MULTILINE, -) -IGNORE_PATTERNS = ["*/micro_tvmc.py", "*/micro_train.py"] -APACHE_HEADER_LINES = 16 - - -def find_code_block_line(lines: List[str]) -> Optional[int]: - """ - This returns the index in 'lines' of the first line of code in the tutorial - or none if there are no code blocks. 
- """ - in_multiline_string = False - in_sphinx_directive = False - - i = 0 - lines = lines[APACHE_HEADER_LINES:] - while i < len(lines): - line = lines[i].strip() - if '"""' in line: - in_multiline_string = not in_multiline_string - elif "# sphinx_gallery_" in line: - in_sphinx_directive = not in_sphinx_directive - elif line.startswith("#") or in_sphinx_directive or in_multiline_string or line == "": - pass - else: - return i - i += 1 - - return None - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="Check that all tutorials/docs override urllib.request.Request" - ) - parser.add_argument( - "--fix", action="store_true", help="Insert expected code into erroring files" - ) - args = parser.parse_args() - - gallery_files = (REPO_ROOT / "gallery").glob("**/*.py") - # gallery_files = [x for x in gallery_files if "cross_compi" in str(x)] - - errors = [] - for file in gallery_files: - skip = False - for ignored_file in IGNORE_PATTERNS: - if fnmatch.fnmatch(str(file), ignored_file): - skip = True - break - if skip: - continue - - with open(file) as f: - content = f.read() - - regex_match = EXPECTED_REGEX.search(content) - if not regex_match: - errors.append((file, None)) - continue - - line = content.count("\n", 0, regex_match.end()) + 2 - expected = find_code_block_line(content.split("\n")) - if expected is not None and line < expected: - errors.append((file, (line, expected))) - - if args.fix: - for error, line_info in errors: - with open(error) as f: - content = f.read() - - # Note: There must be a little bit of care taken here since inserting - # the block between a comment and multiline string will lead to an - # empty code block in the HTML output - if "from __future__" in content: - # Place after the last __future__ import - new_content = re.sub( - r"((?:from __future__.*?\n)+)", r"\1\n" + EXPECTED_HOOK, content, flags=re.M - ) - else: - # Place in the first codeblock - lines = content.split("\n") - position = 
find_code_block_line(lines) - if position is None: - new_content = "\n".join(lines) + EXPECTED_HOOK + "\n" - else: - print(position) - new_content = ( - "\n".join(lines[:position]) - + EXPECTED_HOOK - + "\n\n" - + "\n".join(lines[position:]) - ) - - with open(error, "w") as f: - f.write(new_content) - else: - # Don't fix, just check and print an error message - if len(errors) > 0: - print( - f"These {len(errors)} file(s) did not contain the expected text to " - "override urllib.request.Request, it was at the wrong position, or " - "the whitespace is incorrect.\n" - "You can run 'python3 tests/lint/check_request_hook.py --fix' to " - "automatically fix these errors:\n" - f"{EXPECTED_HOOK}\n\nFiles:" - ) - for file, line_info in errors: - if line_info is None: - print(f"{file} (missing hook)") - else: - actual, expected = line_info - print(f"{file} (misplaced hook at {actual}, expected at {expected})") - exit(1) - else: - print("All files successfully override urllib.request.Request") - exit(0) diff --git a/tests/scripts/task_lint.sh b/tests/scripts/task_lint.sh index f71cb0f60243..83ea86ecccb8 100755 --- a/tests/scripts/task_lint.sh +++ b/tests/scripts/task_lint.sh @@ -40,9 +40,6 @@ function shard1 { echo "Checking CMake <-> LibInfo options mirroring" python3 tests/lint/check_cmake_options.py - echo "Checking that all sphinx-gallery docs override urllib.request.Request" - python3 tests/lint/check_request_hook.py - echo "black check..." 
tests/lint/git-black.sh From 8138731e64f8d8057015e4be04e58e5482475add Mon Sep 17 00:00:00 2001 From: driazati Date: Wed, 11 Jan 2023 12:47:47 -0800 Subject: [PATCH 2/3] Add ignore wrapper to requires_cuda --- docs/conf.py | 2 ++ gallery/how_to/compile_models/from_keras.py | 2 ++ gallery/how_to/compile_models/from_mxnet.py | 4 +++- gallery/how_to/compile_models/from_oneflow.py | 2 ++ .../how_to/deploy_models/deploy_model_on_nano.py | 2 ++ .../how_to/optimize_operators/opt_conv_cuda.py | 4 +++- .../optimize_operators/opt_conv_tensorcore.py | 2 ++ .../tune_conv2d_layer_cuda.py | 2 ++ .../how_to/tune_with_autotvm/tune_conv2d_cuda.py | 2 ++ .../how_to/tune_with_autotvm/tune_relay_cuda.py | 2 ++ gallery/how_to/work_with_pytorch/README.txt | 6 ++++++ .../how_to/work_with_pytorch/using_as_torch.py | 6 +++--- .../work_with_pytorch/using_optimized_torch.py | 16 +++++++++------- gallery/how_to/work_with_schedules/reduction.py | 2 ++ gallery/how_to/work_with_schedules/scan.py | 2 ++ gallery/tutorial/intro_topi.py | 2 ++ gallery/tutorial/relay_quick_start.py | 4 +++- gallery/tutorial/tensor_ir_blitz_course.py | 2 ++ 18 files changed, 51 insertions(+), 13 deletions(-) create mode 100644 gallery/how_to/work_with_pytorch/README.txt diff --git a/docs/conf.py b/docs/conf.py index 08fbedb8ffca..5189143b1250 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -418,6 +418,7 @@ def jupyter_notebook(script_blocks, gallery_conf, target_dir, real_func): tvm_path.joinpath("gallery", "how_to", "tune_with_autoscheduler"), tvm_path.joinpath("gallery", "how_to", "work_with_microtvm"), tvm_path.joinpath("gallery", "how_to", "extend_tvm"), + tvm_path.joinpath("gallery", "how_to", "work_with_pytorch"), tvm_path.joinpath("vta", "tutorials"), ] @@ -432,6 +433,7 @@ def jupyter_notebook(script_blocks, gallery_conf, target_dir, real_func): "how_to/tune_with_autoscheduler", "how_to/work_with_microtvm", "how_to/extend_tvm", + "how_to/work_with_pytorch", "topic/vta/tutorials", ] diff --git 
a/gallery/how_to/compile_models/from_keras.py b/gallery/how_to/compile_models/from_keras.py index 1159088773a1..3da674c25086 100644 --- a/gallery/how_to/compile_models/from_keras.py +++ b/gallery/how_to/compile_models/from_keras.py @@ -35,7 +35,9 @@ https://keras.io/#installation """ +# sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True +# sphinx_gallery_end_ignore import tvm from tvm import te import tvm.relay as relay diff --git a/gallery/how_to/compile_models/from_mxnet.py b/gallery/how_to/compile_models/from_mxnet.py index 97b9a6b2dc89..0694d2aed081 100644 --- a/gallery/how_to/compile_models/from_mxnet.py +++ b/gallery/how_to/compile_models/from_mxnet.py @@ -33,8 +33,10 @@ https://mxnet.apache.org/versions/master/install/index.html """ -# sphinx_gallery_requires_cuda = True # some standard imports +# sphinx_gallery_start_ignore +# sphinx_gallery_requires_cuda = True +# sphinx_gallery_end_ignore import mxnet as mx import tvm import tvm.relay as relay diff --git a/gallery/how_to/compile_models/from_oneflow.py b/gallery/how_to/compile_models/from_oneflow.py index 036c5c07a59f..64f659316bc4 100644 --- a/gallery/how_to/compile_models/from_oneflow.py +++ b/gallery/how_to/compile_models/from_oneflow.py @@ -37,7 +37,9 @@ Currently, TVM supports OneFlow 0.7.0. Other versions may be unstable. """ +# sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True +# sphinx_gallery_end_ignore import os, math from matplotlib import pyplot as plt import numpy as np diff --git a/gallery/how_to/deploy_models/deploy_model_on_nano.py b/gallery/how_to/deploy_models/deploy_model_on_nano.py index 9e2f6cdc4c1e..abd0b3fab61d 100644 --- a/gallery/how_to/deploy_models/deploy_model_on_nano.py +++ b/gallery/how_to/deploy_models/deploy_model_on_nano.py @@ -25,7 +25,9 @@ it on Jetson Nano. 
""" +# sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True +# sphinx_gallery_end_ignore import tvm from tvm import te import tvm.relay as relay diff --git a/gallery/how_to/optimize_operators/opt_conv_cuda.py b/gallery/how_to/optimize_operators/opt_conv_cuda.py index bff8cba2be77..1ab38450f5c4 100644 --- a/gallery/how_to/optimize_operators/opt_conv_cuda.py +++ b/gallery/how_to/optimize_operators/opt_conv_cuda.py @@ -30,7 +30,6 @@ """ -# sphinx_gallery_requires_cuda = True ################################################################ # Preparation and Algorithm # ------------------------- @@ -41,6 +40,9 @@ # convolution. The following code defines the convolution algorithm in TVM. # +# sphinx_gallery_start_ignore +# sphinx_gallery_requires_cuda = True +# sphinx_gallery_end_ignore import numpy as np import tvm from tvm import te diff --git a/gallery/how_to/optimize_operators/opt_conv_tensorcore.py b/gallery/how_to/optimize_operators/opt_conv_tensorcore.py index 5a2ab716c0e7..b43fac913956 100644 --- a/gallery/how_to/optimize_operators/opt_conv_tensorcore.py +++ b/gallery/how_to/optimize_operators/opt_conv_tensorcore.py @@ -51,7 +51,9 @@ # We use stride size 1 and padding size 1 for the convolution. In the example, we use # NHWCnc memory layout.The following code defines the convolution algorithm in TVM. +# sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True +# sphinx_gallery_end_ignore import tvm from tvm import te import numpy as np diff --git a/gallery/how_to/tune_with_autoscheduler/tune_conv2d_layer_cuda.py b/gallery/how_to/tune_with_autoscheduler/tune_conv2d_layer_cuda.py index 873bb78859f6..ea03869c8c72 100644 --- a/gallery/how_to/tune_with_autoscheduler/tune_conv2d_layer_cuda.py +++ b/gallery/how_to/tune_with_autoscheduler/tune_conv2d_layer_cuda.py @@ -37,7 +37,9 @@ __name__ == "__main__":` block. 
""" +# sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True +# sphinx_gallery_end_ignore import os import numpy as np diff --git a/gallery/how_to/tune_with_autotvm/tune_conv2d_cuda.py b/gallery/how_to/tune_with_autotvm/tune_conv2d_cuda.py index 784749705b05..d7047a0afbcc 100644 --- a/gallery/how_to/tune_with_autotvm/tune_conv2d_cuda.py +++ b/gallery/how_to/tune_with_autotvm/tune_conv2d_cuda.py @@ -48,7 +48,9 @@ # # Now return to python code. Import packages. +# sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True +# sphinx_gallery_end_ignore import logging import sys import numpy as np diff --git a/gallery/how_to/tune_with_autotvm/tune_relay_cuda.py b/gallery/how_to/tune_with_autotvm/tune_relay_cuda.py index 6ef71956ef4b..ee0a83ab8eb8 100644 --- a/gallery/how_to/tune_with_autotvm/tune_relay_cuda.py +++ b/gallery/how_to/tune_with_autotvm/tune_relay_cuda.py @@ -59,7 +59,9 @@ # # Now return to python code. Import packages. +# sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True +# sphinx_gallery_end_ignore import os import numpy as np diff --git a/gallery/how_to/work_with_pytorch/README.txt b/gallery/how_to/work_with_pytorch/README.txt new file mode 100644 index 000000000000..ba80e1f58798 --- /dev/null +++ b/gallery/how_to/work_with_pytorch/README.txt @@ -0,0 +1,6 @@ +.. _tutorial-work-with-pytorch: + +Work With PyTorch +================= + +These tutorials demonstrate examples of integrating PyTorch and TVM. diff --git a/gallery/how_to/work_with_pytorch/using_as_torch.py b/gallery/how_to/work_with_pytorch/using_as_torch.py index 46bd2df98076..59c7f88845d9 100644 --- a/gallery/how_to/work_with_pytorch/using_as_torch.py +++ b/gallery/how_to/work_with_pytorch/using_as_torch.py @@ -16,7 +16,7 @@ # under the License. 
""" Wrap Your TVMScript as PyTorch Module -====================== +===================================== **Author**: `Yaoda Zhou `_ @@ -43,7 +43,7 @@ ###################################################################### # Write your own PyTorch operator by TVMScript -# ------------------------------- +# -------------------------------------------- # PyTorch is a very popular machine learning framework which contains # optimized implementations of most commonly used operators. # Nevertheless, sometimes you might want to write your own operators in PyTorch. @@ -124,7 +124,7 @@ def tvm_depthwise( ###################################################################### # Benchmark -# ------------------------------- +# --------- results = [] for i in range(5): diff --git a/gallery/how_to/work_with_pytorch/using_optimized_torch.py b/gallery/how_to/work_with_pytorch/using_optimized_torch.py index 7a8b3d89843a..0feafad7c3c3 100644 --- a/gallery/how_to/work_with_pytorch/using_optimized_torch.py +++ b/gallery/how_to/work_with_pytorch/using_optimized_torch.py @@ -31,8 +31,10 @@ """ -# sphinx_gallery_requires_cuda = True # Import PyTorch +# sphinx_gallery_start_ignore +# sphinx_gallery_requires_cuda = True +# sphinx_gallery_end_ignore import torch import torch.nn as nn import torch.nn.functional as F @@ -47,7 +49,7 @@ ###################################################################### # Define a simple module written by PyTorch -# ------------------------------ +# ----------------------------------------- class SimpleModel(nn.Module): @@ -63,7 +65,7 @@ def forward(self, x): ###################################################################### # Optimize SimpleModel by TVM MetaSchedule -# ------------------------------ +# ---------------------------------------- # We provide the `optimize_torch` function, which has the similar usage as `torch.jit.trace`. # The PyTorch model to optimize, along with its example input, are provided by users. 
# The PyTorch module will be tuned by TVM for the target hardware. @@ -75,7 +77,7 @@ def forward(self, x): ###################################################################### # Save/Load module -# ------------------------------ +# ---------------- # We can save and load our tuned module like the standard `nn.Module`. # Let us run our tuned module. @@ -98,7 +100,7 @@ def forward(self, x): ###################################################################### # Optimize resnet18 -# ------------------------------ +# ----------------- # In the following, we will show that our approach is able to # accelerate common models, such as resnet18. @@ -117,8 +119,8 @@ def forward(self, x): ###################################################################### -# Compare the performance between two approaches. -# ------------------------------ +# Compare the performance between two approaches +# ---------------------------------------------- results = [] for i in range(5): diff --git a/gallery/how_to/work_with_schedules/reduction.py b/gallery/how_to/work_with_schedules/reduction.py index 4e71731a4e1b..72c8d691a9e0 100644 --- a/gallery/how_to/work_with_schedules/reduction.py +++ b/gallery/how_to/work_with_schedules/reduction.py @@ -28,7 +28,9 @@ from __future__ import absolute_import, print_function +# sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True +# sphinx_gallery_end_ignore import tvm import tvm.testing from tvm import te diff --git a/gallery/how_to/work_with_schedules/scan.py b/gallery/how_to/work_with_schedules/scan.py index c19e21ff1e72..4c5ce94e0121 100644 --- a/gallery/how_to/work_with_schedules/scan.py +++ b/gallery/how_to/work_with_schedules/scan.py @@ -25,7 +25,9 @@ from __future__ import absolute_import, print_function +# sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True +# sphinx_gallery_end_ignore import tvm import tvm.testing from tvm import te diff --git a/gallery/tutorial/intro_topi.py b/gallery/tutorial/intro_topi.py index 
6647dd190e20..cfebc36b8128 100644 --- a/gallery/tutorial/intro_topi.py +++ b/gallery/tutorial/intro_topi.py @@ -26,7 +26,9 @@ In this tutorial, we will see how TOPI can save us from writing boilerplate code in TVM. """ +# sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True +# sphinx_gallery_end_ignore import tvm import tvm.testing from tvm import te diff --git a/gallery/tutorial/relay_quick_start.py b/gallery/tutorial/relay_quick_start.py index 830b30232e9b..0cbe35b3e075 100644 --- a/gallery/tutorial/relay_quick_start.py +++ b/gallery/tutorial/relay_quick_start.py @@ -26,7 +26,6 @@ Notice that you need to build TVM with cuda and llvm enabled. """ -# sphinx_gallery_requires_cuda = True ###################################################################### # Overview for Supported Hardware Backend of TVM # ---------------------------------------------- @@ -38,6 +37,9 @@ # In this tutorial, we'll choose cuda and llvm as target backends. # To begin with, let's import Relay and TVM. 
+# sphinx_gallery_start_ignore +# sphinx_gallery_requires_cuda = True +# sphinx_gallery_end_ignore import numpy as np from tvm import relay diff --git a/gallery/tutorial/tensor_ir_blitz_course.py b/gallery/tutorial/tensor_ir_blitz_course.py index a46bd77803d9..346dc6154f9b 100644 --- a/gallery/tutorial/tensor_ir_blitz_course.py +++ b/gallery/tutorial/tensor_ir_blitz_course.py @@ -29,7 +29,9 @@ """ +# sphinx_gallery_start_ignore # sphinx_gallery_requires_cuda = True +# sphinx_gallery_end_ignore import tvm from tvm.ir.module import IRModule from tvm.script import tir as T From 3aa7ad2783329e9e7f5badb1f93ab632eceefff5 Mon Sep 17 00:00:00 2001 From: driazati Date: Wed, 11 Jan 2023 15:09:38 -0800 Subject: [PATCH 3/3] Remove work_with_pytorch from tutorials list, see #13766 --- docs/conf.py | 2 -- gallery/how_to/work_with_pytorch/README.txt | 6 ------ 2 files changed, 8 deletions(-) delete mode 100644 gallery/how_to/work_with_pytorch/README.txt diff --git a/docs/conf.py b/docs/conf.py index 5189143b1250..08fbedb8ffca 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -418,7 +418,6 @@ def jupyter_notebook(script_blocks, gallery_conf, target_dir, real_func): tvm_path.joinpath("gallery", "how_to", "tune_with_autoscheduler"), tvm_path.joinpath("gallery", "how_to", "work_with_microtvm"), tvm_path.joinpath("gallery", "how_to", "extend_tvm"), - tvm_path.joinpath("gallery", "how_to", "work_with_pytorch"), tvm_path.joinpath("vta", "tutorials"), ] @@ -433,7 +432,6 @@ def jupyter_notebook(script_blocks, gallery_conf, target_dir, real_func): "how_to/tune_with_autoscheduler", "how_to/work_with_microtvm", "how_to/extend_tvm", - "how_to/work_with_pytorch", "topic/vta/tutorials", ] diff --git a/gallery/how_to/work_with_pytorch/README.txt b/gallery/how_to/work_with_pytorch/README.txt deleted file mode 100644 index ba80e1f58798..000000000000 --- a/gallery/how_to/work_with_pytorch/README.txt +++ /dev/null @@ -1,6 +0,0 @@ -.. 
_tutorial-work-with-pytorch: - -Work With PyTorch -================= - -These tutorials demonstrate examples of integrating PyTorch and TVM.