diff --git a/.github/ISSUE_TEMPLATE/ci-problem.md b/.github/ISSUE_TEMPLATE/ci-problem.md
index f46a42f42cf5..5a5977ab5000 100644
--- a/.github/ISSUE_TEMPLATE/ci-problem.md
+++ b/.github/ISSUE_TEMPLATE/ci-problem.md
@@ -19,4 +19,4 @@ Provide a link to the specific run that has failed.
 
 ### Flakiness
 
-Have you seen this multiple times in this branch or in other branches?
\ No newline at end of file
+Have you seen this multiple times in this branch or in other branches?
diff --git a/.github/ISSUE_TEMPLATE/documentation.md b/.github/ISSUE_TEMPLATE/documentation.md
index a1c1facb7e1b..0ad9424c1b5d 100644
--- a/.github/ISSUE_TEMPLATE/documentation.md
+++ b/.github/ISSUE_TEMPLATE/documentation.md
@@ -19,4 +19,3 @@ Include the title of the document (e.g. "Getting Started with TVM"), and the typ
 
 If an RFC/discuss post exists, link it here. Otherwise, specify what actions should be
 taken to provide additional clarity/readability/reproducibility to the document.
 Include code snippets from the previous documentation if applicable.
-
diff --git a/Jenkinsfile b/Jenkinsfile
index fa1629205080..dce2f25f14a6 100755
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -230,11 +230,15 @@ stage('Build') {
         // sh "${docker_run} ${ci_cpu} ./tests/scripts/task_golang.sh"
         // TODO(@jroesch): need to resolve CI issue will turn back on in follow up patch
         sh "${docker_run} ${ci_cpu} ./tests/scripts/task_rust.sh"
-        junit "build/pytest-results/*.xml"
       }
     }
   }
-  },
+    post {
+      always {
+        junit "build/pytest-results/*.xml"
+      }
+    }
+  },
   'BUILD: WASM': {
     node('CPU') {
       ws(per_exec_ws("tvm/build-wasm")) {
@@ -277,10 +281,14 @@ stage('Build') {
         timeout(time: max_time, unit: 'MINUTES') {
           sh "${docker_run} ${ci_qemu} ./tests/scripts/task_ci_setup.sh"
          sh "${docker_run} ${ci_qemu} ./tests/scripts/task_python_microtvm.sh"
-          junit "build/pytest-results/*.xml"
         }
       }
     }
+    post {
+      always {
+        junit "build/pytest-results/*.xml"
+      }
+    }
   }
 }
 
@@ -295,10 +303,14 @@ stage('Unit Test') {
           sh "${docker_run} ${ci_gpu} ./tests/scripts/task_sphinx_precheck.sh"
           sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_unittest_gpuonly.sh"
           sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_integration_gpuonly.sh"
-          junit "build/pytest-results/*.xml"
         }
       }
     }
+    post {
+      always {
+        junit "build/pytest-results/*.xml"
+      }
+    }
   },
   'python3: i386': {
     node('CPU') {
@@ -310,10 +322,14 @@ stage('Unit Test') {
           sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_unittest.sh"
           sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_integration.sh"
           sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_vta_fsim.sh"
-          junit "build/pytest-results/*.xml"
         }
       }
     }
+    post {
+      always {
+        junit "build/pytest-results/*.xml"
+      }
+    }
   },
   'python3: arm': {
     node('ARM') {
@@ -324,11 +340,15 @@ stage('Unit Test') {
           sh "${docker_run} ${ci_arm} ./tests/scripts/task_ci_setup.sh"
           sh "${docker_run} ${ci_arm} ./tests/scripts/task_python_unittest.sh"
           sh "${docker_run} ${ci_arm} ./tests/scripts/task_python_arm_compute_library.sh"
-          junit "build/pytest-results/*.xml"
           // sh "${docker_run} ${ci_arm} ./tests/scripts/task_python_integration.sh"
         }
       }
     }
+    post {
+      always {
+        junit "build/pytest-results/*.xml"
+      }
+    }
   },
   'java: GPU': {
     node('GPU') {
@@ -353,10 +373,14 @@ stage('Integration Test') {
         timeout(time: max_time, unit: 'MINUTES') {
           sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh"
           sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_topi.sh"
-          junit "build/pytest-results/*.xml"
         }
       }
     }
+    post {
+      always {
+        junit "build/pytest-results/*.xml"
+      }
+    }
   },
   'frontend: GPU': {
     node('GPU') {
@@ -366,10 +390,14 @@ stage('Integration Test') {
         timeout(time: max_time, unit: 'MINUTES') {
           sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh"
           sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh"
-          junit "build/pytest-results/*.xml"
         }
       }
     }
+    post {
+      always {
+        junit "build/pytest-results/*.xml"
+      }
+    }
   },
   'frontend: CPU': {
     node('CPU') {
@@ -379,10 +407,14 @@ stage('Integration Test') {
         timeout(time: max_time, unit: 'MINUTES') {
           sh "${docker_run} ${ci_cpu} ./tests/scripts/task_ci_setup.sh"
           sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_frontend_cpu.sh"
-          junit "build/pytest-results/*.xml"
         }
       }
     }
+    post {
+      always {
+        junit "build/pytest-results/*.xml"
+      }
+    }
   },
   'docs: GPU': {
     node('TensorCore') {
diff --git a/apps/microtvm/reference-vm/arduino/base-box/base_box_provision.sh b/apps/microtvm/reference-vm/arduino/base-box/base_box_provision.sh
index 11d89f2cd44e..d55100693d5c 100644
--- a/apps/microtvm/reference-vm/arduino/base-box/base_box_provision.sh
+++ b/apps/microtvm/reference-vm/arduino/base-box/base_box_provision.sh
@@ -16,7 +16,7 @@
 # specific language governing permissions and limitations
 # under the License.
 #
-# Using this script we can reuse docker/install scripts to configure the reference 
+# Using this script we can reuse docker/install scripts to configure the reference
 # virtual machine similar to CI QEMU setup.
 #
diff --git a/cmake/modules/Hexagon.cmake b/cmake/modules/Hexagon.cmake
index eb3ad1f5ae4a..238e17ba268b 100644
--- a/cmake/modules/Hexagon.cmake
+++ b/cmake/modules/Hexagon.cmake
@@ -105,4 +105,3 @@ endif()
 
 file(GLOB RUNTIME_HEXAGON_SRCS src/runtime/hexagon/*.cc)
 list(APPEND RUNTIME_SRCS ${RUNTIME_HEXAGON_SRCS} ${RUNTIME_HEXAGON_SIM_SRCS} ${RUNTIME_HEXAGON_DEVICE_SRCS})
-
diff --git a/cmake/modules/HexagonSDK.cmake b/cmake/modules/HexagonSDK.cmake
index 9541f5be821c..bffbeb1192ee 100644
--- a/cmake/modules/HexagonSDK.cmake
+++ b/cmake/modules/HexagonSDK.cmake
@@ -120,4 +120,3 @@ function(find_hexagon_sdk_root HEXAGON_SDK_PATH HEXAGON_ARCH)
 
   set(FOUND_HEXAGON_SDK_ROOT TRUE)
 endfunction()
-
diff --git a/cmake/modules/contrib/EthosU.cmake b/cmake/modules/contrib/EthosU.cmake
index 8f3e09b8179b..899f2da1c577 100644
--- a/cmake/modules/contrib/EthosU.cmake
+++ b/cmake/modules/contrib/EthosU.cmake
@@ -18,4 +18,4 @@
 if(USE_ETHOSU)
   file(GLOB ETHOSU_RELAY_CONTRIB_SRC src/relay/backend/contrib/ethosu/*)
   list(APPEND COMPILER_SRCS ${ETHOSU_RELAY_CONTRIB_SRC})
-endif(USE_ETHOSU)
\ No newline at end of file
+endif(USE_ETHOSU)
diff --git a/docker/install/ubuntu_install_vitis_ai_packages_ci.sh b/docker/install/ubuntu_install_vitis_ai_packages_ci.sh
index 25c214068cd0..f6f270a07a83 100644
--- a/docker/install/ubuntu_install_vitis_ai_packages_ci.sh
+++ b/docker/install/ubuntu_install_vitis_ai_packages_ci.sh
@@ -6,9 +6,9 @@
 # to you under the Apache License, Version 2.0 (the
 # "License"); you may not use this file except in compliance
 # with the License. You may obtain a copy of the License at
-# 
+#
 #   http://www.apache.org/licenses/LICENSE-2.0
-# 
+#
 # Unless required by applicable law or agreed to in writing,
 # software distributed under the License is distributed on an
 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
diff --git a/docs/langref/relay_pattern.rst b/docs/langref/relay_pattern.rst
index 4682e5aa5b33..16211b2cb125 100644
--- a/docs/langref/relay_pattern.rst
+++ b/docs/langref/relay_pattern.rst
@@ -89,7 +89,7 @@ Or a convolution with a specific kernel size:
     x = relay.var('x')
     y = relay.var('y')
     assert is_conv2d.match(relay.op.nn.conv2d(x, y, kernel_size=[3, 3]))
-    
+
 
 
 Matching an Optional Op
diff --git a/tests/python/relay/aot/corstone300.mk b/tests/python/relay/aot/corstone300.mk
index 3a946f2cd876..2f07a3d1cb96 100644
--- a/tests/python/relay/aot/corstone300.mk
+++ b/tests/python/relay/aot/corstone300.mk
@@ -115,4 +115,4 @@ run: $(build_dir)/aot_test_runner
 
 .DEFAULT: aot_test_runner
 
-.PHONY: run
\ No newline at end of file
+.PHONY: run
diff --git a/tests/scripts/task_config_build_i386.sh b/tests/scripts/task_config_build_i386.sh
index 298259682972..86ebb0c4f19f 100755
--- a/tests/scripts/task_config_build_i386.sh
+++ b/tests/scripts/task_config_build_i386.sh
@@ -35,4 +35,3 @@ echo set\(USE_VTA_FSIM ON\) >> config.cmake
 echo set\(USE_VTA_TSIM ON\) >> config.cmake
 echo set\(USE_VERILATOR ON\) >> config.cmake
 echo set\(USE_CCACHE OFF\) >> config.cmake
-
diff --git a/tutorials/auto_scheduler/tune_network_cuda.py b/tutorials/auto_scheduler/tune_network_cuda.py
index 08c15264e3c1..fb555235ceae 100644
--- a/tutorials/auto_scheduler/tune_network_cuda.py
+++ b/tutorials/auto_scheduler/tune_network_cuda.py
@@ -23,7 +23,7 @@
 best performance. This is a tutorial on how to tune a whole neural
 network for NVIDIA GPU with the auto-scheduler.
 
-To auto-tune a neural network, we partition the network into small subgraphs and 
+To auto-tune a neural network, we partition the network into small subgraphs and
 tune them independently. Each subgraph is treated as one search task.
 A task scheduler slices the time and dynamically allocates time resources to
 these tasks. The task scheduler predicts the impact of each task on the end-to-end
diff --git a/tutorials/auto_scheduler/tune_network_x86.py b/tutorials/auto_scheduler/tune_network_x86.py
index 6cb8d6f14cb9..665e20e003b3 100644
--- a/tutorials/auto_scheduler/tune_network_x86.py
+++ b/tutorials/auto_scheduler/tune_network_x86.py
@@ -24,7 +24,7 @@
 best performance. This is a tutorial on how to tune a whole neural
 network for x86 CPU with the auto-scheduler.
 
-To auto-tune a neural network, we partition the network into small subgraphs and 
+To auto-tune a neural network, we partition the network into small subgraphs and
 tune them independently. Each subgraph is treated as one search task.
 A task scheduler slices the time and dynamically allocates time resources to
 these tasks. The task scheduler predicts the impact of each task on the end-to-end
diff --git a/tutorials/frontend/deploy_sparse.py b/tutorials/frontend/deploy_sparse.py
index 768a697f45cf..05926c847730 100644
--- a/tutorials/frontend/deploy_sparse.py
+++ b/tutorials/frontend/deploy_sparse.py
@@ -36,7 +36,7 @@
 Pruning is a technique primarily used to reduce the parameter size of a model
 by replacing weight values with 0s. Although many methods exist for choosing which
-weights should be set to 0, the most straight forward is by picking the 
+weights should be set to 0, the most straight forward is by picking the
 weights with the smallest value. Typically, weights are pruned to a desired
 sparsity percentage. For example, a 95% sparse model would have only 5% of its
 weights non-zero. Pruning to very high sparsities often requires
@@ -50,8 +50,8 @@
 value and location. The benefit of bunching up pruned weights is that it allows
 an algorithm such as matrix multiplication to skip entire blocks. It turns out
 that some degree of *block sparsity* is very important to realizing significant
-speedups on most hardware available today. 
-This is because when loading memory in most CPUs or GPUs, 
+speedups on most hardware available today.
+This is because when loading memory in most CPUs or GPUs,
 it doesn't save any work to skip reading a single value at a time, instead
 an entire chunk or tile is read in and executed using something like vectorized
 instructions.