From edf8029ca79ee0b4fc90d518291dcb62ad7d1d16 Mon Sep 17 00:00:00 2001 From: Michalis Papadimitriou Date: Thu, 16 Sep 2021 13:52:43 +0300 Subject: [PATCH 01/23] Add script to look for changed in doc dir --- tests/scripts/git_check_tree.sh | 39 +++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100755 tests/scripts/git_check_tree.sh diff --git a/tests/scripts/git_check_tree.sh b/tests/scripts/git_check_tree.sh new file mode 100755 index 000000000000..9def40d7b096 --- /dev/null +++ b/tests/scripts/git_check_tree.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +DOCS_DIR=0 +OTHER_DIR=0 +DOC_DIR="\docs" + +changed_files=`git diff --no-commit-id --name-only -r origin/main` + +for file in $changed_files; do + if grep -q "$DOC_DIR" <<< "$file"; then + DOCS_DIR=1 + else + OTHER_DIR=1 + fi +done + +if [[ ($DOCS_DIR -eq !$OTHER_DIR) || ($OTHER_DIR -eq 1) ]]; then + exit 1 +else + exit 0 +fi + From 046218f299e29f568a70c2374d467bd1597baa13 Mon Sep 17 00:00:00 2001 From: Michalis Papadimitriou Date: Thu, 16 Sep 2021 14:01:15 +0300 Subject: [PATCH 02/23] Modify Jenkinsfile --- Jenkinsfile | 153 ++++++++++++++++++++++++++++++++++------------------ 1 file changed, 100 insertions(+), 53 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index fa1629205080..93edc8ba626f 100755 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -148,11 +148,16 @@ stage('Prepare') { } } + stage("Sanity Check") { timeout(time: max_time, unit: 'MINUTES') { node('CPU') { ws(per_exec_ws("tvm/sanity")) { init_git() + def docs = sh (returnStatus: true, script: ''' + ./git_changed_status.sh + ''' + ) sh "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh" } } @@ -215,23 +220,25 @@ stage('Build') { } }, 'BUILD: CPU': { - node('CPU') { - ws(per_exec_ws("tvm/build-cpu")) { - init_git() - sh "${docker_run} ${ci_cpu} ./tests/scripts/task_config_build_cpu.sh" - make(ci_cpu, 'build', '-j2') - pack_lib('cpu', tvm_multilib_tsim) - timeout(time: max_time, unit: 'MINUTES') { - sh "${docker_run} ${ci_cpu} ./tests/scripts/task_ci_setup.sh" - sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_unittest.sh" - sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh" - sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_vta_fsim.sh" - sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_vta_tsim.sh" - // sh "${docker_run} ${ci_cpu} ./tests/scripts/task_golang.sh" - // TODO(@jroesch): need to resolve CI issue will turn back on in follow up patch - sh "${docker_run} ${ci_cpu} ./tests/scripts/task_rust.sh" - junit "build/pytest-results/*.xml" - } + if (docs == 1 ) { + node('CPU') { + ws(per_exec_ws("tvm/build-cpu")) { + init_git() + sh "${docker_run} ${ci_cpu} ./tests/scripts/task_config_build_cpu.sh" + make(ci_cpu, 'build', '-j2') 
+ pack_lib('cpu', tvm_multilib_tsim) + timeout(time: max_time, unit: 'MINUTES') { + sh "${docker_run} ${ci_cpu} ./tests/scripts/task_ci_setup.sh" + sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_unittest.sh" + sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh" + sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_vta_fsim.sh" + sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_vta_tsim.sh" + // sh "${docker_run} ${ci_cpu} ./tests/scripts/task_golang.sh" + // TODO(@jroesch): need to resolve CI issue will turn back on in follow up patch + sh "${docker_run} ${ci_cpu} ./tests/scripts/task_rust.sh" + junit "build/pytest-results/*.xml" + } + } } } }, @@ -239,11 +246,13 @@ stage('Build') { node('CPU') { ws(per_exec_ws("tvm/build-wasm")) { init_git() - sh "${docker_run} ${ci_wasm} ./tests/scripts/task_config_build_wasm.sh" - make(ci_wasm, 'build', '-j2') - timeout(time: max_time, unit: 'MINUTES') { - sh "${docker_run} ${ci_wasm} ./tests/scripts/task_ci_setup.sh" - sh "${docker_run} ${ci_wasm} ./tests/scripts/task_web_wasm.sh" + if (docs == 0 ) { + sh "${docker_run} ${ci_wasm} ./tests/scripts/task_config_build_wasm.sh" + make(ci_wasm, 'build', '-j2') + timeout(time: max_time, unit: 'MINUTES') { + sh "${docker_run} ${ci_wasm} ./tests/scripts/task_ci_setup.sh" + sh "${docker_run} ${ci_wasm} ./tests/scripts/task_web_wasm.sh" + } } } } @@ -252,9 +261,15 @@ stage('Build') { node('CPU') { ws(per_exec_ws("tvm/build-i386")) { init_git() - sh "${docker_run} ${ci_i386} ./tests/scripts/task_config_build_i386.sh" - make(ci_i386, 'build', '-j2') - pack_lib('i386', tvm_multilib_tsim) + def docs = sh (returnStatus: true, script: ''' + git diff-tree origin/main --no-commit-id --name-only -r HEAD | grep -v -q docs/ + ''' + ) + if (docs == 0 ) { + sh "${docker_run} ${ci_i386} ./tests/scripts/task_config_build_i386.sh" + make(ci_i386, 'build', '-j2') + pack_lib('i386', tvm_multilib_tsim) + } } } }, @@ -262,9 +277,11 @@ stage('Build') { node('ARM') { ws(per_exec_ws("tvm/build-arm")) { init_git() - sh "${docker_run} ${ci_arm} ./tests/scripts/task_config_build_arm.sh" - make(ci_arm, 'build', '-j4') - pack_lib('arm', tvm_multilib) + if (docs == 0 ) { + sh "${docker_run} ${ci_arm} ./tests/scripts/task_config_build_arm.sh" + make(ci_arm, 'build', '-j4') + pack_lib('arm', tvm_multilib) + } } } }, @@ -272,12 +289,18 @@ stage('Build') { node('CPU') { ws(per_exec_ws("tvm/build-qemu")) { init_git() - sh "${docker_run} ${ci_qemu} ./tests/scripts/task_config_build_qemu.sh" - make(ci_qemu, 'build', '-j2') - timeout(time: max_time, unit: 'MINUTES') { - sh "${docker_run} ${ci_qemu} ./tests/scripts/task_ci_setup.sh" - sh "${docker_run} ${ci_qemu} ./tests/scripts/task_python_microtvm.sh" - junit "build/pytest-results/*.xml" + def docs = sh (returnStatus: true, script: ''' + git diff-tree origin/main --no-commit-id --name-only -r HEAD | grep -v -q docs/ + ''' + ) + if (docs == 0 ) { + sh "${docker_run} ${ci_qemu} ./tests/scripts/task_config_build_qemu.sh" + make(ci_qemu, 'build', '-j2') + timeout(time: max_time, unit: 'MINUTES') { + sh "${docker_run} ${ci_qemu} ./tests/scripts/task_ci_setup.sh" + sh "${docker_run} ${ci_qemu} ./tests/scripts/task_python_microtvm.sh" + junit "build/pytest-results/*.xml" + } } } } @@ -289,13 +312,19 @@ stage('Unit Test') { node('TensorCore') { ws(per_exec_ws("tvm/ut-python-gpu")) { init_git() - unpack_lib('gpu', tvm_multilib) - timeout(time: max_time, unit: 'MINUTES') { - sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh" - sh "${docker_run} ${ci_gpu} 
./tests/scripts/task_sphinx_precheck.sh" - sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_unittest_gpuonly.sh" - sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_integration_gpuonly.sh" - junit "build/pytest-results/*.xml" + def docs = sh (returnStatus: true, script: ''' + git diff-tree origin/main --no-commit-id --name-only -r HEAD | grep -v -q docs/ + ''' + ) + if (docs == 0 ) { + unpack_lib('gpu', tvm_multilib) + timeout(time: max_time, unit: 'MINUTES') { + sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh" + sh "${docker_run} ${ci_gpu} ./tests/scripts/task_sphinx_precheck.sh" + sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_unittest_gpuonly.sh" + sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_integration_gpuonly.sh" + junit "build/pytest-results/*.xml" + } } } } @@ -304,13 +333,19 @@ stage('Unit Test') { node('CPU') { ws(per_exec_ws("tvm/ut-python-i386")) { init_git() - unpack_lib('i386', tvm_multilib) - timeout(time: max_time, unit: 'MINUTES') { - sh "${docker_run} ${ci_i386} ./tests/scripts/task_ci_setup.sh" - sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_unittest.sh" - sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_integration.sh" - sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_vta_fsim.sh" - junit "build/pytest-results/*.xml" + def docs = sh (returnStatus: true, script: ''' + git diff-tree origin/main --no-commit-id --name-only -r HEAD | grep -v -q docs/ + ''' + ) + if (docs == 0 ) { + unpack_lib('i386', tvm_multilib) + timeout(time: max_time, unit: 'MINUTES') { + sh "${docker_run} ${ci_i386} ./tests/scripts/task_ci_setup.sh" + sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_unittest.sh" + sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_integration.sh" + sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_vta_fsim.sh" + junit "build/pytest-results/*.xml" + } } } } @@ -319,13 +354,19 @@ stage('Unit Test') { node('ARM') { ws(per_exec_ws("tvm/ut-python-arm")) { init_git() + def docs = sh (returnStatus: true, script: ''' + git diff-tree origin/main --no-commit-id --name-only -r HEAD | grep -v -q docs/ + ''' + ) + if (docs == 0 ) { unpack_lib('arm', tvm_multilib) timeout(time: max_time, unit: 'MINUTES') { sh "${docker_run} ${ci_arm} ./tests/scripts/task_ci_setup.sh" sh "${docker_run} ${ci_arm} ./tests/scripts/task_python_unittest.sh" sh "${docker_run} ${ci_arm} ./tests/scripts/task_python_arm_compute_library.sh" junit "build/pytest-results/*.xml" - // sh "${docker_run} ${ci_arm} ./tests/scripts/task_python_integration.sh" + // sh "${docker_run} ${ci_arm} ./tests/scripts/task_python_integration.sh" + } } } } @@ -334,10 +375,16 @@ stage('Unit Test') { node('GPU') { ws(per_exec_ws("tvm/ut-java")) { init_git() - unpack_lib('gpu', tvm_multilib) - timeout(time: max_time, unit: 'MINUTES') { - sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh" - sh "${docker_run} ${ci_gpu} ./tests/scripts/task_java_unittest.sh" + def docs = sh (returnStatus: true, script: ''' + git diff-tree origin/main --no-commit-id --name-only -r HEAD | grep -v -q docs/ + ''' + ) + if (docs == 0 ) { + unpack_lib('gpu', tvm_multilib) + timeout(time: max_time, unit: 'MINUTES') { + sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh" + sh "${docker_run} ${ci_gpu} ./tests/scripts/task_java_unittest.sh" + } } } } From 60b1d02ba11f7496fbb9a2a70eee3af9d45adf1e Mon Sep 17 00:00:00 2001 From: Michalis Papadimitriou Date: Thu, 16 Sep 2021 14:04:56 +0300 Subject: [PATCH 03/23] Minor changes in scripts --- Jenkinsfile 
| 205 +++++++++++++------------------- tests/scripts/git_check_tree.sh | 1 - 2 files changed, 84 insertions(+), 122 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 93edc8ba626f..8d9cf8886d24 100755 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -153,12 +153,12 @@ stage("Sanity Check") { timeout(time: max_time, unit: 'MINUTES') { node('CPU') { ws(per_exec_ws("tvm/sanity")) { - init_git() - def docs = sh (returnStatus: true, script: ''' - ./git_changed_status.sh + init_git() + docs = sh (returnStatus: true, script: ''' + ./tests/scripts/git_check_tree.sh ''' - ) - sh "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh" + ) + // sh "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh" } } } @@ -206,47 +206,46 @@ def unpack_lib(name, libs) { } stage('Build') { - parallel 'BUILD: GPU': { - node('GPUBUILD') { - ws(per_exec_ws("tvm/build-gpu")) { - init_git() - sh "${docker_run} ${ci_gpu} ./tests/scripts/task_config_build_gpu.sh" - make(ci_gpu, 'build', '-j2') - pack_lib('gpu', tvm_multilib) - // compiler test - sh "${docker_run} ${ci_gpu} ./tests/scripts/task_config_build_gpu_vulkan.sh" - make(ci_gpu, 'build2', '-j2') + parallel 'BUILD: GPU': { + if( docs == 0) { + node('GPUBUILD') { + ws(per_exec_ws("tvm/build-gpu")) { + init_git() + sh "${docker_run} ${ci_gpu} ./tests/scripts/task_config_build_gpu.sh" + make(ci_gpu, 'build', '-j2') + pack_lib('gpu', tvm_multilib) + // compiler test + sh "${docker_run} ${ci_gpu} ./tests/scripts/task_config_build_gpu_vulkan.sh" + make(ci_gpu, 'build2', '-j2') + } } } }, 'BUILD: CPU': { - if (docs == 1 ) { - node('CPU') { - ws(per_exec_ws("tvm/build-cpu")) { - init_git() - sh "${docker_run} ${ci_cpu} ./tests/scripts/task_config_build_cpu.sh" - make(ci_cpu, 'build', '-j2') - pack_lib('cpu', tvm_multilib_tsim) - timeout(time: max_time, unit: 'MINUTES') { - sh "${docker_run} ${ci_cpu} ./tests/scripts/task_ci_setup.sh" - sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_unittest.sh" - sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh" - sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_vta_fsim.sh" - sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_vta_tsim.sh" - // sh "${docker_run} ${ci_cpu} ./tests/scripts/task_golang.sh" - // TODO(@jroesch): need to resolve CI issue will turn back on in follow up patch - sh "${docker_run} ${ci_cpu} ./tests/scripts/task_rust.sh" - junit "build/pytest-results/*.xml" - } - } + node('CPU') { + ws(per_exec_ws("tvm/build-cpu")) { + init_git() + sh "${docker_run} ${ci_cpu} ./tests/scripts/task_config_build_cpu.sh" + make(ci_cpu, 'build', '-j2') + pack_lib('cpu', tvm_multilib_tsim) + timeout(time: max_time, unit: 'MINUTES') { + sh "${docker_run} ${ci_cpu} ./tests/scripts/task_ci_setup.sh" + sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_unittest.sh" + sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh" + sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_vta_fsim.sh" + sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_vta_tsim.sh" + // sh "${docker_run} ${ci_cpu} ./tests/scripts/task_golang.sh" + // TODO(@jroesch): need to resolve CI issue will turn back on in follow up patch + sh "${docker_run} ${ci_cpu} ./tests/scripts/task_rust.sh" + junit "build/pytest-results/*.xml" + } } } - }, + }, 'BUILD: WASM': { - node('CPU') { - ws(per_exec_ws("tvm/build-wasm")) { - init_git() - if (docs == 0 ) { + node('CPU') { + ws(per_exec_ws("tvm/build-wasm")) { + init_git() sh "${docker_run} ${ci_wasm} ./tests/scripts/task_config_build_wasm.sh" make(ci_wasm, 'build', 
'-j2') timeout(time: max_time, unit: 'MINUTES') { @@ -255,45 +254,31 @@ stage('Build') { } } } - } }, 'BUILD : i386': { - node('CPU') { - ws(per_exec_ws("tvm/build-i386")) { - init_git() - def docs = sh (returnStatus: true, script: ''' - git diff-tree origin/main --no-commit-id --name-only -r HEAD | grep -v -q docs/ - ''' - ) - if (docs == 0 ) { + node('CPU') { + ws(per_exec_ws("tvm/build-i386")) { + init_git() sh "${docker_run} ${ci_i386} ./tests/scripts/task_config_build_i386.sh" make(ci_i386, 'build', '-j2') pack_lib('i386', tvm_multilib_tsim) } } - } }, 'BUILD : arm': { - node('ARM') { - ws(per_exec_ws("tvm/build-arm")) { - init_git() - if (docs == 0 ) { + node('ARM') { + ws(per_exec_ws("tvm/build-arm")) { + init_git() sh "${docker_run} ${ci_arm} ./tests/scripts/task_config_build_arm.sh" make(ci_arm, 'build', '-j4') pack_lib('arm', tvm_multilib) } } - } }, 'BUILD: QEMU': { - node('CPU') { - ws(per_exec_ws("tvm/build-qemu")) { - init_git() - def docs = sh (returnStatus: true, script: ''' - git diff-tree origin/main --no-commit-id --name-only -r HEAD | grep -v -q docs/ - ''' - ) - if (docs == 0 ) { + node('CPU') { + ws(per_exec_ws("tvm/build-qemu")) { + init_git() sh "${docker_run} ${ci_qemu} ./tests/scripts/task_config_build_qemu.sh" make(ci_qemu, 'build', '-j2') timeout(time: max_time, unit: 'MINUTES') { @@ -302,21 +287,18 @@ stage('Build') { junit "build/pytest-results/*.xml" } } - } - } - } + } + } } stage('Unit Test') { - parallel 'python3: GPU': { - node('TensorCore') { - ws(per_exec_ws("tvm/ut-python-gpu")) { - init_git() - def docs = sh (returnStatus: true, script: ''' - git diff-tree origin/main --no-commit-id --name-only -r HEAD | grep -v -q docs/ - ''' - ) - if (docs == 0 ) { + when { + expression { docs == 1 } + } + parallel 'python3: GPU': { + node('TensorCore') { + ws(per_exec_ws("tvm/ut-python-gpu")) { + init_git() unpack_lib('gpu', tvm_multilib) timeout(time: max_time, unit: 'MINUTES') { sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh" @@ -327,17 +309,11 @@ stage('Unit Test') { } } } - } - }, - 'python3: i386': { - node('CPU') { - ws(per_exec_ws("tvm/ut-python-i386")) { - init_git() - def docs = sh (returnStatus: true, script: ''' - git diff-tree origin/main --no-commit-id --name-only -r HEAD | grep -v -q docs/ - ''' - ) - if (docs == 0 ) { + }, + 'python3: i386': { + node('CPU') { + ws(per_exec_ws("tvm/ut-python-i386")) { + init_git() unpack_lib('i386', tvm_multilib) timeout(time: max_time, unit: 'MINUTES') { sh "${docker_run} ${ci_i386} ./tests/scripts/task_ci_setup.sh" @@ -348,47 +324,34 @@ stage('Unit Test') { } } } - } - }, - 'python3: arm': { - node('ARM') { - ws(per_exec_ws("tvm/ut-python-arm")) { - init_git() - def docs = sh (returnStatus: true, script: ''' - git diff-tree origin/main --no-commit-id --name-only -r HEAD | grep -v -q docs/ - ''' - ) - if (docs == 0 ) { - unpack_lib('arm', tvm_multilib) - timeout(time: max_time, unit: 'MINUTES') { - sh "${docker_run} ${ci_arm} ./tests/scripts/task_ci_setup.sh" - sh "${docker_run} ${ci_arm} ./tests/scripts/task_python_unittest.sh" - sh "${docker_run} ${ci_arm} ./tests/scripts/task_python_arm_compute_library.sh" - junit "build/pytest-results/*.xml" - // sh "${docker_run} ${ci_arm} ./tests/scripts/task_python_integration.sh" - } + }, + 'python3: arm': { + node('ARM') { + ws(per_exec_ws("tvm/ut-python-arm")) { + init_git() + unpack_lib('arm', tvm_multilib) + timeout(time: max_time, unit: 'MINUTES') { + sh "${docker_run} ${ci_arm} ./tests/scripts/task_ci_setup.sh" + sh "${docker_run} ${ci_arm} 
./tests/scripts/task_python_unittest.sh" + sh "${docker_run} ${ci_arm} ./tests/scripts/task_python_arm_compute_library.sh" + junit "build/pytest-results/*.xml" + // sh "${docker_run} ${ci_arm} ./tests/scripts/task_python_integration.sh" + } } } - } - }, - 'java: GPU': { - node('GPU') { - ws(per_exec_ws("tvm/ut-java")) { - init_git() - def docs = sh (returnStatus: true, script: ''' - git diff-tree origin/main --no-commit-id --name-only -r HEAD | grep -v -q docs/ - ''' - ) - if (docs == 0 ) { - unpack_lib('gpu', tvm_multilib) - timeout(time: max_time, unit: 'MINUTES') { - sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh" - sh "${docker_run} ${ci_gpu} ./tests/scripts/task_java_unittest.sh" - } + }, + 'java: GPU': { + node('GPU') { + ws(per_exec_ws("tvm/ut-java")) { + init_git() + unpack_lib('gpu', tvm_multilib) + timeout(time: max_time, unit: 'MINUTES') { + sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh" + sh "${docker_run} ${ci_gpu} ./tests/scripts/task_java_unittest.sh" + } } } } - } } stage('Integration Test') { diff --git a/tests/scripts/git_check_tree.sh b/tests/scripts/git_check_tree.sh index 9def40d7b096..df0b529c6d22 100755 --- a/tests/scripts/git_check_tree.sh +++ b/tests/scripts/git_check_tree.sh @@ -36,4 +36,3 @@ if [[ ($DOCS_DIR -eq !$OTHER_DIR) || ($OTHER_DIR -eq 1) ]]; then else exit 0 fi - From 2441390184cc19c424f3bdbe1c713711a20c0898 Mon Sep 17 00:00:00 2001 From: Michalis Papadimitriou Date: Thu, 16 Sep 2021 15:29:42 +0300 Subject: [PATCH 04/23] Working Jenkinsfile on selective stages on docs --- Jenkinsfile | 91 +++++++++++++++++++-------------- tests/scripts/git_check_tree.sh | 2 +- 2 files changed, 53 insertions(+), 40 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 8d9cf8886d24..e02ea34595d5 100755 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -158,7 +158,7 @@ stage("Sanity Check") { ./tests/scripts/git_check_tree.sh ''' ) - // sh "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh" + sh "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh" } } } @@ -207,7 +207,6 @@ def unpack_lib(name, libs) { stage('Build') { parallel 'BUILD: GPU': { - if( docs == 0) { node('GPUBUILD') { ws(per_exec_ws("tvm/build-gpu")) { init_git() @@ -219,30 +218,32 @@ stage('Build') { make(ci_gpu, 'build2', '-j2') } } - } }, 'BUILD: CPU': { - node('CPU') { - ws(per_exec_ws("tvm/build-cpu")) { - init_git() - sh "${docker_run} ${ci_cpu} ./tests/scripts/task_config_build_cpu.sh" - make(ci_cpu, 'build', '-j2') - pack_lib('cpu', tvm_multilib_tsim) - timeout(time: max_time, unit: 'MINUTES') { - sh "${docker_run} ${ci_cpu} ./tests/scripts/task_ci_setup.sh" - sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_unittest.sh" - sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh" - sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_vta_fsim.sh" - sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_vta_tsim.sh" - // sh "${docker_run} ${ci_cpu} ./tests/scripts/task_golang.sh" - // TODO(@jroesch): need to resolve CI issue will turn back on in follow up patch - sh "${docker_run} ${ci_cpu} ./tests/scripts/task_rust.sh" - junit "build/pytest-results/*.xml" + if( docs == 1) { + node('CPU') { + ws(per_exec_ws("tvm/build-cpu")) { + init_git() + sh "${docker_run} ${ci_cpu} ./tests/scripts/task_config_build_cpu.sh" + make(ci_cpu, 'build', '-j2') + pack_lib('cpu', tvm_multilib_tsim) + timeout(time: max_time, unit: 'MINUTES') { + sh "${docker_run} ${ci_cpu} ./tests/scripts/task_ci_setup.sh" + sh "${docker_run} ${ci_cpu} 
./tests/scripts/task_python_unittest.sh" + sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh" + sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_vta_fsim.sh" + sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_vta_tsim.sh" + // sh "${docker_run} ${ci_cpu} ./tests/scripts/task_golang.sh" + // TODO(@jroesch): need to resolve CI issue will turn back on in follow up patch + sh "${docker_run} ${ci_cpu} ./tests/scripts/task_rust.sh" + junit "build/pytest-results/*.xml" + } } } } }, 'BUILD: WASM': { + if( docs == 1) { node('CPU') { ws(per_exec_ws("tvm/build-wasm")) { init_git() @@ -254,8 +255,10 @@ stage('Build') { } } } + } }, 'BUILD : i386': { + if( docs == 1) { node('CPU') { ws(per_exec_ws("tvm/build-i386")) { init_git() @@ -264,8 +267,10 @@ stage('Build') { pack_lib('i386', tvm_multilib_tsim) } } + } }, 'BUILD : arm': { + if( docs == 1) { node('ARM') { ws(per_exec_ws("tvm/build-arm")) { init_git() @@ -274,8 +279,10 @@ stage('Build') { pack_lib('arm', tvm_multilib) } } + } }, 'BUILD: QEMU': { + if( docs == 1) { node('CPU') { ws(per_exec_ws("tvm/build-qemu")) { init_git() @@ -287,14 +294,13 @@ stage('Build') { junit "build/pytest-results/*.xml" } } - } + } } + } } stage('Unit Test') { - when { - expression { docs == 1 } - } + if( docs == 1) { parallel 'python3: GPU': { node('TensorCore') { ws(per_exec_ws("tvm/ut-python-gpu")) { @@ -352,10 +358,12 @@ stage('Unit Test') { } } } + } } stage('Integration Test') { parallel 'topi: GPU': { + if( docs == 1) { node('GPU') { ws(per_exec_ws("tvm/topi-python-gpu")) { init_git() @@ -367,29 +375,34 @@ stage('Integration Test') { } } } + } }, 'frontend: GPU': { - node('GPU') { - ws(per_exec_ws("tvm/frontend-python-gpu")) { - init_git() - unpack_lib('gpu', tvm_multilib) - timeout(time: max_time, unit: 'MINUTES') { - sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh" - sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh" - junit "build/pytest-results/*.xml" + if( docs == 1) { + node('GPU') { + ws(per_exec_ws("tvm/frontend-python-gpu")) { + init_git() + unpack_lib('gpu', tvm_multilib) + timeout(time: max_time, unit: 'MINUTES') { + sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh" + sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh" + junit "build/pytest-results/*.xml" + } } } } }, 'frontend: CPU': { - node('CPU') { - ws(per_exec_ws("tvm/frontend-python-cpu")) { - init_git() - unpack_lib('cpu', tvm_multilib) - timeout(time: max_time, unit: 'MINUTES') { - sh "${docker_run} ${ci_cpu} ./tests/scripts/task_ci_setup.sh" - sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_frontend_cpu.sh" - junit "build/pytest-results/*.xml" + if( docs == 1) { + node('CPU') { + ws(per_exec_ws("tvm/frontend-python-cpu")) { + init_git() + unpack_lib('cpu', tvm_multilib) + timeout(time: max_time, unit: 'MINUTES') { + sh "${docker_run} ${ci_cpu} ./tests/scripts/task_ci_setup.sh" + sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_frontend_cpu.sh" + junit "build/pytest-results/*.xml" + } } } } diff --git a/tests/scripts/git_check_tree.sh b/tests/scripts/git_check_tree.sh index df0b529c6d22..1c82aab354b4 100755 --- a/tests/scripts/git_check_tree.sh +++ b/tests/scripts/git_check_tree.sh @@ -31,7 +31,7 @@ for file in $changed_files; do fi done -if [[ ($DOCS_DIR -eq !$OTHER_DIR) || ($OTHER_DIR -eq 1) ]]; then +if [[ ($DOCS_DIR -eq !$OTHER_DIR) || ($OTHER_DIR -eq 1) ]]; then exit 1 else exit 0 From 594addc597f8b8b9dcc3329277043a4561504abb Mon Sep 17 00:00:00 2001 From: Michalis Papadimitriou 
Date: Thu, 16 Sep 2021 16:06:12 +0300 Subject: [PATCH 05/23] Pass groovy formater on Jenkinsfile --- Jenkinsfile | 243 ++++++++++++++++++-------------- tests/scripts/git_check_tree.sh | 6 +- 2 files changed, 138 insertions(+), 111 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index e02ea34595d5..590066af6026 100755 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -42,15 +42,16 @@ // Hashtag in the source to build current CI docker builds // // +import org.jenkinsci.plugins.pipeline.modeldefinition.Utils // NOTE: these lines are scanned by docker/dev_common.sh. Please update the regex as needed. --> -ci_lint = "tlcpack/ci-lint:v0.67" -ci_gpu = "tlcpack/ci-gpu:v0.77" -ci_cpu = "tlcpack/ci-cpu:v0.77" -ci_wasm = "tlcpack/ci-wasm:v0.71" -ci_i386 = "tlcpack/ci-i386:v0.73" -ci_qemu = "tlcpack/ci-qemu:v0.08" -ci_arm = "tlcpack/ci-arm:v0.06" +ci_lint = 'tlcpack/ci-lint:v0.67' +ci_gpu = 'tlcpack/ci-gpu:v0.77' +ci_cpu = 'tlcpack/ci-cpu:v0.77' +ci_wasm = 'tlcpack/ci-wasm:v0.71' +ci_i386 = 'tlcpack/ci-i386:v0.73' +ci_qemu = 'tlcpack/ci-qemu:v0.08' +ci_arm = 'tlcpack/ci-arm:v0.06' // <--- End of regex-scanned config. // Parameters to allow overriding (in Jenkins UI), the images @@ -58,25 +59,25 @@ ci_arm = "tlcpack/ci-arm:v0.06" // over default values above. properties([ parameters([ - string(name: 'ci_lint_param', defaultValue: ""), - string(name: 'ci_cpu_param', defaultValue: ""), - string(name: 'ci_gpu_param', defaultValue: ""), - string(name: 'ci_wasm_param', defaultValue: ""), - string(name: 'ci_i386_param', defaultValue: ""), - string(name: 'ci_qemu_param', defaultValue: ""), - string(name: 'ci_arm_param', defaultValue: "") + string(name: 'ci_lint_param', defaultValue: ''), + string(name: 'ci_cpu_param', defaultValue: ''), + string(name: 'ci_gpu_param', defaultValue: ''), + string(name: 'ci_wasm_param', defaultValue: ''), + string(name: 'ci_i386_param', defaultValue: ''), + string(name: 'ci_qemu_param', defaultValue: ''), + string(name: 'ci_arm_param', defaultValue: '') ]) ]) // tvm libraries -tvm_runtime = "build/libtvm_runtime.so, build/config.cmake" -tvm_lib = "build/libtvm.so, " + tvm_runtime +tvm_runtime = 'build/libtvm_runtime.so, build/config.cmake' +tvm_lib = 'build/libtvm.so, ' + tvm_runtime // LLVM upstream lib -tvm_multilib = "build/libtvm.so, " + - "build/libvta_fsim.so, " + +tvm_multilib = 'build/libtvm.so, ' + + 'build/libvta_fsim.so, ' + tvm_runtime -tvm_multilib_tsim = "build/libvta_tsim.so, " + +tvm_multilib_tsim = 'build/libvta_tsim.so, ' + tvm_multilib // command to start a docker container @@ -103,23 +104,23 @@ def init_git() { } def init_git_win() { - checkout scm - retry(5) { + checkout scm + retry(5) { timeout(time: 2, unit: 'MINUTES') { - bat 'git submodule update --init -f' + bat 'git submodule update --init -f' } - } + } } def cancel_previous_build() { - // cancel previous build if it is not on main. - if (env.BRANCH_NAME != "main") { - def buildNumber = env.BUILD_NUMBER as int - // Milestone API allows us to cancel previous build - // with the same milestone number - if (buildNumber > 1) milestone(buildNumber - 1) - milestone(buildNumber) - } + // cancel previous build if it is not on main. 
+ if (env.BRANCH_NAME != 'main') { + def buildNumber = env.BUILD_NUMBER as int + // Milestone API allows us to cancel previous build + // with the same milestone number + if (buildNumber > 1) milestone(buildNumber - 1) + milestone(buildNumber) + } } cancel_previous_build() @@ -148,17 +149,16 @@ stage('Prepare') { } } - -stage("Sanity Check") { +stage('Sanity Check') { timeout(time: max_time, unit: 'MINUTES') { node('CPU') { - ws(per_exec_ws("tvm/sanity")) { - init_git() - docs = sh (returnStatus: true, script: ''' + ws(per_exec_ws('tvm/sanity')) { + init_git() + docs = sh (returnStatus: true, script: ''' ./tests/scripts/git_check_tree.sh ''' ) - sh "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh" + // sh "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh" } } } @@ -195,7 +195,6 @@ def pack_lib(name, libs) { stash includes: libs, name: name } - // unpack libraries saved before def unpack_lib(name, libs) { unstash name @@ -208,7 +207,7 @@ def unpack_lib(name, libs) { stage('Build') { parallel 'BUILD: GPU': { node('GPUBUILD') { - ws(per_exec_ws("tvm/build-gpu")) { + ws(per_exec_ws('tvm/build-gpu')) { init_git() sh "${docker_run} ${ci_gpu} ./tests/scripts/task_config_build_gpu.sh" make(ci_gpu, 'build', '-j2') @@ -220,9 +219,9 @@ stage('Build') { } }, 'BUILD: CPU': { - if( docs == 1) { + if (docs == 1) { node('CPU') { - ws(per_exec_ws("tvm/build-cpu")) { + ws(per_exec_ws('tvm/build-cpu')) { init_git() sh "${docker_run} ${ci_cpu} ./tests/scripts/task_config_build_cpu.sh" make(ci_cpu, 'build', '-j2') @@ -236,16 +235,18 @@ stage('Build') { // sh "${docker_run} ${ci_cpu} ./tests/scripts/task_golang.sh" // TODO(@jroesch): need to resolve CI issue will turn back on in follow up patch sh "${docker_run} ${ci_cpu} ./tests/scripts/task_rust.sh" - junit "build/pytest-results/*.xml" + junit 'build/pytest-results/*.xml' } } } + } else { + Utils.markStageSkippedForConditional('BUILD: CPU') } }, 'BUILD: WASM': { - if( docs == 1) { + if (docs == 1) { node('CPU') { - ws(per_exec_ws("tvm/build-wasm")) { + ws(per_exec_ws('tvm/build-wasm')) { init_git() sh "${docker_run} ${ci_wasm} ./tests/scripts/task_config_build_wasm.sh" make(ci_wasm, 'build', '-j2') @@ -255,161 +256,185 @@ stage('Build') { } } } + } else { + Utils.markStageSkippedForConditional('BUILD: WASM') } }, 'BUILD : i386': { - if( docs == 1) { + if ( docs == 1) { node('CPU') { - ws(per_exec_ws("tvm/build-i386")) { + ws(per_exec_ws('tvm/build-i386')) { init_git() sh "${docker_run} ${ci_i386} ./tests/scripts/task_config_build_i386.sh" make(ci_i386, 'build', '-j2') pack_lib('i386', tvm_multilib_tsim) } } + } else { + Utils.markStageSkippedForConditional('BUILD : i386') } }, 'BUILD : arm': { - if( docs == 1) { + if (docs == 1) { node('ARM') { - ws(per_exec_ws("tvm/build-arm")) { + ws(per_exec_ws('tvm/build-arm')) { init_git() sh "${docker_run} ${ci_arm} ./tests/scripts/task_config_build_arm.sh" make(ci_arm, 'build', '-j4') pack_lib('arm', tvm_multilib) } } + } else { + Utils.markStageSkippedForConditional('BUILD : arm') } }, 'BUILD: QEMU': { - if( docs == 1) { + if (docs == 1) { node('CPU') { - ws(per_exec_ws("tvm/build-qemu")) { + ws(per_exec_ws('tvm/build-qemu')) { init_git() sh "${docker_run} ${ci_qemu} ./tests/scripts/task_config_build_qemu.sh" make(ci_qemu, 'build', '-j2') timeout(time: max_time, unit: 'MINUTES') { sh "${docker_run} ${ci_qemu} ./tests/scripts/task_ci_setup.sh" sh "${docker_run} ${ci_qemu} ./tests/scripts/task_python_microtvm.sh" - junit "build/pytest-results/*.xml" + junit 'build/pytest-results/*.xml' } } + } + } else { + 
Utils.markStageSkippedForConditional('BUILD: QEMU') } - } } } stage('Unit Test') { - if( docs == 1) { parallel 'python3: GPU': { - node('TensorCore') { - ws(per_exec_ws("tvm/ut-python-gpu")) { - init_git() - unpack_lib('gpu', tvm_multilib) - timeout(time: max_time, unit: 'MINUTES') { - sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh" - sh "${docker_run} ${ci_gpu} ./tests/scripts/task_sphinx_precheck.sh" - sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_unittest_gpuonly.sh" - sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_integration_gpuonly.sh" - junit "build/pytest-results/*.xml" + node('TensorCore') { + ws(per_exec_ws('tvm/ut-python-gpu')) { + init_git() + unpack_lib('gpu', tvm_multilib) + timeout(time: max_time, unit: 'MINUTES') { + sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh" + sh "${docker_run} ${ci_gpu} ./tests/scripts/task_sphinx_precheck.sh" + sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_unittest_gpuonly.sh" + sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_integration_gpuonly.sh" + junit 'build/pytest-results/*.xml' + } } } - } }, 'python3: i386': { - node('CPU') { - ws(per_exec_ws("tvm/ut-python-i386")) { - init_git() - unpack_lib('i386', tvm_multilib) - timeout(time: max_time, unit: 'MINUTES') { - sh "${docker_run} ${ci_i386} ./tests/scripts/task_ci_setup.sh" - sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_unittest.sh" - sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_integration.sh" - sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_vta_fsim.sh" - junit "build/pytest-results/*.xml" + if (docs == 1) { + node('CPU') { + ws(per_exec_ws('tvm/ut-python-i386')) { + init_git() + unpack_lib('i386', tvm_multilib) + timeout(time: max_time, unit: 'MINUTES') { + sh "${docker_run} ${ci_i386} ./tests/scripts/task_ci_setup.sh" + sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_unittest.sh" + sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_integration.sh" + sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_vta_fsim.sh" + junit 'build/pytest-results/*.xml' + } } } + } else { + Utils.markStageSkippedForConditional('python3: i386') } }, 'python3: arm': { - node('ARM') { - ws(per_exec_ws("tvm/ut-python-arm")) { - init_git() - unpack_lib('arm', tvm_multilib) - timeout(time: max_time, unit: 'MINUTES') { - sh "${docker_run} ${ci_arm} ./tests/scripts/task_ci_setup.sh" - sh "${docker_run} ${ci_arm} ./tests/scripts/task_python_unittest.sh" - sh "${docker_run} ${ci_arm} ./tests/scripts/task_python_arm_compute_library.sh" - junit "build/pytest-results/*.xml" - // sh "${docker_run} ${ci_arm} ./tests/scripts/task_python_integration.sh" + if (docs == 1) { + node('ARM') { + ws(per_exec_ws('tvm/ut-python-arm')) { + init_git() + unpack_lib('arm', tvm_multilib) + timeout(time: max_time, unit: 'MINUTES') { + sh "${docker_run} ${ci_arm} ./tests/scripts/task_ci_setup.sh" + sh "${docker_run} ${ci_arm} ./tests/scripts/task_python_unittest.sh" + sh "${docker_run} ${ci_arm} ./tests/scripts/task_python_arm_compute_library.sh" + junit 'build/pytest-results/*.xml' + // sh "${docker_run} ${ci_arm} ./tests/scripts/task_python_integration.sh" + } } } + } else { + Utils.markStageSkippedForConditional('python3: arm') } }, 'java: GPU': { - node('GPU') { - ws(per_exec_ws("tvm/ut-java")) { - init_git() - unpack_lib('gpu', tvm_multilib) - timeout(time: max_time, unit: 'MINUTES') { - sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh" - sh "${docker_run} ${ci_gpu} ./tests/scripts/task_java_unittest.sh" + 
if (docs == 1) { + node('GPU') { + ws(per_exec_ws('tvm/ut-java')) { + init_git() + unpack_lib('gpu', tvm_multilib) + timeout(time: max_time, unit: 'MINUTES') { + sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh" + sh "${docker_run} ${ci_gpu} ./tests/scripts/task_java_unittest.sh" + } } } + } else { + Utils.markStageSkippedForConditional('java: GPU') } } - } } stage('Integration Test') { parallel 'topi: GPU': { - if( docs == 1) { + if (docs == 1) { node('GPU') { - ws(per_exec_ws("tvm/topi-python-gpu")) { + ws(per_exec_ws('tvm/topi-python-gpu')) { init_git() unpack_lib('gpu', tvm_multilib) timeout(time: max_time, unit: 'MINUTES') { sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh" sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_topi.sh" - junit "build/pytest-results/*.xml" + junit 'build/pytest-results/*.xml' } } } + } else { + Utils.markStageSkippedForConditional('topi: GPU') } }, 'frontend: GPU': { - if( docs == 1) { + if (docs == 1) { node('GPU') { - ws(per_exec_ws("tvm/frontend-python-gpu")) { + ws(per_exec_ws('tvm/frontend-python-gpu')) { init_git() unpack_lib('gpu', tvm_multilib) timeout(time: max_time, unit: 'MINUTES') { sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh" sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh" - junit "build/pytest-results/*.xml" + junit 'build/pytest-results/*.xml' } } } + } else { + Utils.markStageSkippedForConditional('frontend: GPU') } }, 'frontend: CPU': { - if( docs == 1) { + if (docs == 1) { node('CPU') { - ws(per_exec_ws("tvm/frontend-python-cpu")) { + ws(per_exec_ws('tvm/frontend-python-cpu')) { init_git() unpack_lib('cpu', tvm_multilib) timeout(time: max_time, unit: 'MINUTES') { sh "${docker_run} ${ci_cpu} ./tests/scripts/task_ci_setup.sh" sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_frontend_cpu.sh" - junit "build/pytest-results/*.xml" + junit 'build/pytest-results/*.xml' } } } + } else { + Utils.markStageSkippedForConditional('frontend: CPU') } }, 'docs: GPU': { node('TensorCore') { - ws(per_exec_ws("tvm/docs-python-gpu")) { + ws(per_exec_ws('tvm/docs-python-gpu')) { init_git() unpack_lib('gpu', tvm_multilib) timeout(time: max_time, unit: 'MINUTES') { @@ -435,18 +460,18 @@ stage('Build packages') { sh "${docker_run} tlcpack/conda-cuda100 ./conda/build_cuda.sh } } - // Here we could upload the packages to anaconda for releases - // and/or the main branch +// Here we could upload the packages to anaconda for releases +// and/or the main branch } */ stage('Deploy') { node('doc') { - ws(per_exec_ws("tvm/deploy-docs")) { - if (env.BRANCH_NAME == "main") { - unpack_lib('mydocs', 'docs.tgz') - sh "cp docs.tgz /var/docs/docs.tgz" - sh "tar xf docs.tgz -C /var/docs" + ws(per_exec_ws('tvm/deploy-docs')) { + if (env.BRANCH_NAME == 'main') { + unpack_lib('mydocs', 'docs.tgz') + sh 'cp docs.tgz /var/docs/docs.tgz' + sh 'tar xf docs.tgz -C /var/docs' } } } diff --git a/tests/scripts/git_check_tree.sh b/tests/scripts/git_check_tree.sh index 1c82aab354b4..121287b69419 100755 --- a/tests/scripts/git_check_tree.sh +++ b/tests/scripts/git_check_tree.sh @@ -21,18 +21,20 @@ DOCS_DIR=0 OTHER_DIR=0 DOC_DIR="\docs" -changed_files=`git diff --no-commit-id --name-only -r origin/main` +changed_files=`git diff --no-commit-id --name-only -r HEAD~1` for file in $changed_files; do if grep -q "$DOC_DIR" <<< "$file"; then DOCS_DIR=1 else OTHER_DIR=1 + break fi done -if [[ ($DOCS_DIR -eq !$OTHER_DIR) || ($OTHER_DIR -eq 1) ]]; then +if [[ ($OTHER_DIR -eq 1) ]]; then exit 1 else exit 0 fi + From 
e72fcf9191b923b806bdfd22510cc52312ebd20f Mon Sep 17 00:00:00 2001 From: Christopher Sidebottom Date: Thu, 16 Sep 2021 16:23:39 +0100 Subject: [PATCH 06/23] Implementation of relay_to_tir target hook (#8423) This the first new hook proposed in the Additional Target Hooks RFC, longer term the compilation should move to using `Target` proper but this unblocks our current work whilst illustrating the eventual interface via `Target` in `src/relay/backend/contrib/example_target_hooks/relay_to_tir.cc` Ideally the host target would be annotated onto the `IRModule` so as this `Pass` could use it instead of defaulting to C but this is fine for now. --- CMakeLists.txt | 1 + .../modules/contrib/ExampleTargetHooks.cmake | 19 +++ include/tvm/relay/transform.h | 7 + .../example_target_hooks/relay_to_tir.cc | 131 ++++++++++++++++++ src/relay/backend/te_compiler.cc | 35 +++-- src/relay/transforms/target_hooks.cc | 86 ++++++++++++ tests/python/relay/test_target_hooks.py | 53 +++++++ 7 files changed, 323 insertions(+), 9 deletions(-) create mode 100644 cmake/modules/contrib/ExampleTargetHooks.cmake create mode 100644 src/relay/backend/contrib/example_target_hooks/relay_to_tir.cc create mode 100644 src/relay/transforms/target_hooks.cc create mode 100644 tests/python/relay/test_target_hooks.py diff --git a/CMakeLists.txt b/CMakeLists.txt index cf92359475b3..bf0a1c61a341 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -416,6 +416,7 @@ include(cmake/modules/contrib/EthosU.cmake) include(cmake/modules/contrib/BLAS.cmake) include(cmake/modules/contrib/CODEGENC.cmake) include(cmake/modules/contrib/DNNL.cmake) +include(cmake/modules/contrib/ExampleTargetHooks.cmake) include(cmake/modules/contrib/Random.cmake) include(cmake/modules/contrib/Posit.cmake) include(cmake/modules/contrib/MicroStandaloneRuntime.cmake) diff --git a/cmake/modules/contrib/ExampleTargetHooks.cmake b/cmake/modules/contrib/ExampleTargetHooks.cmake new file mode 100644 index 000000000000..eb53dda133d2 --- /dev/null +++ b/cmake/modules/contrib/ExampleTargetHooks.cmake @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +file(GLOB EXAMPLE_TARGET_HOOKS_SRC src/relay/backend/contrib/example_target_hooks/relay_to_tir.cc) +list(APPEND COMPILER_SRCS ${EXAMPLE_TARGET_HOOKS_SRC}) diff --git a/include/tvm/relay/transform.h b/include/tvm/relay/transform.h index bdc46d71a77d..912879dc8a4b 100644 --- a/include/tvm/relay/transform.h +++ b/include/tvm/relay/transform.h @@ -426,6 +426,13 @@ TVM_DLL Pass RemoveUnusedFunctions(Array entry_functions); */ TVM_DLL Pass SimplifyExpr(); +/*! + * \brief Run any registered RelayToTIR passes registered on the functions in a module. + * + * \return The pass. + */ +TVM_DLL Pass RelayToTIRTargetHook(); + /*! 
* \brief A pass for manifesting explicit memory allocations and rewriting * specific dialects. diff --git a/src/relay/backend/contrib/example_target_hooks/relay_to_tir.cc b/src/relay/backend/contrib/example_target_hooks/relay_to_tir.cc new file mode 100644 index 000000000000..6d332803041d --- /dev/null +++ b/src/relay/backend/contrib/example_target_hooks/relay_to_tir.cc @@ -0,0 +1,131 @@ + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +#include +#include +#include +#include +#include +#include +#include + +namespace tvm { +namespace relay { +namespace contrib { +namespace example_target_hooks { + +class ConvertAddToSubtract : public MixedModeMutator { + public: + explicit ConvertAddToSubtract(IRModule ir_module, Target host_target) + : ir_module_(ir_module), host_target_(host_target) {} + + IRModule Mutate() { + GlobalVar main_global_var = ir_module_->GetGlobalVar("main"); + BaseFunc main = ir_module_->Lookup(main_global_var); + Function main_func = GetRef(main.as()); + + // Copy everything across and mutate the body + Function mutated_main = + Function(main_func->params, VisitExpr(main_func->body), main_func->ret_type, + main_func->type_params, main_func->attrs, main_func->span); + + ir_module_->Update(main_global_var, mutated_main); + + return ir_module_; + } + + private: + tir::Load LoadIndex(const tir::Buffer& buffer, const PrimExpr& index) { + return tir::Load(DataType::Float(32), buffer->data, index, tir::const_true()); + } + + void ReplaceAddWithSubtractPrimFunc(const GlobalVar& new_global_var, const Function& func) { + tir::Buffer x_buffer = tir::decl_buffer({8}, DataType::Float(32), "x"); + tir::Buffer y_buffer = tir::decl_buffer({8}, DataType::Float(32), "y"); + tir::Buffer out_buffer = tir::decl_buffer({8}, DataType::Float(32)); + + tir::Var x_var("x", DataType::Handle()); + tir::Var y_var("y", DataType::Handle()); + tir::Var out_var("out", DataType::Handle()); + + Map dict_attrs; + dict_attrs.Set("global_symbol", new_global_var->name_hint); + dict_attrs.Set("tir.noalias", Bool(true)); + + te::Var index("index", DataType::Int(32)); + tir::Sub indexed_sub = tir::Sub(LoadIndex(x_buffer, index), LoadIndex(y_buffer, index)); + tir::Stmt math_body = tir::Store(out_buffer->data, indexed_sub, index, tir::const_true()); + tir::Stmt math_loop = tir::For(index, 0, 8, tir::ForKind::kSerial, math_body); + + Map buffer_map = { + {x_var, x_buffer}, + {y_var, y_buffer}, + {out_var, out_buffer}, + }; + + tir::PrimFunc replacement_func = tir::PrimFunc({x_var, y_var, out_var}, math_loop, VoidType(), + buffer_map, DictAttrs(dict_attrs)); + replacement_func = WithAttr(replacement_func, ::tvm::attr::kTarget, host_target_); + ir_module_->Add(new_global_var, replacement_func); + } + + Expr Rewrite_(const CallNode* pre, const Expr& post) 
override { + if (const CallNode* call = post.as()) { + auto* func = call->op.as(); + if (func == nullptr) { + return post; + } + + auto func_name = func->GetAttr(::tvm::attr::kGlobalSymbol); + if (func_name.defined() && func_name == "replace_add_with_subtract") { + // Introduce a new global var to map the function to and copy the source type + // over for InferType + GlobalVar new_global_var(func_name.value()); + new_global_var->checked_type_ = func->checked_type(); + ReplaceAddWithSubtractPrimFunc(new_global_var, GetRef(func)); + return Call(new_global_var, call->args, call->attrs, call->type_args, call->span); + } + } + + return post; + } + + public: + IRModule ir_module_; + Target host_target_; +}; + +transform::Pass RelayToTIR() { + runtime::TypedPackedFunc pass_func = + [=](IRModule ir_module, transform::PassContext pass_context) { + auto relay_to_tir = ConvertAddToSubtract(ir_module, Target("c")); + return relay_to_tir.Mutate(); + }; + return tvm::transform::CreateModulePass(pass_func, 0, "RelayToTIR", {}); +} + +} // namespace example_target_hooks +} // namespace contrib +} // namespace relay + +TVM_REGISTER_TARGET_KIND("example_target_hook", kDLCPU) + .set_attr("RelayToTIR", + relay::contrib::example_target_hooks::RelayToTIR()); + +} // namespace tvm diff --git a/src/relay/backend/te_compiler.cc b/src/relay/backend/te_compiler.cc index 2e7eb6f9aa6b..e322ccaff1ce 100644 --- a/src/relay/backend/te_compiler.cc +++ b/src/relay/backend/te_compiler.cc @@ -131,6 +131,7 @@ class TECompilerImpl : public TECompilerNode { Array ret; std::unordered_map cached_symbol; std::vector cached_ext_funcs; + for (const auto& it : cache_) { auto src_func = it.first->source_func; ICHECK(src_func.defined()); @@ -383,10 +384,12 @@ class LowerTensorExprMutator : public ExprMutator { * \brief Returns the primitive function associated with \p expr, or * nullptr if none. */ - Function ResolveToPrimitive(Expr expr) { + BaseFunc ResolveToPrimitive(Expr expr) { if (const GlobalVarNode* gvn = expr.as()) { BaseFunc base_func = module_->Lookup(GetRef(gvn)); return ResolveToPrimitive(base_func); + } else if (const tir::PrimFuncNode* prim_func = expr.as()) { + return GetRef(prim_func); } else if (const VarNode* vn = expr.as()) { auto itr = primitive_functions_.find(GetRef(vn)); return itr == primitive_functions_.end() ? Function() : itr->second; @@ -516,10 +519,17 @@ class LowerTensorExprMutator : public ExprMutator { Expr VisitExpr_(const LetNode* let) override { Var var = Downcast(Mutate(let->var)); Expr value = Mutate(let->value); - Function prim_func = ResolveToPrimitive(value); + BaseFunc prim_func = ResolveToPrimitive(value); + if (prim_func.defined()) { + // Already lowered by other means, no need to mutate the Let node + if (prim_func->IsInstance()) { + return GetRef(let); + } + // Remember let var is bound to (possibly indirectly) to a primitive. - primitive_functions_.emplace(let->var, prim_func); + Function func = Downcast(prim_func); + primitive_functions_.emplace(let->var, func); } Expr body = Mutate(let->body); if (prim_func.defined()) { @@ -537,7 +547,7 @@ class LowerTensorExprMutator : public ExprMutator { Call expr = GetRef(call); // Look for (indirect) calls to primitives. - Function prim_func = ResolveToPrimitive(call->op); + BaseFunc prim_func = ResolveToPrimitive(call->op); if (!prim_func.defined()) { // Not a call to a primitive function. 
if (const FunctionNode* fn = call->op.as()) { @@ -546,6 +556,12 @@ class LowerTensorExprMutator : public ExprMutator { return ExprMutator::VisitExpr_(call); } + // Already lowered by other means so we don't need to mutate + // the call + if (prim_func->IsInstance()) { + return expr; + } + // Find the desired target device. Target target; if (prim_func->GetAttr(attr::kCompiler).defined()) { @@ -565,7 +581,8 @@ class LowerTensorExprMutator : public ExprMutator { } // Lower the primitive function for that target. - std::pair pair = LowerFunction(prim_func, target); + Function func = Downcast(prim_func); + std::pair pair = LowerFunction(func, target); // Similarly transform arguments. Array args; @@ -639,8 +656,6 @@ Pass LowerTensorExpr(TargetMap targets, DeviceMap device_context_map, const Stri backend::FunctionInfo UpdateMainWorkspaceSize(const IRModule& mod, tec::TargetMap targets, Map storage_info_map) { - CHECK_EQ(mod->functions.size(), 1) - << "There should only be one function in the module passed to UpdateMainWorkspaceSize"; Function func = Downcast(mod->Lookup("main")); // This is a Map> @@ -909,8 +924,10 @@ Pass LowerTEPass(TargetMap targets, DeviceMap device_context_map, const String& PassContext ctx) { return LowerTE(module, targets, device_context_map, module_name, process_fn); }; - return tvm::transform::Sequential( - {tvm::transform::CreateModulePass(pass_func, 0, "LowerTE", {}), InferType()}); + + return tvm::transform::Sequential({tvm::relay::transform::RelayToTIRTargetHook(), + tvm::transform::CreateModulePass(pass_func, 0, "LowerTE", {}), + InferType()}); } } // namespace tec } // namespace relay diff --git a/src/relay/transforms/target_hooks.cc b/src/relay/transforms/target_hooks.cc new file mode 100644 index 000000000000..40287ded1dd8 --- /dev/null +++ b/src/relay/transforms/target_hooks.cc @@ -0,0 +1,86 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/*! + * \file target_hooks.cc + * \brief Relay passes for processing Target Hooks which have been registered on functions within + * the IRModule + */ + +#include +#include + +namespace tvm { +namespace relay { +namespace transform { + +class TargetHookVisitor : public tvm::relay::MixedModeVisitor { + /*! \brief Collected pass list for all nodes */ + std::vector pass_list_; + /*! 
\brief Attribute map for all registered targets */ + TargetKindAttrMap target_attr_map_; + + public: + TargetHookVisitor() : target_attr_map_(tvm::TargetKind::GetAttrMap("RelayToTIR")) {} + + std::vector Visit(const IRModule& ir_mod) { + for (const auto& it : ir_mod->functions) { + const BaseFunc& base_func = it.second; + VisitExpr(base_func); + } + return pass_list_; + } + + void VisitExpr_(const CallNode* call) override { + // Descend the call tree + for (auto arg : call->args) { + VisitExpr(arg); + } + + if (const FunctionNode* func = call->op.as()) { + if (!func->GetAttr(attr::kCompiler).defined()) { + return; + } + String code_gen_name = func->GetAttr(attr::kCompiler).value(); + Optional target_kind = tvm::TargetKind::Get(code_gen_name); + if (!target_kind || !target_attr_map_.count(target_kind.value())) { + return; + } + Pass custom_target_pass = target_attr_map_[target_kind.value()]; + if (std::find(pass_list_.begin(), pass_list_.end(), custom_target_pass) == pass_list_.end()) { + pass_list_.push_back(custom_target_pass); + } + } + } +}; + +Pass RelayToTIRTargetHook() { + auto pass_func = [=](IRModule mod, const PassContext& pass_ctx) { + auto target_hook_visitor = TargetHookVisitor(); + std::vector pass_list = target_hook_visitor.Visit(mod); + Sequential run_hooks(pass_list); + + return run_hooks(mod); + }; + return tvm::transform::CreateModulePass(pass_func, 0, "RelayToTIRTargetHook", {}); +} + +} // namespace transform +} // namespace relay +} // namespace tvm diff --git a/tests/python/relay/test_target_hooks.py b/tests/python/relay/test_target_hooks.py new file mode 100644 index 000000000000..4d7a7fcdc15b --- /dev/null +++ b/tests/python/relay/test_target_hooks.py @@ -0,0 +1,53 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+"""Unit tests for target hooks.""" +import sys +import numpy as np +import pytest + +from tvm import relay, IRModule + +from utils.external_codegen import ( + set_external_func_attr, + check_aot_executor_result, + check_graph_executor_result, +) + + +@pytest.mark.parametrize("check_result", [check_aot_executor_result, check_graph_executor_result]) +def test_tir_external_generation(check_result): + shape = (8,) + x_data = np.random.randint(255, size=shape).astype("float32") + y_data = np.random.randint(255, size=shape).astype("float32") + inputs = {"x": x_data, "y": y_data} + + x0 = relay.var("x0", shape=shape, dtype="float32") + y0 = relay.var("y0", shape=shape, dtype="float32") + z = x0 + y0 + f = relay.Function([x0, y0], z) + f = set_external_func_attr(f, "example_target_hook", "replace_add_with_subtract") + + x = relay.var("x", shape=(8,), dtype="float32") + y = relay.var("y", shape=(8,), dtype="float32") + call = relay.Call(f, [x, y]) + func = IRModule.from_expr(call) + + check_result(func, inputs, (8,), x_data - y_data) + + +if __name__ == "__main__": + sys.exit(pytest.main([__file__] + sys.argv[1:])) From dac11faeeedd0f88c3714efc9906554a3c1a02d7 Mon Sep 17 00:00:00 2001 From: masahi Date: Fri, 17 Sep 2021 06:34:16 +0900 Subject: [PATCH 07/23] [CUDA] Fix dense tensorcore legalize type error when units is specified (#9030) * Fix dense tensorcore legalize type error when units is specified * revert black change due to different version from CI --- python/tvm/topi/cuda/tensorcore_alter_op.py | 6 ++++++ tests/python/relay/test_pass_legalize_tensorcore.py | 12 ++++++------ 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/python/tvm/topi/cuda/tensorcore_alter_op.py b/python/tvm/topi/cuda/tensorcore_alter_op.py index 50bcafd9f9a7..080ddf28b7c2 100644 --- a/python/tvm/topi/cuda/tensorcore_alter_op.py +++ b/python/tvm/topi/cuda/tensorcore_alter_op.py @@ -176,6 +176,12 @@ def _dense_legalize(attrs, inputs, arg_types): x_ = relay.nn.pad(x, pad_width=((0, dm), (0, dk))) if dm or dk else x y_ = relay.nn.pad(y, pad_width=((0, dn), (0, dk))) if dn or dk else y + + # If units is explicitly specified, it is used to compute the output shape. + # We need to update units after padding to prevent a type error. 
+ if attrs["units"] is not None: + new_attrs["units"] = N + dn + out_ = relay.nn.dense(x_, y_, **new_attrs) out = ( relay.strided_slice(out_, begin=[0, 0], end=[x.value for x in output_tensor.shape]) diff --git a/tests/python/relay/test_pass_legalize_tensorcore.py b/tests/python/relay/test_pass_legalize_tensorcore.py index bcd69f7253ef..97860630dea5 100644 --- a/tests/python/relay/test_pass_legalize_tensorcore.py +++ b/tests/python/relay/test_pass_legalize_tensorcore.py @@ -206,7 +206,7 @@ def expected(): @tvm.testing.uses_gpu def test_legalize_dense(): - def _test_legalize_dense(data_shape, kernel_shape, pad_shape, dtype, do_pad=True): + def _test_legalize_dense(data_shape, kernel_shape, pad_shape, dtype, do_pad=True, units=None): """test legalize dense to enable tensorcore""" M, K = data_shape N, _ = kernel_shape @@ -216,7 +216,7 @@ def _test_legalize_dense(data_shape, kernel_shape, pad_shape, dtype, do_pad=True def before(): x = relay.var("x", shape=data_shape, dtype=dtype) weight = relay.var("weight", shape=kernel_shape, dtype=dtype) - y = relay.nn.dense(x, weight) + y = relay.nn.dense(x, weight, units) y = relay.Function([x, weight], y) return y @@ -237,10 +237,7 @@ def expected(): weight_pad = relay.nn.pad(weight, pad_width=((0, dn), (0, dk))) else: weight_pad = weight - y_pad = relay.nn.dense( - x_pad, - weight_pad, - ) + y_pad = relay.nn.dense(x_pad, weight_pad, units=N + dn if units else None) if dm or dn: y = relay.strided_slice(y_pad, begin=[0, 0], end=out_shape) else: @@ -264,6 +261,9 @@ def expected(): _test_legalize_dense((3, 16), (32, 16), (5, 0, 0), dtype) _test_legalize_dense((2, 16), (32, 16), (0, 0, 0), dtype, False) + # Test if units parameter is correctly updated + _test_legalize_dense((8, 16), (30, 16), (0, 0, 2), "float16", units=30) + _test_legalize_dense((8, 32), (32, 32), (0, 0, 0), "int4", False) _test_legalize_dense((7, 32), (32, 32), (1, 0, 0), "int4") _test_legalize_dense((8, 31), (32, 31), (0, 1, 0), "int4") From 0ba08b1102a248a2887827bff9d242fa55da16b3 Mon Sep 17 00:00:00 2001 From: Anirudh Sundar Date: Fri, 17 Sep 2021 04:27:43 +0530 Subject: [PATCH 08/23] [ONNX] QLinearAveragePool and QLinearGlobalAveragePool contrib op (#9017) * [ONNX] QLinearAveragePool and QLinearGlobalAveragePool contrib op * Fix linter error for variable name and else after return * Separate quantized avg_pool impl and add TODO for global_avg_pool * Fix comment typo --- python/tvm/relay/frontend/onnx.py | 89 +++++++++++-- tests/python/frontend/onnx/test_forward.py | 146 +++++++++++++++++++++ 2 files changed, 225 insertions(+), 10 deletions(-) diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index b30db2e99418..c49f7c675d13 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -276,6 +276,13 @@ class Pool(OnnxOpConverter): @classmethod def _impl_v1(cls, inputs, attr, params): + attr_cvt, data = cls._run_calculation(inputs, attr, params) + return attr_cvt([data], attr, params) + + @classmethod + def _run_calculation(cls, inputs, attr, params): + """Helper method to return the processed input data and AttrCvt object""" + data = inputs[0] input_shape = infer_shape(data) input_dtype = infer_type(data).checked_type.dtype @@ -325,16 +332,19 @@ def _impl_v1(cls, inputs, attr, params): else: attr["layout"] = onnx_default_layout(dims=(len(input_shape) - 2), op_name=cls.name) - return AttrCvt( - op_name=dimension_picker(cls.name), - transforms={ - "kernel_shape": "pool_size", - "pads": ("padding", 0), - "dilations": 
("dilation", 1), - }, - ignores=["storage_order"], - custom_check=dimension_constraint(), - )([data], attr, params) + return ( + AttrCvt( + op_name=dimension_picker(cls.name), + transforms={ + "kernel_shape": "pool_size", + "pads": ("padding", 0), + "dilations": ("dilation", 1), + }, + ignores=["storage_order"], + custom_check=dimension_constraint(), + ), + data, + ) class Absolute(Unary): @@ -355,6 +365,29 @@ class AveragePool(Pool): name = "avg_pool" +class QLinearAveragePool(Pool): + """Operator converter for QLinearAveragePool from Microsoft onnxruntime contrib opset.""" + + name = "avg_pool" + + @classmethod + def _impl_v1(cls, inputs, attr, params): + x_scale = get_scalar(inputs[1], params) + x_zero_point = get_scalar(inputs[2], params, dtype="int32") + y_scale = fold_constant(get_scalar(inputs[3], params)) + y_zero_point = get_scalar(inputs[4], params, dtype="int32") + + attr_cvt, data = cls._run_calculation(inputs, attr, params) + + input_dtype = infer_type(data).checked_type.dtype + # Onnxruntime doesn't actually do this op in integer, they dequantize to fp32 + # and then requantize afer (according to documentation below) + # https://github.com/microsoft/onnxruntime/blob/master/docs/ContribOperators.md#com.microsoft.QLinearAveragePool + float_node = _qnn.op.dequantize(data, x_scale, x_zero_point) + out = attr_cvt([float_node], attr, params) + return _qnn.op.quantize(out, y_scale, y_zero_point, out_dtype=input_dtype) + + class BatchNorm(OnnxOpConverter): """Operator converter for BatchNorm.""" @@ -658,6 +691,40 @@ def _impl_v1(cls, inputs, attr, params): ) +class QLinearGlobalAveragePool(OnnxOpConverter): + "Operator converter for QLinearGlobalAveragePool from Microsoft onnxruntime contrib opset." + + @classmethod + def _impl_v1(cls, inputs, attr, params): + rank = len(infer_shape(inputs[0])) + + x_scale = get_scalar(inputs[1], params) + x_zero_point = get_scalar(inputs[2], params, dtype="int32") + y_scale = fold_constant(get_scalar(inputs[3], params)) + y_zero_point = get_scalar(inputs[4], params, dtype="int32") + + input_dtype = infer_type(inputs[0]).checked_type.dtype + + # Onnxruntime documentation does not mention that this global avg_pool should follow the + # sequence dequantize -> float op -> quantize, but that is how QLinearAveragePool is done. + # + # This op also follows the same pattern since qnn op is not available right now. + # TODO: Generate QNN op to perform quantized operation instead of dequant -> op -> quant + x = _qnn.op.dequantize(inputs[0], x_scale, x_zero_point) + if rank == 3: + out = _op.nn.global_avg_pool1d(x) + elif rank == 4: + out = _op.nn.global_avg_pool2d(x) + elif rank == 5: + out = _op.nn.global_avg_pool3d(x) + else: + raise NotImplementedError( + "Global average pooling is only implemented for 1D, 2D, and 3D kernels, got %dD." + % (rank - 2), + ) + return _qnn.op.quantize(out, y_scale, y_zero_point, out_dtype=input_dtype) + + class GlobalMaxPool(OnnxOpConverter): """Operator converter for GlobalMaxPool""" @@ -3964,6 +4031,8 @@ def _get_convert_map(opset): "QLinearAdd": QLinearAdd.get_converter(opset), "QLinearMul": QLinearMul.get_converter(opset), "ConvInteger": ConvInteger.get_converter(opset), + "QLinearAveragePool": QLinearAveragePool.get_converter(opset), + "QLinearGlobalAveragePool": QLinearGlobalAveragePool.get_converter(opset), # Random number generation. 
"RandomUniform": RandomUniform.get_converter(opset), # Loss functions / training diff --git a/tests/python/frontend/onnx/test_forward.py b/tests/python/frontend/onnx/test_forward.py index 7318ff7a3c7c..35abc6d896b3 100644 --- a/tests/python/frontend/onnx/test_forward.py +++ b/tests/python/frontend/onnx/test_forward.py @@ -3056,6 +3056,152 @@ def verify_global_pooling(x_shape, mode): verify_global_pooling([4, 1, 2, 6, 4], mode) +@tvm.testing.parametrize_targets +def test_qlinear_average_pool(target, dev): + def verify_qlinear_average_pool( + x_shape, kernel_shape, strides, pads, out_shape, auto_pad="NOTSET" + ): + input_nodes = [ + helper.make_tensor_value_info("X", TensorProto.FLOAT, list(x_shape)), + ] + + output_nodes = [ + helper.make_tensor_value_info("Y", TensorProto.FLOAT, list(out_shape)), + ] + + input_names = ["X"] + + node = helper.make_node( + "AveragePool", + inputs=input_names, + outputs=["Y"], + kernel_shape=kernel_shape, + strides=strides, + ) + + if pads is None: + pad_attr = helper.make_attribute("auto_pad", auto_pad) + else: + pad_attr = helper.make_attribute("pads", pads) + node.attribute.append(pad_attr) + + graph = helper.make_graph( + [node], + "qlinear_average_pool_test", + inputs=input_nodes, + outputs=output_nodes, + ) + + model = helper.make_model(graph, producer_name="qlinear_average_pool_Test") + quantize_and_verify_with_ort(model, input_names, [x_shape], target, dev) + + # Pool1D + verify_qlinear_average_pool( + x_shape=[1, 1, 32], + kernel_shape=[3], + strides=[1], + pads=[1, 1], + out_shape=[1, 1, 32], + ) + # Pool2D + verify_qlinear_average_pool( + x_shape=[1, 1, 32, 32], + kernel_shape=[3, 3], + strides=[1, 1], + pads=[1, 1, 1, 1], + out_shape=[1, 1, 32, 32], + ) + + # Pool1D with stride + verify_qlinear_average_pool( + x_shape=[1, 1, 32], + kernel_shape=[3], + strides=[2], + pads=[1, 1], + out_shape=[1, 1, 16], + ) + # Pool2D with stride + verify_qlinear_average_pool( + x_shape=[1, 1, 32, 32], + kernel_shape=[3, 3], + strides=[2, 2], + pads=[1, 1, 1, 1], + out_shape=[1, 1, 16, 16], + ) + + # Pool1D with stride and autopadding + verify_qlinear_average_pool( + x_shape=[1, 1, 32], + kernel_shape=[3], + strides=[2], + pads=None, + out_shape=[1, 1, 16], + auto_pad="SAME_UPPER", + ) + # Pool2D with stride and autopadding + verify_qlinear_average_pool( + x_shape=[1, 1, 32, 32], + kernel_shape=[3, 3], + strides=[2, 2], + pads=None, + out_shape=[1, 1, 16, 16], + auto_pad="SAME_UPPER", + ) + + # Pool3D with stride + verify_qlinear_average_pool( + x_shape=[1, 1, 32, 32, 32], + kernel_shape=[3, 3, 3], + strides=[2, 2, 2], + pads=[1, 1, 1, 1, 1, 1], + out_shape=[1, 1, 16, 16, 16], + ) + + # Pool3D with stride and autopadding + verify_qlinear_average_pool( + x_shape=[1, 1, 32, 32, 32], + kernel_shape=[3, 3, 3], + strides=[2, 2, 2], + pads=None, + out_shape=[1, 1, 16, 16, 16], + auto_pad="SAME_UPPER", + ) + + +@tvm.testing.parametrize_targets +def test_qlinear_global_average_pool(target, dev): + def verify_qlinear_global_average_pool(x_shape): + out_shape = x_shape[:2] + [1] * (len(x_shape) - 2) + + node_type = "GlobalAveragePool" + + input_names = ["X"] + + pool_node = helper.make_node(node_type, inputs=input_names, outputs=["Y"]) + + graph = helper.make_graph( + [pool_node], + "qlinear_global_average_pool_test", + inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, list(x_shape))], + outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, list(out_shape))], + ) + + model = helper.make_model(graph, producer_name="qlinear_global_average_pool_test") + 
quantize_and_verify_with_ort(model, input_names, [x_shape], target, dev) + + # 1D Pooling (NCW) + verify_qlinear_global_average_pool([1, 8, 8]) + verify_qlinear_global_average_pool([4, 1, 4]) + + # 2D Pooling (NCHW) + verify_qlinear_global_average_pool([1, 8, 8, 8]) + verify_qlinear_global_average_pool([4, 1, 6, 4]) + + # 3D Pooling (NCDHW) + verify_qlinear_global_average_pool([1, 8, 6, 8, 8]) + verify_qlinear_global_average_pool([4, 1, 2, 6, 4]) + + @tvm.testing.parametrize_targets def test_mod(target, dev): def verify_mod(x_shape, y_shape, fmod, out_shape, dtype="float32"): From 390152932ee751a477940f3686b47f160f45ba29 Mon Sep 17 00:00:00 2001 From: Leandro Nunes Date: Fri, 17 Sep 2021 02:16:57 +0100 Subject: [PATCH 09/23] Fix line break in `setup.py` (#9029) --- python/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/setup.py b/python/setup.py index f6afa42610d4..1b2a9d3ee965 100644 --- a/python/setup.py +++ b/python/setup.py @@ -166,7 +166,7 @@ def is_pure(self): if os.path.isfile(path): shutil.copy(path, os.path.join(CURRENT_DIR, "tvm")) _, libname = os.path.split(path) - fo.write(f"include tvm/{libname}%s") + fo.write(f"include tvm/{libname}\n") if os.path.isdir(path): _, libname = os.path.split(path) From d59034977c6e07f3da7f76a8623f903f822fbfd4 Mon Sep 17 00:00:00 2001 From: AndrewZhaoLuo Date: Thu, 16 Sep 2021 18:18:31 -0700 Subject: [PATCH 10/23] [Onnx] Add SoftmaxCrossEntropyLoss (#8906) * nll loss v1 * add converter * decode strings in byte form * decode variable length inputs * make shapes correct * unsqueeze * proper weight handling * simplify if statement * fix tests * add comment about tests * delete extra file * lint * so cool * Update CI Lint Image Version (#8841) * Update CI Lint Image Version * trigger * [BUG] ToBasicBlockNormalForm immutability (#8778) * ToBasicBlockNormalForm immutability * better comment on ToBasicBlock * refine comment of ToBasicBlockForm * [GRAPH EXECUTOR,VM] Add benchmarking function to graph executor and vm (#8807) * [GRAPH EXECUTOR,VM] Add benchmarking function to graph executor and vm This new benchmarking function is just a convenience function for calling time_evaluator on the underlying module. Hopefully this should make it easier for users to get good benchmarks of their code. * formatting * import order * more test, more comments, more precision * fix tests * add seconds descriptions to doc * Apply CPPLint to CRT Tests (#8844) This one was a bit trickier as there was more usage of dynamic arrays and less safe casts. I've tried to minimise the changes to just those required to passing linting. * [Relay][TOPI] Support of depthwise conv2d NHWC for Mali/Bifrost. (#8584) * [Relay][TOPI] Support of depthwise conv2d NHWC for Mali/Bifrost. Added initial tunable autotvm templates for depthwise conv2d with NHWC layout for Mali and Bifrost. * [Relay][TOPI] Misc fixes for depthwise conv2d Mali/Bifrost. - Fix assert for Bifrost. - Set reasonable default axis splits to avoid using tophub for NHWC. - Fixed typo: arm cpu -> Mali. * [Relay][TOPI] Fixed formatting in depthwise conv2d Mali/Bifrost. 
* Support for CMSIS-NN in Corstone300 Makefile (#8831) Change-Id: Ifc2305db4e11d1d15d45407287f8f0bea469100a * [microtvm][Zephyr] Increase timeout to fix flaky tests (#8846) * increase timeout * trigger * [AMP] Bump up tolerance on flaky test (#8850) * bumpy up tol * bumped tolerance up even more * jostle ci * [Hexagon] Rework tvm.target.hexagon() interface (#8823) * [Hexagon] Rework tvm.target.hexagon() interface Make the tvm.target.hexagon() function take most options as keyword parameters. This will allow adding additional parameters without changing the interface. No changes are required to existing code, except for changing positional parameters following the CPU version to keyword parameters, and updating the names of the keyword parameters: sim_args -> sim_options, llvm_args -> llvm_options, although the old names will be accepted for the time being. * formatting * change ' to " * Rename 'args' to 'config' for clarity * Use 'strip' instad of 'replace' * Restart build * [Pattern matching] Add an option to rewrite the graph only once (#8843) * [Pattern matching] Add an option to rewrite the graph only once If the graph returned from the callback consists of the original pattern, the rewriter will run in the loop, which is not always desired. So this patch proposes an option to run the rewriter only once. Change-Id: I85cf0a055b8961d52394f21c1e4d7aad0a7e1d06 * Make rewrite_once default to false Change-Id: Idf6f01f254c403158883681e75c2a5978efbd2d0 * update gpu and cpu (#8853) * VTA cmake change to include Verilator header for building tsim library (#8797) * VTA cmake file require Verilator include for tsim target. VTA module.cc uses svOpenArrayHandle to send wide data through DPI * Refactor Verialtor check conditions * Build TSIM only for CPU target. CPU target don't use -Werror to compile with Verilator. Jenkinsfile to have tvm_multilib_tsim defined for CPU build target. * remove build/libvta_tsim.so from non tsim targeting builds * Revert to enable TSIM build i386. Revert to -Werror in CPU config. Remove verilator CPP objects from cmake config for tsim and put them as include into vta module.cc to avoid Verilator compilation warnings * [FIX] Bug fix for a floormod rewrite simplify rule (#8852) * Update rewrite_simplify.cc * Update test_arith_rewrite_simplify.py * Update test_arith_rewrite_simplify.py * Update test_arith_rewrite_simplify.py * move rust lint script (#8726) * [AMP] Disallow fp16 conversion for summation-like ops (#8810) * [AMP] Disallow fp16 conversion for summation-like ops * test only structural equality * [TOPI] [Relay] Sparse Conv2d Implementation for 3x3 kernels (#8605) * [topi] add spconv2d_3x3 nhwc * [relay] sparse_conv2d: add kernel_size attr * [relay] add strategy for spconv2d_3x3 nhwc * [relay] pass to convert spconv2d with const args * [relay] convert sparse conv2d pass fixes * use array for sparse conv2d attr * fixup 1x1 tests; new 3x3 tests * extend repeat_interleave op for relay.Expr (#8839) Co-authored-by: Valery Chernov * Change AOT from ExprVisitor to MixedModeVisitor (#8856) This should allow better scale-ability for AOT when targeting larger networks. 
* Add a PaddlePaddle Frontend (#8645) * fix some problems for matmul * fix some problems for matmul * add alpha parameter for matmul * remove unnecessary condition * add TranslatedLayer which support model loaded by jit.load * add mul operator support * Add padding mode support for conv/pool2d * support 4 two-tuples * add paddle test case * add paddle conv2d case * update test_forward.py * fix paddle convert_matmul * add paddle multiply and matmul op test case * add test case and fix bug * delete import pandas * add paddlepaddle tests * modify the variable name of convert_reshape * formatting * formatting * use black to format python code * pylint check * Remove fluid api * black format Co-authored-by: root Co-authored-by: wjj19950828 Co-authored-by: heliqi <1101791222@qq.com> Co-authored-by: Junru Shao * [Runtime] add set_output_zero_copy (#8497) * Update graph_executor.h * Update graph_executor.cc * modify zero copy UT add set input zero copy * modify C style * add runtime test * realy build generatr the json Co-authored-by: hwstaff * [Hexagon] Change declaration order of unique_ptr objects to fix crash (#8859) A crash occurs when automatically deleting an instance of CodeGenHexagon because the LLVMContext object has already been freed. Objects of both types are created using unique_ptr, but the object managed by the LLVMContext unique_ptr is passed to CodeGenHexagon object (not as a unique_ptr). This crash is fixed by moving the declaration of the LLVMContext object before the CodeGenHexagon object. I'm not sure if this is the best way to fix this, but it does fix the crash. Also, in other files, the LLVMContext object is always created first. Co-authored-by: Cahoon, Brendon * [Graph Executor, VM] Add end to end benchmarking of models (#8858) Add benchmarking that includes ovearhead of transfering inputs and outputs to and from the device. This should give an accurate measurement of the runtime a user would see when using the model. This is accomplished by adding functions that run from inputs to return values into the graph executor and the VM. * [UnitTests] Expose TVM pytest helpers as plugin (#8532) * [UnitTests] Expose TVM pytest helpers as plugin Previously, pytest helper utilities such as automatic parametrization of `target`/`dev`, or `tvm.testing.parameter` were only available for tests within the `${TVM_HOME}/tests` directory. This PR extracts the helper utilities into an importable plugin, which can be used in external tests (e.g. one-off debugging). * [UnitTests] Refactor the plugin-specific logic out into plugin.py. * [UnitTests] Moved marker definition out to global variable. 
* Remove AOT Executor header from Arduino project (#8857) * [Community] @mdw-octoml -> Reviewer (#8868) * [TIR] Fix opaque access in buffer locator pass and match_buffer in region detector (#8855) * init * fix * Update src/tir/transforms/plan_update_buffer_allocation_location.cc Co-authored-by: Ruihang Lai * Update src/tir/transforms/plan_update_buffer_allocation_location.cc Co-authored-by: Ruihang Lai * address Co-authored-by: Junru Shao Co-authored-by: Ruihang Lai * [Autoscheduler] Configurable workload keys (#8862) * change workload keys * remove binary string comparison * append the tuple not every integer * clean up * lint * dump workload keys to dags * fix things * change some strings * misc fixes, add tests * jostle ci * [Tutorial][Executor] Fix the usage of executors in tutorials (#8586) * fix: executor usage for keras tutorial * fix: executor usage for onnx tutorial * [Tutorial][Executor] Fix executors in tutorials * [Frontend][Onnx] Simplify onnx input since name accesses are not reliable. (#8867) * Simplify onnx input since name accesses are no longer supported. * move Celu importer. * [TIR] GetBlockReadWriteRegion (#8875) * [TIR] GetBlockReadWriteRegion * Fix black issue * Use constant reference for the interface * Fix lint issue * [RISCV] Add support for llvm parameter -mabi (-target-abi) (#8860) * [Community] @manupa-arm -> Committer (#8870) * adding Manupa to the contributors list * re-trigger CI * [RPC] Fix ios_rpc build (#8864) * [Vulkan][Target] Added the driver name to the vulkan target string. (#8882) Driver name (e.g. "NVIDIA", "radv", "AMD open-source driver") is read from the `driverName` property in [VkPhysicalDeviceDriverProperties](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkPhysicalDeviceDriverProperties.html), or is left as `"unknown_driver_name"` if the driver does not support querying the driver name. 
* [ONNX][TOPI] Support select_last_index for argmin/max (#8816) * support select_last_index for argmin/max * reverse conditions which made on accident * forward args in reduce.py * make proper nodes for reduction ops * remove complicated nested lambdas * fix lambda capture for conversion * forward more arguments * forward more args * enable onnx tests * wrapping casts to remove ambiguity * revert changes extraneous * correct incorrect attrs being used for ops * change attributes * remove old impl * register new attribute node * clean up test * reformat * reformat * coolio * stable comparison * casts to avoid ambiguity * casting more * correct arg passing * support select_last_index for argmin/max * reverse conditions which made on accident * forward args in reduce.py * make proper nodes for reduction ops * remove complicated nested lambdas * fix lambda capture for conversion * forward more arguments * forward more args * enable onnx tests * wrapping casts to remove ambiguity * revert changes extraneous * correct incorrect attrs being used for ops * change attributes * remove old impl * register new attribute node * clean up test * reformat * reformat * coolio * stable comparison * casts to avoid ambiguity * casting more * correct arg passing * fix broken input * OneElementReduceAttrs-->ArgReduceAttrs" * reduce boilerplate * change names * remove log statement * jostle ci Co-authored-by: Andrew Zhao Luo * refactor optimize GEMM on CPU tutorial (#8825) * refactor optimize GEMM on CPU tutorial * fix lint errors * fix more lint errors * fix typo * fix problem with redefinition of `k` add TODO and comments around loop unrolling clarify note on the array packing figure * reword general description of array packing * grap kaxis from compute definition * remove duplicate comments on unrolling * Change target string to Target object in the TE compiler and interpreter (#8835) * # This is a combination of 2 commits. # This is the 1st commit message: Initial changes # This is the commit message #2: Ftarget string -> Target object works! 
* Fix remaining target strings * fix bad rebase * Fix typo * 1 more bad rebase fix * Lint * typo * Forgot to commit this * Add TargetStrHash and Map * [TensorIR][M2a] CacheRead/Write (#8863) Co-authored-by: Junru Shao Co-authored-by: Wuwei Lin Co-authored-by: Ruihang Lai Co-authored-by: Hongyi Jin <3231950289@qq.com> Co-authored-by: Siyuan Feng Co-authored-by: Bohan Hou <32121147+spectrometerHBH@users.noreply.github.com> * [CI] make pre-commit hooks to run on every push instead of every commit (#8888) * [TVMScript] Fix printing ForNode annotations (#8891) * [1/10] CMSIS-NN graph partitioner for softmax (#8653) * cmsis graph partitioner for softmax Change-Id: I80ecd7bc5351f241b4674ef53b36e4398c8adb83 * Updated docstring in the partioning function Change-Id: Ieb4b623e5929cfdb6aa0235db64c825fac8d7055 * [microTVM][RVM] Add Arduino RVM (#8748) * Functioning Arduino Vagrant VM Begin building Arduino Vagrant VM Mostly working Vagrant VM Changes for debugging Add ignored json file Fix venv path * Generalize parts of RVM for multiple platforms cwd hack Add unit tests from apps directory to task_python_microtvm.sh Generalize parts of RVM for multiple platforms * Add Vagrantfile lint exceptions * Address PR comments Address Mehrdad's PR comments More PR comments Documentation tweaks Add dialout group to user * Rerun tests * Spresense fix * Rerun CI tests * Rerun tests * sce loss example * add comments, remove other tests * lint * lint * jostle * lint up * jostle * uncomment some tests * proper return * clean up * lint * minor merge errors Co-authored-by: Andrew Zhao Luo Co-authored-by: Mehrdad Hessar Co-authored-by: Jiawei Liu Co-authored-by: Tristan Konolige Co-authored-by: Christopher Sidebottom Co-authored-by: Anastasia Stulova <38433336+AnastasiaStulova@users.noreply.github.com> Co-authored-by: Ashutosh Parkhi <86472128+ashutosh-arm@users.noreply.github.com> Co-authored-by: Krzysztof Parzyszek Co-authored-by: Elen Kalda Co-authored-by: Anton Sorokin Co-authored-by: Chenfan Co-authored-by: masahi Co-authored-by: Tantalus13A98B5F Co-authored-by: Valery Chernov Co-authored-by: Valery Chernov Co-authored-by: Jason <928090362@qq.com> Co-authored-by: root Co-authored-by: wjj19950828 Co-authored-by: heliqi <1101791222@qq.com> Co-authored-by: Junru Shao Co-authored-by: Swift.Sun Co-authored-by: hwstaff Co-authored-by: Cahoon, Brendon Co-authored-by: Lunderberg Co-authored-by: Yizhi Liu Co-authored-by: Siyuan Feng Co-authored-by: Ruihang Lai Co-authored-by: Josh Fromm Co-authored-by: Alexander Pivovarov Co-authored-by: Thierry Moreau Co-authored-by: Egor Churaev Co-authored-by: Adam Straw Co-authored-by: Lily Orth-Smith Co-authored-by: Jared Roesch Co-authored-by: Siyuan Feng Co-authored-by: Wuwei Lin Co-authored-by: Hongyi Jin <3231950289@qq.com> Co-authored-by: Bohan Hou <32121147+spectrometerHBH@users.noreply.github.com> Co-authored-by: Michalis Papadimitriou Co-authored-by: Gavin Uberti --- python/tvm/relay/frontend/onnx.py | 107 ++++++++++++++++----- tests/python/frontend/onnx/test_forward.py | 41 ++------ 2 files changed, 88 insertions(+), 60 deletions(-) diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index c49f7c675d13..c6eed9c64e6c 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -19,6 +19,7 @@ """ONNX: Open Neural Network Exchange frontend for Relay.""" import copy import warnings +from typing import Optional import numpy as np import tvm @@ -1926,6 +1927,14 @@ def _impl_v13(cls, inputs, attr, params): class 
LogSoftmax(OnnxOpConverter): """Operator converter for Softmax.""" + @classmethod + def run_calculation(cls, x, axes): + """Run the calculation for Log Softmax calculation.""" + m = _op.max(x, axes, keepdims=True) + e = _op.exp(x - m) + s = _op.sum(e, axes, keepdims=True) + return x - m - _op.log(s) + @classmethod def _impl_v1(cls, inputs, attr, params): axis = attr.get("axis", 1) @@ -1933,11 +1942,7 @@ def _impl_v1(cls, inputs, attr, params): if axis < 0: axis += ndim axes = list(range(axis, ndim)) - x = inputs[0] - m = _op.max(x, axes, keepdims=True) - e = _op.exp(x - m) - s = _op.sum(e, axes, keepdims=True) - return x - m - _op.log(s) + return cls.run_calculation(inputs[0], axes) @classmethod def _impl_v13(cls, inputs, attr, params): @@ -1946,11 +1951,7 @@ def _impl_v13(cls, inputs, attr, params): if axis < 0: axis += ndim axes = [axis] - x = inputs[0] - m = _op.max(x, axes, keepdims=True) - e = _op.exp(x - m) - s = _op.sum(e, axes, keepdims=True) - return x - m - _op.log(s) + return cls.run_calculation(inputs[0], axes) class Hardmax(OnnxOpConverter): @@ -3611,33 +3612,30 @@ def _impl_v1(cls, inputs, attr, params): class NegativeLogLikelihoodLoss(OnnxOpConverter): - """Operator converter for random_uniform""" + """Operator converter for NegativeLogLikehoodLoss""" VALID_REDUCTIONS = {"mean", "sum", "none"} @classmethod - def _impl_v13(cls, inputs, attr, params): - ignore_index = attr.get("ignore_index", None) - reduction = attr.get("reduction", b"mean").decode("utf-8") - - if reduction not in cls.VALID_REDUCTIONS: - raise ValueError( - f"Unknown reduction type {reduction}, choices are {cls.VALID_REDUCTIONS}" - ) - - input_tensor, target_tensor = inputs[0], inputs[1] - + def run_calculation( + cls: "NegativeLogLikelihoodLoss", + input_tensor: relay.Expr, + target_tensor: relay.Expr, + weight_tensor: Optional[relay.Expr], + ignore_index: int, + ): + """Run calculation for NegativeLogLikelihood, returning output tensor and + weight tensor used for mean-style reductions. + """ # Convert negative indices --> positive indices for gather ops, note we have to # use the original target tensor to interact with ignore_index to have proper behavior. 
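        # Illustrative example (added in this write-up, not in the original source): with 5
        # classes along axis 1, a raw target index of -2 is gathered from class index 3,
        # while the ignore_index comparison further down still sees the original -2.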
normalized_target_tensor = normalize_gather_indices(input_tensor, target_tensor, 1) - if len(inputs) == 3: - weight_tensor = inputs[2] - else: + if weight_tensor is None: channels = infer_shape(input_tensor)[1] weight_tensor = relay.ones( [channels], - dtype=input_tensor.type_annotation.dtype, + dtype=infer_type(input_tensor).checked_type.dtype, ) loss = -relay.gather( @@ -3670,7 +3668,30 @@ def _impl_v13(cls, inputs, attr, params): select_weights *= relay.cast_like(mask_tensor, select_weights) weight_total = relay.sum(select_weights) + return loss, weight_total + + @classmethod + def _impl_v13(cls, inputs, attr, params): + ignore_index = attr.get("ignore_index", None) + reduction = attr.get("reduction", b"mean").decode("utf-8") + + if reduction not in cls.VALID_REDUCTIONS: + raise ValueError( + f"Unknown reduction type {reduction}, choices are {cls.VALID_REDUCTIONS}" + ) + input_tensor, target_tensor = inputs[0], inputs[1] + if len(inputs) == 3: + weight_tensor = inputs[2] + else: + weight_tensor = None + + loss, weight_total = cls.run_calculation( + input_tensor, + target_tensor, + weight_tensor=weight_tensor, + ignore_index=ignore_index, + ) if reduction == "mean": return relay.sum(loss) / weight_total if reduction == "sum": @@ -3679,6 +3700,39 @@ def _impl_v13(cls, inputs, attr, params): return loss +class SoftmaxCrossEntropyLoss(OnnxOpConverter): + """Operator converter for SCE_loss""" + + @classmethod + def _impl_v13(cls, inputs, attr, params): + ignore_index = attr.get("ignore_index", None) + reduction = attr.get("reduction", b"mean").decode("utf-8") + input_tensor, target_tensor = inputs[0], inputs[1] + if len(inputs) == 3: + weight_tensor = inputs[2] + else: + weight_tensor = None + + get_log_prob = attr["tvm_custom"]["num_outputs"] == 2 + log_softmax_tensor = LogSoftmax.run_calculation(input_tensor, axes=[1]) + + loss, weight_total = NegativeLogLikelihoodLoss.run_calculation( + log_softmax_tensor, + target_tensor, + weight_tensor, + ignore_index=ignore_index, + ) + + if reduction == "mean": + loss = relay.sum(loss) / weight_total + elif reduction == "sum": + loss = relay.sum(loss) + + if get_log_prob: + return relay.TupleWrapper(relay.Tuple((loss, log_softmax_tensor)), 2) + return loss + + class Adagrad(OnnxOpConverter): """Operator converter for adagrad op.""" @@ -4037,6 +4091,7 @@ def _get_convert_map(opset): "RandomUniform": RandomUniform.get_converter(opset), # Loss functions / training "NegativeLogLikelihoodLoss": NegativeLogLikelihoodLoss.get_converter(opset), + "SoftmaxCrossEntropyLoss": SoftmaxCrossEntropyLoss.get_converter(opset), "Adagrad": Adagrad.get_converter(opset), "Adam": Adam.get_converter(opset), "Momentum": Momentum.get_converter(opset), diff --git a/tests/python/frontend/onnx/test_forward.py b/tests/python/frontend/onnx/test_forward.py index 35abc6d896b3..3aef9a2a2ceb 100644 --- a/tests/python/frontend/onnx/test_forward.py +++ b/tests/python/frontend/onnx/test_forward.py @@ -4944,73 +4944,40 @@ def verify_eyelike(indata): "test_round", "test_scan9_sum", "test_scan_sum", - "test_sce_NCd1_mean_weight_negative_ii", + # With reduce_sum supported fully, these expanded tests should pass "test_sce_NCd1_mean_weight_negative_ii_expanded", - "test_sce_NCd1_mean_weight_negative_ii_log_prob", "test_sce_NCd1_mean_weight_negative_ii_log_prob_expanded", - "test_sce_NCd1d2d3_none_no_weight_negative_ii", "test_sce_NCd1d2d3_none_no_weight_negative_ii_expanded", - "test_sce_NCd1d2d3_none_no_weight_negative_ii_log_prob", 
"test_sce_NCd1d2d3_none_no_weight_negative_ii_log_prob_expanded", - "test_sce_NCd1d2d3_sum_weight_high_ii", "test_sce_NCd1d2d3_sum_weight_high_ii_expanded", - "test_sce_NCd1d2d3_sum_weight_high_ii_log_prob", "test_sce_NCd1d2d3_sum_weight_high_ii_log_prob_expanded", - "test_sce_NCd1d2d3d4d5_mean_weight", "test_sce_NCd1d2d3d4d5_mean_weight_expanded", - "test_sce_NCd1d2d3d4d5_mean_weight_log_prob", "test_sce_NCd1d2d3d4d5_mean_weight_log_prob_expanded", - "test_sce_NCd1d2d3d4d5_none_no_weight", "test_sce_NCd1d2d3d4d5_none_no_weight_expanded", - "test_sce_NCd1d2d3d4d5_none_no_weight_log_prob", "test_sce_NCd1d2d3d4d5_none_no_weight_log_prob_expanded", - "test_sce_mean", - "test_sce_mean_3d", "test_sce_mean_3d_expanded", - "test_sce_mean_3d_log_prob", "test_sce_mean_3d_log_prob_expanded", "test_sce_mean_expanded", - "test_sce_mean_log_prob", "test_sce_mean_log_prob_expanded", - "test_sce_mean_no_weight_ii", - "test_sce_mean_no_weight_ii_3d", "test_sce_mean_no_weight_ii_3d_expanded", - "test_sce_mean_no_weight_ii_3d_log_prob", "test_sce_mean_no_weight_ii_3d_log_prob_expanded", - "test_sce_mean_no_weight_ii_4d", "test_sce_mean_no_weight_ii_4d_expanded", - "test_sce_mean_no_weight_ii_4d_log_prob", "test_sce_mean_no_weight_ii_4d_log_prob_expanded", "test_sce_mean_no_weight_ii_expanded", - "test_sce_mean_no_weight_ii_log_prob", "test_sce_mean_no_weight_ii_log_prob_expanded", - "test_sce_mean_weight", "test_sce_mean_weight_expanded", - "test_sce_mean_weight_ii", - "test_sce_mean_weight_ii_3d", "test_sce_mean_weight_ii_3d_expanded", - "test_sce_mean_weight_ii_3d_log_prob", "test_sce_mean_weight_ii_3d_log_prob_expanded", - "test_sce_mean_weight_ii_4d", "test_sce_mean_weight_ii_4d_expanded", - "test_sce_mean_weight_ii_4d_log_prob", "test_sce_mean_weight_ii_4d_log_prob_expanded", "test_sce_mean_weight_ii_expanded", - "test_sce_mean_weight_ii_log_prob", "test_sce_mean_weight_ii_log_prob_expanded", - "test_sce_mean_weight_log_prob", "test_sce_mean_weight_log_prob_expanded", - "test_sce_none", "test_sce_none_expanded", - "test_sce_none_log_prob", "test_sce_none_log_prob_expanded", - "test_sce_none_weights", "test_sce_none_weights_expanded", - "test_sce_none_weights_log_prob", "test_sce_none_weights_log_prob_expanded", - "test_sce_sum", "test_sce_sum_expanded", - "test_sce_sum_log_prob", "test_sce_sum_log_prob_expanded", "test_sequence_insert_at_back", "test_sequence_insert_at_front", @@ -5093,6 +5060,12 @@ def test_onnx_nodes(target, dev, onnx_test): # for some reason the ONNX test crops the # roialign results to 4 decimal places atol = 1e-4 + + if "_sce_" in test_dir: + # complicated loss functions like SoftmaxCrossEntropy can have minor variations + # in accuracy depending on implementation + atol = 1e-4 + onnx_model = onnx.load(test_dir + "/model.onnx") inputs = [] outputs = [] From c3571c677258c471c8893cfa2d08691235d8dc9c Mon Sep 17 00:00:00 2001 From: Krzysztof Parzyszek Date: Thu, 16 Sep 2021 22:08:26 -0500 Subject: [PATCH 11/23] [Hexagon] Don't use {} initialization with FastRPC structures (#9033) The data members in FastRPC structures aren't guaranteed to remain in the same order. Replace aggregate initialization with direct, member-by-member initialization. 
--- src/runtime/hexagon/launcher/launcher_android.cc | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/runtime/hexagon/launcher/launcher_android.cc b/src/runtime/hexagon/launcher/launcher_android.cc index c0e428cb63ca..008e4fdfe1c4 100644 --- a/src/runtime/hexagon/launcher/launcher_android.cc +++ b/src/runtime/hexagon/launcher/launcher_android.cc @@ -32,7 +32,9 @@ #include "launcher_rpc.h" AEEResult enable_unsigned_pd(bool enable) { - remote_rpc_control_unsigned_module data{static_cast(enable), CDSP_DOMAIN_ID}; + remote_rpc_control_unsigned_module data; + data.domain = CDSP_DOMAIN_ID; + data.enable = static_cast(enable); AEEResult rc = remote_session_control(DSPRPC_CONTROL_UNSIGNED_MODULE, &data, sizeof(data)); if (rc != AEE_SUCCESS) { std::cout << "error " << (enable ? "enabling" : "disabling") << " unsigned PD\n"; @@ -41,8 +43,11 @@ AEEResult enable_unsigned_pd(bool enable) { } AEEResult set_remote_stack_size(int size) { - remote_rpc_thread_params th_data{CDSP_DOMAIN_ID, -1, size}; - AEEResult rc = remote_session_control(FASTRPC_THREAD_PARAMS, &th_data, sizeof(th_data)); + remote_rpc_thread_params data; + data.domain = CDSP_DOMAIN_ID; + data.prio = -1; + data.stack_size = size; + AEEResult rc = remote_session_control(FASTRPC_THREAD_PARAMS, &data, sizeof(data)); if (rc != AEE_SUCCESS) { std::cout << "error setting remote stack size: " << std::hex << rc << '\n'; } From 9c1ff6876c67479053444a4ad6652d2de8838815 Mon Sep 17 00:00:00 2001 From: Michalis Papadimitriou Date: Fri, 17 Sep 2021 09:46:20 +0300 Subject: [PATCH 12/23] Test --- docs/newf.txt | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 docs/newf.txt diff --git a/docs/newf.txt b/docs/newf.txt new file mode 100644 index 000000000000..e69de29bb2d1 From 5aecd1717b48220b794eab0fb0147673d6596579 Mon Sep 17 00:00:00 2001 From: Michalis Papadimitriou Date: Fri, 17 Sep 2021 10:01:16 +0300 Subject: [PATCH 13/23] Minor checkstyle issue --- Jenkinsfile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 590066af6026..83ee7148e401 100755 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -106,9 +106,9 @@ def init_git() { def init_git_win() { checkout scm retry(5) { - timeout(time: 2, unit: 'MINUTES') { - bat 'git submodule update --init -f' - } + timeout(time: 2, unit: 'MINUTES') { + bat 'git submodule update --init -f' + } } } From 0b6c479056e9868cf69a3942559b68d505fced3a Mon Sep 17 00:00:00 2001 From: Michalis Papadimitriou Date: Fri, 17 Sep 2021 12:38:27 +0300 Subject: [PATCH 14/23] Test --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 83ee7148e401..e3856b4ab809 100755 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -363,7 +363,7 @@ stage('Unit Test') { } }, 'java: GPU': { - if (docs == 1) { + if (true) { node('GPU') { ws(per_exec_ws('tvm/ut-java')) { init_git() From e1e732a00b0020edfeb6be53741a45909e4a64c3 Mon Sep 17 00:00:00 2001 From: Michalis Papadimitriou Date: Fri, 17 Sep 2021 12:38:52 +0300 Subject: [PATCH 15/23] Test file --- docs/empty.txt | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 docs/empty.txt diff --git a/docs/empty.txt b/docs/empty.txt new file mode 100644 index 000000000000..e69de29bb2d1 From 8b068a88b98de5293e3dceb41bb02c2ca07a6d28 Mon Sep 17 00:00:00 2001 From: Michalis Papadimitriou Date: Fri, 17 Sep 2021 19:09:56 +0300 Subject: [PATCH 16/23] Revert changed in unit tests --- Jenkinsfile | 8 ++++++-- docs/empty.txt | 0 docs/newf.txt | 0 
tests/scripts/git_check_tree.sh | 2 +- 4 files changed, 7 insertions(+), 3 deletions(-) delete mode 100644 docs/empty.txt delete mode 100644 docs/newf.txt diff --git a/Jenkinsfile b/Jenkinsfile index e3856b4ab809..748d47c20f64 100755 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -310,6 +310,7 @@ stage('Build') { stage('Unit Test') { parallel 'python3: GPU': { + if (docs == 1) { node('TensorCore') { ws(per_exec_ws('tvm/ut-python-gpu')) { init_git() @@ -323,6 +324,9 @@ stage('Unit Test') { } } } + } else { + Utils.markStageSkippedForConditional('python3: i386') + } }, 'python3: i386': { if (docs == 1) { @@ -340,7 +344,7 @@ stage('Unit Test') { } } } else { - Utils.markStageSkippedForConditional('python3: i386') + Utils.markStageSkippedForConditional('python3: i386') } }, 'python3: arm': { @@ -363,7 +367,7 @@ stage('Unit Test') { } }, 'java: GPU': { - if (true) { + if ( docs == 1 ) { node('GPU') { ws(per_exec_ws('tvm/ut-java')) { init_git() diff --git a/docs/empty.txt b/docs/empty.txt deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/docs/newf.txt b/docs/newf.txt deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/tests/scripts/git_check_tree.sh b/tests/scripts/git_check_tree.sh index 121287b69419..79a8b57019b8 100755 --- a/tests/scripts/git_check_tree.sh +++ b/tests/scripts/git_check_tree.sh @@ -21,7 +21,7 @@ DOCS_DIR=0 OTHER_DIR=0 DOC_DIR="\docs" -changed_files=`git diff --no-commit-id --name-only -r HEAD~1` +changed_files=`git diff --no-commit-id --name-only -r origin/main` for file in $changed_files; do if grep -q "$DOC_DIR" <<< "$file"; then From fc397e9da75b75e764d6e173fff281ce4efa7574 Mon Sep 17 00:00:00 2001 From: Michalis Papadimitriou Date: Fri, 17 Sep 2021 20:58:18 +0300 Subject: [PATCH 17/23] Change script name --- tests/scripts/{git_check_tree.sh => git_change_docs.sh} | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) rename tests/scripts/{git_check_tree.sh => git_change_docs.sh} (94%) diff --git a/tests/scripts/git_check_tree.sh b/tests/scripts/git_change_docs.sh similarity index 94% rename from tests/scripts/git_check_tree.sh rename to tests/scripts/git_change_docs.sh index 79a8b57019b8..60c3d26cff54 100755 --- a/tests/scripts/git_check_tree.sh +++ b/tests/scripts/git_change_docs.sh @@ -17,9 +17,11 @@ # specific language governing permissions and limitations # under the License. 
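+# set -eux: exit on the first failing command (-e), treat unset variables as errors (-u),
+# and echo each command as it runs (-x).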
+set -eux + DOCS_DIR=0 OTHER_DIR=0 -DOC_DIR="\docs" +DOC_DIR="docs/" changed_files=`git diff --no-commit-id --name-only -r origin/main` @@ -32,7 +34,7 @@ for file in $changed_files; do fi done -if [[ ($OTHER_DIR -eq 1) ]]; then +if [ ${OTHER_DIR} -eq 1 ]; then exit 1 else exit 0 From 59196a2b0b21869659ed68a333e6c333f1a652f8 Mon Sep 17 00:00:00 2001 From: Michalis Papadimitriou Date: Fri, 17 Sep 2021 20:58:51 +0300 Subject: [PATCH 18/23] Test --- docs/kg | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 docs/kg diff --git a/docs/kg b/docs/kg new file mode 100644 index 000000000000..e69de29bb2d1 From 68be1dba64ec548aff82e640628bce76394c7be3 Mon Sep 17 00:00:00 2001 From: Michalis Papadimitriou Date: Sat, 18 Sep 2021 11:20:31 +0300 Subject: [PATCH 19/23] Revert format on groovy file --- Jenkinsfile | 66 ++++++++++++++++++++++++++--------------------------- 1 file changed, 33 insertions(+), 33 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 4c2de9503d92..6a46259f1ac1 100755 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -45,13 +45,13 @@ import org.jenkinsci.plugins.pipeline.modeldefinition.Utils // NOTE: these lines are scanned by docker/dev_common.sh. Please update the regex as needed. --> -ci_lint = 'tlcpack/ci-lint:v0.67' -ci_gpu = 'tlcpack/ci-gpu:v0.77' -ci_cpu = 'tlcpack/ci-cpu:v0.77' -ci_wasm = 'tlcpack/ci-wasm:v0.71' -ci_i386 = 'tlcpack/ci-i386:v0.73' -ci_qemu = 'tlcpack/ci-qemu:v0.08' -ci_arm = 'tlcpack/ci-arm:v0.06' +ci_lint = "tlcpack/ci-lint:v0.67" +ci_gpu = "tlcpack/ci-gpu:v0.77" +ci_cpu = "tlcpack/ci-cpu:v0.77" +ci_wasm = "tlcpack/ci-wasm:v0.71" +ci_i386 = "tlcpack/ci-i386:v0.73" +ci_qemu = "tlcpack/ci-qemu:v0.08" +ci_arm = "tlcpack/ci-arm:v0.06" // <--- End of regex-scanned config. // Parameters to allow overriding (in Jenkins UI), the images @@ -59,25 +59,25 @@ ci_arm = 'tlcpack/ci-arm:v0.06' // over default values above. properties([ parameters([ - string(name: 'ci_lint_param', defaultValue: ''), - string(name: 'ci_cpu_param', defaultValue: ''), - string(name: 'ci_gpu_param', defaultValue: ''), - string(name: 'ci_wasm_param', defaultValue: ''), - string(name: 'ci_i386_param', defaultValue: ''), - string(name: 'ci_qemu_param', defaultValue: ''), - string(name: 'ci_arm_param', defaultValue: '') + string(name: 'ci_lint_param', defaultValue: ""), + string(name: 'ci_cpu_param', defaultValue: ""), + string(name: 'ci_gpu_param', defaultValue: ""), + string(name: 'ci_wasm_param', defaultValue: ""), + string(name: 'ci_i386_param', defaultValue: ""), + string(name: 'ci_qemu_param', defaultValue: ""), + string(name: 'ci_arm_param', defaultValue: "") ]) ]) // tvm libraries -tvm_runtime = 'build/libtvm_runtime.so, build/config.cmake' -tvm_lib = 'build/libtvm.so, ' + tvm_runtime +tvm_runtime = "build/libtvm_runtime.so, build/config.cmake" +tvm_lib = "build/libtvm.so, " + tvm_runtime // LLVM upstream lib -tvm_multilib = 'build/libtvm.so, ' + - 'build/libvta_fsim.so, ' + +tvm_multilib = "build/libtvm.so, " + + "build/libvta_fsim.so, " + tvm_runtime -tvm_multilib_tsim = 'build/libvta_tsim.so, ' + +tvm_multilib_tsim = "build/libvta_tsim.so, " + tvm_multilib // command to start a docker container @@ -114,7 +114,7 @@ def init_git_win() { def cancel_previous_build() { // cancel previous build if it is not on main. 
- if (env.BRANCH_NAME != 'main') { + if (env.BRANCH_NAME != "main") { def buildNumber = env.BUILD_NUMBER as int // Milestone API allows us to cancel previous build // with the same milestone number @@ -154,7 +154,7 @@ stage('Sanity Check') { node('CPU') { ws(per_exec_ws('tvm/sanity')) { init_git() - docs = sh (returnStatus: true, script: ''' + is_docs_only_build = sh (returnStatus: true, script: ''' ./tests/scripts/git_check_tree.sh ''' ) @@ -219,7 +219,7 @@ stage('Build') { } }, 'BUILD: CPU': { - if (docs == 1) { + if (is_docs_only_build == 1) { node('CPU') { ws(per_exec_ws('tvm/build-cpu')) { init_git() @@ -244,7 +244,7 @@ stage('Build') { } }, 'BUILD: WASM': { - if (docs == 1) { + if (is_docs_only_build == 1) { node('CPU') { ws(per_exec_ws('tvm/build-wasm')) { init_git() @@ -261,7 +261,7 @@ stage('Build') { } }, 'BUILD : i386': { - if ( docs == 1) { + if ( is_docs_only_build == 1) { node('CPU') { ws(per_exec_ws('tvm/build-i386')) { init_git() @@ -275,7 +275,7 @@ stage('Build') { } }, 'BUILD : arm': { - if (docs == 1) { + if (is_docs_only_build == 1) { node('ARM') { ws(per_exec_ws('tvm/build-arm')) { init_git() @@ -289,7 +289,7 @@ stage('Build') { } }, 'BUILD: QEMU': { - if (docs == 1) { + if (is_docs_only_build == 1) { node('CPU') { ws(per_exec_ws('tvm/build-qemu')) { init_git() @@ -310,7 +310,7 @@ stage('Build') { stage('Unit Test') { parallel 'python3: GPU': { - if (docs == 1) { + if (is_docs_only_build == 1) { node('TensorCore') { ws(per_exec_ws('tvm/ut-python-gpu')) { init_git() @@ -329,7 +329,7 @@ stage('Unit Test') { } }, 'python3: i386': { - if (docs == 1) { + if (is_docs_only_build == 1) { node('CPU') { ws(per_exec_ws('tvm/ut-python-i386')) { init_git() @@ -348,7 +348,7 @@ stage('Unit Test') { } }, 'python3: arm': { - if (docs == 1) { + if (is_docs_only_build == 1) { node('ARM') { ws(per_exec_ws('tvm/ut-python-arm')) { init_git() @@ -367,7 +367,7 @@ stage('Unit Test') { } }, 'java: GPU': { - if ( docs == 1 ) { + if (is_docs_only_build == 1 ) { node('GPU') { ws(per_exec_ws('tvm/ut-java')) { init_git() @@ -386,7 +386,7 @@ stage('Unit Test') { stage('Integration Test') { parallel 'topi: GPU': { - if (docs == 1) { + if (is_docs_only_build == 1) { node('GPU') { ws(per_exec_ws('tvm/topi-python-gpu')) { init_git() @@ -403,7 +403,7 @@ stage('Integration Test') { } }, 'frontend: GPU': { - if (docs == 1) { + if (is_docs_only_build == 1) { node('GPU') { ws(per_exec_ws('tvm/frontend-python-gpu')) { init_git() @@ -420,7 +420,7 @@ stage('Integration Test') { } }, 'frontend: CPU': { - if (docs == 1) { + if (is_docs_only_build == 1) { node('CPU') { ws(per_exec_ws('tvm/frontend-python-cpu')) { init_git() From c9a377152b8c86215411ac95a6e13ea3c9a3162b Mon Sep 17 00:00:00 2001 From: Michalis Papadimitriou Date: Sat, 18 Sep 2021 11:26:51 +0300 Subject: [PATCH 20/23] Remove test file --- docs/kg | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 docs/kg diff --git a/docs/kg b/docs/kg deleted file mode 100644 index e69de29bb2d1..000000000000 From 759f2ed195ec928481914d0311f91d40a917937a Mon Sep 17 00:00:00 2001 From: Michalis Papadimitriou Date: Tue, 21 Sep 2021 11:22:33 +0300 Subject: [PATCH 21/23] Minor change in script --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 6a46259f1ac1..0accad6821d4 100755 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -155,7 +155,7 @@ stage('Sanity Check') { ws(per_exec_ws('tvm/sanity')) { init_git() is_docs_only_build = sh (returnStatus: true, script: ''' - ./tests/scripts/git_check_tree.sh + 
./tests/scripts/git_change_docs.sh ''' ) sh "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh" From bc60997d57fde23303cee94c50b9c8f657fb1461 Mon Sep 17 00:00:00 2001 From: Michalis Papadimitriou Date: Tue, 21 Sep 2021 11:28:32 +0300 Subject: [PATCH 22/23] Minor formating changes --- Jenkinsfile | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 0accad6821d4..0f9b963ca41e 100755 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -235,7 +235,7 @@ stage('Build') { // sh "${docker_run} ${ci_cpu} ./tests/scripts/task_golang.sh" // TODO(@jroesch): need to resolve CI issue will turn back on in follow up patch sh "${docker_run} ${ci_cpu} ./tests/scripts/task_rust.sh" - junit 'build/pytest-results/*.xml' + junit "build/pytest-results/*.xml" } } } @@ -298,7 +298,7 @@ stage('Build') { timeout(time: max_time, unit: 'MINUTES') { sh "${docker_run} ${ci_qemu} ./tests/scripts/task_ci_setup.sh" sh "${docker_run} ${ci_qemu} ./tests/scripts/task_python_microtvm.sh" - junit 'build/pytest-results/*.xml' + junit "build/pytest-results/*.xml" } } } @@ -320,7 +320,7 @@ stage('Unit Test') { sh "${docker_run} ${ci_gpu} ./tests/scripts/task_sphinx_precheck.sh" sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_unittest_gpuonly.sh" sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_integration_gpuonly.sh" - junit 'build/pytest-results/*.xml' + junit "build/pytest-results/*.xml" } } } @@ -339,7 +339,7 @@ stage('Unit Test') { sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_unittest.sh" sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_integration.sh" sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_vta_fsim.sh" - junit 'build/pytest-results/*.xml' + junit "build/pytest-results/*.xml" } } } @@ -357,7 +357,7 @@ stage('Unit Test') { sh "${docker_run} ${ci_arm} ./tests/scripts/task_ci_setup.sh" sh "${docker_run} ${ci_arm} ./tests/scripts/task_python_unittest.sh" sh "${docker_run} ${ci_arm} ./tests/scripts/task_python_arm_compute_library.sh" - junit 'build/pytest-results/*.xml' + junit "build/pytest-results/*.xml" // sh "${docker_run} ${ci_arm} ./tests/scripts/task_python_integration.sh" } } @@ -394,7 +394,7 @@ stage('Integration Test') { timeout(time: max_time, unit: 'MINUTES') { sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh" sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_topi.sh" - junit 'build/pytest-results/*.xml' + junit "build/pytest-results/*.xml" } } } @@ -411,7 +411,7 @@ stage('Integration Test') { timeout(time: max_time, unit: 'MINUTES') { sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh" sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh" - junit 'build/pytest-results/*.xml' + junit "build/pytest-results/*.xml" } } } @@ -428,7 +428,7 @@ stage('Integration Test') { timeout(time: max_time, unit: 'MINUTES') { sh "${docker_run} ${ci_cpu} ./tests/scripts/task_ci_setup.sh" sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_frontend_cpu.sh" - junit 'build/pytest-results/*.xml' + junit "build/pytest-results/*.xml" } } } From da32792d78dd113587d65563f5cda854731029c9 Mon Sep 17 00:00:00 2001 From: Michalis Papadimitriou Date: Tue, 28 Sep 2021 12:48:02 +0300 Subject: [PATCH 23/23] Revert logic in conditions for changed files --- Jenkinsfile | 24 ++++++++++++------------ tests/scripts/git_change_docs.sh | 4 ++-- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 0f9b963ca41e..f21e9c9dd22a 100755 --- a/Jenkinsfile +++ b/Jenkinsfile @@ 
-219,7 +219,7 @@ stage('Build') { } }, 'BUILD: CPU': { - if (is_docs_only_build == 1) { + if (is_docs_only_build != 1) { node('CPU') { ws(per_exec_ws('tvm/build-cpu')) { init_git() @@ -244,7 +244,7 @@ stage('Build') { } }, 'BUILD: WASM': { - if (is_docs_only_build == 1) { + if (is_docs_only_build != 1) { node('CPU') { ws(per_exec_ws('tvm/build-wasm')) { init_git() @@ -261,7 +261,7 @@ stage('Build') { } }, 'BUILD : i386': { - if ( is_docs_only_build == 1) { + if ( is_docs_only_build != 1) { node('CPU') { ws(per_exec_ws('tvm/build-i386')) { init_git() @@ -275,7 +275,7 @@ stage('Build') { } }, 'BUILD : arm': { - if (is_docs_only_build == 1) { + if (is_docs_only_build != 1) { node('ARM') { ws(per_exec_ws('tvm/build-arm')) { init_git() @@ -289,7 +289,7 @@ stage('Build') { } }, 'BUILD: QEMU': { - if (is_docs_only_build == 1) { + if (is_docs_only_build != 1) { node('CPU') { ws(per_exec_ws('tvm/build-qemu')) { init_git() @@ -310,7 +310,7 @@ stage('Build') { stage('Unit Test') { parallel 'python3: GPU': { - if (is_docs_only_build == 1) { + if (is_docs_only_build != 1) { node('TensorCore') { ws(per_exec_ws('tvm/ut-python-gpu')) { init_git() @@ -329,7 +329,7 @@ stage('Unit Test') { } }, 'python3: i386': { - if (is_docs_only_build == 1) { + if (is_docs_only_build != 1) { node('CPU') { ws(per_exec_ws('tvm/ut-python-i386')) { init_git() @@ -348,7 +348,7 @@ stage('Unit Test') { } }, 'python3: arm': { - if (is_docs_only_build == 1) { + if (is_docs_only_build != 1) { node('ARM') { ws(per_exec_ws('tvm/ut-python-arm')) { init_git() @@ -367,7 +367,7 @@ stage('Unit Test') { } }, 'java: GPU': { - if (is_docs_only_build == 1 ) { + if (is_docs_only_build != 1 ) { node('GPU') { ws(per_exec_ws('tvm/ut-java')) { init_git() @@ -386,7 +386,7 @@ stage('Unit Test') { stage('Integration Test') { parallel 'topi: GPU': { - if (is_docs_only_build == 1) { + if (is_docs_only_build != 1) { node('GPU') { ws(per_exec_ws('tvm/topi-python-gpu')) { init_git() @@ -403,7 +403,7 @@ stage('Integration Test') { } }, 'frontend: GPU': { - if (is_docs_only_build == 1) { + if (is_docs_only_build != 1) { node('GPU') { ws(per_exec_ws('tvm/frontend-python-gpu')) { init_git() @@ -420,7 +420,7 @@ stage('Integration Test') { } }, 'frontend: CPU': { - if (is_docs_only_build == 1) { + if (is_docs_only_build != 1) { node('CPU') { ws(per_exec_ws('tvm/frontend-python-cpu')) { init_git() diff --git a/tests/scripts/git_change_docs.sh b/tests/scripts/git_change_docs.sh index 60c3d26cff54..e623b5d92511 100755 --- a/tests/scripts/git_change_docs.sh +++ b/tests/scripts/git_change_docs.sh @@ -35,8 +35,8 @@ for file in $changed_files; do done if [ ${OTHER_DIR} -eq 1 ]; then - exit 1 -else exit 0 +else + exit 1 fi
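For reference, a condensed sketch (not one of the patches above; the stage and variable names
follow the series) of how the Jenkinsfile ends up consuming this script's exit status:
git_change_docs.sh exits 1 when nothing outside docs/ changed, so a build or test stage only
runs when the returned status is not 1.

    // Hypothetical condensed example, not verbatim from the Jenkinsfile.
    def is_docs_only_build = sh(returnStatus: true, script: './tests/scripts/git_change_docs.sh')
    if (is_docs_only_build != 1) {
      // run the full stage body (build, unit tests, integration tests, ...)
    } else {
      Utils.markStageSkippedForConditional('BUILD: CPU')
    }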